summaryrefslogtreecommitdiff
path: root/gcc
diff options
context:
space:
mode:
authorbstarynk <bstarynk@138bc75d-0d04-0410-961f-82ee72b054a4>2012-05-11 11:19:01 +0000
committerbstarynk <bstarynk@138bc75d-0d04-0410-961f-82ee72b054a4>2012-05-11 11:19:01 +0000
commit10c7be7ea6e54fc16864f455ffd8e57404b1a467 (patch)
treeee70b35cdded91a6e9f721e4c5cbaedad09528ad /gcc
parentd59974987297588b3031ef2f2ae409c5bd858bd0 (diff)
downloadgcc-10c7be7ea6e54fc16864f455ffd8e57404b1a467.tar.gz
2012-05-11 Basile Starynkevitch <basile@starynkevitch.net>
MELT branch merged with trunk rev 187397 using svnmerge gimple_seq are disappearing! [gcc/] 2012-05-11 Basile Starynkevitch <basile@starynkevitch.net> {{for merge with trunk svn 187397, since gimple_seq are disappearing in GCC 4.8}} * melt-runtime.h (melt_gt_ggc_mx_gimple_seq_d): New declaration (gt_ggc_mx_gimple_seq_d): Macro defined when GCC 4.8 only. * melt-runtime.c (melt_gt_ggc_mx_gimple_seq_d): New function, defined for GCC 4.8 only. * melt/warmelt-debug.melt (melt_debug_fun): Add cast in our warning diagnostic to avoid a warning. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/branches/melt-branch@187401 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'gcc')
-rw-r--r--gcc/ChangeLog1603
-rw-r--r--gcc/ChangeLog.MELT13
-rw-r--r--gcc/DATESTAMP2
-rw-r--r--gcc/Makefile.in48
-rw-r--r--gcc/ada/ChangeLog97
-rw-r--r--gcc/ada/gcc-interface/Makefile.in30
-rw-r--r--gcc/ada/gcc-interface/cuintp.c7
-rw-r--r--gcc/ada/gcc-interface/decl.c679
-rw-r--r--gcc/ada/gcc-interface/gigi.h62
-rw-r--r--gcc/ada/gcc-interface/misc.c4
-rw-r--r--gcc/ada/gcc-interface/trans.c327
-rw-r--r--gcc/ada/gcc-interface/utils.c776
-rw-r--r--gcc/ada/gcc-interface/utils2.c24
-rw-r--r--gcc/alias.c7
-rw-r--r--gcc/basic-block.h65
-rw-r--r--gcc/bb-reorder.c82
-rw-r--r--gcc/builtins.c215
-rw-r--r--gcc/c-decl.c6
-rw-r--r--gcc/c-family/ChangeLog32
-rw-r--r--gcc/c-family/c-common.c119
-rw-r--r--gcc/c-family/c.opt2
-rw-r--r--gcc/c-tree.h4
-rw-r--r--gcc/calls.c39
-rw-r--r--gcc/cfgcleanup.c14
-rw-r--r--gcc/cfgexpand.c17
-rw-r--r--gcc/cfglayout.c53
-rw-r--r--gcc/cfgrtl.c61
-rw-r--r--gcc/cgraph.c577
-rw-r--r--gcc/cgraph.h59
-rw-r--r--gcc/cgraphbuild.c9
-rw-r--r--gcc/cgraphclones.c876
-rw-r--r--gcc/cgraphunit.c379
-rw-r--r--gcc/collect2.c25
-rw-r--r--gcc/collect2.h1
-rw-r--r--gcc/combine-stack-adj.c3
-rw-r--r--gcc/combine.c13
-rw-r--r--gcc/common.opt2
-rw-r--r--gcc/common/config/s390/s390-common.c6
-rw-r--r--gcc/config.in8
-rw-r--r--gcc/config/alpha/alpha.c73
-rw-r--r--gcc/config/alpha/alpha.h2
-rw-r--r--gcc/config/alpha/alpha.md23
-rw-r--r--gcc/config/alpha/elf.h15
-rw-r--r--gcc/config/alpha/vms.h14
-rw-r--r--gcc/config/arm/arm-protos.h2
-rw-r--r--gcc/config/arm/arm.c208
-rw-r--r--gcc/config/arm/arm.h24
-rw-r--r--gcc/config/arm/arm.md15
-rw-r--r--gcc/config/avr/avr-devices.c45
-rw-r--r--gcc/config/avr/avr-protos.h1
-rw-r--r--gcc/config/avr/avr.c176
-rw-r--r--gcc/config/avr/avr.h12
-rw-r--r--gcc/config/avr/elf.h5
-rw-r--r--gcc/config/avr/gen-avr-mmcu-texi.c73
-rw-r--r--gcc/config/avr/t-avr20
-rw-r--r--gcc/config/bfin/bfin.c15
-rw-r--r--gcc/config/bfin/bfin.h3
-rw-r--r--gcc/config/c6x/c6x.c6
-rw-r--r--gcc/config/cr16/cr16.c2
-rw-r--r--gcc/config/cr16/cr16.h2
-rw-r--r--gcc/config/cris/cris.c54
-rw-r--r--gcc/config/cris/cris.md8
-rw-r--r--gcc/config/darwin.c4
-rw-r--r--gcc/config/epiphany/epiphany.c38
-rw-r--r--gcc/config/epiphany/epiphany.md6
-rw-r--r--gcc/config/fr30/fr30.c3
-rw-r--r--gcc/config/frv/frv.c24
-rw-r--r--gcc/config/h8300/h8300.c19
-rw-r--r--gcc/config/i386/cpuid.h1
-rw-r--r--gcc/config/i386/driver-i386.c8
-rw-r--r--gcc/config/i386/i386-c.c5
-rw-r--r--gcc/config/i386/i386.c367
-rw-r--r--gcc/config/i386/i386.h18
-rw-r--r--gcc/config/i386/i386.md337
-rw-r--r--gcc/config/i386/i386.opt4
-rw-r--r--gcc/config/i386/sse.md182
-rw-r--r--gcc/config/i386/sync.md49
-rw-r--r--gcc/config/ia64/hpux.h7
-rw-r--r--gcc/config/ia64/ia64-protos.h1
-rw-r--r--gcc/config/ia64/ia64.c298
-rw-r--r--gcc/config/ia64/ia64.md22
-rw-r--r--gcc/config/ia64/linux.h3
-rw-r--r--gcc/config/iq2000/iq2000.c19
-rw-r--r--gcc/config/lm32/lm32.c2
-rw-r--r--gcc/config/m32c/bitops.md4
-rw-r--r--gcc/config/m32c/m32c.c6
-rw-r--r--gcc/config/m32r/m32r.c11
-rw-r--r--gcc/config/m68k/linux.h3
-rw-r--r--gcc/config/m68k/m68k-protos.h1
-rw-r--r--gcc/config/m68k/m68k.c59
-rw-r--r--gcc/config/m68k/m68k.h7
-rw-r--r--gcc/config/m68k/m68k.md17
-rw-r--r--gcc/config/mcore/mcore.c9
-rw-r--r--gcc/config/mcore/mcore.md8
-rw-r--r--gcc/config/mep/mep.c20
-rw-r--r--gcc/config/microblaze/microblaze.c7
-rw-r--r--gcc/config/mips/mips.c88
-rw-r--r--gcc/config/mips/mips.md8
-rw-r--r--gcc/config/mips/t-vxworks2
-rw-r--r--gcc/config/mmix/mmix.c27
-rw-r--r--gcc/config/mn10300/mn10300.c11
-rw-r--r--gcc/config/moxie/moxie.c4
-rw-r--r--gcc/config/moxie/moxie.h4
-rw-r--r--gcc/config/pa/pa.c50
-rw-r--r--gcc/config/pa/pa.md8
-rw-r--r--gcc/config/pdp11/pdp11.c4
-rw-r--r--gcc/config/picochip/picochip.c2
-rw-r--r--gcc/config/rs6000/rs6000.c265
-rw-r--r--gcc/config/rs6000/rs6000.h2
-rw-r--r--gcc/config/rs6000/rs6000.md52
-rw-r--r--gcc/config/rx/rx.c8
-rw-r--r--gcc/config/s390/s390.c49
-rw-r--r--gcc/config/s390/s390.h5
-rw-r--r--gcc/config/s390/s390.md21
-rw-r--r--gcc/config/score/score.c6
-rw-r--r--gcc/config/sh/sh.c39
-rw-r--r--gcc/config/sh/sh.md60
-rw-r--r--gcc/config/sol2-protos.h3
-rw-r--r--gcc/config/sol2.c12
-rw-r--r--gcc/config/sol2.h6
-rw-r--r--gcc/config/sparc/linux.h2
-rw-r--r--gcc/config/sparc/linux64.h4
-rw-r--r--gcc/config/sparc/sparc.c60
-rw-r--r--gcc/config/sparc/sparc.h9
-rw-r--r--gcc/config/sparc/sparc.md2
-rw-r--r--gcc/config/spu/spu.c16
-rw-r--r--gcc/config/spu/spu.h2
-rw-r--r--gcc/config/stormy16/stormy16.c8
-rw-r--r--gcc/config/tilegx/tilegx.c9
-rw-r--r--gcc/config/tilegx/tilegx.h3
-rw-r--r--gcc/config/tilepro/tilepro.c9
-rw-r--r--gcc/config/tilepro/tilepro.h3
-rw-r--r--gcc/config/v850/v850.c12
-rw-r--r--gcc/config/v850/v850.md14
-rw-r--r--gcc/config/vax/elf.h6
-rw-r--r--gcc/config/vax/vax.c18
-rw-r--r--gcc/config/vax/vax.h5
-rw-r--r--gcc/config/xtensa/xtensa.c8
-rwxr-xr-xgcc/configure39
-rw-r--r--gcc/configure.ac14
-rw-r--r--gcc/coretypes.h7
-rw-r--r--gcc/cp/ChangeLog77
-rw-r--r--gcc/cp/call.c179
-rw-r--r--gcc/cp/cp-tree.h21
-rw-r--r--gcc/cp/cvt.c268
-rw-r--r--gcc/cp/cxx-pretty-print.c1
-rw-r--r--gcc/cp/decl.c8
-rw-r--r--gcc/cp/decl2.c4
-rw-r--r--gcc/cp/parser.c63
-rw-r--r--gcc/cp/pt.c38
-rw-r--r--gcc/cp/semantics.c25
-rw-r--r--gcc/cp/tree.c4
-rw-r--r--gcc/cp/typeck.c55
-rw-r--r--gcc/cp/typeck2.c6
-rw-r--r--gcc/cse.c8
-rw-r--r--gcc/cselib.c6
-rw-r--r--gcc/diagnostic.c8
-rw-r--r--gcc/diagnostic.h3
-rw-r--r--gcc/doc/avr-mmcu.texi74
-rw-r--r--gcc/doc/extend.texi129
-rw-r--r--gcc/doc/fragments.texi25
-rw-r--r--gcc/doc/gcov.texi6
-rw-r--r--gcc/doc/invoke.texi84
-rw-r--r--gcc/doc/md.texi19
-rw-r--r--gcc/doc/tm.texi5
-rw-r--r--gcc/doc/tm.texi.in5
-rw-r--r--gcc/dse.c7
-rw-r--r--gcc/dwarf2out.c411
-rw-r--r--gcc/dwarf2out.h1
-rw-r--r--gcc/emit-rtl.c9
-rw-r--r--gcc/except.c8
-rw-r--r--gcc/explow.c77
-rw-r--r--gcc/expmed.c2
-rw-r--r--gcc/expr.c255
-rw-r--r--gcc/flags.h3
-rw-r--r--gcc/fold-const.c85
-rw-r--r--gcc/fortran/ChangeLog87
-rw-r--r--gcc/fortran/decl.c5
-rw-r--r--gcc/fortran/expr.c3
-rw-r--r--gcc/fortran/gfortran.texi31
-rw-r--r--gcc/fortran/interface.c47
-rw-r--r--gcc/fortran/intrinsic.texi30
-rw-r--r--gcc/fortran/match.c195
-rw-r--r--gcc/fortran/resolve.c68
-rw-r--r--gcc/fortran/simplify.c27
-rw-r--r--gcc/fortran/symbol.c3
-rw-r--r--gcc/fortran/trans-array.c40
-rw-r--r--gcc/fortran/trans-common.c2
-rw-r--r--gcc/fortran/trans-decl.c4
-rw-r--r--gcc/fortran/trans-expr.c40
-rw-r--r--gcc/fortran/trans-intrinsic.c128
-rw-r--r--gcc/fortran/trans-stmt.c51
-rw-r--r--gcc/fortran/trans-types.c3
-rw-r--r--gcc/fortran/trans.h4
-rw-r--r--gcc/function.c8
-rw-r--r--gcc/function.h2
-rw-r--r--gcc/gcc-ar.c15
-rw-r--r--gcc/gcc.c17
-rw-r--r--gcc/gcov-io.h6
-rw-r--r--gcc/genattr.c7
-rw-r--r--gcc/genattrtab.c668
-rw-r--r--gcc/gengtype.c4
-rw-r--r--gcc/genmultilib45
-rw-r--r--gcc/gimple-fold.c52
-rw-r--r--gcc/gimple-iterator.c227
-rw-r--r--gcc/gimple-low.c37
-rw-r--r--gcc/gimple-pretty-print.c11
-rw-r--r--gcc/gimple.c130
-rw-r--r--gcc/gimple.h609
-rw-r--r--gcc/gimplify.c385
-rw-r--r--gcc/go/ChangeLog16
-rw-r--r--gcc/go/gccgo.texi28
-rw-r--r--gcc/go/go-c.h4
-rw-r--r--gcc/go/go-gcc.cc8
-rw-r--r--gcc/go/go-lang.c13
-rw-r--r--gcc/go/gofrontend/backend.h16
-rw-r--r--gcc/go/gofrontend/export.cc35
-rw-r--r--gcc/go/gofrontend/export.h6
-rw-r--r--gcc/go/gofrontend/expressions.cc14
-rw-r--r--gcc/go/gofrontend/go.cc35
-rw-r--r--gcc/go/gofrontend/gogo-tree.cc82
-rw-r--r--gcc/go/gofrontend/gogo.cc266
-rw-r--r--gcc/go/gofrontend/gogo.h141
-rw-r--r--gcc/go/gofrontend/import.cc104
-rw-r--r--gcc/go/gofrontend/lex.cc4
-rw-r--r--gcc/go/gofrontend/parse.cc93
-rw-r--r--gcc/go/gofrontend/parse.h9
-rw-r--r--gcc/go/gofrontend/types.cc98
-rw-r--r--gcc/go/gofrontend/unsafe.cc2
-rw-r--r--gcc/go/lang.opt4
-rw-r--r--gcc/graphite-sese-to-poly.c29
-rw-r--r--gcc/ifcvt.c3
-rw-r--r--gcc/ipa-cp.c1
-rw-r--r--gcc/ipa-inline.c61
-rw-r--r--gcc/ipa-prop.c5
-rw-r--r--gcc/ipa.c353
-rw-r--r--gcc/ira.c12
-rw-r--r--gcc/jump.c4
-rw-r--r--gcc/langhooks.c16
-rw-r--r--gcc/lower-subreg.c17
-rw-r--r--gcc/lto-streamer-in.c3
-rw-r--r--gcc/lto-wrapper.c18
-rw-r--r--gcc/lto/ChangeLog13
-rw-r--r--gcc/lto/lang.opt4
-rw-r--r--gcc/lto/lto.c4
-rw-r--r--gcc/melt-run.proto.h3
-rw-r--r--gcc/melt-runtime.c12
-rw-r--r--gcc/melt-runtime.h15
-rw-r--r--gcc/melt/warmelt-debug.melt7
-rw-r--r--gcc/omp-low.c145
-rw-r--r--gcc/optabs.c9
-rw-r--r--gcc/opts.c4
-rw-r--r--gcc/passes.c5
-rw-r--r--gcc/postreload.c3
-rw-r--r--gcc/predict.c378
-rw-r--r--gcc/predict.def10
-rw-r--r--gcc/print-tree.c3
-rw-r--r--gcc/read-md.c43
-rw-r--r--gcc/read-md.h2
-rw-r--r--gcc/recog.c17
-rw-r--r--gcc/reload.c14
-rw-r--r--gcc/reload.h3
-rw-r--r--gcc/reload1.c30
-rw-r--r--gcc/reorg.c32
-rw-r--r--gcc/rtl.def4
-rw-r--r--gcc/rtl.h4
-rw-r--r--gcc/rtlanal.c14
-rw-r--r--gcc/sched-deps.c6
-rw-r--r--gcc/sel-sched-dump.c2
-rw-r--r--gcc/simplify-rtx.c16
-rw-r--r--gcc/stmt.c3
-rw-r--r--gcc/stor-layout.c116
-rw-r--r--gcc/target.def6
-rw-r--r--gcc/testsuite/ChangeLog332
-rw-r--r--gcc/testsuite/c-c++-common/pr43772.c45
-rw-r--r--gcc/testsuite/c-c++-common/pr51712.c18
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/lambda/lambda-err2.C12
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/static_assert7.C20
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/variadic132.C27
-rw-r--r--gcc/testsuite/g++.dg/debug/dwarf2/localclass3.C11
-rw-r--r--gcc/testsuite/g++.dg/debug/dwarf2/namespace-2.C10
-rw-r--r--gcc/testsuite/g++.dg/debug/dwarf2/nested-3.C10
-rw-r--r--gcc/testsuite/g++.dg/debug/dwarf2/thunk1.C11
-rw-r--r--gcc/testsuite/g++.dg/lto/pr42987_0.C22
-rw-r--r--gcc/testsuite/g++.dg/lto/pr42987_1.C14
-rw-r--r--gcc/testsuite/g++.dg/lto/pr52605_0.C39
-rw-r--r--gcc/testsuite/g++.dg/opt/vrp3-aux.cc21
-rw-r--r--gcc/testsuite/g++.dg/opt/vrp3.C47
-rw-r--r--gcc/testsuite/g++.dg/opt/vrp3.h9
-rw-r--r--gcc/testsuite/g++.dg/other/final2.C27
-rw-r--r--gcc/testsuite/g++.dg/parse/error26.C2
-rw-r--r--gcc/testsuite/g++.dg/tree-ssa/pr19807.C2
-rw-r--r--gcc/testsuite/g++.dg/warn/Wzero-as-null-pointer-constant-6.C6
-rw-r--r--gcc/testsuite/gcc.c-torture/compile/pr53163.c34
-rw-r--r--gcc/testsuite/gcc.c-torture/compile/pr53187.c11
-rw-r--r--gcc/testsuite/gcc.c-torture/compile/pr53226.c13
-rw-r--r--gcc/testsuite/gcc.dg/builtin-stringop-chk-1.c2
-rw-r--r--gcc/testsuite/gcc.dg/debug/dwarf2/dups-types.c8
-rw-r--r--gcc/testsuite/gcc.dg/debug/dwarf2/dups-types.h10
-rw-r--r--gcc/testsuite/gcc.dg/fixed-point/composite-type.c2
-rw-r--r--gcc/testsuite/gcc.dg/fixed-point/operator-bitwise.c2
-rw-r--r--gcc/testsuite/gcc.dg/fold-bitand-4.c16
-rw-r--r--gcc/testsuite/gcc.dg/lto/pr53214_0.c8
-rw-r--r--gcc/testsuite/gcc.dg/pr53153.c61
-rw-r--r--gcc/testsuite/gcc.dg/pr53174.c67
-rw-r--r--gcc/testsuite/gcc.dg/predict-1.c27
-rw-r--r--gcc/testsuite/gcc.dg/predict-2.c27
-rw-r--r--gcc/testsuite/gcc.dg/predict-3.c25
-rw-r--r--gcc/testsuite/gcc.dg/predict-4.c19
-rw-r--r--gcc/testsuite/gcc.dg/predict-5.c25
-rw-r--r--gcc/testsuite/gcc.dg/predict-6.c25
-rw-r--r--gcc/testsuite/gcc.dg/torture/pr53144.c21
-rw-r--r--gcc/testsuite/gcc.dg/torture/pr53168.c28
-rw-r--r--gcc/testsuite/gcc.dg/torture/pr53272-1.c39
-rw-r--r--gcc/testsuite/gcc.dg/torture/pr53272-2.c39
-rw-r--r--gcc/testsuite/gcc.dg/tree-ssa/ssa-pre-27.c7
-rw-r--r--gcc/testsuite/gcc.dg/tree-ssa/ssa-pre-30.c27
-rw-r--r--gcc/testsuite/gcc.dg/vect/pr53185.c16
-rw-r--r--gcc/testsuite/gcc.dg/vect/vect-over-widen-1-big-array.c4
-rw-r--r--gcc/testsuite/gcc.dg/vect/vect-over-widen-1.c4
-rw-r--r--gcc/testsuite/gcc.dg/vect/vect-over-widen-4-big-array.c4
-rw-r--r--gcc/testsuite/gcc.dg/vect/vect-over-widen-4.c4
-rw-r--r--gcc/testsuite/gcc.target/arm/pr52633.c13
-rw-r--r--gcc/testsuite/gcc.target/arm/pr53187.c13
-rw-r--r--gcc/testsuite/gcc.target/i386/hle-add-acq-1.c9
-rw-r--r--gcc/testsuite/gcc.target/i386/hle-add-rel-1.c9
-rw-r--r--gcc/testsuite/gcc.target/i386/hle-and-acq-1.c9
-rw-r--r--gcc/testsuite/gcc.target/i386/hle-and-rel-1.c9
-rw-r--r--gcc/testsuite/gcc.target/i386/hle-cmpxchg-acq-1.c9
-rw-r--r--gcc/testsuite/gcc.target/i386/hle-cmpxchg-rel-1.c9
-rw-r--r--gcc/testsuite/gcc.target/i386/hle-or-acq-1.c9
-rw-r--r--gcc/testsuite/gcc.target/i386/hle-or-rel-1.c9
-rw-r--r--gcc/testsuite/gcc.target/i386/hle-sub-acq-1.c9
-rw-r--r--gcc/testsuite/gcc.target/i386/hle-sub-rel-1.c9
-rw-r--r--gcc/testsuite/gcc.target/i386/hle-xadd-acq-1.c9
-rw-r--r--gcc/testsuite/gcc.target/i386/hle-xadd-rel-1.c9
-rw-r--r--gcc/testsuite/gcc.target/i386/hle-xchg-acq-1.c9
-rw-r--r--gcc/testsuite/gcc.target/i386/hle-xchg-rel-1.c9
-rw-r--r--gcc/testsuite/gcc.target/i386/hle-xor-acq-1.c9
-rw-r--r--gcc/testsuite/gcc.target/i386/hle-xor-rel-1.c9
-rw-r--r--gcc/testsuite/gcc.target/i386/pr53249.c25
-rw-r--r--gcc/testsuite/gcc.target/i386/xop-imul32widen-vector.c2
-rw-r--r--gcc/testsuite/gcc.target/ia64/pr48496.c24
-rw-r--r--gcc/testsuite/gcc.target/ia64/pr52657.c44
-rw-r--r--gcc/testsuite/gcc.target/powerpc/pr53199.c50
-rw-r--r--gcc/testsuite/gcc.target/s390/20030123-1.c2
-rw-r--r--gcc/testsuite/gfortran.dg/class_array_13.f9026
-rw-r--r--gcc/testsuite/gfortran.dg/constructor_7.f9048
-rw-r--r--gcc/testsuite/gfortran.dg/constructor_8.f9022
-rw-r--r--gcc/testsuite/gfortran.dg/mod_large_1.f9016
-rw-r--r--gcc/testsuite/gfortran.dg/mod_sign0_1.f9054
-rw-r--r--gcc/testsuite/gfortran.dg/pointer_intent_7.f9045
-rw-r--r--gcc/testsuite/gfortran.dg/pr52621.f902
-rw-r--r--gcc/testsuite/gfortran.dg/public_private_module_5.f9040
-rw-r--r--gcc/testsuite/gfortran.dg/pure_formal_3.f9028
-rw-r--r--gcc/testsuite/gfortran.dg/select_type_26.f03110
-rw-r--r--gcc/testsuite/gfortran.dg/select_type_27.f03115
-rw-r--r--gcc/testsuite/gfortran.dg/select_type_28.f0336
-rw-r--r--gcc/testsuite/gfortran.dg/typebound_operator_15.f9078
-rw-r--r--gcc/testsuite/gfortran.dg/vect/fast-math-rnflow-trs2a2.f90 (renamed from gcc/testsuite/gfortran.dg/vect/rnflow-trs2a2.f90)0
-rw-r--r--gcc/testsuite/gnat.dg/discr36.adb19
-rw-r--r--gcc/testsuite/gnat.dg/discr36.ads12
-rw-r--r--gcc/testsuite/gnat.dg/discr36_pkg.adb10
-rw-r--r--gcc/testsuite/gnat.dg/discr36_pkg.ads7
-rw-r--r--gcc/testsuite/gnat.dg/lto11.adb20
-rw-r--r--gcc/testsuite/gnat.dg/lto11.ads9
-rw-r--r--gcc/testsuite/gnat.dg/specs/renaming1.ads (renamed from gcc/testsuite/gnat.dg/specs/renamings.ads)6
-rw-r--r--gcc/testsuite/gnat.dg/specs/renaming2.ads11
-rw-r--r--gcc/testsuite/gnat.dg/specs/renaming2_pkg1.ads17
-rw-r--r--gcc/testsuite/gnat.dg/specs/renaming2_pkg2.ads14
-rw-r--r--gcc/testsuite/gnat.dg/specs/renaming2_pkg3.ads25
-rw-r--r--gcc/testsuite/gnat.dg/specs/renaming2_pkg4.adb12
-rw-r--r--gcc/testsuite/gnat.dg/specs/renaming2_pkg4.ads25
-rw-r--r--gcc/testsuite/gnat.dg/warn7.adb18
-rw-r--r--gcc/tlink.c6
-rw-r--r--gcc/toplev.c8
-rw-r--r--gcc/trans-mem.c12
-rw-r--r--gcc/tree-cfg.c94
-rw-r--r--gcc/tree-complex.c11
-rw-r--r--gcc/tree-dfa.c21
-rw-r--r--gcc/tree-diagnostic.c18
-rw-r--r--gcc/tree-eh.c88
-rw-r--r--gcc/tree-emutls.c2
-rw-r--r--gcc/tree-flow-inline.h13
-rw-r--r--gcc/tree-flow.h20
-rw-r--r--gcc/tree-inline.c13
-rw-r--r--gcc/tree-mudflap.c10
-rw-r--r--gcc/tree-nested.c65
-rw-r--r--gcc/tree-phinodes.c47
-rw-r--r--gcc/tree-predcom.c2
-rw-r--r--gcc/tree-sra.c20
-rw-r--r--gcc/tree-ssa-address.c18
-rw-r--r--gcc/tree-ssa-ccp.c35
-rw-r--r--gcc/tree-ssa-dce.c4
-rw-r--r--gcc/tree-ssa-dse.c18
-rw-r--r--gcc/tree-ssa-forwprop.c128
-rw-r--r--gcc/tree-ssa-loop-im.c2
-rw-r--r--gcc/tree-ssa-loop-ivopts.c33
-rw-r--r--gcc/tree-ssa-loop-prefetch.c14
-rw-r--r--gcc/tree-ssa-phiopt.c13
-rw-r--r--gcc/tree-ssa-pre.c267
-rw-r--r--gcc/tree-ssa-sccvn.c30
-rw-r--r--gcc/tree-ssanames.c59
-rw-r--r--gcc/tree-switch-conversion.c35
-rw-r--r--gcc/tree-vect-data-refs.c19
-rw-r--r--gcc/tree-vect-loop-manip.c95
-rw-r--r--gcc/tree-vect-loop.c41
-rw-r--r--gcc/tree-vect-patterns.c284
-rw-r--r--gcc/tree-vect-stmts.c36
-rw-r--r--gcc/tree-vectorizer.h6
-rw-r--r--gcc/tree-vrp.c25
-rw-r--r--gcc/tree.c51
-rw-r--r--gcc/tree.h18
-rw-r--r--gcc/var-tracking.c19
-rw-r--r--gcc/varasm.c8
-rw-r--r--gcc/varpool.c40
416 files changed, 14395 insertions, 6851 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 972f3cc0b5e..e6f59c48457 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,5 +1,1593 @@
+2012-05-11 Jan Hubicka <jh@suse.cz>
+
+ PR bootstrap/53300
+	* varpool.c (varpool_assemble_decl): Also output constant pool entries
+ that output_constant_pool missed.
+
+2012-05-11 Mingjie Xing <mingjie.xing@gmail.com>
+
+ * config/mips/t-vxworks: Change MUTLILIB_EXTRA_OPTS to
+ MULTILIB_EXTRA_OPTS.
+
+2012-05-11 Uros Bizjak <ubizjak@gmail.com>
+
+ PR target/53291
+ * config/i386/i386.md (xtest): Use NE condition in ix86_expand_setcc.
+
+2012-05-11 Uros Bizjak <ubizjak@gmail.com>
+
+ * config/i386/i386.md (*movti_internal_rex64): Avoid MOVAPS size
+ optimization for TARGET_AVX.
+ (*movti_internal_sse): Ditto.
+ (*movdi_internal_rex64): Handle TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL.
+ (*movdi_internal): Ditto.
+ (*movsi_internal): Ditto.
+ (*movtf_internal): Avoid MOVAPS size optimization for TARGET_AVX.
+ (*movdf_internal_rex64): Ditto.
+ (*movfd_internal): Ditto.
+ (*movsf_internal): Ditto.
+ * config/i386/sse.md (mov<mode>): Handle TARGET_SSE_LOAD0_BY_PXOR.
+
+2012-05-10 Eric Botcazou <ebotcazou@adacore.com>
+
+ * dwarf2out.c (add_byte_size_attribute) <RECORD_TYPE>: Handle variable
+ reference as size attribute.
+
+2012-05-10 Eric Botcazou <ebotcazou@adacore.com>
+ Tristan Gingold <gingold@adacore.com>
+
+ * doc/md.texi (Standard Names): Document probe_stack_address.
+ * explow.c (emit_stack_probe): Handle probe_stack_address.
+ * config/ia64/ia64.md (UNSPECV_PROBE_STACK_ADDRESS): New constant.
+ (UNSPECV_PROBE_STACK_RANGE): Likewise.
+ (probe_stack_address): New insn.
+ (probe_stack_range): Likewise.
+ * config/ia64/ia64.c: Include common/common-target.h.
+ (ia64_compute_frame_size): Mark r2 and r3 as used if static stack
+ checking is enabled.
+ (ia64_emit_probe_stack_range): New function.
+ (output_probe_stack_range): Likewise.
+ (ia64_expand_prologue): Invoke ia64_emit_probe_stack_range if static
+ builtin stack checking is enabled.
+ (rtx_needs_barrier) <UNSPEC_VOLATILE>: Handle UNSPECV_PROBE_STACK_RANGE
+ and UNSPECV_PROBE_STACK_ADDRESS.
+ (unknown_for_bundling_p): New predicate.
+ (group_barrier_needed): Use important_for_bundling_p.
+ (ia64_dfa_new_cycle): Use unknown_for_bundling_p.
+ (issue_nops_and_insn): Likewise.
+ (bundling): Likewise.
+ (final_emit_insn_group_barriers): Likewise.
+ * config/ia64/ia64-protos.h (output_probe_stack_range): Declare.
+ * config/ia64/hpux.h (STACK_CHECK_STATIC_BUILTIN): Define.
+ (STACK_CHECK_PROTECT): Likewise.
+ * config/ia64/linux.h (STACK_CHECK_STATIC_BUILTIN): Likewise.
+
+2012-05-10 Jan Hubicka <jh@suse.cz>
+
+ * ipa-inline.c (update_all_callee_keys): Remove.
+ (inline_small_functions): Simplify priority updating.
+
+2012-05-10 Jan Hubicka <jh@suse.cz>
+
+ * ipa.c (symtab_remove_unreachable_nodes): Fix marking of clones.
+
+2012-05-10 Jan Hubicka <jh@suse.cz>
+
+ * cgraph.h (cgraph_remove_unreachable_nodes): Rename to ...
+ (symtab_remove_unreachable_nodes): ... this one.
+ * ipa-cp.c (ipcp_driver): Do not remove unreachable nodes.
+ * cgraphunit.c (ipa_passes): Update.
+ * cgraphclones.c (cgraph_materialize_all_clones): Update.
+ * cgraph.c (cgraph_release_function_body): Only turn initial
+ into error mark when initial was previously set.
+ * ipa-inline.c (ipa_inline): Update.
+ * ipa.c: Include ipa-inline.h
+ (enqueue_cgraph_node, enqueue_varpool_node): Remove.
+ (enqueue_node): New function.
+ (process_references): Update.
+ (symtab_remove_unreachable_nodes): Cleanup.
+ * passes.c (execute_todo, execute_one_pass): Update.
+
+2012-05-10 Vladimir Makarov <vmakarov@redhat.com>
+
+ PR rtl-optimization/53125
+ * ira.c (ira): Call find_moveable_pseudos and
+ move_unallocated_pseudos if only ira_conflicts_p is true.
+
+2012-05-10 Uros Bizjak <ubizjak@gmail.com>
+
+ * config/i386/i386.md (*movoi_internal_avx): Handle
+ TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL and TARGET_SSE_TYPELESS_STORES.
+ (*movti_internal_rex64): Handle TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL.
+ (*movti_internal_sse): Ditto.
+ (*movtf_internal): Ditto.
+ * config/i386/sse.md (ssePSmode): New mode attribute.
+ (*move<mode>_internal): Use ssePSmode.
+ (*<sse>_movu<ssemodesuffix><avxsizesuffix>): Ditto.
+ (*<sse2>_movdqu<avxsizesuffix>): Ditto.
+ * config/i386/i386.c (standard_sse_constant_opcode): Do not handle
+ TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL here.
+
+2012-05-10 Eric Botcazou <ebotcazou@adacore.com>
+
+ * gimplify.c (gimplify_decl_expr): For a TYPE_DECL, also gimplify the
+ DECL_ORIGINAL_TYPE if it is present.
+
+2012-05-10 Nick Clifton <nickc@redhat.com>
+
+ PR target/53120
+ * config/m32c/bitops.md (bset_qi): Change operand 2 from having
+ a "0" constraint to being a (match_dup 0).
+
+2012-05-10 Richard Guenther <rguenther@suse.de>
+
+ * stor-layout.c (byte_from_pos): Amend comment.
+
+2012-05-10 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
+
+ * doc/extend.texi (X86 Built-in Functions, __builtin_cpu_init):
+ Document requirement to call in constructors.
+
+ * config/i386/i386.c: Update comments for i386-cpuinfo.c name change.
+
+2012-05-10 Richard Guenther <rguenther@suse.de>
+
+ * tree.h (TYPE_IS_SIZETYPE): Remove.
+ * fold-const.c (int_const_binop_1): Remove TYPE_IS_SIZETYPE use.
+ (extract_muldiv_1): Likewise.
+ * gimple.c (gtc_visit): Likewise.
+ (gimple_types_compatible_p): Likewise.
+ (iterative_hash_canonical_type): Likewise.
+ (gimple_canonical_types_compatible_p): Likewise.
+ * gimplify.c (gimplify_one_sizepos): Likewise.
+ * print-tree.c (print_node): Likewise.
+ * stor-layout.c (initialize_sizetypes): Do not set TYPE_IS_SIZETYPE.
+
+2012-05-09 Uros Bizjak <ubizjak@gmail.com>
+
+ PR target/52908
+ * config/i386/sse.md (vec_widen_smult_hi_v4si): Expand using
+ xop_pmacsdqh insn pattern instead of xop_mulv2div2di3_high.
+ (vec_widen_smult_lo_v4si): Expand using xop_pmacsdql insn pattern
+ instead of xop_mulv2div2di3_low.
+ (xop_p<macs>dql): Fix vec_select selector.
+ (xop_p<macs>dqh): Ditto.
+ (xop_mulv2div2di3_low): Remove insn_and_split pattern.
+ (xop_mulv2div2di3_high): Ditto.
+
+2012-05-09 Manuel López-Ibáñez <manu@gcc.gnu.org>
+
+ PR c++/53289
+ * diagnostic.h (diagnostic_context): Add last_location.
+ * diagnostic.c (diagnostic_initialize): Initialize it.
+ (diagnostic_show_locus): Use it.
+
+2012-05-09 Manuel López-Ibáñez <manu@gcc.gnu.org>
+
+ * doc/extend.texi (Function Attributes): Point xref to section
+ about Pragmas.
+
+2012-05-09 Uros Bizjak <ubizjak@gmail.com>
+
+ * config/i386/i386.c (*movdf_internal_rex64): Remove
+ TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL handling from asm output code.
+ Calculate "mode" attribute according to
+ TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL flag.
+ (*movdf_internal): Ditto.
+
+2012-05-09 Uros Bizjak <ubizjak@gmail.com>
+
+ PR target/44141
+ * config/i386/i386.c (ix86_expand_vector_move_misalign): Do not handle
+ 128 bit vectors specially for TARGET_AVX. Emit sse2_movupd and
+ sse_movupd RTXes for TARGET_AVX, TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL
+ or when optimizing for size.
+ * config/i386/sse.md (*mov<mode>_internal): Remove
+ TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL handling from asm output code.
+ Calculate "mode" attribute according to optimize_function_for_size_p
+ and TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL flag.
+ (*<sse>_movu<ssemodesuffix><avxsizesuffix>): Choose asm template
+ depending on the mode of the instruction. Calculate "mode" attribute
+ according to optimize_function_for_size_p, TARGET_SSE_TYPELESS_STORES
+ and TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL flags.
+ (*<sse2>_movdqu<avxsizesuffix>): Ditto.
+
+2012-05-09 Georg-Johann Lay <avr@gjlay.de>
+
+ PR target/53256
+ * config/avr/elf.h (ASM_DECLARE_FUNCTION_NAME): Remove.
+ * config/avr/avr-protos.h (avr_asm_declare_function_name): Remove.
+ * config/avr/avr.h (struct machine_function): Add attributes_checked_p.
+ * config/avr/avr.c (avr_asm_declare_function_name): Remove.
+ (expand_prologue): Move initialization of cfun->machine->is_naked,
+ is_interrupt, is_signal, is_OS_task, is_OS_main from here to...
+ (avr_set_current_function): ...this new static function.
+ (TARGET_SET_CURRENT_FUNCTION): New define.
+ (avr_function_ok_for_sibcall): Use cfun->machine->is_* instead of
+ checking attributes of current_function_decl.
+ (avr_regs_to_save): Ditto.
+ (signal_function_p): Rename to avr_signal_function_p.
+ (interrupt_function_p): Rename to avr_interrupt_function_p.
+
+ * doc/extend.texi (Function Attributes): Better explanation of
+ 'interrupt' and 'signal' for AVR. Move 'ifunc' down to establish
+ alphabetical order.
+
+2012-05-09 Michael Matz <matz@suse.de>
+
+ PR tree-optimization/53185
+ * tree-vect-data-refs.c (vect_enhance_data_refs_alignment): Disable
+ peeling when we see strided loads.
+
+2012-05-09 Matthias Klose <doko@ubuntu.com>
+
+ * gcc-ar.c (main): Don't check for execute bits for the plugin.
+
+2012-05-09 Ramana Radhakrishnan <ramana.radhakrishnan@linaro.org>
+
+ * tree-ssa-loop-ivopts.c (add_autoinc_candidates, get_address_cost):
+ Replace use of HAVE_{POST/PRE}_{INCREMENT/DECREMENT} with
+ USE_{LOAD/STORE}_{PRE/POST}_{INCREMENT/DECREMENT} appropriately.
+ * config/arm/arm.h (ARM_AUTOINC_VALID_FOR_MODE_P): New.
+ (USE_LOAD_POST_INCREMENT): Define.
+ (USE_LOAD_PRE_INCREMENT): Define.
+ (USE_LOAD_POST_DECREMENT): Define.
+ (USE_LOAD_PRE_DECREMENT): Define.
+ (USE_STORE_PRE_DECREMENT): Define.
+ (USE_STORE_PRE_INCREMENT): Define.
+ (USE_STORE_POST_DECREMENT): Define.
+ (USE_STORE_POST_INCREMENT): Define.
+ (arm_auto_incmodes): Add enumeration.
+ * config/arm/arm-protos.h (arm_autoinc_modes_ok_p): Declare.
+ * config/arm/arm.c (arm_autoinc_modes_ok_p): Define.
+
+2012-05-09 Jakub Jelinek <jakub@redhat.com>
+
+ PR tree-optimization/53226
+ * tree-ssa-forwprop.c (ssa_forward_propagate_and_combine): Remove
+ prev and prev_initialized vars, gimple_set_plf (stmt, GF_PLF_1, false)
+ before processing it and gimple_set_plf (stmt, GF_PLF_1, true) if it
+ doesn't need to be revisited, look for earliest stmt with
+ !gimple_plf (stmt, GF_PLF_1) if something changed.
+
+2012-05-09 Terry Guo <terry.guo@arm.com>
+
+ * genmultilib: Update copyright dates.
+ * doc/fragments.texi: Ditto.
+
+2012-05-09 Terry Guo <terry.guo@arm.com>
+
+ * Makefile.in (s-mlib): Add new argument MULTILIB_REQUIRED.
+ * genmultilib (MULTILIB_REQUIRED): New.
+ * doc/fragments.texi: Document the MULTILIB_REQUIRED.
+
+2012-05-09 Richard Guenther <rguenther@suse.de>
+
+ * tree-vectorizer.h (vect_loop_versioning): Adjust prototype.
+ (vect_do_peeling_for_loop_bound): Likewise.
+ (vect_do_peeling_for_alignment): Likewise.
+ * tree-vect-loop-manip.c (conservative_cost_threshold): Remove.
+ (vect_do_peeling_for_loop_bound): Get check_profitability and
+ threshold as parameters.
+ (vect_do_peeling_for_alignment): Likewise.
+ (vect_loop_versioning): Likewise.
+ * tree-vect-loop.c (vect_transform_loop): Compute check_profitability
+ and threshold here. Control where to put the check here.
+
+2012-05-09 Richard Sandiford <rdsandiford@googlemail.com>
+
+ PR middle-end/53249
+ * dwarf2out.h (get_address_mode): Move declaration to...
+ * rtl.h: ...here.
+ * dwarf2out.c (get_address_mode): Move definition to...
+ * rtlanal.c: ...here.
+ * var-tracking.c (get_address_mode): Delete.
+ * combine.c (find_split_point): Use get_address_mode instead of
+ targetm.addr_space.address_mode.
+ * cselib.c (cselib_record_sets): Likewise.
+ * dse.c (canon_address, record_store): Likewise.
+ * emit-rtl.c (adjust_address_1, offset_address): Likewise.
+ * expr.c (move_by_pieces, emit_block_move_via_loop, store_by_pieces)
+ (store_by_pieces_1, expand_assignment, store_expr, store_constructor)
+ (expand_expr_real_1): Likewise.
+ * ifcvt.c (noce_try_cmove_arith): Likewise.
+ * optabs.c (maybe_legitimize_operand_same_code): Likewise.
+ * reload.c (find_reloads): Likewise.
+ * sched-deps.c (sched_analyze_1, sched_analyze_2): Likewise.
+ * sel-sched-dump.c (debug_mem_addr_value): Likewise.
+
+2012-05-09 Maciej W. Rozycki <macro@codesourcery.com>
+
+ * config/mips/mips.c (mips16_gp_pseudo_reg): Remove line
+ information from the instruction produced.
+
+2012-05-09 Richard Guenther <rguenther@suse.de>
+
+ * stor-layout.c (bit_from_pos): Document.
+ (byte_from_pos): Likewise. Optimize.
+ (pos_from_bit): Likewise.
+ (normalize_offset): Use pos_from_bit instead of replicating it.
+
+2012-05-09 Alan Modra <amodra@gmail.com>
+
+ PR target/53271
+ * config/rs6000/rs6000.c (gen_frame_set): New function.
+ (gen_frame_load, gen_frame_store): New functions.
+ (rs6000_savres_rtx): Use the above.
+ (rs6000_emit_epilogue, rs6000_emit_prologue): Here too.
+ Correct mode used for CR2 in save/restore_world patterns.
+ Don't emit instructions for eh_return frame unwind reg info.
+
+2012-05-08 Jan Hubicka <jh@suse.cz>
+
+ * cgraphbuild.c (build_cgraph_edges): Do not finalize vars
+ with VALUE_EXPR.
+ * cgraph.h (varpool_can_remove_if_no_refs): Vars with VALUE_EXPR
+ are removable.
+ * toplev.c (wrapup_global_declaration_2): Vars with VALUE_EXPR
+ need to wrapup.
+ (compile_file): Do not output variables.
+ * cgraphbuild.c (varpool_finalize_decl): When var is finalized late,
+ output it.
+ * langhooks.c: Include timevar.h
+ (write_global_declarations): Finalize compilation unit after wrapup;
+ set timevars correctly.
+ * passes.c (rest_of_decl_compilation): Decls with VALUE_EXPR needs
+ not to be added to varpool.
+ * varpool.c (varpool_assemble_decl): Sanity check that we are called
+ only on cases where it makes sense; skip constant pool and value expr
+ vars.
+
+2012-05-08 David S. Miller <davem@davemloft.net>
+
+ * config/sparc/linux.h (LINK_SPEC): Don't pass "-Y" option.
+ * config/sparc/linux64.h (LINK_ARCH32_SPEC): Likewise.
+ * config/sparc/linux64.h (LINK_ARCH64_SPEC): Likewise.
+
+2012-05-08 Richard Sandiford <rdsandiford@googlemail.com>
+
+ PR rtl-optimization/53278
+ * lower-subreg.c (decompose_multiword_subregs): Remove left-over
+ speed_p code from earlier patch.
+
+2012-05-08 Oleg Endo <olegendo@gcc.gnu.org>
+
+ PR target/51244
+ * config/sh/sh.md (*branch_true, *branch_false): New insns.
+
+2012-05-08 Teresa Johnson <tejohnson@google.com>
+
+ * gcov-io.h (__gcov_reset, __gcov_dump): Declare.
+ * doc/gcov.texi: Add note on using __gcov_reset and __gcov_dump.
+
+2012-05-08 Jan Hubicka <jh@suse.cz>
+
+ * cgraph.c (cgraph_call_edge_duplication_hooks): Export.
+ (cgraph_create_node_1): Rename to ...
+ (cgraph_create_empty_node): ... this one; export.
+ (cgraph_create_node): Update.
+ (cgraph_set_call_stmt_including_clones): Move to cgraphclones.c
+ (cgraph_create_edge_including_clones): Likewise.
+ (cgraph_find_replacement_node): Likewise.
+ (cgraph_clone_edge): Likewise.
+ (cgraph_clone_node): Likewise.
+ (clone_function_name): Likewise.
+ (cgraph_create_virtual_clone): Likewise.
+ (cgraph_remove_node_and_inline_clones): Likewise.
+ (cgraph_redirect_edge_call_stmt_to_callee): Move here from cgraphunit.c
+ * cgraph.h: Reorder declarations so they match file of origin.
+ (cgraph_create_empty_node): Declare.
+ * cgraphunit.c (update_call_expr): Move to cgraphclones.c
+ (cgraph_copy_node_for_versioning): Likewise.
+ (cgraph_function_versioning): Likewise.
+ (cgraph_materialize_clone): Likewise.
+ (cgraph_redirect_edge_call_stmt_to_callee): Likewise.
+ (cgraph_materialize_all_clones): Likewise.
+ * cgraphclones.c: New file.
+ * Makefile.in: Update for cgraphclones.
+
+2012-05-08 Uros Bizjak <ubizjak@gmail.com>
+
+ PR target/53176
+ * config/i386/i386.c (ix86_set_reg_reg_cost): New function.
+ (ix86_rtx_costs): Handle SET.
+
+2012-05-08 Michael Matz <matz@suse.de>
+
+ * basic-block.h (struct rtl_bb_info): Remove visited member and
+ move head_ member to ...
+ (struct basic_block_def.basic_block_il_dependent): ... the new
+ member x, replacing but containing old member rtl.
+ (enum bb_flags): New BB_VISITED flag.
+ (BB_HEADER, BB_FOOTER): New macros.
+
+ * jump.c (mark_all_labels): Adjust.
+ * cfgcleanup.c (try_optimize_cfg): Adjust.
+ * cfglayout.c (record_effective_endpoints): Adjust.
+ (relink_block_chain): Ditto (and don't fiddle with visited).
+ (fixup_reorder_chain): Adjust.
+ (fixup_fallthru_exit_predecessor): Ditto.
+ (cfg_layout_duplicate_bb): Ditto.
+ * combine.c (update_cfg_for_uncondjump): Adjust.
+ * bb-reorder.c (struct bbro_basic_block_data_def): Add visited member.
+ (bb_visited_trace): New accessor.
+ (mark_bb_visited): Move in front.
+ (rotate_loop): Use bb_visited_trace.
+ (find_traces_1_round): Ditto.
+ (emit_barrier_after): Ditto.
+ (copy_bb): Ditto, and initialize visited on resize.
+	(reorder_basic_blocks): Initialize visited member.
+ (duplicate_computed_gotos): Clear bb flags at start, use
+ BB_VISITED flags.
+
+ * cfgrtl.c (try_redirect_by_replacing_jump): Adjust.
+ (rtl_verify_flow_info_1): Ditto.
+ (cfg_layout_split_block): Ditto.
+ (cfg_layout_delete_block): Ditto.
+ (cfg_layout_merge_blocks): Ditto.
+ (init_rtl_bb_info): Adjust and initialize il.x.head_ member.
+
+2012-05-08 Hans-Peter Nilsson <hp@axis.com>
+
+ PR target/53272
+ * config/cris/cris.c (cris_normal_notice_update_cc): For TARGET_V32,
+ when a constant source operand matches an "I" constraint, the "no
+ CC0 change" applies to a register-destination only, not a
+ strict_low_part-destination.
+
+2012-05-08 Richard Guenther <rguenther@suse.de>
+
+ * fold-const.c (fold_binary_loc): Fold (X * CST1) & CST2
+ to zero or to (X * CST1) & CST2' when CST1 has trailing zeros.
+
+2012-05-08 Georg-Johann Lay <avr@gjlay.de>
+
+ * Makefile.in (TEXI_GCC_FILES): Add avr-mmcu.texi.
+
+ * doc/avr-mmcu.texi: New auto-generated file.
+ * doc/invoke.texi (AVR Options): Include avr-mmcu.texi in order
+ to document all valid -mmcu= arguments.
+
+ * config/avr/avr.h (arch_info_s): New struct definition.
+ * config/avr/avr-devices.c (avr_texinfo): New variable.
+ * config/avr/gen-avr-mmcu-texi.c: New file.
+ * config/avr/t-avr: New rules and dependencies to build avr-mmcu.texi.
+
+2012-05-08 Dehao Chen <dehao@google.com>
+
+ * predict.c (find_qualified_ssa_name): New.
+ (find_ssa_name_in_expr): New.
+ (find_ssa_name_in_assign_stmt): New.
+ (is_comparison_with_loop_invariant_p): New.
+ (is_bound_expr_similar): New.
+ (predict_iv_comparison): New.
+ (predict_loops): Add heuristic for loop-nested branches that compare an
+ induction variable to a loop bound variable.
+ * predict.def (PRED_LOOP_IV_COMPARE): New macro.
+
+2012-05-08 Uros Bizjak <ubizjak@gmail.com>
+
+ * config/i386/i386.c (has_dispatch): Use TARGET_BDVER1 and
+ TARGET_BDVER2 defines where appropriate.
+
+2012-05-07 Eric Botcazou <ebotcazou@adacore.com>
+
+ * configure.ac (PLUGIN_LD): Rename into...
+ (PLUGIN_LD_SUFFIX): ...this and strip the target_alias triplet.
+ * config.in: Regenerate.
+ * configure: Likewise.
+ * collect2.c (main): Set plugin_ld_suffix to PLUGIN_LD_SUFFIX.
+
+2012-05-07 Eric Botcazou <ebotcazou@adacore.com>
+
+ * tree-dfa.c (get_ref_base_and_extent) <ARRAY_REF>: Do the offset
+ computation using the precision of the index type.
+ * gimple-fold.c (fold_const_aggregate_ref_1) <ARRAY_REF>: Likewise.
+ (fold_array_ctor_reference): Do index computations in the index type.
+
+2012-05-07 Georg-Johann Lay <avr@gjlay.de>
+
+ * config/avr/avr.c (avr_prologue_setup_frame): Fix mode passed
+ down to plus_constant.
+ (expand_epilogue): Ditto.
+
+2012-05-07 Steven Bosscher <steven@gcc.gnu.org>
+
+ * postreload.c (reload_cse_regs): Make static.
+ * reload.h (reload_cse_regs): Remove prototype.
+
+2012-05-07 Richard Henderson <rth@redhat.com>
+
+ * config/alpha/alpha.md (clear_cache): New pattern.
+
+2012-05-07 Steven Bosscher <steven@gcc.gnu.org>
+
+ PR middle-end/53245
+ * gimplify.c (preprocess_case_label_vec_for_gimple): If low or high
+	is folded to a type boundary value, verify that the resulting case
+	label is still a case range.
+
+2012-05-07 Uros Bizjak <ubizjak@gmail.com>
+
+ * config/i386/i386.md (ctz<mode>2): Emit rep;bsf
+ only for TARGET_GENERIC, when not optimizing for size.
+ (*ffs<mode>_1): Ditto.
+
+2012-05-07 Steven Bosscher <steven@gcc.gnu.org>
+
+ * tree-cfg.c (verify_gimple_switch): Tighten checks.
+
+2012-05-07 Jakub Jelinek <jakub@redhat.com>
+
+ PR tree-optimization/53239
+ * tree-vrp.c (get_value_range): Set VR of SSA_NAME_IS_DEFAULT_DEF
+ of DECL_BY_REFERENCE RESULT_DECL to nonnull.
+
+2012-05-07 Richard Guenther <rguenther@suse.de>
+
+ PR tree-optimization/53195
+ * tree-inline.c (setup_one_parameter): Properly add referenced
+ vars from the parameters new known value.
+
+2012-05-07 Steven Bosscher <steven@gcc.gnu.org>
+
+ * config/m68k/m68k.c (m68k_sched_branch_type): Remove.
+ (sched_branch_type): Remove.
+ (m68k_sched_md_init_global): Don't allocate it.
+ (m68k_sched_md_finish_global): Don't free it.
+ * config/m68k/m68k.h (m68k_sched_branch_type): Remove prototype.
+ * config/m68k/m68k.md: Set the type of insns using
+ m68k_sched_branch_type to bcc directly.
+
+2012-05-07 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
+
+ * config/sol2.c (solaris_override_options): New function.
+ * config/sol2-protos.h (solaris_override_options): Declare.
+ * config/sol2.h (SUBTARGET_OVERRIDE_OPTIONS): Define.
+
+2012-05-07 Richard Guenther <rguenther@suse.de>
+
+ * tree-ssa-loop-prefetch.c (determine_loop_nest_reuse): Return
+ whether we failed to compute data dependences.
+ (loop_prefetch_arrays): Fail if we failed.
+
+2012-05-07 Uros Bizjak <ubizjak@gmail.com>
+ Paolo Bonzini <bonzini@gnu.org>
+
+ * config/i386/i386.md (ctz<mode>2): Emit rep;bsf even for !TARGET_BMI.
+ Emit bsf when optimizing for size.
+ (*ffs<mode>_1): Ditto.
+
+2012-05-07 Oleg Endo <olegendo@gcc.gnu.org>
+
+ PR target/53250
+ * config/sh/sh.c (sh_rtx_costs): Handle SET.
+
+2012-05-06 Uros Bizjak <ubizjak@gmail.com>
+
+ PR target/53227
+ * config/i386/i386.md (swap<mode>): Rename from *swap<mode>.
+	(bswapdi2): Split from bswap<mode>2. Use nonimmediate_operand
+ predicate for operand 1. Force operand 1 to register for TARGET_BSWAP.
+ (bswapsi2): Ditto.
+ (*bswapdi2_doubleword): New insn pattern.
+ (*bswap<mode>2): Rename from *bswap<mode>2_1.
+
+2012-05-06 Richard Sandiford <rdsandiford@googlemail.com>
+
+ * config/mips/mips.c (mips_set_reg_reg_piece_cost): New function.
+ (mips_set_reg_reg_cost): Likewise.
+ (mips_rtx_costs): Handle SET.
+
+2012-05-06 Richard Sandiford <rdsandiford@googlemail.com>
+
+ * lower-subreg.c (shift_cost): Use set_src_cost, avoiding the SET.
+ (compute_costs): Likewise for the zero extension. Use set_rtx_cost
+ to compute the cost of moves. Set the mode of the target register.
+
+2012-05-05 Richard Sandiford <rdsandiford@googlemail.com>
+
+ * rtl.h (plus_constant, plus_constant_mode): Merge into a single
+ plus_constant function.
+ * explow.c (plus_constant, plus_constant_mode): Likewise. Assert
+ that the mode is sensible.
+ (use_anchored_address, round_push, allocate_dynamic_stack_space)
+ (probe_stack_range, anti_adjust_stack_and_probe): Update calls to
+ plus_constant.
+
+ * alias.c (canon_rtx): Likewise.
+ (init_alias_analysis): Likewise.
+ * builtins.c (expand_builtin_return_addr)
+ (expand_builtin_setjmp_setup, expand_builtin_longjmp)
+ (expand_builtin_nonlocal_goto, expand_builtin_update_setjmp_buf)
+ (expand_builtin_apply_args_1, expand_builtin_apply, expand_movstr)
+ (expand_builtin_stpcpy): Likewise.
+ * calls.c (save_fixed_argument_area, restore_fixed_argument_area)
+ (compute_argument_addresses, internal_arg_pointer_based_exp)
+ (expand_call, emit_library_call_value_1): Likewise.
+ * cfgexpand.c (expand_one_stack_var_at, expand_debug_expr): Likewise.
+ * combine-stack-adj.c (try_apply_stack_adjustment): Likewise.
+ * combine.c (combine_simplify_rtx, force_to_mode): Likewise.
+ * cse.c (insert_const_anchor, find_reg_offset_for_const)
+ (use_related_value, fold_rtx): Likewise.
+ * cselib.c (cselib_subst_to_values): Likewise.
+ * dse.c (record_store, check_mem_read_rtx): Likewise.
+ * dwarf2out.c (rtl_for_decl_location, gen_variable_die): Likewise.
+ * emit-rtl.c (adjust_address_1): Likewise.
+ * except.c (sjlj_emit_function_enter)
+ (expand_builtin_extract_return_addr)
+ (expand_builtin_frob_return_addr): Likewise.
+ * expmed.c (expand_divmod): Likewise.
+ * expr.c (move_by_pieces, store_by_pieces, store_by_pieces_1)
+ (emit_move_resolve_push, push_block, emit_push_insn, store_expr)
+ (expand_expr_addr_expr_1, expand_expr_real_1): Likewise.
+ * function.c (assign_stack_local_1)
+ (instantiate_virtual_regs_in_rtx): Likewise.
+ * optabs.c (prepare_cmp_insn): Likewise.
+ * recog.c (offsettable_address_addr_space_p): Likewise.
+ * reload.c (find_reloads_address, form_sum)
+ (find_reloads_subreg_address): Likewise.
+ * reload1.c (init_reload, eliminate_regs_1)
+ (eliminate_regs_in_insn): Likewise.
+ * simplify-rtx.c (simplify_unary_operation_1)
+ (simplify_binary_operation_1, simplify_plus_minus): Likewise.
+ * var-tracking.c (compute_cfa_pointer, prepare_call_arguments)
+ (vt_add_function_parameter): Likewise.
+
+ * config/alpha/alpha.h (EH_RETURN_HANDLER_RTX): Likewise.
+ * config/alpha/vms.h (EH_RETURN_HANDLER_RTX): Likewise.
+ * config/alpha/alpha.c (alpha_legitimize_address_1)
+ (get_unaligned_address, alpha_expand_unaligned_load)
+ (alpha_expand_unaligned_store, alpha_expand_unaligned_load_words)
+ (alpha_expand_unaligned_store_words, alpha_expand_block_clear)
+ (alpha_expand_builtin_establish_vms_condition_handler)
+ (alpha_setup_incoming_varargs, emit_frame_store_1)
+ (alpha_expand_prologue, alpha_expand_epilogue)
+ (alpha_use_linkage): Likewise.
+ * config/alpha/alpha.md: Likewise.
+
+ * config/arm/arm.c (arm_trampoline_init, legitimize_pic_address)
+ (arm_load_pic_register, arm_pic_static_addr, arm_legitimize_address)
+ (thumb_legitimize_address, arm_gen_load_multiple_1)
+ (arm_gen_store_multiple_1, arm_gen_multiple_op, gen_ldm_seq)
+ (gen_stm_seq, gen_const_stm_seq, arm_block_move_unaligned_straight)
+ (arm_block_move_unaligned_loop, arm_gen_movmemqi, arm_reload_in_hi)
+ (arm_reload_out_hi, arm_reorg, vfp_emit_fstmd, emit_multi_reg_push)
+ (emit_sfm, thumb_set_frame_pointer, arm_expand_prologue)
+ (thumb1_emit_multi_reg_push, thumb1_expand_prologue)
+ (thumb_expand_movmemqi, arm_set_return_address)
+ (thumb_set_return_address): Likewise.
+ * config/arm/arm.md: Likewise.
+
+ * config/avr/avr.c (avr_incoming_return_addr_rtx)
+ (avr_prologue_setup_frame, expand_epilogue)
+ (avr_const_address_lo16): Likewise.
+
+ * config/bfin/bfin.h (EH_RETURN_HANDLER_RTX): Likewise.
+ * config/bfin/bfin.c (setup_incoming_varargs, bfin_load_pic_reg)
+ (bfin_expand_prologue, bfin_trampoline_init, bfin_expand_call)
+ (bfin_output_mi_thunk): Likewise.
+
+ * config/c6x/c6x.c (c6x_initialize_trampoline)
+ (c6x_output_mi_thunk): Likewise.
+
+ * config/cr16/cr16.h (EH_RETURN_HANDLER_RTX): Likewise.
+ * config/cr16/cr16.c (cr16_create_dwarf_for_multi_push): Likewise.
+
+ * config/cris/cris.c (cris_return_addr_rtx, cris_split_movdx)
+ (cris_expand_prologue, cris_expand_epilogue, cris_gen_movem_load)
+ (cris_emit_movem_store, cris_trampoline_init): Likewise.
+ * config/cris/cris.md: Likewise.
+
+ * config/darwin.c (machopic_indirect_data_reference)
+ (machopic_legitimize_pic_address): Likewise.
+
+ * config/epiphany/epiphany.c (epiphany_emit_save_restore)
+ (epiphany_expand_prologue, epiphany_expand_epilogue)
+ (epiphany_trampoline_init): Likewise.
+ * config/epiphany/epiphany.md: Likewise.
+
+ * config/fr30/fr30.c (fr30_move_double): Likewise.
+
+ * config/frv/frv.c (frv_dwarf_store, frv_expand_prologue)
+ (frv_expand_block_move, frv_expand_block_clear, frv_return_addr_rtx)
+ (frv_index_memory, unspec_got_name, frv_find_base_term)
+ (frv_output_dwarf_dtprel): Likewise.
+
+ * config/h8300/h8300.c (h8300_push_pop, h8300_return_addr_rtx)
+ (h8300_swap_into_er6, h8300_swap_out_of_er6): Likewise.
+
+ * config/i386/i386.h (RETURN_ADDR_RTX): Likewise.
+ * config/i386/i386.c (setup_incoming_varargs_64)
+ (setup_incoming_varargs_ms_64, choose_baseaddr)
+ (ix86_emit_save_reg_using_mov, ix86_adjust_stack_and_probe)
+ (ix86_emit_probe_stack_range, ix86_expand_prologue)
+ (ix86_emit_restore_reg_using_pop, ix86_emit_leave)
+ (ix86_expand_epilogue, legitimize_pic_address, ix86_legitimize_address)
+ (ix86_split_long_move, ix86_expand_movmem, ix86_expand_setmem)
+ (ix86_static_chain, ix86_trampoline_init, x86_this_parameter)
+ (x86_output_mi_thunk): Likewise.
+ * config/i386/i386.md: Likewise.
+
+ * config/ia64/ia64.c (ia64_expand_load_address)
+ (ia64_expand_tls_address, ia64_expand_move, ia64_split_tmode)
+ (do_spill, ia64_trampoline_init): Likewise.
+
+ * config/iq2000/iq2000.c (iq2000_va_start)
+ (iq2000_emit_frame_related_store, iq2000_expand_prologue)
+ (iq2000_expand_eh_return, iq2000_setup_incoming_varargs)
+ (iq2000_print_operand, iq2000_legitimize_address): Likewise.
+
+ * config/lm32/lm32.c (lm32_setup_incoming_varargs): Likewise.
+
+ * config/m32c/m32c.c (m32c_return_addr_rtx)
+ (m32c_expand_insv): Likewise.
+
+ * config/m32r/m32r.c (m32r_setup_incoming_varargs)
+ (m32r_legitimize_pic_address, m32r_print_operand)
+ (m32r_print_operand_address): Likewise.
+
+ * config/m68k/linux.h (FINALIZE_TRAMPOLINE): Likewise.
+ * config/m68k/m68k.h (RETURN_ADDR_RTX): Likewise.
+ (EH_RETURN_HANDLER_RTX): Likewise.
+ * config/m68k/m68k.c (m68k_emit_movem, m68k_expand_prologue)
+ (m68k_expand_epilogue, legitimize_pic_address)
+ (m68k_output_mi_thunk): Likewise.
+ * config/m68k/m68k.md: Likewise.
+
+ * config/mcore/mcore.c (mcore_expand_prolog): Likewise.
+ (mcore_expand_epilog): Likewise.
+ * config/mcore/mcore.md: Likewise.
+
+ * config/mep/mep.c (mep_allocate_initial_value)
+ (mep_expand_prologue, mep_expand_epilogue): Likewise.
+
+ * config/microblaze/microblaze.c (double_memory_operand)
+ (microblaze_block_move_loop): Likewise.
+
+ * config/mips/mips.c (mips_strip_unspec_address, mips_add_offset)
+ (mips_setup_incoming_varargs, mips_va_start, mips_block_move_loop)
+ (mips_print_operand, mips16e_save_restore_reg, mips_save_restore_reg)
+ (mips_expand_prologue, mips_epilogue_set_cfa)
+ (mips_expand_epilogue): Likewise.
+ * config/mips/mips.md: Likewise.
+
+ * config/mmix/mmix.c (mmix_dynamic_chain_address, mmix_return_addr_rtx)
+ (mmix_expand_prologue, mmix_expand_epilogue): Likewise.
+
+ * config/mn10300/mn10300.c (mn10300_gen_multiple_store)
+ (mn10300_builtin_saveregs, mn10300_trampoline_init): Likewise.
+
+ * config/moxie/moxie.h (INCOMING_RETURN_ADDR_RTX): Likewise.
+ (EH_RETURN_HANDLER_RTX): Likewise.
+ * config/moxie/moxie.c (moxie_static_chain): Likewise.
+
+ * config/pa/pa.c (legitimize_pic_address, hppa_legitimize_address)
+ (store_reg, set_reg_plus_d, pa_expand_prologue, load_reg)
+ (pa_return_addr_rtx, hppa_builtin_saveregs)
+ (pa_trampoline_init): Likewise.
+ * config/pa/pa.md: Likewise.
+
+ * config/pdp11/pdp11.c (pdp11_expand_epilogue): Likewise.
+
+ * config/picochip/picochip.c (picochip_static_chain): Likewise.
+
+ * config/rs6000/rs6000.h (RS6000_SAVE_TOC): Likewise.
+ * config/rs6000/rs6000.c (rs6000_legitimize_address)
+ (setup_incoming_varargs, print_operand, rs6000_return_addr)
+ (rs6000_emit_eh_reg_restore, rs6000_emit_probe_stack_range)
+ (rs6000_emit_epilogue)
+ (rs6000_machopic_legitimize_pic_address): Likewise.
+
+ * config/rx/rx.c (gen_rx_rtsd_vector, gen_rx_popm_vector): Likewise.
+
+ * config/s390/s390.h (INITIAL_FRAME_ADDRESS_RTX): Likewise.
+ (DYNAMIC_CHAIN_ADDRESS): Likewise.
+ * config/s390/s390.c (s390_decompose_address, legitimize_pic_address)
+ (s390_delegitimize_address, print_operand, annotate_constant_pool_refs)
+ (replace_constant_pool_ref, s390_return_addr_rtx, s390_back_chain_rtx)
+ (save_fpr, restore_fpr, save_gprs, restore_gprs, s390_emit_prologue)
+ (s390_emit_epilogue, s390_function_profiler): Likewise.
+ * config/s390/s390.md: Likewise.
+
+ * config/score/score.c (score_add_offset, score_prologue): Likewise.
+
+ * config/sh/sh.c (expand_block_move, push_regs, sh_builtin_saveregs)
+ (sh_output_mi_thunk): Likewise.
+ * config/sh/sh.md: Likewise.
+
+ * config/sparc/sparc.h (DYNAMIC_CHAIN_ADDRESS, FRAME_ADDR_RTX)
+ (RETURN_ADDR_RTX, INCOMING_RETURN_ADDR_RTX): Likewise.
+ * config/sparc/sparc.c (sparc_legitimize_pic_address)
+ (sparc_emit_probe_stack_range, emit_save_or_restore_regs)
+ (emit_window_save, sparc_flat_expand_prologue, sparc_struct_value_rtx)
+ (emit_and_preserve): Likewise.
+ * config/sparc/sparc.md: Likewise.
+
+ * config/spu/spu.h (DYNAMIC_CHAIN_ADDRESS): Likewise.
+ * config/spu/spu.c (spu_expand_insv, spu_machine_dependent_reorg)
+ (spu_setup_incoming_varargs, ea_load_store_inline)
+ (spu_expand_load): Likewise.
+
+ * config/stormy16/stormy16.c (xstormy16_expand_prologue)
+ (combine_bnp): Likewise.
+
+ * config/tilegx/tilegx.h (DYNAMIC_CHAIN_ADDRESS): Likewise.
+ * config/tilegx/tilegx.c (tilegx_setup_incoming_varargs)
+ (tilegx_expand_unaligned_load, tilegx_trampoline_init): Likewise.
+
+ * config/tilepro/tilepro.h (DYNAMIC_CHAIN_ADDRESS): Likewise.
+ * config/tilepro/tilepro.c (tilepro_setup_incoming_varargs)
+ (tilepro_expand_unaligned_load, tilepro_trampoline_init): Likewise.
+
+ * config/v850/v850.c (expand_prologue, expand_epilogue): Likewise.
+ * config/v850/v850.md: Likewise.
+
+ * config/vax/elf.h (EH_RETURN_STACKADJ_RTX): Likewise.
+ (EH_RETURN_HANDLER_RTX): Likewise.
+ * config/vax/vax.h (DYNAMIC_CHAIN_ADDRESS, RETURN_ADDR_RTX): Likewise.
+ * config/vax/vax.c (vax_add_reg_cfa_offset, vax_expand_prologue)
+ (print_operand_address, vax_trampoline_init): Likewise.
+
+ * config/xtensa/xtensa.c (xtensa_expand_prologue, xtensa_return_addr)
+ (xtensa_function_value_regno_p): Likewise.
+
+2012-05-04 Andrew Pinski <apinski@cavium.com>
+
+ * expr.c (get_def_for_expr_class): New function.
+ (convert_tree_comp_to_rtx): New function.
+ (expand_cond_expr_using_cmove): New function.
+ (expand_expr_real_2 <case COND_EXPR>): Call
+ expand_cond_expr_using_cmove first and return if it succeeds.
+ Remove the check for HAVE_conditional_move since we should have
+ already converted it to a conditional move.
+ * config/i386/i386.c (ix86_expand_int_movcc): Disallow comparison
+ modes of DImode for 32bits and TImode.
+
+2012-05-04 Steven Bosscher <steven@gcc.gnu.org>
+
+ PR other/29442
+ * read-md.c (fprint_md_ptr_loc, fprint_c_condition): New functions.
+ (print_md_ptr_loc, print_c_condition): Use them.
+ * read-md.h (fprint_md_ptr_loc, fprint_c_condition): New prototypes.
+ * genattrtab.c (attr_file_name, dfa_file_name, latency_file_name,
+ attr_file, dfa_file, latency_file): New global variables.
+ (write_attr_valueq, write_attr_set, write_attr_case, write_attr_value,
+ write_upcase, write_indent, write_length_unit_log, write_test_expr,
+ write_attr_get, write_insn_cases, write_eligible_delay,
+ write_const_num_delay_slots): Accept FILE pointer and toss it around.
+ Update all callers.
+	(write_header, open_outfile, handle_arg): New functions.
+ (make_automaton_attrs): Write prototypes as extern to the output
+ files.
+ (main): Use init_rtx_reader_args_cb with handle_arg to take 3 file
+ names from the command line. Open the output files and write out
+ internal functions for DFA functions to dfa_file_name, insn latency
+ functions to latency_file_name, and everything else to attr_file.
+ * Makefile.in (OBJS): Add insn-dfatab.o and insn-latencytab.o.
+ (BACKEND): Build libbackend first.
+ (MOSTLYCLEANFILES): Add insn-dfatab.c and insn-latencytab.c.
+ (.PRECIOUS): Likewise.
+ (insn-dfatab.o): New rule.
+ (insn-latencytab.o): New rule.
+ (simple_rtl_generated_c): Do not include insn-attrtab.c.
+ (s-attrtab): New rule.
+
+2012-05-04 Steven Bosscher <steven@gcc.gnu.org>
+
+ * rtl.def (ATTR_FLAG): Remove probability indicating flags.
+ * genattr.c (main): Remove ATTR_FLAG_likely, ATTR_FLAG_unlikely,
+ ATTR_FLAG_very_likely, and ATTR_FLAG_very_unlikely.
+ * reorg.c (get_jump_flags): Do not set the removed flags.
+
+ * doc/md.texi (attr_flag): Update for abovementioned changes.
+
+2012-05-04 Uros Bizjak <ubizjak@gmail.com>
+
+ PR target/53228
+ * config/i386/i386.h (X86_ARCH_CMOV): Rename from X86_ARCH_CMOVE.
+ (TARGET_CMOV): Rename from TARGET_CMOVE.
+ (TARGET_CMOVE): New define.
+ * config/i386/i386.c (ix86_option_override_internal): Use TARGET_CMOV.
+ Do not set TARGET_CMOVE here.
+
+2012-05-04 Dodji Seketeli <dodji@redhat.com>
+
+ Enable -Wunused-local-typedefs when -Wall or -Wunused is on
+ * opts.c (finish_options): Activate -Wunused-local-typedefs if
+ -Wunused is activated.
+ * doc/invoke.texi: Update blurb of -Wunused-local-typedefs.
+
+2012-05-04 Andreas Krebbel <Andreas.Krebbel@de.ibm.com>
+
+ * config/s390/s390.md (*movmem_short, *clrmem_short)
+ (*cmpmem_short): Move the mode check from the insn condition to
+ the match_scratch.
+
+2012-05-04 Ulrich Weigand <ulrich.weigand@linaro.org>
+
+ PR tree-optimization/52633
+ * tree-vect-patterns.c (vect_vect_recog_func_ptrs): Swap order of
+ vect_recog_widen_shift_pattern and vect_recog_over_widening_pattern.
+ (vect_recog_over_widening_pattern): Remove handling of code that was
+ already detected as over-widening pattern. Remove special handling
+ of "unsigned" cases. Instead, support general case of conversion
+ of the shift result to another type.
+
+2012-05-04 Ulrich Weigand <ulrich.weigand@linaro.org>
+
+ * tree-vect-patterns.c (vect_single_imm_use): New function.
+ (vect_recog_widen_mult_pattern): Use it instead of open-coding loop.
+ (vect_recog_over_widening_pattern): Likewise.
+ (vect_recog_widen_shift_pattern): Likewise.
+
+2012-05-04 Ulrich Weigand <ulrich.weigand@linaro.org>
+
+ * tree-vect-patterns.c (vect_same_loop_or_bb_p): New function.
+ (vect_handle_widen_op_by_const): Use it instead of open-coding test.
+ (vect_recog_widen_mult_pattern): Likewise.
+ (vect_operation_fits_smaller_type): Likewise.
+ (vect_recog_over_widening_pattern): Likewise.
+ (vect_recog_widen_shift_pattern): Add to vect_same_loop_or_bb_p test.
+
+2012-05-04 Richard Guenther <rguenther@suse.de>
+
+ PR lto/50602
+ * lto-wrapper.c (merge_and_complain): Complain about mismatches
+ of -freg-struct-return and -fpcc-struct-return.
+ (run_gcc): Pass through -freg-struct-return and -fpcc-struct-return
+ from the input file options and ignore those from the link
+ command line.
+
+2012-05-04 Richard Guenther <rguenther@suse.de>
+
+ PR tree-optimization/53168
+ * tree-ssa-pre.c (phi_translate_1): Only handle type-punned
+ memory reads when the result is a constant we can pun.
+
+2012-05-04 Richard Guenther <rguenther@suse.de>
+
+ * common.opt (flto-report): Do not mark as Optimization.
+
+2012-05-04 Eric Botcazou <ebotcazou@adacore.com>
+
+ PR target/48496
+ * recog.c (constrain_operands): If extra constraints are present, also
+ accept pseudo-registers with equivalent memory locations during reload.
+
+2012-05-04 Olivier Hainque <hainque@adacore.com>
+
+ * collect2.c (may_unlink_output_file): New global.
+ (maybe_unlink): Honor it.
+ * collect2.h: Add extern for it.
+ * tlink.c (do_tlink): Set it to true if the link succeeded.
+
+2012-05-04 Olivier Hainque <hainque@adacore.com>
+
+ * gcc.c (eval_spec_function): Finalize/restore the current string
+ obstack state as part of the context push/pop operations.
+
+2012-05-04 Bin Cheng <bin.cheng@arm.com>
+
+ PR rtl-optimization/52804
+ * reload1.c (reload_reg_reaches_end_p): Check whether successor
+ reload with type RELOAD_FOR_INPUT_ADDRESS kills reload register
+ of current one with type RELOAD_FOR_INPADDR_ADDRESS.
+ Same stands for reloads with type RELOAD_FOR_OUTPUT_ADDRESS and
+ RELOAD_FOR_OUTADDR_ADDRESS.
+
+2012-05-04 Manuel López-Ibáñez <manu@gcc.gnu.org>
+
+ PR c++/24985
+ * tree-diagnostic.c (maybe_unwind_expanded_macro_loc): Show caret
+ for macro expansion.
+
+2012-05-03 Manuel López-Ibáñez <manu@gcc.gnu.org>
+
+ * flags.h (flag_permissive): Do not declare.
+ * diagnostic.c (diagnostic_report_diagnostic): Handle fpermissive
+ option specially.
+ * toplev.c (flag_permissive): Do not define.
+ * c-tree.h (system_header_p): Delete unused.
+
+2012-05-03 David S. Miller <davem@davemloft.net>
+
+ PR target/52684
+	* config/sparc/sparc.c (emit_soft_tfmode_libcall): If we pass a
+	MEM directly into a libcall, mark its MEM_EXPR as addressable.
+ (sparc_emit_float_lib_cmp): Likewise.
+
+2012-05-03 Michael Meissner <meissner@linux.vnet.ibm.com>
+
+ PR target/53199
+ * config/rs6000/rs6000.md (bswapdi splitters): If
+ -mavoid-indexed-addresses (or -mcpu=power6 which sets it by
+ default) is used, generate an alternate sequence that does not
+ depend on using indexed addressing.
+
+2012-05-03 Jason Merrill <jason@redhat.com>
+
+ * gengtype.c (write_types): Fix warning message.
+ (write_local): Likewise.
+
+2012-05-02 Jason Merrill <jason@redhat.com>
+
+ * dwarf2out.c (struct external_ref, build_local_stub): New.
+ (hash_external_ref, external_ref_eq, lookup_external_ref): New.
+ (optimize_external_refs, optimize_external_refs_1): New.
+ (change_AT_die_ref): New.
+ (clone_as_declaration): Add DW_AT_signature when cloning a declaration.
+ (build_abbrev_table): Take the external refs hashtable.
+ (output_comp_unit): Get it from optimize_external_refs and pass it in.
+
+2012-05-03 Jan Hubicka <jh@suse.cz>
+
+ PR middle-end/53093
+ * tree-emutls.c (new_emutls_decl): Fix handling of aliases.
+
+2012-05-03 Jan Hubicka <jh@suse.cz>
+
+ PR middle-end/53106
+ * ipa.c (cgraph_remove_unreachable_nodes): Fix handling of clones.
+
+2012-05-03 Jason Merrill <jason@redhat.com>
+
+ * dwarf2out.c (die_struct): Add comdat_type_p flag. Use it instead of
+ use_debug_types to discriminate the die_id union.
+ (print_die, assign_symbol_names, copy_decls_walk): Likewise.
+ (build_abbrev_table, output_die): Likewise.
+ (prune_unused_types_walk_attribs): Likewise.
+ (generate_type_signature, copy_declaration_context): Set it.
+ (remove_child_or_replace_with_skeleton): Set it.
+ (dwarf2out_start_source_file, dwarf2out_end_source_file): Don't
+ check use_debug_types.
+ (dwarf2out_finish): Do break_out_includes after .debug_types.
+
+2012-05-03 Jason Merrill <jason@redhat.com>
+
+ * dwarf2out.c (modified_type_die): Use scope_die_for.
+ (gen_type_die_with_usage, dwarf2out_finish): Likewise.
+ (uses_local_type_r, uses_local_type): New.
+ (scope_die_for): Keep a type that uses a local type in local scope.
+ Use get_context_die for namespace and type scope.
+
+2012-05-03 Jason Merrill <jason@redhat.com>
+
+ * config/i386/i386.c (ix86_code_end): Set DECL_IGNORED_P on the
+ pc thunk.
+ * dwarf2out.c (output_aranges): Skip DECL_IGNORED_P functions.
+ (dwarf2out_finish): Likewise.
+
+2012-05-03 Martin Jambor <mjambor@suse.cz>
+
+ * builtins.c (get_object_alignment_1): Return whether we can determine
+ the alignment or conservatively assume byte alignment. Return the
+ alignment by reference. Use get_pointer_alignment_1 for dereference
+ alignment.
+ (get_pointer_alignment_1): Return whether we can determine the
+ alignment or conservatively assume byte alignment. Return the
+ alignment by reference. Use get_ptr_info_alignment to get SSA name
+ alignment.
+ (get_object_alignment): Update call to get_object_alignment_1.
+ (get_object_or_type_alignment): Likewise, fall back to type alignment
+ only when it returned false.
+ (get_pointer_alignment): Update call to get_pointer_alignment_1.
+ * fold-const.c (get_pointer_modulus_and_residue): Update call to
+ get_object_alignment_1.
+ * ipa-prop.c (ipa_modify_call_arguments): Update call to
+ get_pointer_alignment_1.
+ * tree-sra.c (build_ref_for_offset): Likewise, fall back to the type
+ of MEM_REF or TARGET_MEM_REF only when it returns false.
+ * tree-ssa-ccp.c (get_value_from_alignment): Update call to
+ get_object_alignment_1.
+ (ccp_finalize): Use set_ptr_info_alignment.
+ * tree.h (get_object_alignment_1): Update declaration.
+ (get_pointer_alignment_1): Likewise.
+ * gimple-pretty-print.c (dump_gimple_phi): Use get_ptr_info_alignment.
+ (dump_gimple_stmt): Likewise.
+ * tree-flow.h (ptr_info_def): Updated comments of fields align and
+ misalign.
+ (get_ptr_info_alignment): Declared.
+ (mark_ptr_info_alignment_unknown): Likewise.
+ (set_ptr_info_alignment): Likewise.
+ (adjust_ptr_info_misalignment): Likewise.
+ * tree-ssa-address.c (copy_ref_info): Use new access functions to get
+ and set alignment of SSA names.
+ * tree-ssa-loop-ivopts.c (rewrite_use_nonlinear_expr): Call
+ mark_ptr_info_alignment_unknown.
+ * tree-ssanames.c (get_ptr_info_alignment): New function.
+ (mark_ptr_info_alignment_unknown): Likewise.
+ (set_ptr_info_alignment): Likewise.
+ (adjust_ptr_info_misalignment): Likewise.
+ (get_ptr_info): Call mark_ptr_info_alignment_unknown.
+ * tree-vect-data-refs.c (vect_create_addr_base_for_vector_ref):
+ Likewise.
+ (bump_vector_ptr): Likewise.
+ * tree-vect-stmts.c (create_array_ref): Use set_ptr_info_alignment.
+ (vectorizable_store): Likewise.
+ (vectorizable_load): Likewise.
+
+2012-05-03 Michael Matz <matz@suse.de>
+
+ * basic-block.h (struct rtl_bb_info, struct gimple_bb_info): Move
+ in front of basic_block_def.
+ (struct basic_block_def): Make il.gimple the full struct, not a
+ pointer.
+ (__assert_gimple_bb_smaller_rtl_bb): Asserting typedef.
+
+ * cfgexpand.c (expand_gimple_basic_block): Clear all il.gimple members.
+ * gimple-iterator.c (gimple_stmt_iterator): Don't special case
+ NULL il.gimple, which can't happen anymore.
+ * gimple.h (bb_seq): il.gimple can't be NULL.
+ (bb_seq_add): Ditto.
+ (set_bb_seq): Adjust.
+ (gsi_start_bb, gsi_last_bb): Tidy.
+ * lto-streamer-in.c (make_new_block): Don't zero members that
+ are zeroed already, don't allocate a gimple_bb_info.
+ * tree-cfg.c (create_bb): Don't allocate a gimple_bb_info.
+ (remove_bb): Clear all il.gimple members.
+ (gimple_verify_flow_info): Adjust for flat il.gimple.
+ * tree-flow-inline.h (phi_nodes, phi_nodes_ptr, set_phi_nodes): Adjust.
+
+ * coretypes.h (const_gimple_seq): Remove typedef.
+ * gimple.h (gimple_seq_first): Take gimple_seq.
+ (gimple_seq_first_stmt): Ditto.
+ (gimple_seq_last): Ditto.
+ (gimple_seq_last_stmt): Ditto.
+ (gimple_seq_empty_p): Ditto.
+
+2012-05-03 Richard Guenther <rguenther@suse.de>
+
+ * tree-ssa-pre.c (valid_in_sets): Remove checking of trapping
+ operations.
+ (prune_clobbered_mems): Do it here. Do not uselessly sort expressions.
+ (compute_avail): Do not add possibly trapping operations to
+ EXP_GEN if they might not be executed in the block.
+
+2012-05-03 Uros Bizjak <ubizjak@gmail.com>
+
+ * config/alpha/elf.h (MAX_OFILE_ALIGNMENT): Remove.
+
+2012-05-03 Steven Bosscher <steven@gcc.gnu.org>
+
+ * tree-switch-conversion.c (gen_inbound_check): Free post-dominance
+ information as early as possible. Update dominance info instead of
+ discarding it.
+
+2012-05-03 Richard Guenther <rguenther@suse.de>
+
+ * tree-ssa-pre.c (debug_bitmap_sets_for): New function.
+ (union_contains_value): Remove.
+ (vro_valid_in_sets): Likewise.
+ (op_valid_in_sets): New function.
+ (valid_in_sets): Use op_valid_in_sets.
+ (insert_into_preds_of_block): Move dumping ...
+ (do_regular_insertion): ... here.
+ (do_partial_partial_insertion): ... and here. Dump that
+ we've found a partial partial redundancy.
+ (insert): Dump the current insert iteration.
+
+2012-05-03 Jakub Jelinek <jakub@redhat.com>
+
+ PR plugins/53126
+ * gcc-ar.c (main): If GCC_EXEC_PREFIX is set in env,
+ append program name to it and pass that as first argument
+ to make_relative_prefix. Always pass standard_libexec_prefix
+ as last argument to make_relative_prefix. If
+ make_relative_prefix returns NULL, fall back to
+ standard_libexec_prefix.
+
+ PR debug/53174
+ * tree-predcom.c (remove_stmt): Call reset_debug_uses on stmts being
+ removed.
+
+ PR target/53187
+ * config/arm/arm.c (arm_select_cc_mode): If x has MODE_CC class
+ mode, return that mode.
+
+ PR target/53194
+ * config/i386/i386-c.c (ix86_target_macros_internal): Don't
+ define __ATOMIC_HLE_* macros here.
+ (ix86_target_macros): But here, using cpp_define_formatted.
+
+2012-05-03 Richard Guenther <rguenther@suse.de>
+
+ PR tree-optimization/53144
+ * tree-ssa-sccvn.c (vn_reference_lookup_or_insert_constant_for_pieces):
+ Rename to ...
+ (vn_reference_lookup_or_insert_for_pieces): ... this. Properly deal
+ with SSA name values.
+ (vn_reference_lookup_3): Adjust callers.
+
+2012-05-03 Ganesh Gopalasubramanian <Ganesh.Gopalasubramanian@amd.com>
+
+ * config/i386/driver-i386.c (host_detect_local_cpu): Reset
+ has_fma4 for AMD processors with both fma3 and fma4 support.
+
+2012-05-03 Kirill Yukhin <kirill.yukhin@intel.com>
+
+ PR target/53201
+ * config/i386/driver-i386.c (host_detect_local_cpu): Add space to
+ "-mno-hle".
+
+2012-05-03 Michael Matz <matz@suse.de>
+
+ PR bootstrap/53197
+ * tree-ssa-dse.c (dse_optimize_stmt): Take pointer to iterator.
+ (dse_enter_block): Properly iterate the whole sequence even
+ if the last statement was removed.
+
+2012-05-02 Steven Bosscher <steven@gcc.gnu.org>
+
+ * config/alpha/vms.h (SUBTARGET_OVERRIDE_OPTIONS): For pic code,
+ unset flag_jump_tables.
+ * stmt.c (expand_case): Remove special flag_pic case conditional
+ on ASM_OUTPUT_ADDR_DIFF_ELT not being defined.
+
+2012-05-02 Ulrich Weigand <ulrich.weigand@linaro.org>
+
+ * common/config/s390/s390-common.c (s390_option_optimization_table):
+ Enable -fsched-pressure using -fsched-pressure-algorithm=model by
+ default when optimizing.
+
+2012-05-02 Martin Jambor <mjambor@suse.cz>
+
+ PR lto/52605
+ * dwarf2out.c (dwarf2out_decl): Only lookup die representing context
+	of a variable when the context is a function.
+
+2012-05-02 Michael Matz <matz@suse.de>
+
+ * coretypes.h (gimple_seq, const_gimple_seq): Typedef as gimple.
+ * gimple.h (struct gimple_seq_node_d, struct gimple_seq_d): Remove.
+ (const_gimple_seq_node): Remove.
+ (gimple_seq_node): Typedef as gimple.
+ (struct gimple_statement_base): Add next and prev members,
+ adjust all WORD markers in using structs.
+ (union gimple_statement_d): Link via gsbase.next field for GC and PCH.
+ (gimple_seq_first, gimple_seq_first_stmt, gimple_seq_last,
+ gimple_seq_last_stmt): Adjust as gimple_seq, gimple_seq_node and
+ gimple are the same.
+ (gimple_seq_set_last, gimple_seq_set_first): Don't allocate
+ gimple_seq, adjust.
+ (gimple_init_singleton): New function.
+ (gsi_start_1, gsi_last_1, gsi_end_p, gsi_one_before_end_p): Adjust.
+ (gsi_next, gsi_prev): Adjust, handle prev cyclic list correctly.
+ (gsi_stmt): Adjust.
+ (gsi_stmt_ptr): Remove.
+ (enum gimple_alloc_kind): Remove gimple_alloc_kind_seq member.
+
+ * gimple-iterator.c (update_bb_for_stmts): Take last parameter
+ again, adjust for above changes.
+ (update_call_edge_frequencies): Adjust for above changes.
+ (gsi_insert_seq_nodes_before): Rewrite for new data structure.
+ (gsi_insert_seq_nodes_after): Ditto.
+ (gsi_split_seq_after): Ditto.
+ (gsi_set_stmt): Ditto.
+ (gsi_split_seq_before): Ditto.
+ (gsi_remove): Ditto.
+ (gsi_insert_seq_before_without_update): Don't free sequence.
+ (gsi_insert_seq_after_without_update): Ditto.
+ (gsi_replace): Assert some more invariants.
+ (gsi_insert_before_without_update, gsi_insert_after_without_update):
+ Tidy.
+ (gsi_for_stmt): Don't search for stmt.
+ (gsi_insert_on_edge_immediate): Tidy.
+
+ * gimple.c (gimple_alloc_k): Remove "sequences".
+ (gimple_seq_cache): Remove.
+ (gimple_alloc_stat): Make stmt a singleton sequence.
+ (gimple_seq_alloc, gimple_seq_free): Remove.
+ (gimple_assign_set_rhs_with_ops_1): Ensure new stmt is a singleton.
+ (gimple_copy): Ditto.
+ * gimplify.c (gimplify_cleanup_point_expr): Use gsi_set_stmt,
+ create iterator from correct sequence.
+ * tree-phinodes.c (make_phi_node): Make stmt a singleton.
+
+2012-05-02 Michael Matz <matz@suse.de>
+
+ * gimple.h (gimple_stmt_iterator <seq>): Make it be pointer to
+ gimple_seq.
+ (gimple_seq_set_last, gimple_seq_set_first): Take pointer to
+ sequence, lazily allocate it.
+ (bb_seq_addr): New function.
+ (gsi_start_1): Rename from gsi_start, but take pointer to sequence.
+ (gsi_start): Macro to wrap gsi_start_1 taking pointer of argument.
+ (gsi_none): New function.
+ (gsi_start_bb): Adjust.
+ (gsi_last_1): Rename from gsi_last, but take pointer to sequence.
+ (gsi_last): Macro to wrap gsi_last_1 taking pointer of argument.
+ (gsi_last_bb): Adjust.
+ (gsi_seq): Adjust.
+ * tree-flow-inline.h (phi_nodes_ptr): New function.
+
+ * gimple-iterator.c (gsi_insert_seq_nodes_before): Adjust to
+ datastructure and interface change.
+ (gsi_insert_seq_before_without_update): Ditto.
+ (gsi_insert_seq_nodes_after): Ditto.
+ (gsi_insert_seq_after_without_update): Ditto.
+ (gsi_split_seq_after): Ditto, don't use gimple_seq_alloc.
+ (gsi_split_seq_before): Ditto.
+ (gsi_start_phis): Adjust.
+ * tree-vect-loop.c (vect_determine_vectorization_factor): Use gsi_none.
+ (vect_transform_loop): Ditto.
+ * gimple.c (gimple_seq_add_stmt, gimple_seq_add_seq,
+ gimple_seq_copy): Don't use gimple_seq_alloc.
+ * gimplify.c (gimple_seq_add_stmt_without_update): Ditto.
+ (gimplify_seq_add_seq): Ditto.
+ * lto-streamer-in.c (make_new_block): Ditto.
+ * tree-cfg.c (create_bb): Ditto.
+ * tree-sra.c (initialize_parameter_reductions): Ditto.
+
+2012-05-02 Michael Matz <matz@suse.de>
+
+ * gimple.h (gimple_seq_first, gimple_seq_first_stmt, gimple_seq_last,
+ gimple_seq_last_stmt, gimple_seq_set_last, gimple_seq_set_first,
+ gimple_seq_empty_p, gimple_seq_alloc_with_stmt, bb_seq,
+ set_bb_seq): Move down to after gimple_statement_d definition.
+
+2012-05-02 Michael Matz <matz@suse.de>
+
+ * gimple-fold.c (gimplify_and_update_call_from_tree): Use
+ gsi_replace_with_seq, instead of inserting itself.
+ * gimple-iterator.c (gsi_replace_with_seq): New function.
+ * tree-ssa-forwprop.c (forward_propagate_comparison): Take
+ iterator instead of statement, advance it.
+ (ssa_forward_propagate_and_combine): Adjust call to above.
+
+2012-05-02 Michael Matz <matz@suse.de>
+
+ * tree-phinodes.c (add_phi_node_to_bb): Tidy, don't use
+ gimple_seq_alloc.
+ * omp-low.c (finalize_task_copyfn): Don't use gimple_seq_alloc.
+ * tree-nested.c (walk_gimple_omp_for): Ditto.
+ * trans-mem.c (lower_transaction): Ditto.
+ * tree-eh.c (do_return_redirection): Ditto.
+ (do_goto_redirection): Ditto.
+ (lower_try_finally_switch): Ditto.
+ * gimplify.c (gimplify_stmt): Ditto.
+ (gimplify_scan_omp_clauses): Ditto.
+ (gimplify_omp_for): Ditto.
+ (gimplify_function_tree): Ditto.
+ * gimple-fold.c (gimplify_and_update_call_from_tree): Ditto.
+ * tree-mudflap.c (mf_decl_cache_locals): Ditto.
+ (mf_build_check_statement_for): Ditto.
+ (mx_register_decls): Ditto.
+ * graphite-sese-to-poly.c (remove_invariant_phi): Ditto,
+	and don't use iterators to append.
+ (insert_stmts): Ditto.
+ (insert_out_of_ssa_copy): Ditto.
+ (insert_out_of_ssa_copy_on_edge): Ditto.
+
+2012-05-02 Michael Matz <matz@suse.de>
+
+ * gimple.h (gimple_bind_body_ptr): New function.
+ (gimple_bind_body): Use it.
+ (gimple_catch_handler): Use gimple_catch_handler_ptr.
+ (gimple_eh_filter_failure_ptr): New function.
+ (gimple_eh_filter_failure): Use it.
+ (gimple_eh_else_n_body_ptr): New function.
+ (gimple_eh_else_n_body): Use it.
+ (gimple_eh_else_e_body_ptr): New function.
+ (gimple_eh_else_e_body): Use it.
+ (gimple_try_eval_ptr): New function.
+ (gimple_try_eval): Use it.
+ (gimple_try_cleanup_ptr): New function.
+ (gimple_try_cleanup): Use it.
+ (gimple_wce_cleanup_ptr): New function.
+ (gimple_wce_cleanup): Use it.
+ (gimple_omp_body_ptr): New function.
+ (gimple_omp_body): Use it.
+ (gimple_omp_for_pre_body_ptr): New function.
+ (gimple_omp_for_pre_body): Use it.
+ (gimple_transaction_body_ptr): New function.
+ (gimple_transaction_body): Use it.
+ (gsi_split_seq_before): Adjust to return nothing and take pointer
+ to sequence.
+ (gsi_set_stmt): Declare.
+ (gsi_replace_with_seq): Declare.
+ (walk_gimple_seq_mod): Declare.
+ * function.h (struct function <gimple_body>): Use typedef gimple_seq.
+
+ * gimple-iterator.c (gsi_set_stmt): New function.
+ (gsi_split_seq_before): Return new sequence via pointer argument.
+ (gsi_replace): Use gsi_set_stmt.
+
+ * tree-ssa-loop-im.c (move_computations_stmt): First remove
+ then insert stmt.
+ * tree-complex.c (update_complex_components_on_edge): Don't copy gsi.
+ * tree-phinodes.c (resize_phi_node): Don't resize stmt in-place,
+ return new stmt.
+ (reserve_phi_args_for_new_edge): Change call to above,
+ use gsi_set_stmt.
+
+ * omp-low.c (lower_omp): Change prototype to take pointer to sequence.
+ (lower_rec_input_clauses): Use gimple_seq_add_seq instead of
+ iterators. Adjust call to lower_omp.
+ (lower_lastprivate_clauses): Adjust call to lower_omp.
+ (lower_reduction_clauses): Ditto.
+ (expand_omp_taskreg): Nullify non-cfg body of child_fn.
+	(lower_omp_sections): Don't explicitly count sequence length,
+ nullify lowered bodies earlier, ensure stmts are part of only
+ one sequence, adjust call to lower_omp.
+ (lower_omp_single): Ensure stmts are part of only one sequence,
+ adjust call to lower_omp.
+ (lower_omp_master): Ditto.
+ (lower_omp_ordered): Ditto.
+ (lower_omp_critical): Ditto.
+ (lower_omp_for): Ditto.
+ (lower_omp_taskreg): Ditto, tidy.
+ (lower_omp_1): Adjust calls to lower_omp.
+ (execute_lower_omp): Ditto.
+ (lower_omp): Adjust to take pointer to sequence.
+ (diagnose_sb_2): Use walk_gimple_seq_mod.
+ (diagnose_omp_structured_block_errors): Ditto and set possibly
+ changed function body.
+ * gimple-low.c (lower_function_body): Set function body after
+ it stabilizes.
+ (lower_sequence): Adjust to take pointer to sequence.
+ (lower_omp_directive): Ensure stmt isn't put twice into the
+ sequence, adjust call to lower_sequence.
+ (lower_stmt): Adjust calls to lower_sequence.
+ (lower_gimple_bind): Ditto.
+ (gimple_try_catch_may_fallthru): Call gsi_start with lvalue.
+ * tree-nested.c (walk_body): Take pointer to sequence, use
+ walk_gimple_seq_mod.
+ (walk_function): Adjust call to walk_body, set possibly changed body.
+ (walk_gimple_omp_for): Adjust calls to walk_body.
+ (convert_nonlocal_omp_clauses): Ditto.
+ (convert_nonlocal_reference_stmt): Ditto.
+ (convert_local_omp_clauses): Ditto.
+ (convert_local_reference_stmt): Ditto.
+ (convert_tramp_reference_stmt): Ditto.
+ (convert_gimple_call): Ditto.
+ (convert_nl_goto_reference): Use local iterator copy.
+ * gimple.c (walk_gimple_seq_mod): Renamed from walk_gimple_seq,
+ but taking pointer to sequence, ensure gsi_start is called with
+ callers lvalue.
+ (walk_gimple_seq): New wrapper around walk_gimple_seq_mod,
+ asserting that the sequence head didn't change.
+ (walk_gimple_stmt): Replace all calls to walk_gimple_seq with
+ walk_gimple_seq_mod.
+ * trans-mem.c (lower_transaction): Use walk_gimple_seq_mod.
+ (execute_lower_tm): Ditto, and set possibly changed body.
+ * tree-eh.c (lower_eh_constructs_1): Take pointer to sequence,
+ call gsi_start with that lvalue.
+ (replace_goto_queue_stmt_list): Ditto.
+ (replace_goto_queue_1): Adjust calls to replace_goto_queue_stmt_list.
+ (replace_goto_queue): Ditto.
+ (lower_try_finally_nofallthru): Adjust calls to lower_eh_constructs_1.
+ (lower_try_finally_onedest): Ditto.
+ (lower_try_finally_copy): Ditto.
+ (lower_try_finally_switch): Ditto.
+ (lower_try_finally): Ditto.
+ (lower_eh_filter): Ditto.
+ (lower_eh_must_not_throw): Ditto.
+ (lower_cleanup): Ditto.
+ (lower_eh_constructs_2): Ditto.
+ (lower_catch): Ditto, and ensure gsi_start is called with lvalue.
+ (lower_eh_constructs): Adjust calls to lower_eh_constructs_1, and
+ set possibly changed body.
+ (optimize_double_finally): Call gsi_start with lvalue.
+
+ * tree-cfg.c (make_blocks): Adjust call to gsi_split_seq_before.
+ (gimple_split_block): Ditto.
+ (gimple_merge_blocks): Use gsi_start_phis.
+ (move_stmt_r): Use walk_gimple_seq_mod.
+ * tree-ssa-dse.c (dse_enter_block): Use gsi_last_bb.
+ * cgraphbuild.c (build_cgraph_edges): Use gsi_start_phis.
+ (rebuild_cgraph_edges): Ditto.
+ (cgraph_rebuild_references): Ditto.
+ * ipa-prop.c (ipa_analyze_params_uses): Ditto.
+ * tree-inline.c (copy_phis_for_bb): Ditto.
+ * tree-ssa-dce.c: Ditto.
+
+ * cgraphunit.c (cgraph_analyze_function): Use gimple_has_body_p.
+
+2012-05-02 Kirill Yukhin <kirill.yukhin@intel.com>
+ Andi Kleen <ak@linux.intel.com>
+
+ * coretypes.h (MEMMODEL_MASK): New.
+ * builtins.c (get_memmodel): Add val. Call target.memmodel_check
+ and return new variable.
+ (expand_builtin_atomic_exchange): Mask memmodel values.
+ (expand_builtin_atomic_compare_exchange): Ditto.
+ (expand_builtin_atomic_load): Ditto.
+ (expand_builtin_atomic_store): Ditto.
+ (expand_builtin_atomic_clear): Ditto.
+ * doc/extend.texi: Mention port-dependent memory model flags.
+ * config/i386/cpuid.h (bit_HLE): New.
+ * config/i386/driver-i386.c (host_detect_local_cpu): Detect
+ HLE support.
+ * config/i386/i386-protos.h (ix86_generate_hle_prefix): New.
+ * config/i386/i386-c.c (ix86_target_macros_internal): Set HLE defines.
+ (ix86_target_string)<-mhle>: New.
+ (ix86_valid_target_attribute_inner_p) <OPT_mhle>: Ditto.
+ * config/i386/i386.c (ix86_target_string) <OPTION_MASK_ISA_HLE>: New.
+ (ix86_valid_target_attribute_inner_p) <OPT_mhle>: Ditto.
+ (ix86_option_override_internal) <PTA_HLE>: New switch, set it
+ enabled for generic, generic64 and core-avx2.
+ (ix86_print_operand): Generate HLE lock prefixes.
+ (ix86_memmodel_check): New.
+ (TARGET_MEMMODEL_CHECK): Ditto.
+ * config/i386/i386.h (OPTION_ISA_HLE): Ditto.
+ (IX86_HLE_ACQUIRE): Ditto.
+ (IX86_HLE_RELEASE): Ditto.
+ * config/i386/i386.h (ix86_generate_hle_prefix): Ditto.
+ * config/i386/i386.opt (mhle): Ditto.
+ * config/i386/sync.md(atomic_compare_and_swap<mode>): Pass
+ success model to instruction emitter.
+ (atomic_fetch_add<mode>): Ditto.
+ (atomic_exchange<mode>): Ditto.
+ (atomic_add<mode>): Ditto.
+ (atomic_sub<mode>): Ditto.
+ (atomic_<code><mode>): Ditto.
+ (*atomic_compare_and_swap_doubledi_pic): Ditto.
+ (atomic_compare_and_swap_single<mode>): Define and use argument
+ for success model.
+ (atomic_compare_and_swap_double<mode>): Ditto.
+	* configure.ac: Check if assembler supports HLE prefixes.
+ * configure: Regenerate.
+ * config.in: Ditto.
+
+2012-05-02 Steven Bosscher <steven@gcc.gnu.org>
+
+ PR middle-end/53153
+ * gimplify.c (preprocess_case_label_vec_for_gimple): New function,
+ split out from ...
+ (gimplify_switch_expr): ... here.
+ * gimple.h (preprocess_case_label_vec_for_gimple): Add prototype.
+ * tree-ssa-forwprop.c (simplify_gimple_switch_label_vec): New function
+ to clean up case labels with values outside the index type range.
+ (simplify_gimple_switch): Call it if something changed.
+ Remove strange and unnecessary assert.
+
+2012-05-02 Richard Guenther <rguenther@suse.de>
+
+ * fold-const.c (div_if_zero_remainder): sizetypes no longer
+ sign-extend.
+ (int_const_binop_1): New worker for int_const_binop with
+ overflowable parameter. Pass it through to force_fit_type_double.
+ (int_const_binop): Wrap around int_const_binop_1 with overflowable
+ equal to one.
+ (size_binop_loc): Call int_const_binop_1 with overflowable equal
+ to minus one, forcing overflow detection for even unsigned types.
+ (extract_muldiv_1): Remove bogus TYPE_IS_SIZETYPE special-casing.
+ (fold_binary_loc): Call try_move_mult_to_index with signed offset.
+ * stor-layout.c (initialize_sizetypes): sizetypes no longer
+ sign-extend.
+ (layout_type): For zero-sized arrays ignore overflow on the
+ size calculations.
+ * tree-ssa-ccp.c (bit_value_unop_1): Likewise.
+ (bit_value_binop_1): Likewise.
+ * tree.c (double_int_to_tree): Likewise.
+ (double_int_fits_to_tree_p): Likewise.
+ (force_fit_type_double): Likewise.
+ (host_integerp): Likewise.
+ (int_fits_type_p): Likewise.
+ * varasm.c (output_constructor_regular_field): Sign-extend the
+ field-offset to cater for negative offsets produced by the Ada
+ frontend.
+ * omp-low.c (extract_omp_for_data): Convert the loop step to
+ signed for pointer adjustments.
+
+2012-05-02 Richard Guenther <rguenther@suse.de>
+
+ * tree.c (valid_constant_size_p): New function.
+ * tree.h (valid_constant_size_p): Declare.
+ * cfgexpand.c (expand_one_var): Adjust check for too large
+ variables by using valid_constant_size_p.
+ * varasm.c (assemble_variable): Likewise.
+
+ * c-decl.c (grokdeclarator): Properly check for sizes that
+ cover more than half of the address-space.
+
2012-05-02 Jakub Jelinek <jakub@redhat.com>
+ PR tree-optimization/53163
+ * tree-ssa-phiopt.c (cond_if_else_store_replacement): Don't ignore
+ return value from compute_all_dependences.
+
PR rtl-optimization/53160
* ree.c (combine_reaching_defs): Handle the case where cand->insn
has been modified by ree pass already.
@@ -32,6 +1620,7 @@
2012-05-01 Kenneth Zadeck <zadeck@naturalbridge.com>
Richard Sandiford <r.sandiford@uk.ibm.com>
+ PR rtl-optimization/52543
* Makefile.in (lower-subreg.o, target-globals.o): Depend on
lower-subreg.h.
* lower-subreg.h: New file.
@@ -62,15 +1651,15 @@
Update calls to simple_move and find_decomposable_shift_zext.
2012-05-01 Ian Bolton <ian.bolton@arm.com>
- Sameera Deshpande <sameera.deshpande@arm.com>
- Greta Yorsh <greta.yorsh@arm.com>
+ Sameera Deshpande <sameera.deshpande@arm.com>
+ Greta Yorsh <greta.yorsh@arm.com>
* config/arm/arm-protos.h (thumb_unexpanded_epilogue): Rename to...
- (thumb1_unexpanded_epilogue): ...this.
+ (thumb1_unexpanded_epilogue): ...this.
* config/arm/arm.c (thumb_unexpanded_epilogue): Rename to...
- (thumb1_unexpanded_epilogue): ...this.
+ (thumb1_unexpanded_epilogue): ...this.
* config/arm/arm.md (thumb_unexpanded_epilogue): Rename to...
- (thumb1_unexpanded_epilogue): ...this.
+ (thumb1_unexpanded_epilogue): ...this.
2012-05-01 Richard Earnshaw <rearnsha@arm.com>
@@ -106,7 +1695,7 @@
(update_edge_key): Ditto.
(flatten_function): Ditto.
(ipa_inline): Ditto.
- (inlinw_always_inline_functions): Ditto.
+ (inline_always_inline_functions): Ditto.
(early_inline_small_functions): Ditto.
2012-04-30 Uros Bizjak <ubizjak@gmail.com>
@@ -361,7 +1950,7 @@
* config/pa/pa.c (pa_legitimate_constant_p): Don't put function labels
in constant pool.
-2012-04-27 Ollie Wild <aaw@google.com>
+2012-04-27 Ollie Wild <aaw@google.com>
* doc/invoke.texi (Wliteral-suffix): Document new option.
diff --git a/gcc/ChangeLog.MELT b/gcc/ChangeLog.MELT
index 5c492062a5e..365b63a04f7 100644
--- a/gcc/ChangeLog.MELT
+++ b/gcc/ChangeLog.MELT
@@ -1,5 +1,18 @@
2012-05-11 Basile Starynkevitch <basile@starynkevitch.net>
+ {{for merge with trunk svn 187397, since gimple_seq are
+ disappearing in GCC 4.8}}
+
+ * melt-runtime.h (melt_gt_ggc_mx_gimple_seq_d): New declaration
+ (gt_ggc_mx_gimple_seq_d): Macro defined when GCC 4.8 only.
+
+ * melt-runtime.c (melt_gt_ggc_mx_gimple_seq_d): New function,
+ defined for GCC 4.8 only.
+
+ * melt/warmelt-debug.melt (melt_debug_fun): Add cast in our
+ warning diagnostic to avoid a warning.
+
+2012-05-11 Basile Starynkevitch <basile@starynkevitch.net>
* melt-module.mk (MELT_AUTOHOST_H): New.
(MELTGCC_BUILD_WITH_CXX): Use it.
diff --git a/gcc/DATESTAMP b/gcc/DATESTAMP
index 8832539b94c..28996773f30 100644
--- a/gcc/DATESTAMP
+++ b/gcc/DATESTAMP
@@ -1 +1 @@
-20120502
+20120511
diff --git a/gcc/Makefile.in b/gcc/Makefile.in
index 23dfc69311e..1e577409c72 100644
--- a/gcc/Makefile.in
+++ b/gcc/Makefile.in
@@ -1147,8 +1147,10 @@ C_OBJS = c-lang.o c-family/stub-objc.o $(C_AND_OBJC_OBJS)
OBJS = \
insn-attrtab.o \
insn-automata.o \
+ insn-dfatab.o \
insn-emit.o \
insn-extract.o \
+ insn-latencytab.o \
insn-modes.o \
insn-opinit.o \
insn-output.o \
@@ -1182,6 +1184,7 @@ OBJS = \
cgraph.o \
cgraphbuild.o \
cgraphunit.o \
+ cgraphclones.o \
combine.o \
combine-stack-adj.o \
compare-elim.o \
@@ -1474,7 +1477,7 @@ ALL_HOST_BACKEND_OBJS = $(GCC_OBJS) $(OBJS) $(OBJS-libcommon) \
# compilation or not.
ALL_HOST_OBJS = $(ALL_HOST_FRONTEND_OBJS) $(ALL_HOST_BACKEND_OBJS)
-BACKEND = main.o @TREEBROWSER@ libbackend.a libcommon-target.a libcommon.a \
+BACKEND = libbackend.a main.o @TREEBROWSER@ libcommon-target.a libcommon.a \
$(CPPLIB) $(LIBDECNUMBER)
@@ -1487,8 +1490,8 @@ MELT_RUNTIME_C= $(srcdir)/melt-runtime.c
MOSTLYCLEANFILES = insn-flags.h insn-config.h insn-codes.h \
insn-output.c insn-recog.c insn-emit.c insn-extract.c insn-peep.c \
- insn-attr.h insn-attr-common.h insn-attrtab.c insn-opinit.c \
- insn-preds.c insn-constants.h \
+ insn-attr.h insn-attr-common.h insn-attrtab.c insn-dfatab.c \
+ insn-latencytab.c insn-opinit.c insn-preds.c insn-constants.h \
tm-preds.h tm-constrs.h checksum-options \
tree-check.h min-insn-modes.c insn-modes.c insn-modes.h \
genrtl.h gt-*.h gtype-*.h gtype-desc.c gtyp*-input.list \
@@ -1841,10 +1844,11 @@ s-mlib: $(srcdir)/genmultilib Makefile
"$(MULTILIB_EXTRA_OPTS)" \
"$(MULTILIB_EXCLUSIONS)" \
"$(MULTILIB_OSDIRNAMES)" \
+ "$(MULTILIB_REQUIRED)" \
"@enable_multilib@" \
> tmp-mlib.h; \
else \
- $(SHELL) $(srcdir)/genmultilib '' '' '' '' '' '' '' no \
+ $(SHELL) $(srcdir)/genmultilib '' '' '' '' '' '' '' '' no\
> tmp-mlib.h; \
fi
$(SHELL) $(srcdir)/../move-if-change tmp-mlib.h multilib.h
@@ -3023,6 +3027,14 @@ cgraphunit.o : cgraphunit.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) \
gt-cgraphunit.h tree-iterator.h $(COVERAGE_H) $(TREE_DUMP_H) \
tree-pretty-print.h gimple-pretty-print.h ipa-inline.h $(IPA_UTILS_H) \
$(LTO_STREAMER_H) output.h $(REGSET_H) $(EXCEPT_H)
+cgraphclones.o : cgraphclones.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) \
+ $(TREE_H) langhooks.h $(TREE_INLINE_H) toplev.h $(DIAGNOSTIC_CORE_H) $(FLAGS_H) $(GGC_H) \
+ $(TARGET_H) $(CGRAPH_H) intl.h pointer-set.h $(FUNCTION_H) $(GIMPLE_H) \
+ $(TREE_FLOW_H) $(TREE_PASS_H) debug.h $(DIAGNOSTIC_H) \
+ $(FIBHEAP_H) output.h $(PARAMS_H) $(RTL_H) $(TIMEVAR_H) $(IPA_PROP_H) \
+ gt-cgraphunit.h tree-iterator.h $(COVERAGE_H) $(TREE_DUMP_H) \
+ tree-pretty-print.h gimple-pretty-print.h ipa-inline.h $(IPA_UTILS_H) \
+ $(LTO_STREAMER_H) output.h $(REGSET_H) $(EXCEPT_H)
cgraphbuild.o : cgraphbuild.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) \
$(TREE_H) langhooks.h $(CGRAPH_H) intl.h pointer-set.h $(GIMPLE_H) \
$(TREE_FLOW_H) $(TREE_PASS_H) $(IPA_UTILS_H) $(EXCEPT_H) \
@@ -3556,7 +3568,8 @@ $(common_out_object_file): $(common_out_file) $(CONFIG_H) $(SYSTEM_H) \
.PRECIOUS: insn-config.h insn-flags.h insn-codes.h insn-constants.h \
insn-emit.c insn-recog.c insn-extract.c insn-output.c insn-peep.c \
- insn-attr.h insn-attr-common.h insn-attrtab.c insn-preds.c
+ insn-attr.h insn-attr-common.h insn-attrtab.c insn-dfatab.c \
+ insn-latencytab.c insn-preds.c
# Dependencies for the md file. The first time through, we just assume
# the md file itself and the generated dependency file (in order to get
@@ -3575,7 +3588,11 @@ insn-attrtab.o : insn-attrtab.c $(CONFIG_H) $(SYSTEM_H) coretypes.h \
insn-config.h $(DIAGNOSTIC_CORE_H) $(RECOG_H) $(TM_P_H) $(FLAGS_H)
insn-automata.o : insn-automata.c $(CONFIG_H) $(SYSTEM_H) coretypes.h \
$(TM_H) $(RTL_H) $(REGS_H) output.h $(INSN_ATTR_H) \
- insn-config.h toplev.h $(DIAGNOSTIC_CORE_H) $(RECOG_H) $(TM_P_H) $(FLAGS_H) $(EMIT_RTL_H)
+ insn-config.h toplev.h $(DIAGNOSTIC_CORE_H) $(RECOG_H) \
+ $(TM_P_H) $(FLAGS_H) $(EMIT_RTL_H)
+insn-dfatab.o : insn-dfatab.c $(CONFIG_H) $(SYSTEM_H) coretypes.h \
+ $(TM_H) $(RTL_H) $(REGS_H) output.h $(INSN_ATTR_H) \
+ insn-config.h $(DIAGNOSTIC_CORE_H) $(RECOG_H) $(TM_P_H) $(FLAGS_H)
insn-emit.o : insn-emit.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) \
$(RTL_H) $(TM_P_H) $(FUNCTION_H) $(EXPR_H) $(OPTABS_H) \
dfp.h $(FLAGS_H) output.h insn-config.h hard-reg-set.h $(RECOG_H) \
@@ -3584,6 +3601,9 @@ insn-emit.o : insn-emit.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) \
insn-enums.o : insn-enums.c $(CONFIG_H) $(SYSTEM_H) insn-constants.h
insn-extract.o : insn-extract.c $(CONFIG_H) $(SYSTEM_H) coretypes.h \
$(TM_H) $(RTL_H) $(DIAGNOSTIC_CORE_H) insn-config.h $(RECOG_H)
+insn-latencytab.o : insn-latencytab.c $(CONFIG_H) $(SYSTEM_H) \
+ coretypes.h $(TM_H) $(RTL_H) $(REGS_H) output.h $(INSN_ATTR_H) \
+ insn-config.h $(DIAGNOSTIC_CORE_H) $(RECOG_H) $(TM_P_H) $(FLAGS_H)
insn-modes.o : insn-modes.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) \
$(MACHMODE_H)
insn-opinit.o : insn-opinit.c $(CONFIG_H) $(SYSTEM_H) coretypes.h \
@@ -3616,7 +3636,7 @@ insn-recog.o : insn-recog.c $(CONFIG_H) $(SYSTEM_H) coretypes.h \
simple_rtl_generated_h = insn-attr.h insn-attr-common.h insn-codes.h \
insn-config.h insn-flags.h
-simple_rtl_generated_c = insn-attrtab.c insn-automata.c insn-emit.c \
+simple_rtl_generated_c = insn-automata.c insn-emit.c \
insn-extract.c insn-opinit.c insn-output.c \
insn-peep.c insn-recog.c
@@ -3653,6 +3673,17 @@ s-check : build/gencheck$(build_exeext)
$(SHELL) $(srcdir)/../move-if-change tmp-check.h tree-check.h
$(STAMP) s-check
+# genattrtab produces three files: tmp-{attrtab.c,dfatab.c,latencytab.c}
+insn-attrtab.c insn-dfatab.c insn-latencytab.c: s-attrtab ; @true
+s-attrtab : $(MD_DEPS) build/genattrtab$(build_exeext) \
+ insn-conditions.md
+ $(RUN_GEN) build/genattrtab$(build_exeext) $(md_file) insn-conditions.md \
+ -Atmp-attrtab.c -Dtmp-dfatab.c -Ltmp-latencytab.c
+ $(SHELL) $(srcdir)/../move-if-change tmp-attrtab.c insn-attrtab.c
+ $(SHELL) $(srcdir)/../move-if-change tmp-dfatab.c insn-dfatab.c
+ $(SHELL) $(srcdir)/../move-if-change tmp-latencytab.c insn-latencytab.c
+ $(STAMP) s-attrtab
+
# gencondmd doesn't use the standard naming convention.
build/gencondmd.c: s-conditions; @true
s-conditions: $(MD_DEPS) build/genconditions$(build_exeext)
@@ -3821,6 +3852,7 @@ GTFILES = $(CPP_ID_DATA_H) $(srcdir)/input.h $(srcdir)/coretypes.h \
$(srcdir)/tree-parloops.c \
$(srcdir)/omp-low.c \
$(srcdir)/targhooks.c $(out_file) $(srcdir)/passes.c $(srcdir)/cgraphunit.c \
+ $(srcdir)/cgraphclones.c \
$(srcdir)/tree-ssa-propagate.c \
$(srcdir)/tree-phinodes.c \
$(srcdir)/lto-symtab.c \
@@ -4313,7 +4345,7 @@ TEXI_GCC_FILES = gcc.texi gcc-common.texi gcc-vers.texi frontends.texi \
standards.texi invoke.texi extend.texi md.texi objc.texi \
gcov.texi trouble.texi bugreport.texi service.texi \
contribute.texi compat.texi funding.texi gnu.texi gpl_v3.texi \
- fdl.texi contrib.texi cppenv.texi cppopts.texi \
+ fdl.texi contrib.texi cppenv.texi cppopts.texi avr-mmcu.texi \
implement-c.texi implement-cxx.texi arm-neon-intrinsics.texi
# we explicitly use $(srcdir)/doc/tm.texi here to avoid confusion with
diff --git a/gcc/ada/ChangeLog b/gcc/ada/ChangeLog
index e733b68f68d..d24810fed89 100644
--- a/gcc/ada/ChangeLog
+++ b/gcc/ada/ChangeLog
@@ -1,3 +1,98 @@
+2012-05-10 Richard Guenther <rguenther@suse.de>
+
+ * gcc-interface/cuintp.c (UI_From_gnu): Remove TYPE_IS_SIZETYPE use.
+
+2012-05-06 Tristan Gingold <gingold@adacore.com>
+
+ * gcc-interface/trans.c (gigi): Decorate reraise_zcx_decl.
+
+2012-05-06 Eric Botcazou <ebotcazou@adacore.com>
+
+ * gcc-interface/decl.c (gnat_to_gnu_entity) <object>: In the renaming
+ case, use the padded type if the renamed object has an unconstrained
+ type with default discriminant.
+
+2012-05-06 Eric Botcazou <ebotcazou@adacore.com>
+
+ * gcc-interface/trans.c (Loop_Statement_to_gnu): Also handle invariant
+ conditions with only one bound.
+ (Raise_Error_to_gnu): Likewise. New function extracted from...
+ (gnat_to_gnu) <N_Raise_Constraint_Error>: ...here. Call above function
+ in regular mode only.
+
+2012-05-06 Eric Botcazou <ebotcazou@adacore.com>
+
+ * gcc-interface/misc.c (gnat_post_options): Disable caret by default.
+
+2012-05-06 Eric Botcazou <ebotcazou@adacore.com>
+
+ * gcc-interface/gigi.h (make_packable_type): Declare.
+ (make_type_from_size): Likewise.
+ (relate_alias_sets): Likewise.
+ (maybe_pad_type): Adjust.
+ (init_gnat_to_gnu): Delete.
+ (destroy_gnat_to_gnu): Likewise.
+ (init_dummy_type): Likewise.
+ (destroy_dummy_type): Likewise.
+ (init_gnat_utils): Declare.
+ (destroy_gnat_utils): Likewise.
+ (ceil_pow2): New inline function.
+ * gcc-interface/decl.c (gnat_to_gnu_entity): Use ceil_pow2.
+ <object>: Pass True for the final processing of alignment and size.
+ <E_Subprogram_Type>: Only create the TYPE_DECL for a padded return
+ type if necessary.
+ (round_up_to_align): Delete.
+ (ceil_alignment): Likewise.
+ (relate_alias_sets): Move to...
+ (make_aligning_type): Likewise.
+ (make_packable_type): Likewise.
+ (maybe_pad_type): Likewise.
+ (make_type_from_size): Likewise.
+ * gcc-interface/utils.c (MAX_BITS_PER_WORD): Delete.
+ (struct pad_type_hash): New type.
+ (pad_type_hash_table): New static variable.
+ (init_gnat_to_gnu): Merge into...
+ (init_dummy_type): Likewise.
+ (init_gnat_utils): ...this. New function.
+ (destroy_gnat_to_gnu): Merge into...
+ (destroy_dummy_type): Likewise.
+ (destroy_gnat_utils): ...this. New function.
+ (pad_type_hash_marked_p): New function.
+ (pad_type_hash_hash): Likewise.
+ (pad_type_hash_eq): Likewise.
+ (relate_alias_sets): ...here.
+ (make_aligning_type): Likewise.
+ (make_packable_type): Likewise.
+ (maybe_pad_type): Likewise. Change same_rm_size parameter into
+ set_rm_size; do not set TYPE_ADA_SIZE if it is false. Do not set
+ null as Ada size. Do not set TYPE_VOLATILE on the padded type. If it
+ is complete and has constant size, canonicalize it. Bail out earlier
+ if a warning need not be issued.
+ (make_type_from_size): Likewise.
+ <INTEGER_TYPE>: Bail out if size is too large
+ (gnat_types_compatible_p): Do not deal with padded types.
+ (convert): Compare main variants for padded types.
+ * gcc-interface/trans.c (gigi): Call {init|destroy}_gnat_utils.
+ (gnat_to_gnu): Do not convert at the end for a call to a function that
+ returns an unconstrained type with default discriminant.
+ (Attribute_to_gnu) <Attr_Size>: Simplify handling of padded objects.
+ * gcc-interface/utils2.c (build_binary_op) <MODIFY_EXPR>: Likewise.
+ Do not use the padded type if it is BLKmode and the inner type is
+ non-BLKmode.
+
+2012-05-02 Pascal Obry <obry@adacore.com>
+
+ Revert
+ 2012-02-24 Dave Korn <dave.korn.cygwin@gmail.com>
+
+ * gcc-interface/Makefile.in (WIN_SO_PREFIX [windows targets]): New
+ Windows-specific make variable.
+ (WIN_SO_INSTALL_DIR [windows targets]): Likewise.
+ (install-gnatlib): Respect the above during installation when set,
+ and also install any windows import library that has been built.
+ (gnatlib-shared-win32): Use WIN_SO_PREFIX to name output DLL and also
+ build a corresponding import library.
+
2012-04-30 Jan Hubicka <jh@suse.cz>
* gcc-interface/utils.c (rest_of_subprog_body_compilation): Update
@@ -1599,7 +1694,7 @@
2012-02-22 Hristian Kirtchev <kirtchev@adacore.com>
- * exp_ch7.adb (Create_Finalizer): Suppress elaboration checks on
+ * exp_ch7.adb (Create_Finalizer): Suppress elaboration checks on
stack-related finalizers.
2012-02-22 Ed Schonberg <schonberg@adacore.com>
diff --git a/gcc/ada/gcc-interface/Makefile.in b/gcc/ada/gcc-interface/Makefile.in
index 5c4acda5388..37e551092d9 100644
--- a/gcc/ada/gcc-interface/Makefile.in
+++ b/gcc/ada/gcc-interface/Makefile.in
@@ -1547,19 +1547,16 @@ ifeq ($(strip $(filter-out cygwin% mingw32% pe,$(osys))),)
# the Cygwin port has always been a CygMing frankenhybrid and it is
# a long-term project to disentangle them.
ifeq ($(strip $(filter-out cygwin%,$(osys))),)
- WIN_SO_PREFIX=cyg
LIBGNAT_TARGET_PAIRS = \
s-memory.adb<s-memory.adb \
g-socthi.ads<g-socthi.ads \
g-socthi.adb<g-socthi.adb
else
- WIN_SO_PREFIX=lib
LIBGNAT_TARGET_PAIRS = \
s-memory.adb<s-memory-mingw.adb \
g-socthi.ads<g-socthi-mingw.ads \
g-socthi.adb<g-socthi-mingw.adb
endif
- WIN_SO_INSTALL_DIR = $(bindir)
LIBGNAT_TARGET_PAIRS += \
a-dirval.adb<a-dirval-mingw.adb \
a-excpol.adb<a-excpol-abort.adb \
@@ -2500,14 +2497,11 @@ install-gnatlib: ../stamp-gnatlib-$(RTSDIR)
# for shared libraries on some targets, e.g. on HP-UX where the x
# permission is required.
# Also install the .dSYM directories if they exist (these directories
-# contain the debug information for the shared libraries on darwin),
-# and the windows import libraries if they exist.
- libpfx=$(if $(WIN_SO_PREFIX),$(WIN_SO_PREFIX),lib); \
- librtlobjdir=$(if $(WIN_SO_INSTALL_DIR),$(WIN_SO_INSTALL_DIR),$(ADA_RTL_OBJ_DIR)); \
+# contain the debug information for the shared libraries on darwin)
for file in gnat gnarl; do \
- if [ -f $(RTSDIR)/$${libpfx}$${file}$(hyphen)$(LIBRARY_VERSION)$(soext) ]; then \
- $(INSTALL) $(RTSDIR)/$${libpfx}$${file}$(hyphen)$(LIBRARY_VERSION)$(soext) \
- $(DESTDIR)$${librtlobjdir}; \
+ if [ -f $(RTSDIR)/lib$${file}$(hyphen)$(LIBRARY_VERSION)$(soext) ]; then \
+ $(INSTALL) $(RTSDIR)/lib$${file}$(hyphen)$(LIBRARY_VERSION)$(soext) \
+ $(DESTDIR)$(ADA_RTL_OBJ_DIR); \
fi; \
if [ -f $(RTSDIR)/lib$${file}$(soext) ]; then \
$(LN_S) lib$${file}$(hyphen)$(LIBRARY_VERSION)$(soext) \
@@ -2517,10 +2511,6 @@ install-gnatlib: ../stamp-gnatlib-$(RTSDIR)
$(CP) -r $(RTSDIR)/lib$${file}$(hyphen)$(LIBRARY_VERSION)$(soext).dSYM \
$(DESTDIR)$(ADA_RTL_OBJ_DIR); \
fi; \
- if [ -f $(RTSDIR)/lib$${file}$(hyphen)$(LIBRARY_VERSION)$(soext)$(arext) ]; then \
- $(INSTALL_DATA) $(RTSDIR)/lib$${file}$(hyphen)$(LIBRARY_VERSION)$(soext)$(arext) \
- $(DESTDIR)$(ADA_RTL_OBJ_DIR); \
- fi; \
done
# This copy must be done preserving the date on the original file.
for file in $(RTSDIR)/*.ad?; do \
@@ -2726,18 +2716,16 @@ gnatlib-shared-win32:
cd $(RTSDIR); `echo "$(GCC_FOR_TARGET)" \
| sed -e 's,\./xgcc,../../xgcc,' -e 's,-B\./,-B../../,'` -shared -shared-libgcc \
$(PICFLAG_FOR_TARGET) \
- -o $(WIN_SO_PREFIX)gnat$(hyphen)$(LIBRARY_VERSION)$(soext) \
- -Wl,-out-implib,libgnat$(hyphen)$(LIBRARY_VERSION)$(soext)$(arext) \
+ -o libgnat$(hyphen)$(LIBRARY_VERSION)$(soext) \
$(GNATRTL_NONTASKING_OBJS) $(LIBGNAT_OBJS) \
- $(SO_OPTS)$(WIN_SO_PREFIX)gnat$(hyphen)$(LIBRARY_VERSION)$(soext) $(MISCLIB)
+ $(SO_OPTS)libgnat$(hyphen)$(LIBRARY_VERSION)$(soext) $(MISCLIB)
cd $(RTSDIR); `echo "$(GCC_FOR_TARGET)" \
| sed -e 's,\./xgcc,../../xgcc,' -e 's,-B\./,-B../../,'` -shared -shared-libgcc \
$(PICFLAG_FOR_TARGET) \
- -o $(WIN_SO_PREFIX)gnarl$(hyphen)$(LIBRARY_VERSION)$(soext) \
- -Wl,-out-implib,libgnarl$(hyphen)$(LIBRARY_VERSION)$(soext)$(arext) \
+ -o libgnarl$(hyphen)$(LIBRARY_VERSION)$(soext) \
$(GNATRTL_TASKING_OBJS) \
- $(SO_OPTS)$(WIN_SO_PREFIX)gnarl$(hyphen)$(LIBRARY_VERSION)$(soext) \
- $(THREADSLIB) -L. -lgnat$(hyphen)$(LIBRARY_VERSION)
+ $(SO_OPTS)libgnarl$(hyphen)$(LIBRARY_VERSION)$(soext) \
+ $(THREADSLIB) -Wl,libgnat$(hyphen)$(LIBRARY_VERSION)$(soext)
gnatlib-shared-darwin:
$(MAKE) $(FLAGS_TO_PASS) \
diff --git a/gcc/ada/gcc-interface/cuintp.c b/gcc/ada/gcc-interface/cuintp.c
index 31ed801e63c..e077d9ce009 100644
--- a/gcc/ada/gcc-interface/cuintp.c
+++ b/gcc/ada/gcc-interface/cuintp.c
@@ -6,7 +6,7 @@
* *
* C Implementation File *
* *
- * Copyright (C) 1992-2010, Free Software Foundation, Inc. *
+ * Copyright (C) 1992-2012, Free Software Foundation, Inc. *
* *
* GNAT is free software; you can redistribute it and/or modify it under *
* terms of the GNU General Public License as published by the Free Soft- *
@@ -177,10 +177,7 @@ UI_From_gnu (tree Input)
in a signed 64-bit integer. */
if (host_integerp (Input, 0))
return UI_From_Int (TREE_INT_CST_LOW (Input));
- else if (TREE_INT_CST_HIGH (Input) < 0
- && TYPE_UNSIGNED (gnu_type)
- && !(TREE_CODE (gnu_type) == INTEGER_TYPE
- && TYPE_IS_SIZETYPE (gnu_type)))
+ else if (TREE_INT_CST_HIGH (Input) < 0 && TYPE_UNSIGNED (gnu_type))
return No_Uint;
#endif
diff --git a/gcc/ada/gcc-interface/decl.c b/gcc/ada/gcc-interface/decl.c
index 333d33b307e..97ade5e6159 100644
--- a/gcc/ada/gcc-interface/decl.c
+++ b/gcc/ada/gcc-interface/decl.c
@@ -126,15 +126,6 @@ DEF_VEC_ALLOC_O(variant_desc,heap);
static GTY ((if_marked ("tree_int_map_marked_p"),
param_is (struct tree_int_map))) htab_t annotate_value_cache;
-enum alias_set_op
-{
- ALIAS_SET_COPY,
- ALIAS_SET_SUBSET,
- ALIAS_SET_SUPERSET
-};
-
-static void relate_alias_sets (tree, tree, enum alias_set_op);
-
static bool allocatable_size_p (tree, bool);
static void prepend_one_attribute_to (struct attrib **,
enum attr_type, tree, tree, Node_Id);
@@ -144,7 +135,6 @@ static bool type_has_variable_size (tree);
static tree elaborate_expression_1 (tree, Entity_Id, tree, bool, bool);
static tree elaborate_expression_2 (tree, Entity_Id, tree, bool, bool,
unsigned int);
-static tree make_packable_type (tree, bool);
static tree gnat_to_gnu_component_type (Entity_Id, bool, bool);
static tree gnat_to_gnu_param (Entity_Id, Mechanism_Type, Entity_Id, bool,
bool *);
@@ -165,9 +155,7 @@ static VEC(variant_desc,heap) *build_variant_list (tree,
VEC(variant_desc,heap) *);
static tree validate_size (Uint, tree, Entity_Id, enum tree_code, bool, bool);
static void set_rm_size (Uint, tree, Entity_Id);
-static tree make_type_from_size (tree, tree, bool);
static unsigned int validate_alignment (Uint, Entity_Id, unsigned int);
-static unsigned int ceil_alignment (unsigned HOST_WIDE_INT);
static void check_ok_for_atomic (tree, Entity_Id, bool);
static tree create_field_decl_from (tree, tree, tree, tree, tree,
VEC(subst_pair,heap) *);
@@ -838,7 +826,7 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, int definition)
else if (compare_tree_int (TYPE_SIZE (gnu_type), align_cap) > 0)
align = align_cap;
else
- align = ceil_alignment (tree_low_cst (TYPE_SIZE (gnu_type), 1));
+ align = ceil_pow2 (tree_low_cst (TYPE_SIZE (gnu_type), 1));
/* But make sure not to under-align the object. */
if (align <= TYPE_ALIGN (gnu_type))
@@ -921,8 +909,7 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, int definition)
tree orig_type = gnu_type;
gnu_type = maybe_pad_type (gnu_type, gnu_size, align, gnat_entity,
- false, false, definition,
- gnu_size ? true : false);
+ false, false, definition, true);
/* If a padding record was made, declare it now since it will
never be declared otherwise. This is necessary to ensure
@@ -951,6 +938,14 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, int definition)
gnu_type = TREE_TYPE (gnu_expr);
}
+ /* Or else, if the renamed object has an unconstrained type with
+ default discriminant, use the padded type. */
+ else if (TYPE_IS_PADDING_P (TREE_TYPE (gnu_expr))
+ && TREE_TYPE (TYPE_FIELDS (TREE_TYPE (gnu_expr)))
+ == gnu_type
+ && CONTAINS_PLACEHOLDER_P (TYPE_SIZE (gnu_type)))
+ gnu_type = TREE_TYPE (gnu_expr);
+
/* Case 1: If this is a constant renaming stemming from a function
call, treat it as a normal object whose initial value is what
is being renamed. RM 3.3 says that the result of evaluating a
@@ -2942,7 +2937,7 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, int definition)
= validate_alignment (Alignment (gnat_entity), gnat_entity, 0);
else if (Is_Atomic (gnat_entity))
TYPE_ALIGN (gnu_type)
- = esize >= BITS_PER_WORD ? BITS_PER_WORD : ceil_alignment (esize);
+ = esize >= BITS_PER_WORD ? BITS_PER_WORD : ceil_pow2 (esize);
/* If a type needs strict alignment, the minimum size will be the
type size instead of the RM size (see validate_size). Cap the
alignment, lest it causes this type size to become too large. */
@@ -4163,6 +4158,8 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, int definition)
mechanism to avoid copying too much data when it returns. */
if (CONTAINS_PLACEHOLDER_P (TYPE_SIZE (gnu_return_type)))
{
+ tree orig_type = gnu_return_type;
+
gnu_return_type
= maybe_pad_type (gnu_return_type,
max_size (TYPE_SIZE (gnu_return_type),
@@ -4172,8 +4169,11 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, int definition)
/* Declare it now since it will never be declared otherwise.
This is necessary to ensure that its subtrees are properly
marked. */
- create_type_decl (TYPE_NAME (gnu_return_type), gnu_return_type,
- NULL, true, debug_info_p, gnat_entity);
+ if (gnu_return_type != orig_type
+ && !DECL_P (TYPE_NAME (gnu_return_type)))
+ create_type_decl (TYPE_NAME (gnu_return_type),
+ gnu_return_type, NULL, true,
+ debug_info_p, gnat_entity);
return_by_invisi_ref_p = true;
}
@@ -4700,7 +4700,7 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, int definition)
if (operand_equal_p (TYPE_SIZE (gnu_type), gnu_size, 0)
&& operand_equal_p (rm_size (gnu_type), gnu_size, 0))
- gnu_size = 0;
+ gnu_size = NULL_TREE;
}
/* If the alignment hasn't already been processed and this is
@@ -4763,6 +4763,8 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, int definition)
gnu_entity_name = DECL_NAME (gnu_entity_name);
}
+ /* Now set the RM size of the type. We cannot do it before padding
+ because we need to accept arbitrary RM sizes on integral types. */
set_rm_size (RM_Size (gnat_entity), gnu_type, gnat_entity);
/* If we are at global level, GCC will have applied variable_size to
@@ -5843,83 +5845,6 @@ elaborate_entity (Entity_Id gnat_entity)
}
}
-/* Relate the alias sets of GNU_NEW_TYPE and GNU_OLD_TYPE according to OP.
- If this is a multi-dimensional array type, do this recursively.
-
- OP may be
- - ALIAS_SET_COPY: the new set is made a copy of the old one.
- - ALIAS_SET_SUPERSET: the new set is made a superset of the old one.
- - ALIAS_SET_SUBSET: the new set is made a subset of the old one. */
-
-static void
-relate_alias_sets (tree gnu_new_type, tree gnu_old_type, enum alias_set_op op)
-{
- /* Remove any padding from GNU_OLD_TYPE. It doesn't matter in the case
- of a one-dimensional array, since the padding has the same alias set
- as the field type, but if it's a multi-dimensional array, we need to
- see the inner types. */
- while (TREE_CODE (gnu_old_type) == RECORD_TYPE
- && (TYPE_JUSTIFIED_MODULAR_P (gnu_old_type)
- || TYPE_PADDING_P (gnu_old_type)))
- gnu_old_type = TREE_TYPE (TYPE_FIELDS (gnu_old_type));
-
- /* Unconstrained array types are deemed incomplete and would thus be given
- alias set 0. Retrieve the underlying array type. */
- if (TREE_CODE (gnu_old_type) == UNCONSTRAINED_ARRAY_TYPE)
- gnu_old_type
- = TREE_TYPE (TREE_TYPE (TYPE_FIELDS (TREE_TYPE (gnu_old_type))));
- if (TREE_CODE (gnu_new_type) == UNCONSTRAINED_ARRAY_TYPE)
- gnu_new_type
- = TREE_TYPE (TREE_TYPE (TYPE_FIELDS (TREE_TYPE (gnu_new_type))));
-
- if (TREE_CODE (gnu_new_type) == ARRAY_TYPE
- && TREE_CODE (TREE_TYPE (gnu_new_type)) == ARRAY_TYPE
- && TYPE_MULTI_ARRAY_P (TREE_TYPE (gnu_new_type)))
- relate_alias_sets (TREE_TYPE (gnu_new_type), TREE_TYPE (gnu_old_type), op);
-
- switch (op)
- {
- case ALIAS_SET_COPY:
- /* The alias set shouldn't be copied between array types with different
- aliasing settings because this can break the aliasing relationship
- between the array type and its element type. */
-#ifndef ENABLE_CHECKING
- if (flag_strict_aliasing)
-#endif
- gcc_assert (!(TREE_CODE (gnu_new_type) == ARRAY_TYPE
- && TREE_CODE (gnu_old_type) == ARRAY_TYPE
- && TYPE_NONALIASED_COMPONENT (gnu_new_type)
- != TYPE_NONALIASED_COMPONENT (gnu_old_type)));
-
- TYPE_ALIAS_SET (gnu_new_type) = get_alias_set (gnu_old_type);
- break;
-
- case ALIAS_SET_SUBSET:
- case ALIAS_SET_SUPERSET:
- {
- alias_set_type old_set = get_alias_set (gnu_old_type);
- alias_set_type new_set = get_alias_set (gnu_new_type);
-
- /* Do nothing if the alias sets conflict. This ensures that we
- never call record_alias_subset several times for the same pair
- or at all for alias set 0. */
- if (!alias_sets_conflict_p (old_set, new_set))
- {
- if (op == ALIAS_SET_SUBSET)
- record_alias_subset (old_set, new_set);
- else
- record_alias_subset (new_set, old_set);
- }
- }
- break;
-
- default:
- gcc_unreachable ();
- }
-
- record_component_aliases (gnu_new_type);
-}
-
/* Return true if the size represented by GNU_SIZE can be handled by an
allocation. If STATIC_P is true, consider only what can be done with a
static allocation. */
@@ -6211,471 +6136,6 @@ elaborate_expression_2 (tree gnu_expr, Entity_Id gnat_entity, tree gnu_name,
unit_align);
}
-/* Create a record type that contains a SIZE bytes long field of TYPE with a
- starting bit position so that it is aligned to ALIGN bits, and leaving at
- least ROOM bytes free before the field. BASE_ALIGN is the alignment the
- record is guaranteed to get. */
-
-tree
-make_aligning_type (tree type, unsigned int align, tree size,
- unsigned int base_align, int room)
-{
- /* We will be crafting a record type with one field at a position set to be
- the next multiple of ALIGN past record'address + room bytes. We use a
- record placeholder to express record'address. */
- tree record_type = make_node (RECORD_TYPE);
- tree record = build0 (PLACEHOLDER_EXPR, record_type);
-
- tree record_addr_st
- = convert (sizetype, build_unary_op (ADDR_EXPR, NULL_TREE, record));
-
- /* The diagram below summarizes the shape of what we manipulate:
-
- <--------- pos ---------->
- { +------------+-------------+-----------------+
- record =>{ |############| ... | field (type) |
- { +------------+-------------+-----------------+
- |<-- room -->|<- voffset ->|<---- size ----->|
- o o
- | |
- record_addr vblock_addr
-
- Every length is in sizetype bytes there, except "pos" which has to be
- set as a bit position in the GCC tree for the record. */
- tree room_st = size_int (room);
- tree vblock_addr_st = size_binop (PLUS_EXPR, record_addr_st, room_st);
- tree voffset_st, pos, field;
-
- tree name = TYPE_NAME (type);
-
- if (TREE_CODE (name) == TYPE_DECL)
- name = DECL_NAME (name);
- name = concat_name (name, "ALIGN");
- TYPE_NAME (record_type) = name;
-
- /* Compute VOFFSET and then POS. The next byte position multiple of some
- alignment after some address is obtained by "and"ing the alignment minus
- 1 with the two's complement of the address. */
- voffset_st = size_binop (BIT_AND_EXPR,
- fold_build1 (NEGATE_EXPR, sizetype, vblock_addr_st),
- size_int ((align / BITS_PER_UNIT) - 1));
-
- /* POS = (ROOM + VOFFSET) * BIT_PER_UNIT, in bitsizetype. */
- pos = size_binop (MULT_EXPR,
- convert (bitsizetype,
- size_binop (PLUS_EXPR, room_st, voffset_st)),
- bitsize_unit_node);
-
- /* Craft the GCC record representation. We exceptionally do everything
- manually here because 1) our generic circuitry is not quite ready to
- handle the complex position/size expressions we are setting up, 2) we
- have a strong simplifying factor at hand: we know the maximum possible
- value of voffset, and 3) we have to set/reset at least the sizes in
- accordance with this maximum value anyway, as we need them to convey
- what should be "alloc"ated for this type.
-
- Use -1 as the 'addressable' indication for the field to prevent the
- creation of a bitfield. We don't need one, it would have damaging
- consequences on the alignment computation, and create_field_decl would
- make one without this special argument, for instance because of the
- complex position expression. */
- field = create_field_decl (get_identifier ("F"), type, record_type, size,
- pos, 1, -1);
- TYPE_FIELDS (record_type) = field;
-
- TYPE_ALIGN (record_type) = base_align;
- TYPE_USER_ALIGN (record_type) = 1;
-
- TYPE_SIZE (record_type)
- = size_binop (PLUS_EXPR,
- size_binop (MULT_EXPR, convert (bitsizetype, size),
- bitsize_unit_node),
- bitsize_int (align + room * BITS_PER_UNIT));
- TYPE_SIZE_UNIT (record_type)
- = size_binop (PLUS_EXPR, size,
- size_int (room + align / BITS_PER_UNIT));
-
- SET_TYPE_MODE (record_type, BLKmode);
- relate_alias_sets (record_type, type, ALIAS_SET_COPY);
-
- /* Declare it now since it will never be declared otherwise. This is
- necessary to ensure that its subtrees are properly marked. */
- create_type_decl (name, record_type, NULL, true, false, Empty);
-
- return record_type;
-}
-
-/* Return the result of rounding T up to ALIGN. */
-
-static inline unsigned HOST_WIDE_INT
-round_up_to_align (unsigned HOST_WIDE_INT t, unsigned int align)
-{
- t += align - 1;
- t /= align;
- t *= align;
- return t;
-}
-
-/* TYPE is a RECORD_TYPE, UNION_TYPE or QUAL_UNION_TYPE that is being used
- as the field type of a packed record if IN_RECORD is true, or as the
- component type of a packed array if IN_RECORD is false. See if we can
- rewrite it either as a type that has a non-BLKmode, which we can pack
- tighter in the packed record case, or as a smaller type. If so, return
- the new type. If not, return the original type. */
-
-static tree
-make_packable_type (tree type, bool in_record)
-{
- unsigned HOST_WIDE_INT size = tree_low_cst (TYPE_SIZE (type), 1);
- unsigned HOST_WIDE_INT new_size;
- tree new_type, old_field, field_list = NULL_TREE;
-
- /* No point in doing anything if the size is zero. */
- if (size == 0)
- return type;
-
- new_type = make_node (TREE_CODE (type));
-
- /* Copy the name and flags from the old type to that of the new.
- Note that we rely on the pointer equality created here for
- TYPE_NAME to look through conversions in various places. */
- TYPE_NAME (new_type) = TYPE_NAME (type);
- TYPE_JUSTIFIED_MODULAR_P (new_type) = TYPE_JUSTIFIED_MODULAR_P (type);
- TYPE_CONTAINS_TEMPLATE_P (new_type) = TYPE_CONTAINS_TEMPLATE_P (type);
- if (TREE_CODE (type) == RECORD_TYPE)
- TYPE_PADDING_P (new_type) = TYPE_PADDING_P (type);
-
- /* If we are in a record and have a small size, set the alignment to
- try for an integral mode. Otherwise set it to try for a smaller
- type with BLKmode. */
- if (in_record && size <= MAX_FIXED_MODE_SIZE)
- {
- TYPE_ALIGN (new_type) = ceil_alignment (size);
- new_size = round_up_to_align (size, TYPE_ALIGN (new_type));
- }
- else
- {
- unsigned HOST_WIDE_INT align;
-
- /* Do not try to shrink the size if the RM size is not constant. */
- if (TYPE_CONTAINS_TEMPLATE_P (type)
- || !host_integerp (TYPE_ADA_SIZE (type), 1))
- return type;
-
- /* Round the RM size up to a unit boundary to get the minimal size
- for a BLKmode record. Give up if it's already the size. */
- new_size = TREE_INT_CST_LOW (TYPE_ADA_SIZE (type));
- new_size = round_up_to_align (new_size, BITS_PER_UNIT);
- if (new_size == size)
- return type;
-
- align = new_size & -new_size;
- TYPE_ALIGN (new_type) = MIN (TYPE_ALIGN (type), align);
- }
-
- TYPE_USER_ALIGN (new_type) = 1;
-
- /* Now copy the fields, keeping the position and size as we don't want
- to change the layout by propagating the packedness downwards. */
- for (old_field = TYPE_FIELDS (type); old_field;
- old_field = DECL_CHAIN (old_field))
- {
- tree new_field_type = TREE_TYPE (old_field);
- tree new_field, new_size;
-
- if (RECORD_OR_UNION_TYPE_P (new_field_type)
- && !TYPE_FAT_POINTER_P (new_field_type)
- && host_integerp (TYPE_SIZE (new_field_type), 1))
- new_field_type = make_packable_type (new_field_type, true);
-
- /* However, for the last field in a not already packed record type
- that is of an aggregate type, we need to use the RM size in the
- packable version of the record type, see finish_record_type. */
- if (!DECL_CHAIN (old_field)
- && !TYPE_PACKED (type)
- && RECORD_OR_UNION_TYPE_P (new_field_type)
- && !TYPE_FAT_POINTER_P (new_field_type)
- && !TYPE_CONTAINS_TEMPLATE_P (new_field_type)
- && TYPE_ADA_SIZE (new_field_type))
- new_size = TYPE_ADA_SIZE (new_field_type);
- else
- new_size = DECL_SIZE (old_field);
-
- new_field
- = create_field_decl (DECL_NAME (old_field), new_field_type, new_type,
- new_size, bit_position (old_field),
- TYPE_PACKED (type),
- !DECL_NONADDRESSABLE_P (old_field));
-
- DECL_INTERNAL_P (new_field) = DECL_INTERNAL_P (old_field);
- SET_DECL_ORIGINAL_FIELD_TO_FIELD (new_field, old_field);
- if (TREE_CODE (new_type) == QUAL_UNION_TYPE)
- DECL_QUALIFIER (new_field) = DECL_QUALIFIER (old_field);
-
- DECL_CHAIN (new_field) = field_list;
- field_list = new_field;
- }
-
- finish_record_type (new_type, nreverse (field_list), 2, false);
- relate_alias_sets (new_type, type, ALIAS_SET_COPY);
- SET_DECL_PARALLEL_TYPE (TYPE_STUB_DECL (new_type),
- DECL_PARALLEL_TYPE (TYPE_STUB_DECL (type)));
-
- /* If this is a padding record, we never want to make the size smaller
- than what was specified. For QUAL_UNION_TYPE, also copy the size. */
- if (TYPE_IS_PADDING_P (type) || TREE_CODE (type) == QUAL_UNION_TYPE)
- {
- TYPE_SIZE (new_type) = TYPE_SIZE (type);
- TYPE_SIZE_UNIT (new_type) = TYPE_SIZE_UNIT (type);
- new_size = size;
- }
- else
- {
- TYPE_SIZE (new_type) = bitsize_int (new_size);
- TYPE_SIZE_UNIT (new_type)
- = size_int ((new_size + BITS_PER_UNIT - 1) / BITS_PER_UNIT);
- }
-
- if (!TYPE_CONTAINS_TEMPLATE_P (type))
- SET_TYPE_ADA_SIZE (new_type, TYPE_ADA_SIZE (type));
-
- compute_record_mode (new_type);
-
- /* Try harder to get a packable type if necessary, for example
- in case the record itself contains a BLKmode field. */
- if (in_record && TYPE_MODE (new_type) == BLKmode)
- SET_TYPE_MODE (new_type,
- mode_for_size_tree (TYPE_SIZE (new_type), MODE_INT, 1));
-
- /* If neither the mode nor the size has shrunk, return the old type. */
- if (TYPE_MODE (new_type) == BLKmode && new_size >= size)
- return type;
-
- return new_type;
-}
-
-/* Ensure that TYPE has SIZE and ALIGN. Make and return a new padded type
- if needed. We have already verified that SIZE and TYPE are large enough.
- GNAT_ENTITY is used to name the resulting record and to issue a warning.
- IS_COMPONENT_TYPE is true if this is being done for the component type
- of an array. IS_USER_TYPE is true if we must complete the original type.
- DEFINITION is true if this type is being defined. SAME_RM_SIZE is true
- if the RM size of the resulting type is to be set to SIZE too; otherwise,
- it's set to the RM size of the original type. */
-
-tree
-maybe_pad_type (tree type, tree size, unsigned int align,
- Entity_Id gnat_entity, bool is_component_type,
- bool is_user_type, bool definition, bool same_rm_size)
-{
- tree orig_rm_size = same_rm_size ? NULL_TREE : rm_size (type);
- tree orig_size = TYPE_SIZE (type);
- tree record, field;
-
- /* If TYPE is a padded type, see if it agrees with any size and alignment
- we were given. If so, return the original type. Otherwise, strip
- off the padding, since we will either be returning the inner type
- or repadding it. If no size or alignment is specified, use that of
- the original padded type. */
- if (TYPE_IS_PADDING_P (type))
- {
- if ((!size
- || operand_equal_p (round_up (size,
- MAX (align, TYPE_ALIGN (type))),
- round_up (TYPE_SIZE (type),
- MAX (align, TYPE_ALIGN (type))),
- 0))
- && (align == 0 || align == TYPE_ALIGN (type)))
- return type;
-
- if (!size)
- size = TYPE_SIZE (type);
- if (align == 0)
- align = TYPE_ALIGN (type);
-
- type = TREE_TYPE (TYPE_FIELDS (type));
- orig_size = TYPE_SIZE (type);
- }
-
- /* If the size is either not being changed or is being made smaller (which
- is not done here and is only valid for bitfields anyway), show the size
- isn't changing. Likewise, clear the alignment if it isn't being
- changed. Then return if we aren't doing anything. */
- if (size
- && (operand_equal_p (size, orig_size, 0)
- || (TREE_CODE (orig_size) == INTEGER_CST
- && tree_int_cst_lt (size, orig_size))))
- size = NULL_TREE;
-
- if (align == TYPE_ALIGN (type))
- align = 0;
-
- if (align == 0 && !size)
- return type;
-
- /* If requested, complete the original type and give it a name. */
- if (is_user_type)
- create_type_decl (get_entity_name (gnat_entity), type,
- NULL, !Comes_From_Source (gnat_entity),
- !(TYPE_NAME (type)
- && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
- && DECL_IGNORED_P (TYPE_NAME (type))),
- gnat_entity);
-
- /* We used to modify the record in place in some cases, but that could
- generate incorrect debugging information. So make a new record
- type and name. */
- record = make_node (RECORD_TYPE);
- TYPE_PADDING_P (record) = 1;
-
- if (Present (gnat_entity))
- TYPE_NAME (record) = create_concat_name (gnat_entity, "PAD");
-
- TYPE_VOLATILE (record)
- = Present (gnat_entity) && Treat_As_Volatile (gnat_entity);
-
- TYPE_ALIGN (record) = align;
- TYPE_SIZE (record) = size ? size : orig_size;
- TYPE_SIZE_UNIT (record)
- = convert (sizetype,
- size_binop (CEIL_DIV_EXPR, TYPE_SIZE (record),
- bitsize_unit_node));
-
- /* If we are changing the alignment and the input type is a record with
- BLKmode and a small constant size, try to make a form that has an
- integral mode. This might allow the padding record to also have an
- integral mode, which will be much more efficient. There is no point
- in doing so if a size is specified unless it is also a small constant
- size and it is incorrect to do so if we cannot guarantee that the mode
- will be naturally aligned since the field must always be addressable.
-
- ??? This might not always be a win when done for a stand-alone object:
- since the nominal and the effective type of the object will now have
- different modes, a VIEW_CONVERT_EXPR will be required for converting
- between them and it might be hard to overcome afterwards, including
- at the RTL level when the stand-alone object is accessed as a whole. */
- if (align != 0
- && RECORD_OR_UNION_TYPE_P (type)
- && TYPE_MODE (type) == BLKmode
- && !TYPE_BY_REFERENCE_P (type)
- && TREE_CODE (orig_size) == INTEGER_CST
- && !TREE_OVERFLOW (orig_size)
- && compare_tree_int (orig_size, MAX_FIXED_MODE_SIZE) <= 0
- && (!size
- || (TREE_CODE (size) == INTEGER_CST
- && compare_tree_int (size, MAX_FIXED_MODE_SIZE) <= 0)))
- {
- tree packable_type = make_packable_type (type, true);
- if (TYPE_MODE (packable_type) != BLKmode
- && align >= TYPE_ALIGN (packable_type))
- type = packable_type;
- }
-
- /* Now create the field with the original size. */
- field = create_field_decl (get_identifier ("F"), type, record, orig_size,
- bitsize_zero_node, 0, 1);
- DECL_INTERNAL_P (field) = 1;
-
- /* Do not emit debug info until after the auxiliary record is built. */
- finish_record_type (record, field, 1, false);
-
- /* Set the same size for its RM size if requested; otherwise reuse
- the RM size of the original type. */
- SET_TYPE_ADA_SIZE (record, same_rm_size ? size : orig_rm_size);
-
- /* Unless debugging information isn't being written for the input type,
- write a record that shows what we are a subtype of and also make a
- variable that indicates our size, if still variable. */
- if (TREE_CODE (orig_size) != INTEGER_CST
- && TYPE_NAME (record)
- && TYPE_NAME (type)
- && !(TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
- && DECL_IGNORED_P (TYPE_NAME (type))))
- {
- tree marker = make_node (RECORD_TYPE);
- tree name = TYPE_NAME (record);
- tree orig_name = TYPE_NAME (type);
-
- if (TREE_CODE (name) == TYPE_DECL)
- name = DECL_NAME (name);
-
- if (TREE_CODE (orig_name) == TYPE_DECL)
- orig_name = DECL_NAME (orig_name);
-
- TYPE_NAME (marker) = concat_name (name, "XVS");
- finish_record_type (marker,
- create_field_decl (orig_name,
- build_reference_type (type),
- marker, NULL_TREE, NULL_TREE,
- 0, 0),
- 0, true);
-
- add_parallel_type (record, marker);
-
- if (definition && size && TREE_CODE (size) != INTEGER_CST)
- TYPE_SIZE_UNIT (marker)
- = create_var_decl (concat_name (name, "XVZ"), NULL_TREE, sizetype,
- TYPE_SIZE_UNIT (record), false, false, false,
- false, NULL, gnat_entity);
- }
-
- rest_of_record_type_compilation (record);
-
- /* If the size was widened explicitly, maybe give a warning. Take the
- original size as the maximum size of the input if there was an
- unconstrained record involved and round it up to the specified alignment,
- if one was specified. But don't do it if we are just annotating types
- and the type is tagged, since tagged types aren't fully laid out in this
- mode. */
- if (CONTAINS_PLACEHOLDER_P (orig_size))
- orig_size = max_size (orig_size, true);
-
- if (align)
- orig_size = round_up (orig_size, align);
-
- if (Present (gnat_entity)
- && size
- && TREE_CODE (size) != MAX_EXPR
- && TREE_CODE (size) != COND_EXPR
- && !operand_equal_p (size, orig_size, 0)
- && !(TREE_CODE (size) == INTEGER_CST
- && TREE_CODE (orig_size) == INTEGER_CST
- && (TREE_OVERFLOW (size)
- || TREE_OVERFLOW (orig_size)
- || tree_int_cst_lt (size, orig_size)))
- && !(type_annotate_only && Is_Tagged_Type (Etype (gnat_entity))))
- {
- Node_Id gnat_error_node = Empty;
-
- if (Is_Packed_Array_Type (gnat_entity))
- gnat_entity = Original_Array_Type (gnat_entity);
-
- if ((Ekind (gnat_entity) == E_Component
- || Ekind (gnat_entity) == E_Discriminant)
- && Present (Component_Clause (gnat_entity)))
- gnat_error_node = Last_Bit (Component_Clause (gnat_entity));
- else if (Present (Size_Clause (gnat_entity)))
- gnat_error_node = Expression (Size_Clause (gnat_entity));
-
- /* Generate message only for entities that come from source, since
- if we have an entity created by expansion, the message will be
- generated for some other corresponding source entity. */
- if (Comes_From_Source (gnat_entity))
- {
- if (Present (gnat_error_node))
- post_error_ne_tree ("{^ }bits of & unused?",
- gnat_error_node, gnat_entity,
- size_diffop (size, orig_size));
- else if (is_component_type)
- post_error_ne_tree ("component of& padded{ by ^ bits}?",
- gnat_entity, gnat_entity,
- size_diffop (size, orig_size));
- }
- }
-
- return record;
-}
-
/* Given a GNU tree and a GNAT list of choices, generate an expression to test
the value passed against the list of choices. */
@@ -8245,95 +7705,6 @@ set_rm_size (Uint uint_size, tree gnu_type, Entity_Id gnat_entity)
SET_TYPE_ADA_SIZE (gnu_type, size);
}
-/* Given a type TYPE, return a new type whose size is appropriate for SIZE.
- If TYPE is the best type, return it. Otherwise, make a new type. We
- only support new integral and pointer types. FOR_BIASED is true if
- we are making a biased type. */
-
-static tree
-make_type_from_size (tree type, tree size_tree, bool for_biased)
-{
- unsigned HOST_WIDE_INT size;
- bool biased_p;
- tree new_type;
-
- /* If size indicates an error, just return TYPE to avoid propagating
- the error. Likewise if it's too large to represent. */
- if (!size_tree || !host_integerp (size_tree, 1))
- return type;
-
- size = tree_low_cst (size_tree, 1);
-
- switch (TREE_CODE (type))
- {
- case INTEGER_TYPE:
- case ENUMERAL_TYPE:
- case BOOLEAN_TYPE:
- biased_p = (TREE_CODE (type) == INTEGER_TYPE
- && TYPE_BIASED_REPRESENTATION_P (type));
-
- /* Integer types with precision 0 are forbidden. */
- if (size == 0)
- size = 1;
-
- /* Only do something if the type is not a packed array type and
- doesn't already have the proper size. */
- if (TYPE_IS_PACKED_ARRAY_TYPE_P (type)
- || (TYPE_PRECISION (type) == size && biased_p == for_biased))
- break;
-
- biased_p |= for_biased;
- if (size > LONG_LONG_TYPE_SIZE)
- size = LONG_LONG_TYPE_SIZE;
-
- if (TYPE_UNSIGNED (type) || biased_p)
- new_type = make_unsigned_type (size);
- else
- new_type = make_signed_type (size);
- TREE_TYPE (new_type) = TREE_TYPE (type) ? TREE_TYPE (type) : type;
- SET_TYPE_RM_MIN_VALUE (new_type,
- convert (TREE_TYPE (new_type),
- TYPE_MIN_VALUE (type)));
- SET_TYPE_RM_MAX_VALUE (new_type,
- convert (TREE_TYPE (new_type),
- TYPE_MAX_VALUE (type)));
- /* Copy the name to show that it's essentially the same type and
- not a subrange type. */
- TYPE_NAME (new_type) = TYPE_NAME (type);
- TYPE_BIASED_REPRESENTATION_P (new_type) = biased_p;
- SET_TYPE_RM_SIZE (new_type, bitsize_int (size));
- return new_type;
-
- case RECORD_TYPE:
- /* Do something if this is a fat pointer, in which case we
- may need to return the thin pointer. */
- if (TYPE_FAT_POINTER_P (type) && size < POINTER_SIZE * 2)
- {
- enum machine_mode p_mode = mode_for_size (size, MODE_INT, 0);
- if (!targetm.valid_pointer_mode (p_mode))
- p_mode = ptr_mode;
- return
- build_pointer_type_for_mode
- (TYPE_OBJECT_RECORD_TYPE (TYPE_UNCONSTRAINED_ARRAY (type)),
- p_mode, 0);
- }
- break;
-
- case POINTER_TYPE:
- /* Only do something if this is a thin pointer, in which case we
- may need to return the fat pointer. */
- if (TYPE_IS_THIN_POINTER_P (type) && size >= POINTER_SIZE * 2)
- return
- build_pointer_type (TYPE_UNCONSTRAINED_ARRAY (TREE_TYPE (type)));
- break;
-
- default:
- break;
- }
-
- return type;
-}
-
/* ALIGNMENT is a Uint giving the alignment specified for GNAT_ENTITY,
a type or object whose present alignment is ALIGN. If this alignment is
valid, return it. Otherwise, give an error and return ALIGN. */
@@ -8426,14 +7797,6 @@ validate_alignment (Uint alignment, Entity_Id gnat_entity, unsigned int align)
return align;
}
-
-/* Return the smallest alignment not less than SIZE. */
-
-static unsigned int
-ceil_alignment (unsigned HOST_WIDE_INT size)
-{
- return (unsigned int) 1 << (floor_log2 (size - 1) + 1);
-}
/* Verify that OBJECT, a type or decl, is something we can implement
atomically. If not, give an error for GNAT_ENTITY. COMP_P is true
diff --git a/gcc/ada/gcc-interface/gigi.h b/gcc/ada/gcc-interface/gigi.h
index fb1106f793e..e2aac80b665 100644
--- a/gcc/ada/gcc-interface/gigi.h
+++ b/gcc/ada/gcc-interface/gigi.h
@@ -123,18 +123,48 @@ extern tree get_minimal_subprog_decl (Entity_Id gnat_entity);
extern tree make_aligning_type (tree type, unsigned int align, tree size,
unsigned int base_align, int room);
+/* TYPE is a RECORD_TYPE, UNION_TYPE or QUAL_UNION_TYPE that is being used
+ as the field type of a packed record if IN_RECORD is true, or as the
+ component type of a packed array if IN_RECORD is false. See if we can
+ rewrite it either as a type that has a non-BLKmode, which we can pack
+ tighter in the packed record case, or as a smaller type. If so, return
+ the new type. If not, return the original type. */
+extern tree make_packable_type (tree type, bool in_record);
+
+/* Given a type TYPE, return a new type whose size is appropriate for SIZE.
+ If TYPE is the best type, return it. Otherwise, make a new type. We
+ only support new integral and pointer types. FOR_BIASED is true if
+ we are making a biased type. */
+extern tree make_type_from_size (tree type, tree size_tree, bool for_biased);
+
/* Ensure that TYPE has SIZE and ALIGN. Make and return a new padded type
if needed. We have already verified that SIZE and TYPE are large enough.
GNAT_ENTITY is used to name the resulting record and to issue a warning.
- IS_COMPONENT_TYPE is true if this is being done for the component type
- of an array. IS_USER_TYPE is true if we must complete the original type.
- DEFINITION is true if this type is being defined. SAME_RM_SIZE is true
- if the RM size of the resulting type is to be set to SIZE too; otherwise,
- it's set to the RM size of the original type. */
+ IS_COMPONENT_TYPE is true if this is being done for the component type of
+ an array. IS_USER_TYPE is true if the original type needs to be completed.
+ DEFINITION is true if this type is being defined. SET_RM_SIZE is true if
+ the RM size of the resulting type is to be set to SIZE too. */
extern tree maybe_pad_type (tree type, tree size, unsigned int align,
Entity_Id gnat_entity, bool is_component_type,
bool is_user_type, bool definition,
- bool same_rm_size);
+ bool set_rm_size);
+
+enum alias_set_op
+{
+ ALIAS_SET_COPY,
+ ALIAS_SET_SUBSET,
+ ALIAS_SET_SUPERSET
+};
+
+/* Relate the alias sets of GNU_NEW_TYPE and GNU_OLD_TYPE according to OP.
+ If this is a multi-dimensional array type, do this recursively.
+
+ OP may be
+ - ALIAS_SET_COPY: the new set is made a copy of the old one.
+ - ALIAS_SET_SUPERSET: the new set is made a superset of the old one.
+ - ALIAS_SET_SUBSET: the new set is made a subset of the old one. */
+extern void relate_alias_sets (tree gnu_new_type, tree gnu_old_type,
+ enum alias_set_op op);
/* Given a GNU tree and a GNAT list of choices, generate an expression to test
the value passed against the list of choices. */
@@ -497,11 +527,11 @@ extern tree convert_to_index_type (tree expr);
/* Routines created solely for the tree translator's sake. Their prototypes
can be changed as desired. */
-/* Initialize the association of GNAT nodes to GCC trees. */
-extern void init_gnat_to_gnu (void);
+/* Initialize data structures of the utils.c module. */
+extern void init_gnat_utils (void);
-/* Destroy the association of GNAT nodes to GCC trees. */
-extern void destroy_gnat_to_gnu (void);
+/* Destroy data structures of the utils.c module. */
+extern void destroy_gnat_utils (void);
/* GNAT_ENTITY is a GNAT tree node for a defining identifier.
GNU_DECL is the GCC tree which is to be associated with
@@ -519,12 +549,6 @@ extern tree get_gnu_tree (Entity_Id gnat_entity);
/* Return nonzero if a GCC tree has been associated with GNAT_ENTITY. */
extern bool present_gnu_tree (Entity_Id gnat_entity);
-/* Initialize the association of GNAT nodes to GCC trees as dummies. */
-extern void init_dummy_type (void);
-
-/* Destroy the association of GNAT nodes to GCC trees as dummies. */
-extern void destroy_dummy_type (void);
-
/* Make a dummy type corresponding to GNAT_TYPE. */
extern tree make_dummy_type (Entity_Id gnat_type);
@@ -1008,3 +1032,9 @@ extern void enumerate_modes (void (*f) (const char *, int, int, int, int, int,
/* Convenient shortcuts. */
#define VECTOR_TYPE_P(TYPE) (TREE_CODE (TYPE) == VECTOR_TYPE)
+
+static inline unsigned HOST_WIDE_INT
+ceil_pow2 (unsigned HOST_WIDE_INT x)
+{
+ return (unsigned HOST_WIDE_INT) 1 << (floor_log2 (x - 1) + 1);
+}
diff --git a/gcc/ada/gcc-interface/misc.c b/gcc/ada/gcc-interface/misc.c
index 08ca5bb92b3..974827a787f 100644
--- a/gcc/ada/gcc-interface/misc.c
+++ b/gcc/ada/gcc-interface/misc.c
@@ -235,6 +235,10 @@ gnat_post_options (const char **pfilename ATTRIBUTE_UNUSED)
/* No psABI change warnings for Ada. */
warn_psabi = 0;
+ /* No caret by default for Ada. */
+ if (!global_options_set.x_flag_diagnostics_show_caret)
+ global_dc->show_caret = false;
+
optimize = global_options.x_optimize;
optimize_size = global_options.x_optimize_size;
flag_compare_debug = global_options.x_flag_compare_debug;
diff --git a/gcc/ada/gcc-interface/trans.c b/gcc/ada/gcc-interface/trans.c
index 3698dcaf2a4..dd1669b7977 100644
--- a/gcc/ada/gcc-interface/trans.c
+++ b/gcc/ada/gcc-interface/trans.c
@@ -338,8 +338,7 @@ gigi (Node_Id gnat_root, int max_gnat_node, int number_name ATTRIBUTE_UNUSED,
/* Initialize ourselves. */
init_code_table ();
- init_gnat_to_gnu ();
- init_dummy_type ();
+ init_gnat_utils ();
/* If we are just annotating types, give VOID_TYPE zero sizes to avoid
errors. */
@@ -503,7 +502,12 @@ gigi (Node_Id gnat_root, int max_gnat_node, int number_name ATTRIBUTE_UNUSED,
= create_subprog_decl (get_identifier ("__gnat_reraise_zcx"), NULL_TREE,
ftype, NULL_TREE, false, true, true, true, NULL,
Empty);
+ /* Indicate that these never return. */
DECL_IGNORED_P (reraise_zcx_decl) = 1;
+ TREE_THIS_VOLATILE (reraise_zcx_decl) = 1;
+ TREE_SIDE_EFFECTS (reraise_zcx_decl) = 1;
+ TREE_TYPE (reraise_zcx_decl)
+ = build_qualified_type (TREE_TYPE (reraise_zcx_decl), TYPE_QUAL_VOLATILE);
/* If in no exception handlers mode, all raise statements are redirected to
__gnat_last_chance_handler. No need to redefine raise_nodefer_decl since
@@ -551,6 +555,7 @@ gigi (Node_Id gnat_root, int max_gnat_node, int number_name ATTRIBUTE_UNUSED,
build_function_type_list (build_pointer_type (except_type_node),
NULL_TREE),
NULL_TREE, false, true, true, true, NULL, Empty);
+ DECL_IGNORED_P (get_excptr_decl) = 1;
raise_nodefer_decl
= create_subprog_decl
@@ -685,8 +690,7 @@ gigi (Node_Id gnat_root, int max_gnat_node, int number_name ATTRIBUTE_UNUSED,
}
/* Destroy ourselves. */
- destroy_gnat_to_gnu ();
- destroy_dummy_type ();
+ destroy_gnat_utils ();
/* We cannot track the location of errors past this point. */
error_gnat_node = Empty;
@@ -1501,34 +1505,25 @@ Attribute_to_gnu (Node_Id gnat_node, tree *gnu_result_type_p, int attribute)
gnu_type = TREE_TYPE (DECL_CHAIN (TYPE_FIELDS (gnu_type)));
}
- /* If we're looking for the size of a field, return the field size.
- Otherwise, if the prefix is an object, or if we're looking for
- 'Object_Size or 'Max_Size_In_Storage_Elements, the result is the
- GCC size of the type. Otherwise, it is the RM size of the type. */
+ /* If we're looking for the size of a field, return the field size. */
if (TREE_CODE (gnu_prefix) == COMPONENT_REF)
gnu_result = DECL_SIZE (TREE_OPERAND (gnu_prefix, 1));
- else if (TREE_CODE (gnu_prefix) != TYPE_DECL
+
+ /* Otherwise, if the prefix is an object, or if we are looking for
+ 'Object_Size or 'Max_Size_In_Storage_Elements, the result is the
+ GCC size of the type. We make an exception for padded objects,
+ as we do not take into account alignment promotions for the size.
+ This is in keeping with the object case of gnat_to_gnu_entity. */
+ else if ((TREE_CODE (gnu_prefix) != TYPE_DECL
+ && !(TYPE_IS_PADDING_P (gnu_type)
+ && TREE_CODE (gnu_expr) == COMPONENT_REF))
|| attribute == Attr_Object_Size
|| attribute == Attr_Max_Size_In_Storage_Elements)
{
- /* If the prefix is an object of a padded type, the GCC size isn't
- relevant to the programmer. Normally what we want is the RM size,
- which was set from the specified size, but if it was not set, we
- want the size of the field. Using the MAX of those two produces
- the right result in all cases. Don't use the size of the field
- if it's self-referential, since that's never what's wanted. */
- if (TREE_CODE (gnu_prefix) != TYPE_DECL
- && TYPE_IS_PADDING_P (gnu_type)
- && TREE_CODE (gnu_expr) == COMPONENT_REF)
- {
- gnu_result = rm_size (gnu_type);
- if (!CONTAINS_PLACEHOLDER_P
- (DECL_SIZE (TREE_OPERAND (gnu_expr, 1))))
- gnu_result
- = size_binop (MAX_EXPR, gnu_result,
- DECL_SIZE (TREE_OPERAND (gnu_expr, 1)));
- }
- else if (Nkind (Prefix (gnat_node)) == N_Explicit_Dereference)
+ /* If this is a dereference and we have a special dynamic constrained
+ subtype on the prefix, use it to compute the size; otherwise, use
+ the designated subtype. */
+ if (Nkind (Prefix (gnat_node)) == N_Explicit_Dereference)
{
Node_Id gnat_deref = Prefix (gnat_node);
Node_Id gnat_actual_subtype
@@ -1547,12 +1542,12 @@ Attribute_to_gnu (Node_Id gnat_node, tree *gnu_result_type_p, int attribute)
get_identifier ("SIZE"),
false);
}
-
- gnu_result = TYPE_SIZE (gnu_type);
}
- else
- gnu_result = TYPE_SIZE (gnu_type);
+
+ gnu_result = TYPE_SIZE (gnu_type);
}
+
+ /* Otherwise, the result is the RM size of the type. */
else
gnu_result = rm_size (gnu_type);
@@ -2574,13 +2569,19 @@ Loop_Statement_to_gnu (Node_Id gnat_node)
i++)
{
tree low_ok
- = build_binary_op (GE_EXPR, boolean_type_node,
- convert (rci->type, gnu_low),
- rci->low_bound);
+ = rci->low_bound
+ ? build_binary_op (GE_EXPR, boolean_type_node,
+ convert (rci->type, gnu_low),
+ rci->low_bound)
+ : boolean_true_node;
+
tree high_ok
- = build_binary_op (LE_EXPR, boolean_type_node,
- convert (rci->type, gnu_high),
- rci->high_bound);
+ = rci->high_bound
+ ? build_binary_op (LE_EXPR, boolean_type_node,
+ convert (rci->type, gnu_high),
+ rci->high_bound)
+ : boolean_true_node;
+
tree range_ok
= build_binary_op (TRUTH_ANDIF_EXPR, boolean_type_node,
low_ok, high_ok);
@@ -2805,7 +2806,7 @@ finalize_nrv_r (tree *tp, int *walk_subtrees, void *data)
tree ret_val = TREE_OPERAND (TREE_OPERAND (t, 0), 1), init_expr;
/* If this is the temporary created for a return value with variable
- size in call_to_gnu, we replace the RHS with the init expression. */
+ size in Call_to_gnu, we replace the RHS with the init expression. */
if (TREE_CODE (ret_val) == COMPOUND_EXPR
&& TREE_CODE (TREE_OPERAND (ret_val, 0)) == INIT_EXPR
&& TREE_OPERAND (TREE_OPERAND (ret_val, 0), 0)
@@ -3133,7 +3134,7 @@ build_return_expr (tree ret_obj, tree ret_val)
&& aggregate_value_p (operation_type, current_function_decl))
{
/* Recognize the temporary created for a return value with variable
- size in call_to_gnu. We want to eliminate it if possible. */
+ size in Call_to_gnu. We want to eliminate it if possible. */
if (TREE_CODE (ret_val) == COMPOUND_EXPR
&& TREE_CODE (TREE_OPERAND (ret_val, 0)) == INIT_EXPR
&& TREE_OPERAND (TREE_OPERAND (ret_val, 0), 0)
@@ -3594,7 +3595,7 @@ create_init_temporary (const char *prefix, tree gnu_init, tree *gnu_init_stmt,
requires atomic synchronization. */
static tree
-call_to_gnu (Node_Id gnat_node, tree *gnu_result_type_p, tree gnu_target,
+Call_to_gnu (Node_Id gnat_node, tree *gnu_result_type_p, tree gnu_target,
bool atomic_sync)
{
const bool function_call = (Nkind (gnat_node) == N_Function_Call);
@@ -4762,6 +4763,134 @@ Compilation_Unit_to_gnu (Node_Id gnat_node)
invalidate_global_renaming_pointers ();
}
+/* Subroutine of gnat_to_gnu to translate gnat_node, an N_Raise_xxx_Error,
+   to a GCC tree, which is returned.  GNU_RESULT_TYPE_P is a pointer to
+   where we should place the result type.  The label to branch to for the
+   exception, if any, is obtained via get_exception_label.  */
+
+static tree
+Raise_Error_to_gnu (Node_Id gnat_node, tree *gnu_result_type_p)
+{
+ const Node_Kind kind = Nkind (gnat_node);
+ const int reason = UI_To_Int (Reason (gnat_node));
+ const Node_Id gnat_cond = Condition (gnat_node);
+ const bool with_extra_info
+ = Exception_Extra_Info
+ && !No_Exception_Handlers_Set ()
+ && !get_exception_label (kind);
+ tree gnu_result = NULL_TREE, gnu_cond = NULL_TREE;
+
+ *gnu_result_type_p = get_unpadded_type (Etype (gnat_node));
+
+ switch (reason)
+ {
+ case CE_Access_Check_Failed:
+ if (with_extra_info)
+ gnu_result = build_call_raise_column (reason, gnat_node);
+ break;
+
+ case CE_Index_Check_Failed:
+ case CE_Range_Check_Failed:
+ case CE_Invalid_Data:
+ if (Present (gnat_cond) && Nkind (gnat_cond) == N_Op_Not)
+ {
+ Node_Id gnat_range, gnat_index, gnat_type;
+ tree gnu_index, gnu_low_bound, gnu_high_bound;
+ struct range_check_info_d *rci;
+
+ switch (Nkind (Right_Opnd (gnat_cond)))
+ {
+ case N_In:
+ gnat_range = Right_Opnd (Right_Opnd (gnat_cond));
+ gcc_assert (Nkind (gnat_range) == N_Range);
+ gnu_low_bound = gnat_to_gnu (Low_Bound (gnat_range));
+ gnu_high_bound = gnat_to_gnu (High_Bound (gnat_range));
+ break;
+
+ case N_Op_Ge:
+ gnu_low_bound = gnat_to_gnu (Right_Opnd (Right_Opnd (gnat_cond)));
+ gnu_high_bound = NULL_TREE;
+ break;
+
+ case N_Op_Le:
+ gnu_low_bound = NULL_TREE;
+ gnu_high_bound = gnat_to_gnu (Right_Opnd (Right_Opnd (gnat_cond)));
+ break;
+
+ default:
+ goto common;
+ }
+
+ gnat_index = Left_Opnd (Right_Opnd (gnat_cond));
+ gnat_type = Etype (gnat_index);
+ gnu_index = gnat_to_gnu (gnat_index);
+
+ if (with_extra_info
+ && gnu_low_bound
+ && gnu_high_bound
+ && Known_Esize (gnat_type)
+ && UI_To_Int (Esize (gnat_type)) <= 32)
+ gnu_result
+ = build_call_raise_range (reason, gnat_node, gnu_index,
+ gnu_low_bound, gnu_high_bound);
+
+ /* If loop unswitching is enabled, we try to compute invariant
+ conditions for checks applied to iteration variables, i.e.
+ conditions that are both independent of the variable and
+ necessary in order for the check to fail in the course of
+ some iteration, and prepend them to the original condition
+ of the checks. This will make it possible later for the
+ loop unswitching pass to replace the loop with two loops,
+ one of which has the checks eliminated and the other has
+ the original checks reinstated, and a run time selection.
+ The former loop will be suitable for vectorization. */
+ if (flag_unswitch_loops
+ && (!gnu_low_bound
+ || (gnu_low_bound = gnat_invariant_expr (gnu_low_bound)))
+ && (!gnu_high_bound
+ || (gnu_high_bound = gnat_invariant_expr (gnu_high_bound)))
+ && (rci = push_range_check_info (gnu_index)))
+ {
+ rci->low_bound = gnu_low_bound;
+ rci->high_bound = gnu_high_bound;
+ rci->type = gnat_to_gnu_type (gnat_type);
+ rci->invariant_cond = build1 (SAVE_EXPR, boolean_type_node,
+ boolean_true_node);
+ gnu_cond = build_binary_op (TRUTH_ANDIF_EXPR,
+ boolean_type_node,
+ rci->invariant_cond,
+ gnat_to_gnu (gnat_cond));
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+
+common:
+ if (!gnu_result)
+ gnu_result = build_call_raise (reason, gnat_node, kind);
+ set_expr_location_from_node (gnu_result, gnat_node);
+
+ /* If the type is VOID, this is a statement, so we need to generate the code
+ for the call. Handle a condition, if there is one. */
+ if (VOID_TYPE_P (*gnu_result_type_p))
+ {
+ if (Present (gnat_cond))
+ {
+ if (!gnu_cond)
+ gnu_cond = gnat_to_gnu (gnat_cond);
+ gnu_result = build3 (COND_EXPR, void_type_node, gnu_cond, gnu_result,
+ alloc_stmt_list ());
+ }
+ }
+ else
+ gnu_result = build1 (NULL_EXPR, *gnu_result_type_p, gnu_result);
+
+ return gnu_result;
+}
+
/* Return true if GNAT_NODE is on the LHS of an assignment or an actual
parameter of a call. */
@@ -5960,7 +6089,7 @@ gnat_to_gnu (Node_Id gnat_node)
N_Raise_Storage_Error);
else if (Nkind (Expression (gnat_node)) == N_Function_Call)
gnu_result
- = call_to_gnu (Expression (gnat_node), &gnu_result_type, gnu_lhs,
+ = Call_to_gnu (Expression (gnat_node), &gnu_result_type, gnu_lhs,
atomic_sync_required_p (Name (gnat_node)));
else
{
@@ -6249,7 +6378,7 @@ gnat_to_gnu (Node_Id gnat_node)
case N_Function_Call:
case N_Procedure_Call_Statement:
- gnu_result = call_to_gnu (gnat_node, &gnu_result_type, NULL_TREE, false);
+ gnu_result = Call_to_gnu (gnat_node, &gnu_result_type, NULL_TREE, false);
break;
/************************/
@@ -6672,105 +6801,10 @@ gnat_to_gnu (Node_Id gnat_node)
case N_Raise_Constraint_Error:
case N_Raise_Program_Error:
case N_Raise_Storage_Error:
- {
- const int reason = UI_To_Int (Reason (gnat_node));
- const Node_Id gnat_cond = Condition (gnat_node);
- const bool with_extra_info = Exception_Extra_Info
- && !No_Exception_Handlers_Set ()
- && !get_exception_label (kind);
- tree gnu_cond = NULL_TREE;
-
- if (type_annotate_only)
- {
- gnu_result = alloc_stmt_list ();
- break;
- }
-
- gnu_result_type = get_unpadded_type (Etype (gnat_node));
-
- switch (reason)
- {
- case CE_Access_Check_Failed:
- if (with_extra_info)
- gnu_result = build_call_raise_column (reason, gnat_node);
- break;
-
- case CE_Index_Check_Failed:
- case CE_Range_Check_Failed:
- case CE_Invalid_Data:
- if (Present (gnat_cond)
- && Nkind (gnat_cond) == N_Op_Not
- && Nkind (Right_Opnd (gnat_cond)) == N_In
- && Nkind (Right_Opnd (Right_Opnd (gnat_cond))) == N_Range)
- {
- Node_Id gnat_index = Left_Opnd (Right_Opnd (gnat_cond));
- Node_Id gnat_type = Etype (gnat_index);
- Node_Id gnat_range = Right_Opnd (Right_Opnd (gnat_cond));
- tree gnu_index = gnat_to_gnu (gnat_index);
- tree gnu_low_bound = gnat_to_gnu (Low_Bound (gnat_range));
- tree gnu_high_bound = gnat_to_gnu (High_Bound (gnat_range));
- struct range_check_info_d *rci;
-
- if (with_extra_info
- && Known_Esize (gnat_type)
- && UI_To_Int (Esize (gnat_type)) <= 32)
- gnu_result
- = build_call_raise_range (reason, gnat_node, gnu_index,
- gnu_low_bound, gnu_high_bound);
-
- /* If loop unswitching is enabled, we try to compute invariant
- conditions for checks applied to iteration variables, i.e.
- conditions that are both independent of the variable and
- necessary in order for the check to fail in the course of
- some iteration, and prepend them to the original condition
- of the checks. This will make it possible later for the
- loop unswitching pass to replace the loop with two loops,
- one of which has the checks eliminated and the other has
- the original checks reinstated, and a run time selection.
- The former loop will be suitable for vectorization. */
- if (flag_unswitch_loops
- && (gnu_low_bound = gnat_invariant_expr (gnu_low_bound))
- && (gnu_high_bound = gnat_invariant_expr (gnu_high_bound))
- && (rci = push_range_check_info (gnu_index)))
- {
- rci->low_bound = gnu_low_bound;
- rci->high_bound = gnu_high_bound;
- rci->type = gnat_to_gnu_type (gnat_type);
- rci->invariant_cond = build1 (SAVE_EXPR, boolean_type_node,
- boolean_true_node);
- gnu_cond = build_binary_op (TRUTH_ANDIF_EXPR,
- boolean_type_node,
- rci->invariant_cond,
- gnat_to_gnu (gnat_cond));
- }
- }
- break;
-
- default:
- break;
- }
-
- if (gnu_result == error_mark_node)
- gnu_result = build_call_raise (reason, gnat_node, kind);
-
- set_expr_location_from_node (gnu_result, gnat_node);
-
- /* If the type is VOID, this is a statement, so we need to generate
- the code for the call. Handle a condition, if there is one. */
- if (VOID_TYPE_P (gnu_result_type))
- {
- if (Present (gnat_cond))
- {
- if (!gnu_cond)
- gnu_cond = gnat_to_gnu (gnat_cond);
- gnu_result
- = build3 (COND_EXPR, void_type_node, gnu_cond, gnu_result,
- alloc_stmt_list ());
- }
- }
- else
- gnu_result = build1 (NULL_EXPR, gnu_result_type, gnu_result);
- }
+ if (type_annotate_only)
+ gnu_result = alloc_stmt_list ();
+ else
+ gnu_result = Raise_Error_to_gnu (gnat_node, &gnu_result_type);
break;
case N_Validate_Unchecked_Conversion:
@@ -6921,15 +6955,10 @@ gnat_to_gnu (Node_Id gnat_node)
else if (TREE_CODE (gnu_result) == CALL_EXPR
&& TYPE_IS_PADDING_P (TREE_TYPE (gnu_result))
+ && TREE_TYPE (TYPE_FIELDS (TREE_TYPE (gnu_result)))
+ == gnu_result_type
&& CONTAINS_PLACEHOLDER_P (TYPE_SIZE (gnu_result_type)))
- {
- /* ??? We need to convert if the padded type has fixed size because
- gnat_types_compatible_p will say that padded types are compatible
- but the gimplifier will not and, therefore, will ultimately choke
- if there isn't a conversion added early. */
- if (TREE_CODE (TYPE_SIZE (TREE_TYPE (gnu_result))) == INTEGER_CST)
- gnu_result = convert (gnu_result_type, gnu_result);
- }
+ ;
else if (TREE_TYPE (gnu_result) != gnu_result_type)
gnu_result = convert (gnu_result_type, gnu_result);
diff --git a/gcc/ada/gcc-interface/utils.c b/gcc/ada/gcc-interface/utils.c
index 6d267e0ef4e..5d264e01ac3 100644
--- a/gcc/ada/gcc-interface/utils.c
+++ b/gcc/ada/gcc-interface/utils.c
@@ -58,10 +58,6 @@
#include "ada-tree.h"
#include "gigi.h"
-#ifndef MAX_BITS_PER_WORD
-#define MAX_BITS_PER_WORD BITS_PER_WORD
-#endif
-
/* If nonzero, pretend we are allocating at global level. */
int force_global;
@@ -215,6 +211,21 @@ static GTY(()) VEC(tree,gc) *global_renaming_pointers;
/* A chain of unused BLOCK nodes. */
static GTY((deletable)) tree free_block_chain;
+static int pad_type_hash_marked_p (const void *p);
+static hashval_t pad_type_hash_hash (const void *p);
+static int pad_type_hash_eq (const void *p1, const void *p2);
+
+/* A hash table of padded types. It is modelled on the generic type
+ hash table in tree.c, which must thus be used as a reference. */
+struct GTY(()) pad_type_hash {
+ unsigned long hash;
+ tree type;
+};
+
+static GTY ((if_marked ("pad_type_hash_marked_p"),
+ param_is (struct pad_type_hash)))
+ htab_t pad_type_hash_table;
+
static tree merge_sizes (tree, tree, tree, bool, bool);
static tree compute_related_constant (tree, tree);
static tree split_plus (tree, tree *);
@@ -223,23 +234,43 @@ static tree convert_to_fat_pointer (tree, tree);
static bool potential_alignment_gap (tree, tree, tree);
static void process_attributes (tree, struct attrib *);
-/* Initialize the association of GNAT nodes to GCC trees. */
+/* Initialize data structures of the utils.c module. */
void
-init_gnat_to_gnu (void)
+init_gnat_utils (void)
{
+ /* Initialize the association of GNAT nodes to GCC trees. */
associate_gnat_to_gnu = ggc_alloc_cleared_vec_tree (max_gnat_nodes);
+
+ /* Initialize the association of GNAT nodes to GCC trees as dummies. */
+ dummy_node_table = ggc_alloc_cleared_vec_tree (max_gnat_nodes);
+
+ /* Initialize the hash table of padded types. */
+ pad_type_hash_table = htab_create_ggc (512, pad_type_hash_hash,
+ pad_type_hash_eq, 0);
}
-/* Destroy the association of GNAT nodes to GCC trees. */
+/* Destroy data structures of the utils.c module. */
void
-destroy_gnat_to_gnu (void)
+destroy_gnat_utils (void)
{
+ /* Destroy the association of GNAT nodes to GCC trees. */
ggc_free (associate_gnat_to_gnu);
associate_gnat_to_gnu = NULL;
-}
+ /* Destroy the association of GNAT nodes to GCC trees as dummies. */
+ ggc_free (dummy_node_table);
+ dummy_node_table = NULL;
+
+ /* Destroy the hash table of padded types. */
+ htab_delete (pad_type_hash_table);
+ pad_type_hash_table = NULL;
+
+ /* Invalidate the global renaming pointers. */
+ invalidate_global_renaming_pointers ();
+}
+
/* GNAT_ENTITY is a GNAT tree node for an entity. Associate GNU_DECL, a GCC
tree node, with GNAT_ENTITY. If GNU_DECL is not a ..._DECL node, abort.
If NO_CHECK is true, the latter check is suppressed.
@@ -281,23 +312,6 @@ present_gnu_tree (Entity_Id gnat_entity)
return PRESENT_GNU_TREE (gnat_entity);
}
-/* Initialize the association of GNAT nodes to GCC trees as dummies. */
-
-void
-init_dummy_type (void)
-{
- dummy_node_table = ggc_alloc_cleared_vec_tree (max_gnat_nodes);
-}
-
-/* Destroy the association of GNAT nodes to GCC trees as dummies. */
-
-void
-destroy_dummy_type (void)
-{
- ggc_free (dummy_node_table);
- dummy_node_table = NULL;
-}
-
/* Make a dummy type corresponding to GNAT_TYPE. */
tree
@@ -630,6 +644,702 @@ gnat_pushdecl (tree decl, Node_Id gnat_node)
}
}
+/* Create a record type that contains a SIZE bytes long field of TYPE with a
+ starting bit position so that it is aligned to ALIGN bits, and leaving at
+ least ROOM bytes free before the field. BASE_ALIGN is the alignment the
+ record is guaranteed to get. */
+
+tree
+make_aligning_type (tree type, unsigned int align, tree size,
+ unsigned int base_align, int room)
+{
+ /* We will be crafting a record type with one field at a position set to be
+ the next multiple of ALIGN past record'address + room bytes. We use a
+ record placeholder to express record'address. */
+ tree record_type = make_node (RECORD_TYPE);
+ tree record = build0 (PLACEHOLDER_EXPR, record_type);
+
+ tree record_addr_st
+ = convert (sizetype, build_unary_op (ADDR_EXPR, NULL_TREE, record));
+
+ /* The diagram below summarizes the shape of what we manipulate:
+
+ <--------- pos ---------->
+ { +------------+-------------+-----------------+
+ record =>{ |############| ... | field (type) |
+ { +------------+-------------+-----------------+
+ |<-- room -->|<- voffset ->|<---- size ----->|
+ o o
+ | |
+ record_addr vblock_addr
+
+ Every length is in sizetype bytes there, except "pos" which has to be
+ set as a bit position in the GCC tree for the record. */
+ tree room_st = size_int (room);
+ tree vblock_addr_st = size_binop (PLUS_EXPR, record_addr_st, room_st);
+ tree voffset_st, pos, field;
+
+ tree name = TYPE_NAME (type);
+
+ if (TREE_CODE (name) == TYPE_DECL)
+ name = DECL_NAME (name);
+ name = concat_name (name, "ALIGN");
+ TYPE_NAME (record_type) = name;
+
+ /* Compute VOFFSET and then POS. The next byte position multiple of some
+ alignment after some address is obtained by "and"ing the alignment minus
+ 1 with the two's complement of the address. */
+ voffset_st = size_binop (BIT_AND_EXPR,
+ fold_build1 (NEGATE_EXPR, sizetype, vblock_addr_st),
+ size_int ((align / BITS_PER_UNIT) - 1));
+
+  /* POS = (ROOM + VOFFSET) * BITS_PER_UNIT, in bitsizetype.  */
+ pos = size_binop (MULT_EXPR,
+ convert (bitsizetype,
+ size_binop (PLUS_EXPR, room_st, voffset_st)),
+ bitsize_unit_node);
+
+ /* Craft the GCC record representation. We exceptionally do everything
+ manually here because 1) our generic circuitry is not quite ready to
+ handle the complex position/size expressions we are setting up, 2) we
+ have a strong simplifying factor at hand: we know the maximum possible
+ value of voffset, and 3) we have to set/reset at least the sizes in
+ accordance with this maximum value anyway, as we need them to convey
+ what should be "alloc"ated for this type.
+
+ Use -1 as the 'addressable' indication for the field to prevent the
+ creation of a bitfield. We don't need one, it would have damaging
+ consequences on the alignment computation, and create_field_decl would
+ make one without this special argument, for instance because of the
+ complex position expression. */
+ field = create_field_decl (get_identifier ("F"), type, record_type, size,
+ pos, 1, -1);
+ TYPE_FIELDS (record_type) = field;
+
+ TYPE_ALIGN (record_type) = base_align;
+ TYPE_USER_ALIGN (record_type) = 1;
+
+ TYPE_SIZE (record_type)
+ = size_binop (PLUS_EXPR,
+ size_binop (MULT_EXPR, convert (bitsizetype, size),
+ bitsize_unit_node),
+ bitsize_int (align + room * BITS_PER_UNIT));
+ TYPE_SIZE_UNIT (record_type)
+ = size_binop (PLUS_EXPR, size,
+ size_int (room + align / BITS_PER_UNIT));
+
+ SET_TYPE_MODE (record_type, BLKmode);
+ relate_alias_sets (record_type, type, ALIAS_SET_COPY);
+
+ /* Declare it now since it will never be declared otherwise. This is
+ necessary to ensure that its subtrees are properly marked. */
+ create_type_decl (name, record_type, NULL, true, false, Empty);
+
+ return record_type;
+}
+
+/* TYPE is a RECORD_TYPE, UNION_TYPE or QUAL_UNION_TYPE that is being used
+ as the field type of a packed record if IN_RECORD is true, or as the
+ component type of a packed array if IN_RECORD is false. See if we can
+ rewrite it either as a type that has a non-BLKmode, which we can pack
+ tighter in the packed record case, or as a smaller type. If so, return
+ the new type. If not, return the original type. */
+
+tree
+make_packable_type (tree type, bool in_record)
+{
+ unsigned HOST_WIDE_INT size = tree_low_cst (TYPE_SIZE (type), 1);
+ unsigned HOST_WIDE_INT new_size;
+ tree new_type, old_field, field_list = NULL_TREE;
+ unsigned int align;
+
+ /* No point in doing anything if the size is zero. */
+ if (size == 0)
+ return type;
+
+ new_type = make_node (TREE_CODE (type));
+
+ /* Copy the name and flags from the old type to that of the new.
+ Note that we rely on the pointer equality created here for
+ TYPE_NAME to look through conversions in various places. */
+ TYPE_NAME (new_type) = TYPE_NAME (type);
+ TYPE_JUSTIFIED_MODULAR_P (new_type) = TYPE_JUSTIFIED_MODULAR_P (type);
+ TYPE_CONTAINS_TEMPLATE_P (new_type) = TYPE_CONTAINS_TEMPLATE_P (type);
+ if (TREE_CODE (type) == RECORD_TYPE)
+ TYPE_PADDING_P (new_type) = TYPE_PADDING_P (type);
+
+ /* If we are in a record and have a small size, set the alignment to
+ try for an integral mode. Otherwise set it to try for a smaller
+ type with BLKmode. */
+ if (in_record && size <= MAX_FIXED_MODE_SIZE)
+ {
+ align = ceil_pow2 (size);
+ TYPE_ALIGN (new_type) = align;
+ new_size = (size + align - 1) & -align;
+ }
+ else
+ {
+ unsigned HOST_WIDE_INT align;
+
+ /* Do not try to shrink the size if the RM size is not constant. */
+ if (TYPE_CONTAINS_TEMPLATE_P (type)
+ || !host_integerp (TYPE_ADA_SIZE (type), 1))
+ return type;
+
+ /* Round the RM size up to a unit boundary to get the minimal size
+ for a BLKmode record. Give up if it's already the size. */
+ new_size = TREE_INT_CST_LOW (TYPE_ADA_SIZE (type));
+ new_size = (new_size + BITS_PER_UNIT - 1) & -BITS_PER_UNIT;
+ if (new_size == size)
+ return type;
+
+ align = new_size & -new_size;
+ TYPE_ALIGN (new_type) = MIN (TYPE_ALIGN (type), align);
+ }
+
+ TYPE_USER_ALIGN (new_type) = 1;
+
+ /* Now copy the fields, keeping the position and size as we don't want
+ to change the layout by propagating the packedness downwards. */
+ for (old_field = TYPE_FIELDS (type); old_field;
+ old_field = DECL_CHAIN (old_field))
+ {
+ tree new_field_type = TREE_TYPE (old_field);
+ tree new_field, new_size;
+
+ if (RECORD_OR_UNION_TYPE_P (new_field_type)
+ && !TYPE_FAT_POINTER_P (new_field_type)
+ && host_integerp (TYPE_SIZE (new_field_type), 1))
+ new_field_type = make_packable_type (new_field_type, true);
+
+ /* However, for the last field in a not already packed record type
+ that is of an aggregate type, we need to use the RM size in the
+ packable version of the record type, see finish_record_type. */
+ if (!DECL_CHAIN (old_field)
+ && !TYPE_PACKED (type)
+ && RECORD_OR_UNION_TYPE_P (new_field_type)
+ && !TYPE_FAT_POINTER_P (new_field_type)
+ && !TYPE_CONTAINS_TEMPLATE_P (new_field_type)
+ && TYPE_ADA_SIZE (new_field_type))
+ new_size = TYPE_ADA_SIZE (new_field_type);
+ else
+ new_size = DECL_SIZE (old_field);
+
+ new_field
+ = create_field_decl (DECL_NAME (old_field), new_field_type, new_type,
+ new_size, bit_position (old_field),
+ TYPE_PACKED (type),
+ !DECL_NONADDRESSABLE_P (old_field));
+
+ DECL_INTERNAL_P (new_field) = DECL_INTERNAL_P (old_field);
+ SET_DECL_ORIGINAL_FIELD_TO_FIELD (new_field, old_field);
+ if (TREE_CODE (new_type) == QUAL_UNION_TYPE)
+ DECL_QUALIFIER (new_field) = DECL_QUALIFIER (old_field);
+
+ DECL_CHAIN (new_field) = field_list;
+ field_list = new_field;
+ }
+
+ finish_record_type (new_type, nreverse (field_list), 2, false);
+ relate_alias_sets (new_type, type, ALIAS_SET_COPY);
+ SET_DECL_PARALLEL_TYPE (TYPE_STUB_DECL (new_type),
+ DECL_PARALLEL_TYPE (TYPE_STUB_DECL (type)));
+
+ /* If this is a padding record, we never want to make the size smaller
+ than what was specified. For QUAL_UNION_TYPE, also copy the size. */
+ if (TYPE_IS_PADDING_P (type) || TREE_CODE (type) == QUAL_UNION_TYPE)
+ {
+ TYPE_SIZE (new_type) = TYPE_SIZE (type);
+ TYPE_SIZE_UNIT (new_type) = TYPE_SIZE_UNIT (type);
+ new_size = size;
+ }
+ else
+ {
+ TYPE_SIZE (new_type) = bitsize_int (new_size);
+ TYPE_SIZE_UNIT (new_type)
+ = size_int ((new_size + BITS_PER_UNIT - 1) / BITS_PER_UNIT);
+ }
+
+ if (!TYPE_CONTAINS_TEMPLATE_P (type))
+ SET_TYPE_ADA_SIZE (new_type, TYPE_ADA_SIZE (type));
+
+ compute_record_mode (new_type);
+
+ /* Try harder to get a packable type if necessary, for example
+ in case the record itself contains a BLKmode field. */
+ if (in_record && TYPE_MODE (new_type) == BLKmode)
+ SET_TYPE_MODE (new_type,
+ mode_for_size_tree (TYPE_SIZE (new_type), MODE_INT, 1));
+
+ /* If neither the mode nor the size has shrunk, return the old type. */
+ if (TYPE_MODE (new_type) == BLKmode && new_size >= size)
+ return type;
+
+ return new_type;
+}
+
+/* Given a type TYPE, return a new type whose size is appropriate for SIZE.
+ If TYPE is the best type, return it. Otherwise, make a new type. We
+ only support new integral and pointer types. FOR_BIASED is true if
+ we are making a biased type. */
+
+tree
+make_type_from_size (tree type, tree size_tree, bool for_biased)
+{
+ unsigned HOST_WIDE_INT size;
+ bool biased_p;
+ tree new_type;
+
+ /* If size indicates an error, just return TYPE to avoid propagating
+ the error. Likewise if it's too large to represent. */
+ if (!size_tree || !host_integerp (size_tree, 1))
+ return type;
+
+ size = tree_low_cst (size_tree, 1);
+
+ switch (TREE_CODE (type))
+ {
+ case INTEGER_TYPE:
+ case ENUMERAL_TYPE:
+ case BOOLEAN_TYPE:
+ biased_p = (TREE_CODE (type) == INTEGER_TYPE
+ && TYPE_BIASED_REPRESENTATION_P (type));
+
+ /* Integer types with precision 0 are forbidden. */
+ if (size == 0)
+ size = 1;
+
+ /* Only do something if the type isn't a packed array type and doesn't
+ already have the proper size and the size isn't too large. */
+ if (TYPE_IS_PACKED_ARRAY_TYPE_P (type)
+ || (TYPE_PRECISION (type) == size && biased_p == for_biased)
+ || size > LONG_LONG_TYPE_SIZE)
+ break;
+
+ biased_p |= for_biased;
+ if (TYPE_UNSIGNED (type) || biased_p)
+ new_type = make_unsigned_type (size);
+ else
+ new_type = make_signed_type (size);
+ TREE_TYPE (new_type) = TREE_TYPE (type) ? TREE_TYPE (type) : type;
+ SET_TYPE_RM_MIN_VALUE (new_type,
+ convert (TREE_TYPE (new_type),
+ TYPE_MIN_VALUE (type)));
+ SET_TYPE_RM_MAX_VALUE (new_type,
+ convert (TREE_TYPE (new_type),
+ TYPE_MAX_VALUE (type)));
+ /* Copy the name to show that it's essentially the same type and
+ not a subrange type. */
+ TYPE_NAME (new_type) = TYPE_NAME (type);
+ TYPE_BIASED_REPRESENTATION_P (new_type) = biased_p;
+ SET_TYPE_RM_SIZE (new_type, bitsize_int (size));
+ return new_type;
+
+ case RECORD_TYPE:
+ /* Do something if this is a fat pointer, in which case we
+ may need to return the thin pointer. */
+ if (TYPE_FAT_POINTER_P (type) && size < POINTER_SIZE * 2)
+ {
+ enum machine_mode p_mode = mode_for_size (size, MODE_INT, 0);
+ if (!targetm.valid_pointer_mode (p_mode))
+ p_mode = ptr_mode;
+ return
+ build_pointer_type_for_mode
+ (TYPE_OBJECT_RECORD_TYPE (TYPE_UNCONSTRAINED_ARRAY (type)),
+ p_mode, 0);
+ }
+ break;
+
+ case POINTER_TYPE:
+ /* Only do something if this is a thin pointer, in which case we
+ may need to return the fat pointer. */
+ if (TYPE_IS_THIN_POINTER_P (type) && size >= POINTER_SIZE * 2)
+ return
+ build_pointer_type (TYPE_UNCONSTRAINED_ARRAY (TREE_TYPE (type)));
+ break;
+
+ default:
+ break;
+ }
+
+ return type;
+}
+
+/* See if the data pointed to by the hash table slot is marked. */
+
+static int
+pad_type_hash_marked_p (const void *p)
+{
+ const_tree const type = ((const struct pad_type_hash *) p)->type;
+
+ return ggc_marked_p (type);
+}
+
+/* Return the cached hash value. */
+
+static hashval_t
+pad_type_hash_hash (const void *p)
+{
+ return ((const struct pad_type_hash *) p)->hash;
+}
+
+/* Return 1 iff the padded types are equivalent. */
+
+static int
+pad_type_hash_eq (const void *p1, const void *p2)
+{
+ const struct pad_type_hash *const t1 = (const struct pad_type_hash *) p1;
+ const struct pad_type_hash *const t2 = (const struct pad_type_hash *) p2;
+ tree type1, type2;
+
+ if (t1->hash != t2->hash)
+ return 0;
+
+ type1 = t1->type;
+ type2 = t2->type;
+
+ /* We consider that the padded types are equivalent if they pad the same
+ type and have the same size, alignment and RM size. Taking the mode
+ into account is redundant since it is determined by the others. */
+ return
+ TREE_TYPE (TYPE_FIELDS (type1)) == TREE_TYPE (TYPE_FIELDS (type2))
+ && TYPE_SIZE (type1) == TYPE_SIZE (type2)
+ && TYPE_ALIGN (type1) == TYPE_ALIGN (type2)
+ && TYPE_ADA_SIZE (type1) == TYPE_ADA_SIZE (type2);
+}
+
+/* Ensure that TYPE has SIZE and ALIGN. Make and return a new padded type
+ if needed. We have already verified that SIZE and TYPE are large enough.
+ GNAT_ENTITY is used to name the resulting record and to issue a warning.
+ IS_COMPONENT_TYPE is true if this is being done for the component type of
+ an array. IS_USER_TYPE is true if the original type needs to be completed.
+ DEFINITION is true if this type is being defined. SET_RM_SIZE is true if
+ the RM size of the resulting type is to be set to SIZE too. */
+
+tree
+maybe_pad_type (tree type, tree size, unsigned int align,
+ Entity_Id gnat_entity, bool is_component_type,
+ bool is_user_type, bool definition, bool set_rm_size)
+{
+ tree orig_size = TYPE_SIZE (type);
+ tree record, field;
+
+ /* If TYPE is a padded type, see if it agrees with any size and alignment
+ we were given. If so, return the original type. Otherwise, strip
+ off the padding, since we will either be returning the inner type
+ or repadding it. If no size or alignment is specified, use that of
+ the original padded type. */
+ if (TYPE_IS_PADDING_P (type))
+ {
+ if ((!size
+ || operand_equal_p (round_up (size,
+ MAX (align, TYPE_ALIGN (type))),
+ round_up (TYPE_SIZE (type),
+ MAX (align, TYPE_ALIGN (type))),
+ 0))
+ && (align == 0 || align == TYPE_ALIGN (type)))
+ return type;
+
+ if (!size)
+ size = TYPE_SIZE (type);
+ if (align == 0)
+ align = TYPE_ALIGN (type);
+
+ type = TREE_TYPE (TYPE_FIELDS (type));
+ orig_size = TYPE_SIZE (type);
+ }
+
+ /* If the size is either not being changed or is being made smaller (which
+ is not done here and is only valid for bitfields anyway), show the size
+ isn't changing. Likewise, clear the alignment if it isn't being
+ changed. Then return if we aren't doing anything. */
+ if (size
+ && (operand_equal_p (size, orig_size, 0)
+ || (TREE_CODE (orig_size) == INTEGER_CST
+ && tree_int_cst_lt (size, orig_size))))
+ size = NULL_TREE;
+
+ if (align == TYPE_ALIGN (type))
+ align = 0;
+
+ if (align == 0 && !size)
+ return type;
+
+ /* If requested, complete the original type and give it a name. */
+ if (is_user_type)
+ create_type_decl (get_entity_name (gnat_entity), type,
+ NULL, !Comes_From_Source (gnat_entity),
+ !(TYPE_NAME (type)
+ && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
+ && DECL_IGNORED_P (TYPE_NAME (type))),
+ gnat_entity);
+
+ /* We used to modify the record in place in some cases, but that could
+ generate incorrect debugging information. So make a new record
+ type and name. */
+ record = make_node (RECORD_TYPE);
+ TYPE_PADDING_P (record) = 1;
+
+ if (Present (gnat_entity))
+ TYPE_NAME (record) = create_concat_name (gnat_entity, "PAD");
+
+ TYPE_ALIGN (record) = align;
+ TYPE_SIZE (record) = size ? size : orig_size;
+ TYPE_SIZE_UNIT (record)
+ = convert (sizetype,
+ size_binop (CEIL_DIV_EXPR, TYPE_SIZE (record),
+ bitsize_unit_node));
+
+ /* If we are changing the alignment and the input type is a record with
+ BLKmode and a small constant size, try to make a form that has an
+ integral mode. This might allow the padding record to also have an
+ integral mode, which will be much more efficient. There is no point
+ in doing so if a size is specified unless it is also a small constant
+ size and it is incorrect to do so if we cannot guarantee that the mode
+ will be naturally aligned since the field must always be addressable.
+
+ ??? This might not always be a win when done for a stand-alone object:
+ since the nominal and the effective type of the object will now have
+ different modes, a VIEW_CONVERT_EXPR will be required for converting
+ between them and it might be hard to overcome afterwards, including
+ at the RTL level when the stand-alone object is accessed as a whole. */
+ if (align != 0
+ && RECORD_OR_UNION_TYPE_P (type)
+ && TYPE_MODE (type) == BLKmode
+ && !TYPE_BY_REFERENCE_P (type)
+ && TREE_CODE (orig_size) == INTEGER_CST
+ && !TREE_OVERFLOW (orig_size)
+ && compare_tree_int (orig_size, MAX_FIXED_MODE_SIZE) <= 0
+ && (!size
+ || (TREE_CODE (size) == INTEGER_CST
+ && compare_tree_int (size, MAX_FIXED_MODE_SIZE) <= 0)))
+ {
+ tree packable_type = make_packable_type (type, true);
+ if (TYPE_MODE (packable_type) != BLKmode
+ && align >= TYPE_ALIGN (packable_type))
+ type = packable_type;
+ }
+
+ /* Now create the field with the original size. */
+ field = create_field_decl (get_identifier ("F"), type, record, orig_size,
+ bitsize_zero_node, 0, 1);
+ DECL_INTERNAL_P (field) = 1;
+
+ /* Do not emit debug info until after the auxiliary record is built. */
+ finish_record_type (record, field, 1, false);
+
+ /* Set the RM size if requested. */
+ if (set_rm_size)
+ {
+ SET_TYPE_ADA_SIZE (record, size ? size : orig_size);
+
+ /* If the padded type is complete and has constant size, we canonicalize
+ it by means of the hash table. This is consistent with the language
+ semantics and ensures that gigi and the middle-end have a common view
+ of these padded types. */
+ if (TREE_CONSTANT (TYPE_SIZE (record)))
+ {
+ hashval_t hashcode;
+ struct pad_type_hash in, *h;
+ void **loc;
+
+ hashcode = iterative_hash_object (TYPE_HASH (type), 0);
+ hashcode = iterative_hash_expr (TYPE_SIZE (record), hashcode);
+ hashcode = iterative_hash_hashval_t (TYPE_ALIGN (record), hashcode);
+ hashcode = iterative_hash_expr (TYPE_ADA_SIZE (record), hashcode);
+
+ in.hash = hashcode;
+ in.type = record;
+ h = (struct pad_type_hash *)
+ htab_find_with_hash (pad_type_hash_table, &in, hashcode);
+ if (h)
+ {
+ record = h->type;
+ goto built;
+ }
+
+ h = ggc_alloc_pad_type_hash ();
+ h->hash = hashcode;
+ h->type = record;
+ loc = htab_find_slot_with_hash (pad_type_hash_table, h, hashcode,
+ INSERT);
+ *loc = (void *)h;
+ }
+ }
+
+ /* Unless debugging information isn't being written for the input type,
+ write a record that shows what we are a subtype of and also make a
+ variable that indicates our size, if still variable. */
+ if (TREE_CODE (orig_size) != INTEGER_CST
+ && TYPE_NAME (record)
+ && TYPE_NAME (type)
+ && !(TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
+ && DECL_IGNORED_P (TYPE_NAME (type))))
+ {
+ tree marker = make_node (RECORD_TYPE);
+ tree name = TYPE_NAME (record);
+ tree orig_name = TYPE_NAME (type);
+
+ if (TREE_CODE (name) == TYPE_DECL)
+ name = DECL_NAME (name);
+
+ if (TREE_CODE (orig_name) == TYPE_DECL)
+ orig_name = DECL_NAME (orig_name);
+
+ TYPE_NAME (marker) = concat_name (name, "XVS");
+ finish_record_type (marker,
+ create_field_decl (orig_name,
+ build_reference_type (type),
+ marker, NULL_TREE, NULL_TREE,
+ 0, 0),
+ 0, true);
+
+ add_parallel_type (record, marker);
+
+ if (definition && size && TREE_CODE (size) != INTEGER_CST)
+ TYPE_SIZE_UNIT (marker)
+ = create_var_decl (concat_name (name, "XVZ"), NULL_TREE, sizetype,
+ TYPE_SIZE_UNIT (record), false, false, false,
+ false, NULL, gnat_entity);
+ }
+
+ rest_of_record_type_compilation (record);
+
+built:
+ /* If the size was widened explicitly, maybe give a warning. Take the
+ original size as the maximum size of the input if there was an
+ unconstrained record involved and round it up to the specified alignment,
+ if one was specified. But don't do it if we are just annotating types
+ and the type is tagged, since tagged types aren't fully laid out in this
+ mode. */
+ if (!size
+ || TREE_CODE (size) == COND_EXPR
+ || TREE_CODE (size) == MAX_EXPR
+ || No (gnat_entity)
+ || (type_annotate_only && Is_Tagged_Type (Etype (gnat_entity))))
+ return record;
+
+ if (CONTAINS_PLACEHOLDER_P (orig_size))
+ orig_size = max_size (orig_size, true);
+
+ if (align)
+ orig_size = round_up (orig_size, align);
+
+ if (!operand_equal_p (size, orig_size, 0)
+ && !(TREE_CODE (size) == INTEGER_CST
+ && TREE_CODE (orig_size) == INTEGER_CST
+ && (TREE_OVERFLOW (size)
+ || TREE_OVERFLOW (orig_size)
+ || tree_int_cst_lt (size, orig_size))))
+ {
+ Node_Id gnat_error_node = Empty;
+
+ if (Is_Packed_Array_Type (gnat_entity))
+ gnat_entity = Original_Array_Type (gnat_entity);
+
+ if ((Ekind (gnat_entity) == E_Component
+ || Ekind (gnat_entity) == E_Discriminant)
+ && Present (Component_Clause (gnat_entity)))
+ gnat_error_node = Last_Bit (Component_Clause (gnat_entity));
+ else if (Present (Size_Clause (gnat_entity)))
+ gnat_error_node = Expression (Size_Clause (gnat_entity));
+
+ /* Generate message only for entities that come from source, since
+ if we have an entity created by expansion, the message will be
+ generated for some other corresponding source entity. */
+ if (Comes_From_Source (gnat_entity))
+ {
+ if (Present (gnat_error_node))
+ post_error_ne_tree ("{^ }bits of & unused?",
+ gnat_error_node, gnat_entity,
+ size_diffop (size, orig_size));
+ else if (is_component_type)
+ post_error_ne_tree ("component of& padded{ by ^ bits}?",
+ gnat_entity, gnat_entity,
+ size_diffop (size, orig_size));
+ }
+ }
+
+ return record;
+}
+
+/* Relate the alias sets of GNU_NEW_TYPE and GNU_OLD_TYPE according to OP.
+ If this is a multi-dimensional array type, do this recursively.
+
+ OP may be
+ - ALIAS_SET_COPY: the new set is made a copy of the old one.
+ - ALIAS_SET_SUPERSET: the new set is made a superset of the old one.
+ - ALIAS_SET_SUBSET: the new set is made a subset of the old one. */
+
+void
+relate_alias_sets (tree gnu_new_type, tree gnu_old_type, enum alias_set_op op)
+{
+ /* Remove any padding from GNU_OLD_TYPE. It doesn't matter in the case
+ of a one-dimensional array, since the padding has the same alias set
+ as the field type, but if it's a multi-dimensional array, we need to
+ see the inner types. */
+ while (TREE_CODE (gnu_old_type) == RECORD_TYPE
+ && (TYPE_JUSTIFIED_MODULAR_P (gnu_old_type)
+ || TYPE_PADDING_P (gnu_old_type)))
+ gnu_old_type = TREE_TYPE (TYPE_FIELDS (gnu_old_type));
+
+ /* Unconstrained array types are deemed incomplete and would thus be given
+ alias set 0. Retrieve the underlying array type. */
+ if (TREE_CODE (gnu_old_type) == UNCONSTRAINED_ARRAY_TYPE)
+ gnu_old_type
+ = TREE_TYPE (TREE_TYPE (TYPE_FIELDS (TREE_TYPE (gnu_old_type))));
+ if (TREE_CODE (gnu_new_type) == UNCONSTRAINED_ARRAY_TYPE)
+ gnu_new_type
+ = TREE_TYPE (TREE_TYPE (TYPE_FIELDS (TREE_TYPE (gnu_new_type))));
+
+ if (TREE_CODE (gnu_new_type) == ARRAY_TYPE
+ && TREE_CODE (TREE_TYPE (gnu_new_type)) == ARRAY_TYPE
+ && TYPE_MULTI_ARRAY_P (TREE_TYPE (gnu_new_type)))
+ relate_alias_sets (TREE_TYPE (gnu_new_type), TREE_TYPE (gnu_old_type), op);
+
+ switch (op)
+ {
+ case ALIAS_SET_COPY:
+ /* The alias set shouldn't be copied between array types with different
+ aliasing settings because this can break the aliasing relationship
+ between the array type and its element type. */
+#ifndef ENABLE_CHECKING
+ if (flag_strict_aliasing)
+#endif
+ gcc_assert (!(TREE_CODE (gnu_new_type) == ARRAY_TYPE
+ && TREE_CODE (gnu_old_type) == ARRAY_TYPE
+ && TYPE_NONALIASED_COMPONENT (gnu_new_type)
+ != TYPE_NONALIASED_COMPONENT (gnu_old_type)));
+
+ TYPE_ALIAS_SET (gnu_new_type) = get_alias_set (gnu_old_type);
+ break;
+
+ case ALIAS_SET_SUBSET:
+ case ALIAS_SET_SUPERSET:
+ {
+ alias_set_type old_set = get_alias_set (gnu_old_type);
+ alias_set_type new_set = get_alias_set (gnu_new_type);
+
+ /* Do nothing if the alias sets conflict. This ensures that we
+ never call record_alias_subset several times for the same pair
+ or at all for alias set 0. */
+ if (!alias_sets_conflict_p (old_set, new_set))
+ {
+ if (op == ALIAS_SET_SUBSET)
+ record_alias_subset (old_set, new_set);
+ else
+ record_alias_subset (new_set, old_set);
+ }
+ }
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ record_component_aliases (gnu_new_type);
+}
+
/* Record TYPE as a builtin type for Ada. NAME is the name of the type.
ARTIFICIAL_P is true if it's a type that was generated by the compiler. */
@@ -2224,14 +2934,6 @@ gnat_types_compatible_p (tree t1, tree t2)
&& gnat_types_compatible_p (TREE_TYPE (t1), TREE_TYPE (t2)))))
return 1;
- /* Padding record types are also compatible if they pad the same
- type and have the same constant size. */
- if (code == RECORD_TYPE
- && TYPE_PADDING_P (t1) && TYPE_PADDING_P (t2)
- && TREE_TYPE (TYPE_FIELDS (t1)) == TREE_TYPE (TYPE_FIELDS (t2))
- && tree_int_cst_equal (TYPE_SIZE (t1), TYPE_SIZE (t2)))
- return 1;
-
return 0;
}
@@ -3705,7 +4407,7 @@ convert (tree type, tree expr)
&& TYPE_PADDING_P (type) && TYPE_PADDING_P (etype)
&& (!TREE_CONSTANT (TYPE_SIZE (type))
|| !TREE_CONSTANT (TYPE_SIZE (etype))
- || gnat_types_compatible_p (type, etype)
+ || TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (etype)
|| TYPE_NAME (TREE_TYPE (TYPE_FIELDS (type)))
== TYPE_NAME (TREE_TYPE (TYPE_FIELDS (etype)))))
;
@@ -3734,8 +4436,8 @@ convert (tree type, tree expr)
if (TREE_CODE (expr) == COMPONENT_REF
&& TYPE_IS_PADDING_P (TREE_TYPE (TREE_OPERAND (expr, 0)))
&& (!TREE_CONSTANT (TYPE_SIZE (type))
- || gnat_types_compatible_p (type,
- TREE_TYPE (TREE_OPERAND (expr, 0)))
+ || TYPE_MAIN_VARIANT (type)
+ == TYPE_MAIN_VARIANT (TREE_TYPE (TREE_OPERAND (expr, 0)))
|| (ecode == RECORD_TYPE
&& TYPE_NAME (etype)
== TYPE_NAME (TREE_TYPE (TYPE_FIELDS (type))))))
diff --git a/gcc/ada/gcc-interface/utils2.c b/gcc/ada/gcc-interface/utils2.c
index d0769f7996b..e104b4f0e34 100644
--- a/gcc/ada/gcc-interface/utils2.c
+++ b/gcc/ada/gcc-interface/utils2.c
@@ -789,16 +789,28 @@ build_binary_op (enum tree_code op_code, tree result_type,
else if (TYPE_IS_PADDING_P (left_type)
&& TREE_CONSTANT (TYPE_SIZE (left_type))
&& ((TREE_CODE (right_operand) == COMPONENT_REF
- && TYPE_IS_PADDING_P
- (TREE_TYPE (TREE_OPERAND (right_operand, 0)))
- && gnat_types_compatible_p
- (left_type,
- TREE_TYPE (TREE_OPERAND (right_operand, 0))))
+ && TYPE_MAIN_VARIANT (left_type)
+ == TYPE_MAIN_VARIANT
+ (TREE_TYPE (TREE_OPERAND (right_operand, 0))))
|| (TREE_CODE (right_operand) == CONSTRUCTOR
&& !CONTAINS_PLACEHOLDER_P
(DECL_SIZE (TYPE_FIELDS (left_type)))))
&& !integer_zerop (TYPE_SIZE (right_type)))
- operation_type = left_type;
+ {
+ /* We make an exception for a BLKmode type padding a non-BLKmode
+ inner type and do the conversion of the LHS right away, since
+ unchecked_convert wouldn't do it properly. */
+ if (TYPE_MODE (left_type) == BLKmode
+ && TYPE_MODE (right_type) != BLKmode
+ && TREE_CODE (right_operand) != CONSTRUCTOR)
+ {
+ operation_type = right_type;
+ left_operand = convert (operation_type, left_operand);
+ left_type = operation_type;
+ }
+ else
+ operation_type = left_type;
+ }
/* If we have a call to a function that returns an unconstrained type
with default discriminant on the RHS, use the RHS type (which is
diff --git a/gcc/alias.c b/gcc/alias.c
index 8366f9c3a5e..00af340c372 100644
--- a/gcc/alias.c
+++ b/gcc/alias.c
@@ -1433,9 +1433,9 @@ canon_rtx (rtx x)
if (x0 != XEXP (x, 0) || x1 != XEXP (x, 1))
{
if (CONST_INT_P (x0))
- return plus_constant (x1, INTVAL (x0));
+ return plus_constant (GET_MODE (x), x1, INTVAL (x0));
else if (CONST_INT_P (x1))
- return plus_constant (x0, INTVAL (x1));
+ return plus_constant (GET_MODE (x), x0, INTVAL (x1));
return gen_rtx_PLUS (GET_MODE (x), x0, x1);
}
}
@@ -2928,7 +2928,8 @@ init_alias_analysis (void)
&& (t = get_reg_known_value (REGNO (XEXP (src, 0))))
&& CONST_INT_P (XEXP (src, 1)))
{
- t = plus_constant (t, INTVAL (XEXP (src, 1)));
+ t = plus_constant (GET_MODE (src), t,
+ INTVAL (XEXP (src, 1)));
set_reg_known_value (regno, t);
set_reg_known_equiv_p (regno, 0);
}
diff --git a/gcc/basic-block.h b/gcc/basic-block.h
index f8cdea0085d..f0eeba7a7d3 100644
--- a/gcc/basic-block.h
+++ b/gcc/basic-block.h
@@ -101,8 +101,24 @@ extern const struct gcov_ctr_summary *profile_info;
/* Declared in cfgloop.h. */
struct loop;
-/* Declared in tree-flow.h. */
-struct rtl_bb_info;
+struct GTY(()) rtl_bb_info {
+ /* The first insn of the block is embedded into bb->il.x. */
+ /* The last insn of the block. */
+ rtx end_;
+
+ /* In CFGlayout mode points to insn notes/jumptables to be placed just before
+ and after the block. */
+ rtx header_;
+ rtx footer_;
+};
+
+struct GTY(()) gimple_bb_info {
+ /* Sequence of statements in this block. */
+ gimple_seq seq;
+
+ /* PHI nodes for this block. */
+ gimple_seq phi_nodes;
+};
/* A basic block is a sequence of instructions with only entry and
only one exit. If any one of the instructions are executed, they
@@ -149,8 +165,11 @@ struct GTY((chain_next ("%h.next_bb"), chain_prev ("%h.prev_bb"))) basic_block_d
struct basic_block_def *next_bb;
union basic_block_il_dependent {
- struct gimple_bb_info * GTY ((tag ("0"))) gimple;
- struct rtl_bb_info * GTY ((tag ("1"))) rtl;
+ struct gimple_bb_info GTY ((tag ("0"))) gimple;
+ struct {
+ rtx head_;
+ struct rtl_bb_info * rtl;
+ } GTY ((tag ("1"))) x;
} GTY ((desc ("((%1.flags & BB_RTL) != 0)"))) il;
/* Expected number of executions: calculated in profile.c. */
@@ -172,27 +191,12 @@ struct GTY((chain_next ("%h.next_bb"), chain_prev ("%h.prev_bb"))) basic_block_d
int flags;
};
-struct GTY(()) rtl_bb_info {
- /* The first and last insns of the block. */
- rtx head_;
- rtx end_;
-
- /* In CFGlayout mode points to insn notes/jumptables to be placed just before
- and after the block. */
- rtx header;
- rtx footer;
-
- /* This field is used by the bb-reorder and tracer passes. */
- int visited;
-};
-
-struct GTY(()) gimple_bb_info {
- /* Sequence of statements in this block. */
- gimple_seq seq;
-
- /* PHI nodes for this block. */
- gimple_seq phi_nodes;
-};
+/* This ensures that struct gimple_bb_info is smaller than
+ struct rtl_bb_info, so that inlining the former into basic_block_def
+ is the better choice. */
+typedef int __assert_gimple_bb_smaller_rtl_bb
+ [(int)sizeof(struct rtl_bb_info)
+ - (int)sizeof (struct gimple_bb_info)];
DEF_VEC_P(basic_block);
DEF_VEC_ALLOC_P(basic_block,gc);
@@ -256,7 +260,10 @@ enum bb_flags
df_set_bb_dirty, but not cleared by df_analyze, so it can be used
to test whether a block has been modified prior to a df_analyze
call. */
- BB_MODIFIED = 1 << 12
+ BB_MODIFIED = 1 << 12,
+
+ /* A general visited flag for passes to use. */
+ BB_VISITED = 1 << 13
};
/* Dummy flag for convenience in the hot/cold partitioning code. */
@@ -411,8 +418,10 @@ struct GTY(()) control_flow_graph {
/* Stuff for recording basic block info. */
-#define BB_HEAD(B) (B)->il.rtl->head_
-#define BB_END(B) (B)->il.rtl->end_
+#define BB_HEAD(B) (B)->il.x.head_
+#define BB_END(B) (B)->il.x.rtl->end_
+#define BB_HEADER(B) (B)->il.x.rtl->header_
+#define BB_FOOTER(B) (B)->il.x.rtl->footer_
/* Special block numbers [markers] for entry and exit.
Neither of them is supposed to hold actual statements. */
diff --git a/gcc/bb-reorder.c b/gcc/bb-reorder.c
index c7f9c920d76..7f73b947e69 100644
--- a/gcc/bb-reorder.c
+++ b/gcc/bb-reorder.c
@@ -133,6 +133,9 @@ typedef struct bbro_basic_block_data_def
/* Which trace is the bb in? */
int in_trace;
+ /* Which trace was this bb visited in? */
+ int visited;
+
/* Which heap is BB in (if any)? */
fibheap_t heap;
@@ -183,6 +186,29 @@ static void connect_traces (int, struct trace *);
static bool copy_bb_p (const_basic_block, int);
static bool push_to_next_round_p (const_basic_block, int, int, int, gcov_type);
+/* Return the trace number in which BB was visited. */
+
+static int
+bb_visited_trace (const_basic_block bb)
+{
+ gcc_assert (bb->index < array_size);
+ return bbd[bb->index].visited;
+}
+
+/* This function marks BB that it was visited in trace number TRACE. */
+
+static void
+mark_bb_visited (basic_block bb, int trace)
+{
+ bbd[bb->index].visited = trace;
+ if (bbd[bb->index].heap)
+ {
+ fibheap_delete_node (bbd[bb->index].heap, bbd[bb->index].node);
+ bbd[bb->index].heap = NULL;
+ bbd[bb->index].node = NULL;
+ }
+}
+
/* Check to see if bb should be pushed into the next round of trace
collections or not. Reasons for pushing the block forward are 1).
If the block is cold, we are doing partitioning, and there will be
@@ -306,14 +332,14 @@ rotate_loop (edge back_edge, struct trace *trace, int trace_n)
FOR_EACH_EDGE (e, ei, bb->succs)
if (e->dest != EXIT_BLOCK_PTR
- && e->dest->il.rtl->visited != trace_n
+ && bb_visited_trace (e->dest) != trace_n
&& (e->flags & EDGE_CAN_FALLTHRU)
&& !(e->flags & EDGE_COMPLEX))
{
if (is_preferred)
{
/* The best edge is preferred. */
- if (!e->dest->il.rtl->visited
+ if (!bb_visited_trace (e->dest)
|| bbd[e->dest->index].start_of_trace >= 0)
{
/* The current edge E is also preferred. */
@@ -329,7 +355,7 @@ rotate_loop (edge back_edge, struct trace *trace, int trace_n)
}
else
{
- if (!e->dest->il.rtl->visited
+ if (!bb_visited_trace (e->dest)
|| bbd[e->dest->index].start_of_trace >= 0)
{
/* The current edge E is preferred. */
@@ -397,20 +423,6 @@ rotate_loop (edge back_edge, struct trace *trace, int trace_n)
return best_bb;
}
-/* This function marks BB that it was visited in trace number TRACE. */
-
-static void
-mark_bb_visited (basic_block bb, int trace)
-{
- bb->il.rtl->visited = trace;
- if (bbd[bb->index].heap)
- {
- fibheap_delete_node (bbd[bb->index].heap, bbd[bb->index].node);
- bbd[bb->index].heap = NULL;
- bbd[bb->index].node = NULL;
- }
-}
-
/* One round of finding traces. Find traces for BRANCH_TH and EXEC_TH i.e. do
not include basic blocks their probability is lower than BRANCH_TH or their
frequency is lower than EXEC_TH into traces (or count is lower than
@@ -496,8 +508,8 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
if (e->dest == EXIT_BLOCK_PTR)
continue;
- if (e->dest->il.rtl->visited
- && e->dest->il.rtl->visited != *n_traces)
+ if (bb_visited_trace (e->dest)
+ && bb_visited_trace (e->dest) != *n_traces)
continue;
if (BB_PARTITION (e->dest) != BB_PARTITION (bb))
@@ -550,7 +562,7 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
{
if (e == best_edge
|| e->dest == EXIT_BLOCK_PTR
- || e->dest->il.rtl->visited)
+ || bb_visited_trace (e->dest))
continue;
key = bb_to_key (e->dest);
@@ -611,7 +623,7 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
if (best_edge) /* Suitable successor was found. */
{
- if (best_edge->dest->il.rtl->visited == *n_traces)
+ if (bb_visited_trace (best_edge->dest) == *n_traces)
{
/* We do nothing with one basic block loops. */
if (best_edge->dest != bb)
@@ -682,7 +694,7 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
if (e != best_edge
&& (e->flags & EDGE_CAN_FALLTHRU)
&& !(e->flags & EDGE_COMPLEX)
- && !e->dest->il.rtl->visited
+ && !bb_visited_trace (e->dest)
&& single_pred_p (e->dest)
&& !(e->flags & EDGE_CROSSING)
&& single_succ_p (e->dest)
@@ -716,7 +728,7 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
FOR_EACH_EDGE (e, ei, bb->succs)
{
if (e->dest == EXIT_BLOCK_PTR
- || e->dest->il.rtl->visited)
+ || bb_visited_trace (e->dest))
continue;
if (bbd[e->dest->index].heap)
@@ -758,15 +770,11 @@ copy_bb (basic_block old_bb, edge e, basic_block bb, int trace)
BB_COPY_PARTITION (new_bb, old_bb);
gcc_assert (e->dest == new_bb);
- gcc_assert (!e->dest->il.rtl->visited);
if (dump_file)
fprintf (dump_file,
"Duplicated bb %d (created bb %d)\n",
old_bb->index, new_bb->index);
- new_bb->il.rtl->visited = trace;
- new_bb->aux = bb->aux;
- bb->aux = new_bb;
if (new_bb->index >= array_size || last_basic_block > array_size)
{
@@ -779,8 +787,9 @@ copy_bb (basic_block old_bb, edge e, basic_block bb, int trace)
for (i = array_size; i < new_size; i++)
{
bbd[i].start_of_trace = -1;
- bbd[i].in_trace = -1;
bbd[i].end_of_trace = -1;
+ bbd[i].in_trace = -1;
+ bbd[i].visited = 0;
bbd[i].heap = NULL;
bbd[i].node = NULL;
}
@@ -794,6 +803,11 @@ copy_bb (basic_block old_bb, edge e, basic_block bb, int trace)
}
}
+ gcc_assert (!bb_visited_trace (e->dest));
+ mark_bb_visited (new_bb, trace);
+ new_bb->aux = bb->aux;
+ bb->aux = new_bb;
+
bbd[new_bb->index].in_trace = trace;
return new_bb;
@@ -1214,7 +1228,7 @@ static void
emit_barrier_after_bb (basic_block bb)
{
rtx barrier = emit_barrier_after (BB_END (bb));
- bb->il.rtl->footer = unlink_insn_chain (barrier, barrier);
+ BB_FOOTER (bb) = unlink_insn_chain (barrier, barrier);
}
/* The landing pad OLD_LP, in block OLD_BB, has edges from both partitions.
@@ -1929,8 +1943,9 @@ reorder_basic_blocks (void)
for (i = 0; i < array_size; i++)
{
bbd[i].start_of_trace = -1;
- bbd[i].in_trace = -1;
bbd[i].end_of_trace = -1;
+ bbd[i].in_trace = -1;
+ bbd[i].visited = 0;
bbd[i].heap = NULL;
bbd[i].node = NULL;
}
@@ -2012,6 +2027,7 @@ duplicate_computed_gotos (void)
if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1)
return 0;
+ clear_bb_flags ();
cfg_layout_initialize (0);
/* We are estimating the length of uncond jump insn only once
@@ -2075,10 +2091,10 @@ duplicate_computed_gotos (void)
/* Duplicate computed gotos. */
FOR_EACH_BB (bb)
{
- if (bb->il.rtl->visited)
+ if (bb->flags & BB_VISITED)
continue;
- bb->il.rtl->visited = 1;
+ bb->flags |= BB_VISITED;
/* BB must have one outgoing edge. That edge must not lead to
the exit block or the next block.
@@ -2096,7 +2112,7 @@ duplicate_computed_gotos (void)
new_bb = duplicate_block (single_succ (bb), single_succ_edge (bb), bb);
new_bb->aux = bb->aux;
bb->aux = new_bb;
- new_bb->il.rtl->visited = 1;
+ new_bb->flags |= BB_VISITED;
}
done:
diff --git a/gcc/builtins.c b/gcc/builtins.c
index 41a052b1d8f..8b20e439cc0 100644
--- a/gcc/builtins.c
+++ b/gcc/builtins.c
@@ -263,8 +263,10 @@ called_as_built_in (tree node)
return is_builtin_name (name);
}
-/* Compute values M and N such that M divides (address of EXP - N) and
- such that N < M. Store N in *BITPOSP and return M.
+/* Compute values M and N such that M divides (address of EXP - N) and such
+ that N < M. If these numbers can be determined, store M in alignp and N in
+ *BITPOSP and return true. Otherwise return false and store BITS_PER_UNIT to
+ *alignp and any bit-offset to *bitposp.
Note that the address (and thus the alignment) computed here is based
on the address to which a symbol resolves, whereas DECL_ALIGN is based
@@ -273,14 +275,16 @@ called_as_built_in (tree node)
the address &foo of a Thumb function foo() has the lowest bit set,
whereas foo() itself starts on an even address. */
-unsigned int
-get_object_alignment_1 (tree exp, unsigned HOST_WIDE_INT *bitposp)
+bool
+get_object_alignment_1 (tree exp, unsigned int *alignp,
+ unsigned HOST_WIDE_INT *bitposp)
{
HOST_WIDE_INT bitsize, bitpos;
tree offset;
enum machine_mode mode;
int unsignedp, volatilep;
- unsigned int align, inner;
+ unsigned int inner, align = BITS_PER_UNIT;
+ bool known_alignment = false;
/* Get the innermost object and the constant (bitpos) and possibly
variable (offset) offset of the access. */
@@ -301,84 +305,97 @@ get_object_alignment_1 (tree exp, unsigned HOST_WIDE_INT *bitposp)
allows the low bit to be used as a virtual bit, we know
that the address itself must be 2-byte aligned. */
if (TARGET_PTRMEMFUNC_VBIT_LOCATION == ptrmemfunc_vbit_in_pfn)
- align = 2 * BITS_PER_UNIT;
- else
- align = BITS_PER_UNIT;
+ {
+ known_alignment = true;
+ align = 2 * BITS_PER_UNIT;
+ }
}
else
- align = DECL_ALIGN (exp);
+ {
+ known_alignment = true;
+ align = DECL_ALIGN (exp);
+ }
}
else if (CONSTANT_CLASS_P (exp))
{
+ known_alignment = true;
align = TYPE_ALIGN (TREE_TYPE (exp));
#ifdef CONSTANT_ALIGNMENT
align = (unsigned)CONSTANT_ALIGNMENT (exp, align);
#endif
}
else if (TREE_CODE (exp) == VIEW_CONVERT_EXPR)
- align = TYPE_ALIGN (TREE_TYPE (exp));
+ {
+ known_alignment = true;
+ align = TYPE_ALIGN (TREE_TYPE (exp));
+ }
else if (TREE_CODE (exp) == INDIRECT_REF)
- align = TYPE_ALIGN (TREE_TYPE (exp));
+ {
+ known_alignment = true;
+ align = TYPE_ALIGN (TREE_TYPE (exp));
+ }
else if (TREE_CODE (exp) == MEM_REF)
{
tree addr = TREE_OPERAND (exp, 0);
- struct ptr_info_def *pi;
+ unsigned ptr_align;
+ unsigned HOST_WIDE_INT ptr_bitpos;
+
if (TREE_CODE (addr) == BIT_AND_EXPR
&& TREE_CODE (TREE_OPERAND (addr, 1)) == INTEGER_CST)
{
+ known_alignment = true;
align = (TREE_INT_CST_LOW (TREE_OPERAND (addr, 1))
& -TREE_INT_CST_LOW (TREE_OPERAND (addr, 1)));
align *= BITS_PER_UNIT;
addr = TREE_OPERAND (addr, 0);
}
- else
- align = BITS_PER_UNIT;
- if (TREE_CODE (addr) == SSA_NAME
- && (pi = SSA_NAME_PTR_INFO (addr)))
+
+ if (get_pointer_alignment_1 (addr, &ptr_align, &ptr_bitpos))
{
- bitpos += (pi->misalign * BITS_PER_UNIT) & ~(align - 1);
- align = MAX (pi->align * BITS_PER_UNIT, align);
+ known_alignment = true;
+ bitpos += ptr_bitpos & ~(align - 1);
+ align = MAX (ptr_align, align);
}
- else if (TREE_CODE (addr) == ADDR_EXPR)
- align = MAX (align, get_object_alignment (TREE_OPERAND (addr, 0)));
+
bitpos += mem_ref_offset (exp).low * BITS_PER_UNIT;
}
else if (TREE_CODE (exp) == TARGET_MEM_REF)
{
- struct ptr_info_def *pi;
+ unsigned ptr_align;
+ unsigned HOST_WIDE_INT ptr_bitpos;
tree addr = TMR_BASE (exp);
+
if (TREE_CODE (addr) == BIT_AND_EXPR
&& TREE_CODE (TREE_OPERAND (addr, 1)) == INTEGER_CST)
{
+ known_alignment = true;
align = (TREE_INT_CST_LOW (TREE_OPERAND (addr, 1))
& -TREE_INT_CST_LOW (TREE_OPERAND (addr, 1)));
align *= BITS_PER_UNIT;
addr = TREE_OPERAND (addr, 0);
}
- else
- align = BITS_PER_UNIT;
- if (TREE_CODE (addr) == SSA_NAME
- && (pi = SSA_NAME_PTR_INFO (addr)))
+
+ if (get_pointer_alignment_1 (addr, &ptr_align, &ptr_bitpos))
{
- bitpos += (pi->misalign * BITS_PER_UNIT) & ~(align - 1);
- align = MAX (pi->align * BITS_PER_UNIT, align);
+ known_alignment = true;
+ bitpos += ptr_bitpos & ~(align - 1);
+ align = MAX (ptr_align, align);
}
- else if (TREE_CODE (addr) == ADDR_EXPR)
- align = MAX (align, get_object_alignment (TREE_OPERAND (addr, 0)));
+
if (TMR_OFFSET (exp))
bitpos += TREE_INT_CST_LOW (TMR_OFFSET (exp)) * BITS_PER_UNIT;
if (TMR_INDEX (exp) && TMR_STEP (exp))
{
unsigned HOST_WIDE_INT step = TREE_INT_CST_LOW (TMR_STEP (exp));
align = MIN (align, (step & -step) * BITS_PER_UNIT);
+ known_alignment = true;
}
else if (TMR_INDEX (exp))
- align = BITS_PER_UNIT;
+ known_alignment = false;
+
if (TMR_INDEX2 (exp))
- align = BITS_PER_UNIT;
+ known_alignment = false;
}
- else
- align = BITS_PER_UNIT;
/* If there is a non-constant offset part extract the maximum
alignment that can prevail. */
@@ -418,19 +435,27 @@ get_object_alignment_1 (tree exp, unsigned HOST_WIDE_INT *bitposp)
}
else
{
- inner = MIN (inner, BITS_PER_UNIT);
+ known_alignment = false;
break;
}
offset = next_offset;
}
- /* Alignment is innermost object alignment adjusted by the constant
- and non-constant offset parts. */
- align = MIN (align, inner);
- bitpos = bitpos & (align - 1);
-
+ if (known_alignment)
+ {
+ /* Alignment is innermost object alignment adjusted by the constant
+ and non-constant offset parts. */
+ align = MIN (align, inner);
+ bitpos = bitpos & (align - 1);
+ *alignp = align;
+ }
+ else
+ {
+ bitpos = bitpos & (BITS_PER_UNIT - 1);
+ *alignp = BITS_PER_UNIT;
+ }
*bitposp = bitpos;
- return align;
+ return known_alignment;
}
/* Return the alignment in bits of EXP, an object. */
@@ -441,14 +466,13 @@ get_object_alignment (tree exp)
unsigned HOST_WIDE_INT bitpos = 0;
unsigned int align;
- align = get_object_alignment_1 (exp, &bitpos);
+ get_object_alignment_1 (exp, &align, &bitpos);
/* align and bitpos now specify known low bits of the pointer.
ptr & (align - 1) == bitpos. */
if (bitpos != 0)
align = (bitpos & -bitpos);
-
return align;
}
@@ -465,45 +489,57 @@ unsigned int
get_object_or_type_alignment (tree exp)
{
unsigned HOST_WIDE_INT misalign;
- unsigned int align = get_object_alignment_1 (exp, &misalign);
+ unsigned int align;
+ bool known_alignment;
gcc_assert (TREE_CODE (exp) == MEM_REF || TREE_CODE (exp) == TARGET_MEM_REF);
-
+ known_alignment = get_object_alignment_1 (exp, &align, &misalign);
if (misalign != 0)
align = (misalign & -misalign);
- else
- align = MAX (TYPE_ALIGN (TREE_TYPE (exp)), align);
+ else if (!known_alignment)
+ align = TYPE_ALIGN (TREE_TYPE (exp));
return align;
}
-/* For a pointer valued expression EXP compute values M and N such that
- M divides (EXP - N) and such that N < M. Store N in *BITPOSP and return M.
+/* For a pointer valued expression EXP compute values M and N such that M
+ divides (EXP - N) and such that N < M. If these numbers can be determined,
+ store M in alignp and N in *BITPOSP and return true. Otherwise return false
+ and store BITS_PER_UNIT to *alignp and any bit-offset to *bitposp.
- If EXP is not a pointer, 0 is returned. */
+ If EXP is not a pointer, false is returned too. */
-unsigned int
-get_pointer_alignment_1 (tree exp, unsigned HOST_WIDE_INT *bitposp)
+bool
+get_pointer_alignment_1 (tree exp, unsigned int *alignp,
+ unsigned HOST_WIDE_INT *bitposp)
{
STRIP_NOPS (exp);
if (TREE_CODE (exp) == ADDR_EXPR)
- return get_object_alignment_1 (TREE_OPERAND (exp, 0), bitposp);
+ return get_object_alignment_1 (TREE_OPERAND (exp, 0), alignp, bitposp);
else if (TREE_CODE (exp) == SSA_NAME
&& POINTER_TYPE_P (TREE_TYPE (exp)))
{
+ unsigned int ptr_align, ptr_misalign;
struct ptr_info_def *pi = SSA_NAME_PTR_INFO (exp);
- if (!pi)
+
+ if (pi && get_ptr_info_alignment (pi, &ptr_align, &ptr_misalign))
+ {
+ *bitposp = ptr_misalign * BITS_PER_UNIT;
+ *alignp = ptr_align * BITS_PER_UNIT;
+ return true;
+ }
+ else
{
*bitposp = 0;
- return BITS_PER_UNIT;
+ *alignp = BITS_PER_UNIT;
+ return false;
}
- *bitposp = pi->misalign * BITS_PER_UNIT;
- return pi->align * BITS_PER_UNIT;
}
*bitposp = 0;
- return POINTER_TYPE_P (TREE_TYPE (exp)) ? BITS_PER_UNIT : 0;
+ *alignp = BITS_PER_UNIT;
+ return false;
}
/* Return the alignment in bits of EXP, a pointer valued expression.
@@ -518,8 +554,8 @@ get_pointer_alignment (tree exp)
{
unsigned HOST_WIDE_INT bitpos = 0;
unsigned int align;
-
- align = get_pointer_alignment_1 (exp, &bitpos);
+
+ get_pointer_alignment_1 (exp, &align, &bitpos);
/* align and bitpos now specify known low bits of the pointer.
ptr & (align - 1) == bitpos. */
@@ -808,7 +844,7 @@ expand_builtin_return_addr (enum built_in_function fndecl_code, int count)
tem = RETURN_ADDR_RTX (count, tem);
#else
tem = memory_address (Pmode,
- plus_constant (tem, GET_MODE_SIZE (Pmode)));
+ plus_constant (Pmode, tem, GET_MODE_SIZE (Pmode)));
tem = gen_frame_mem (Pmode, tem);
#endif
return tem;
@@ -843,14 +879,15 @@ expand_builtin_setjmp_setup (rtx buf_addr, rtx receiver_label)
set_mem_alias_set (mem, setjmp_alias_set);
emit_move_insn (mem, targetm.builtin_setjmp_frame_value ());
- mem = gen_rtx_MEM (Pmode, plus_constant (buf_addr, GET_MODE_SIZE (Pmode))),
+ mem = gen_rtx_MEM (Pmode, plus_constant (Pmode, buf_addr,
+ GET_MODE_SIZE (Pmode))),
set_mem_alias_set (mem, setjmp_alias_set);
emit_move_insn (validize_mem (mem),
force_reg (Pmode, gen_rtx_LABEL_REF (Pmode, receiver_label)));
stack_save = gen_rtx_MEM (sa_mode,
- plus_constant (buf_addr,
+ plus_constant (Pmode, buf_addr,
2 * GET_MODE_SIZE (Pmode)));
set_mem_alias_set (stack_save, setjmp_alias_set);
emit_stack_save (SAVE_NONLOCAL, &stack_save);
@@ -971,10 +1008,10 @@ expand_builtin_longjmp (rtx buf_addr, rtx value)
#endif
{
fp = gen_rtx_MEM (Pmode, buf_addr);
- lab = gen_rtx_MEM (Pmode, plus_constant (buf_addr,
+ lab = gen_rtx_MEM (Pmode, plus_constant (Pmode, buf_addr,
GET_MODE_SIZE (Pmode)));
- stack = gen_rtx_MEM (sa_mode, plus_constant (buf_addr,
+ stack = gen_rtx_MEM (sa_mode, plus_constant (Pmode, buf_addr,
2 * GET_MODE_SIZE (Pmode)));
set_mem_alias_set (fp, setjmp_alias_set);
set_mem_alias_set (lab, setjmp_alias_set);
@@ -1048,7 +1085,8 @@ expand_builtin_nonlocal_goto (tree exp)
r_save_area = copy_to_reg (r_save_area);
r_fp = gen_rtx_MEM (Pmode, r_save_area);
r_sp = gen_rtx_MEM (STACK_SAVEAREA_MODE (SAVE_NONLOCAL),
- plus_constant (r_save_area, GET_MODE_SIZE (Pmode)));
+ plus_constant (Pmode, r_save_area,
+ GET_MODE_SIZE (Pmode)));
crtl->has_nonlocal_goto = 1;
@@ -1118,7 +1156,8 @@ expand_builtin_update_setjmp_buf (rtx buf_addr)
= gen_rtx_MEM (sa_mode,
memory_address
(sa_mode,
- plus_constant (buf_addr, 2 * GET_MODE_SIZE (Pmode))));
+ plus_constant (Pmode, buf_addr,
+ 2 * GET_MODE_SIZE (Pmode))));
emit_stack_save (SAVE_NONLOCAL, &stack_save);
}
@@ -1503,7 +1542,7 @@ expand_builtin_apply_args_1 (void)
as we might have pretended they were passed. Make sure it's a valid
operand, as emit_move_insn isn't expected to handle a PLUS. */
tem
- = force_operand (plus_constant (tem, crtl->args.pretend_args_size),
+ = force_operand (plus_constant (Pmode, tem, crtl->args.pretend_args_size),
NULL_RTX);
#endif
emit_move_insn (adjust_address (registers, Pmode, 0), tem);
@@ -1624,7 +1663,7 @@ expand_builtin_apply (rtx function, rtx arguments, rtx argsize)
dest = virtual_outgoing_args_rtx;
#ifndef STACK_GROWS_DOWNWARD
if (CONST_INT_P (argsize))
- dest = plus_constant (dest, -INTVAL (argsize));
+ dest = plus_constant (Pmode, dest, -INTVAL (argsize));
else
dest = gen_rtx_PLUS (Pmode, dest, negate_rtx (Pmode, argsize));
#endif
@@ -3316,7 +3355,8 @@ expand_movstr (tree dest, tree src, rtx target, int endp)
adjust it. */
if (endp == 1)
{
- rtx tem = plus_constant (gen_lowpart (GET_MODE (target), target), 1);
+ rtx tem = plus_constant (GET_MODE (target),
+ gen_lowpart (GET_MODE (target), target), 1);
emit_move_insn (target, force_operand (tem, NULL_RTX));
}
}
@@ -3415,7 +3455,7 @@ expand_builtin_stpcpy (tree exp, rtx target, enum machine_mode mode)
if (GET_MODE (target) != GET_MODE (ret))
ret = gen_lowpart (GET_MODE (target), ret);
- ret = plus_constant (ret, INTVAL (len_rtx));
+ ret = plus_constant (GET_MODE (ret), ret, INTVAL (len_rtx));
ret = emit_move_insn (target, force_operand (ret, NULL_RTX));
gcc_assert (ret);
@@ -5338,6 +5378,7 @@ static enum memmodel
get_memmodel (tree exp)
{
rtx op;
+ unsigned HOST_WIDE_INT val;
/* If the parameter is not a constant, it's a run time value so we'll just
convert it to MEMMODEL_SEQ_CST to avoid annoying runtime checking. */
@@ -5345,13 +5386,25 @@ get_memmodel (tree exp)
return MEMMODEL_SEQ_CST;
op = expand_normal (exp);
- if (INTVAL (op) < 0 || INTVAL (op) >= MEMMODEL_LAST)
+
+ val = INTVAL (op);
+ if (targetm.memmodel_check)
+ val = targetm.memmodel_check (val);
+ else if (val & ~MEMMODEL_MASK)
+ {
+ warning (OPT_Winvalid_memory_model,
+ "Unknown architecture specifier in memory model to builtin.");
+ return MEMMODEL_SEQ_CST;
+ }
+
+ if ((INTVAL(op) & MEMMODEL_MASK) >= MEMMODEL_LAST)
{
warning (OPT_Winvalid_memory_model,
"invalid memory model argument to builtin");
return MEMMODEL_SEQ_CST;
}
- return (enum memmodel) INTVAL (op);
+
+ return (enum memmodel) val;
}
/* Expand the __atomic_exchange intrinsic:
@@ -5366,7 +5419,7 @@ expand_builtin_atomic_exchange (enum machine_mode mode, tree exp, rtx target)
enum memmodel model;
model = get_memmodel (CALL_EXPR_ARG (exp, 2));
- if (model == MEMMODEL_CONSUME)
+ if ((model & MEMMODEL_MASK) == MEMMODEL_CONSUME)
{
error ("invalid memory model for %<__atomic_exchange%>");
return NULL_RTX;
@@ -5402,7 +5455,8 @@ expand_builtin_atomic_compare_exchange (enum machine_mode mode, tree exp,
success = get_memmodel (CALL_EXPR_ARG (exp, 4));
failure = get_memmodel (CALL_EXPR_ARG (exp, 5));
- if (failure == MEMMODEL_RELEASE || failure == MEMMODEL_ACQ_REL)
+ if ((failure & MEMMODEL_MASK) == MEMMODEL_RELEASE
+ || (failure & MEMMODEL_MASK) == MEMMODEL_ACQ_REL)
{
error ("invalid failure memory model for %<__atomic_compare_exchange%>");
return NULL_RTX;
@@ -5453,8 +5507,8 @@ expand_builtin_atomic_load (enum machine_mode mode, tree exp, rtx target)
enum memmodel model;
model = get_memmodel (CALL_EXPR_ARG (exp, 1));
- if (model == MEMMODEL_RELEASE
- || model == MEMMODEL_ACQ_REL)
+ if ((model & MEMMODEL_MASK) == MEMMODEL_RELEASE
+ || (model & MEMMODEL_MASK) == MEMMODEL_ACQ_REL)
{
error ("invalid memory model for %<__atomic_load%>");
return NULL_RTX;
@@ -5482,9 +5536,9 @@ expand_builtin_atomic_store (enum machine_mode mode, tree exp)
enum memmodel model;
model = get_memmodel (CALL_EXPR_ARG (exp, 2));
- if (model != MEMMODEL_RELAXED
- && model != MEMMODEL_SEQ_CST
- && model != MEMMODEL_RELEASE)
+ if ((model & MEMMODEL_MASK) != MEMMODEL_RELAXED
+ && (model & MEMMODEL_MASK) != MEMMODEL_SEQ_CST
+ && (model & MEMMODEL_MASK) != MEMMODEL_RELEASE)
{
error ("invalid memory model for %<__atomic_store%>");
return NULL_RTX;
@@ -5590,7 +5644,8 @@ expand_builtin_atomic_clear (tree exp)
mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode);
model = get_memmodel (CALL_EXPR_ARG (exp, 1));
- if (model == MEMMODEL_ACQUIRE || model == MEMMODEL_ACQ_REL)
+ if ((model & MEMMODEL_MASK) == MEMMODEL_ACQUIRE
+ || (model & MEMMODEL_MASK) == MEMMODEL_ACQ_REL)
{
error ("invalid memory model for %<__atomic_store%>");
return const0_rtx;
diff --git a/gcc/c-decl.c b/gcc/c-decl.c
index 158b3ad0f19..3153cf4e183 100644
--- a/gcc/c-decl.c
+++ b/gcc/c-decl.c
@@ -5811,12 +5811,12 @@ grokdeclarator (const struct c_declarator *declarator,
}
}
- /* Did array size calculations overflow? */
-
+ /* Did array size calculations overflow or does the array cover more
+ than half of the address-space? */
if (TREE_CODE (type) == ARRAY_TYPE
&& COMPLETE_TYPE_P (type)
&& TREE_CODE (TYPE_SIZE_UNIT (type)) == INTEGER_CST
- && TREE_OVERFLOW (TYPE_SIZE_UNIT (type)))
+ && ! valid_constant_size_p (TYPE_SIZE_UNIT (type)))
{
if (name)
error_at (loc, "size of array %qE is too large", name);
diff --git a/gcc/c-family/ChangeLog b/gcc/c-family/ChangeLog
index 7bd6ad7afa6..a01f3eaef45 100644
--- a/gcc/c-family/ChangeLog
+++ b/gcc/c-family/ChangeLog
@@ -1,3 +1,35 @@
+2012-05-10 Paolo Carlini <paolo.carlini@oracle.com>
+
+ PR c++/53158
+ * c-common.c (warnings_for_convert_and_check): Use warning_at.
+
+2012-05-10 Richard Guenther <rguenther@suse.de>
+
+ * c-common.c (c_sizeof_or_alignof_type): Remove assert and
+ adjust commentary about TYPE_IS_SIZETYPE types.
+
+2012-05-09 Manuel López-Ibáñez <manu@gcc.gnu.org>
+
+ PR c++/53261
+ * c-common.c (warn_logical_operator): Check that argument of
+ integer_zerop is not NULL.
+
+2012-05-05 Manuel López-Ibáñez <manu@gcc.gnu.org>
+
+ PR c/43772
+ * c-common.c (warn_logical_operator): Do not warn if either side
+ is already true or false.
+
+2012-05-04 Manuel López-Ibáñez <manu@gcc.gnu.org>
+
+ PR c/51712
+ * c-common.c (expr_original_type): New.
+ (shorten_compare): Do not warn for enumeration types.
+
+2012-05-03 Manuel López-Ibáñez <manu@gcc.gnu.org>
+
+ * c.opt (fpermissive): Add Var(flag_permissive).
+
2012-04-30 Marc Glisse <marc.glisse@inria.fr>
PR c++/51033
diff --git a/gcc/c-family/c-common.c b/gcc/c-family/c-common.c
index dce390260cd..ad988286ee6 100644
--- a/gcc/c-family/c-common.c
+++ b/gcc/c-family/c-common.c
@@ -1612,31 +1612,50 @@ warn_logical_operator (location_t location, enum tree_code code, tree type,
|| INTEGRAL_TYPE_P (TREE_TYPE (op_right))))
return;
- lhs = make_range (op_left, &in0_p, &low0, &high0, &strict_overflow_p);
- rhs = make_range (op_right, &in1_p, &low1, &high1, &strict_overflow_p);
- if (lhs && TREE_CODE (lhs) == C_MAYBE_CONST_EXPR)
+ /* We first test whether either side separately is trivially true
+ (with OR) or trivially false (with AND). If so, do not warn.
+ This is a common idiom for testing ranges of data types in
+ portable code. */
+ lhs = make_range (op_left, &in0_p, &low0, &high0, &strict_overflow_p);
+ if (!lhs)
+ return;
+ if (TREE_CODE (lhs) == C_MAYBE_CONST_EXPR)
lhs = C_MAYBE_CONST_EXPR_EXPR (lhs);
- if (rhs && TREE_CODE (rhs) == C_MAYBE_CONST_EXPR)
+ /* If this is an OR operation, invert both sides; now, the result
+ should be always false to get a warning. */
+ if (or_op)
+ in0_p = !in0_p;
+
+ tem = build_range_check (UNKNOWN_LOCATION, type, lhs, in0_p, low0, high0);
+ if (tem && integer_zerop (tem))
+ return;
+
+ rhs = make_range (op_right, &in1_p, &low1, &high1, &strict_overflow_p);
+ if (!rhs)
+ return;
+ if (TREE_CODE (rhs) == C_MAYBE_CONST_EXPR)
rhs = C_MAYBE_CONST_EXPR_EXPR (rhs);
- /* If this is an OR operation, invert both sides; we will invert
- again at the end. */
+ /* If this is an OR operation, invert both sides; now, the result
+ should be always false to get a warning. */
if (or_op)
- in0_p = !in0_p, in1_p = !in1_p;
+ in1_p = !in1_p;
+
+ tem = build_range_check (UNKNOWN_LOCATION, type, rhs, in1_p, low1, high1);
+ if (tem && integer_zerop (tem))
+ return;
- /* If both expressions are the same, if we can merge the ranges, and we
- can build the range test, return it or it inverted. */
- if (lhs && rhs && operand_equal_p (lhs, rhs, 0)
+ /* If both expressions have the same operand, if we can merge the
+ ranges, and if the range test is always false, then warn. */
+ if (operand_equal_p (lhs, rhs, 0)
&& merge_ranges (&in_p, &low, &high, in0_p, low0, high0,
in1_p, low1, high1)
&& 0 != (tem = build_range_check (UNKNOWN_LOCATION,
- type, lhs, in_p, low, high)))
+ type, lhs, in_p, low, high))
+ && integer_zerop (tem))
{
- if (TREE_CODE (tem) != INTEGER_CST)
- return;
-
if (or_op)
warning_at (location, OPT_Wlogical_op,
"logical %<or%> "
@@ -2310,6 +2329,8 @@ conversion_warning (tree type, tree expr)
void
warnings_for_convert_and_check (tree type, tree expr, tree result)
{
+ location_t loc = EXPR_LOC_OR_HERE (expr);
+
if (TREE_CODE (expr) == INTEGER_CST
&& (TREE_CODE (type) == INTEGER_TYPE
|| TREE_CODE (type) == ENUMERAL_TYPE)
@@ -2325,8 +2346,8 @@ warnings_for_convert_and_check (tree type, tree expr, tree result)
/* This detects cases like converting -129 or 256 to
unsigned char. */
if (!int_fits_type_p (expr, c_common_signed_type (type)))
- warning (OPT_Woverflow,
- "large integer implicitly truncated to unsigned type");
+ warning_at (loc, OPT_Woverflow,
+ "large integer implicitly truncated to unsigned type");
else
conversion_warning (type, expr);
}
@@ -2338,16 +2359,16 @@ warnings_for_convert_and_check (tree type, tree expr, tree result)
&& (TREE_CODE (TREE_TYPE (expr)) != INTEGER_TYPE
|| TYPE_PRECISION (TREE_TYPE (expr))
!= TYPE_PRECISION (type)))
- warning (OPT_Woverflow,
- "overflow in implicit constant conversion");
+ warning_at (loc, OPT_Woverflow,
+ "overflow in implicit constant conversion");
else
conversion_warning (type, expr);
}
else if ((TREE_CODE (result) == INTEGER_CST
|| TREE_CODE (result) == FIXED_CST) && TREE_OVERFLOW (result))
- warning (OPT_Woverflow,
- "overflow in implicit constant conversion");
+ warning_at (loc, OPT_Woverflow,
+ "overflow in implicit constant conversion");
else
conversion_warning (type, expr);
}
@@ -3481,6 +3502,15 @@ binary_op_error (location_t location, enum tree_code code,
type0, type1);
}
+/* Given an expression as a tree, return its original type. Do this
+ by stripping any conversion that preserves the sign and precision. */
+static tree
+expr_original_type (tree expr)
+{
+ STRIP_SIGN_NOPS (expr);
+ return TREE_TYPE (expr);
+}
+
/* Subroutine of build_binary_op, used for comparison operations.
See if the operands have both been converted from subword integer types
and, if so, perhaps change them both back to their original type.
@@ -3506,6 +3536,7 @@ shorten_compare (tree *op0_ptr, tree *op1_ptr, tree *restype_ptr,
int real1, real2;
tree primop0, primop1;
enum tree_code code = *rescode_ptr;
+ location_t loc = EXPR_LOC_OR_HERE (op0);
/* Throw away any conversions to wider types
already present in the operands. */
@@ -3726,9 +3757,11 @@ shorten_compare (tree *op0_ptr, tree *op1_ptr, tree *restype_ptr,
if (TREE_CODE (primop0) != INTEGER_CST)
{
if (val == truthvalue_false_node)
- warning (OPT_Wtype_limits, "comparison is always false due to limited range of data type");
+ warning_at (loc, OPT_Wtype_limits,
+ "comparison is always false due to limited range of data type");
if (val == truthvalue_true_node)
- warning (OPT_Wtype_limits, "comparison is always true due to limited range of data type");
+ warning_at (loc, OPT_Wtype_limits,
+ "comparison is always true due to limited range of data type");
}
if (val != 0)
@@ -3795,29 +3828,31 @@ shorten_compare (tree *op0_ptr, tree *op1_ptr, tree *restype_ptr,
&& TYPE_UNSIGNED (*restype_ptr))
{
tree value = 0;
+ /* All unsigned values are >= 0, so we warn. However,
+ if OP0 is a constant that is >= 0, the signedness of
+ the comparison isn't an issue, so suppress the
+ warning. */
+ bool warn =
+ warn_type_limits && !in_system_header
+ && !(TREE_CODE (primop0) == INTEGER_CST
+ && !TREE_OVERFLOW (convert (c_common_signed_type (type),
+ primop0)))
+ /* Do not warn for enumeration types. */
+ && (TREE_CODE (expr_original_type (primop0)) != ENUMERAL_TYPE);
+
switch (code)
{
case GE_EXPR:
- /* All unsigned values are >= 0, so we warn. However,
- if OP0 is a constant that is >= 0, the signedness of
- the comparison isn't an issue, so suppress the
- warning. */
- if (warn_type_limits && !in_system_header
- && !(TREE_CODE (primop0) == INTEGER_CST
- && !TREE_OVERFLOW (convert (c_common_signed_type (type),
- primop0))))
- warning (OPT_Wtype_limits,
- "comparison of unsigned expression >= 0 is always true");
+ if (warn)
+ warning_at (loc, OPT_Wtype_limits,
+ "comparison of unsigned expression >= 0 is always true");
value = truthvalue_true_node;
break;
case LT_EXPR:
- if (warn_type_limits && !in_system_header
- && !(TREE_CODE (primop0) == INTEGER_CST
- && !TREE_OVERFLOW (convert (c_common_signed_type (type),
- primop0))))
- warning (OPT_Wtype_limits,
- "comparison of unsigned expression < 0 is always false");
+ if (warn)
+ warning_at (loc, OPT_Wtype_limits,
+ "comparison of unsigned expression < 0 is always false");
value = truthvalue_false_node;
break;
@@ -4506,12 +4541,10 @@ c_sizeof_or_alignof_type (location_t loc,
value = size_int (TYPE_ALIGN_UNIT (type));
}
- /* VALUE will have an integer type with TYPE_IS_SIZETYPE set.
- TYPE_IS_SIZETYPE means that certain things (like overflow) will
- never happen. However, this node should really have type
- `size_t', which is just a typedef for an ordinary integer type. */
+ /* VALUE will have the middle-end integer type sizetype.
+ However, we should really return a value of type `size_t',
+ which is just a typedef for an ordinary integer type. */
value = fold_convert_loc (loc, size_type_node, value);
- gcc_assert (!TYPE_IS_SIZETYPE (TREE_TYPE (value)));
return value;
}
diff --git a/gcc/c-family/c.opt b/gcc/c-family/c.opt
index d2cf6ee6caa..e0c166c8d48 100644
--- a/gcc/c-family/c.opt
+++ b/gcc/c-family/c.opt
@@ -982,7 +982,7 @@ C ObjC C++ ObjC++
Look for and use PCH files even when preprocessing
fpermissive
-C++ ObjC++
+C++ ObjC++ Var(flag_permissive)
Downgrade conformance errors to warnings
fplan9-extensions
diff --git a/gcc/c-tree.h b/gcc/c-tree.h
index db60935d7de..468cfe4a278 100644
--- a/gcc/c-tree.h
+++ b/gcc/c-tree.h
@@ -621,10 +621,6 @@ extern int current_function_returns_null;
extern int current_function_returns_abnormally;
-/* Nonzero means we are reading code that came from a system header file. */
-
-extern int system_header_p;
-
/* Mode used to build pointers (VOIDmode means ptr_mode). */
extern enum machine_mode c_default_pointer_mode;
diff --git a/gcc/calls.c b/gcc/calls.c
index 8c1e0797e44..a01c4233357 100644
--- a/gcc/calls.c
+++ b/gcc/calls.c
@@ -868,6 +868,7 @@ save_fixed_argument_area (int reg_parm_stack_space, rtx argblock, int *low_to_sa
int num_to_save;
enum machine_mode save_mode;
int delta;
+ rtx addr;
rtx stack_area;
rtx save_area;
@@ -891,10 +892,8 @@ save_fixed_argument_area (int reg_parm_stack_space, rtx argblock, int *low_to_sa
#else
delta = low;
#endif
- stack_area = gen_rtx_MEM (save_mode,
- memory_address (save_mode,
- plus_constant (argblock,
- delta)));
+ addr = plus_constant (Pmode, argblock, delta);
+ stack_area = gen_rtx_MEM (save_mode, memory_address (save_mode, addr));
set_mem_align (stack_area, PARM_BOUNDARY);
if (save_mode == BLKmode)
@@ -920,16 +919,15 @@ restore_fixed_argument_area (rtx save_area, rtx argblock, int high_to_save, int
{
enum machine_mode save_mode = GET_MODE (save_area);
int delta;
- rtx stack_area;
+ rtx addr, stack_area;
#ifdef ARGS_GROW_DOWNWARD
delta = -high_to_save;
#else
delta = low_to_save;
#endif
- stack_area = gen_rtx_MEM (save_mode,
- memory_address (save_mode,
- plus_constant (argblock, delta)));
+ addr = plus_constant (Pmode, argblock, delta);
+ stack_area = gen_rtx_MEM (save_mode, memory_address (save_mode, addr));
set_mem_align (stack_area, PARM_BOUNDARY);
if (save_mode != BLKmode)
@@ -1560,11 +1558,11 @@ compute_argument_addresses (struct arg_data *args, rtx argblock, int num_actuals
continue;
if (CONST_INT_P (offset))
- addr = plus_constant (arg_reg, INTVAL (offset));
+ addr = plus_constant (Pmode, arg_reg, INTVAL (offset));
else
addr = gen_rtx_PLUS (Pmode, arg_reg, offset);
- addr = plus_constant (addr, arg_offset);
+ addr = plus_constant (Pmode, addr, arg_offset);
if (args[i].partial != 0)
{
@@ -1594,11 +1592,11 @@ compute_argument_addresses (struct arg_data *args, rtx argblock, int num_actuals
set_mem_align (args[i].stack, align);
if (CONST_INT_P (slot_offset))
- addr = plus_constant (arg_reg, INTVAL (slot_offset));
+ addr = plus_constant (Pmode, arg_reg, INTVAL (slot_offset));
else
addr = gen_rtx_PLUS (Pmode, arg_reg, slot_offset);
- addr = plus_constant (addr, arg_offset);
+ addr = plus_constant (Pmode, addr, arg_offset);
if (args[i].partial != 0)
{
@@ -1759,7 +1757,7 @@ internal_arg_pointer_based_exp (rtx rtl, bool toplevel)
rtx val = internal_arg_pointer_based_exp (XEXP (rtl, 0), toplevel);
if (val == NULL_RTX || val == pc_rtx)
return val;
- return plus_constant (val, INTVAL (XEXP (rtl, 1)));
+ return plus_constant (Pmode, val, INTVAL (XEXP (rtl, 1)));
}
/* When called at the topmost level, scan pseudo assignments in between the
@@ -2716,9 +2714,9 @@ expand_call (tree exp, rtx target, int ignore)
argblock = crtl->args.internal_arg_pointer;
argblock
#ifdef STACK_GROWS_DOWNWARD
- = plus_constant (argblock, crtl->args.pretend_args_size);
+ = plus_constant (Pmode, argblock, crtl->args.pretend_args_size);
#else
- = plus_constant (argblock, -crtl->args.pretend_args_size);
+ = plus_constant (Pmode, argblock, -crtl->args.pretend_args_size);
#endif
stored_args_map = sbitmap_alloc (args_size.constant);
sbitmap_zero (stored_args_map);
@@ -2853,7 +2851,7 @@ expand_call (tree exp, rtx target, int ignore)
{
argblock = push_block (GEN_INT (needed), 0, 0);
#ifdef ARGS_GROW_DOWNWARD
- argblock = plus_constant (argblock, needed);
+ argblock = plus_constant (Pmode, argblock, needed);
#endif
}
@@ -3890,7 +3888,8 @@ emit_library_call_value_1 (int retval, rtx orgfun, rtx value,
use virtuals anyway, they won't match the rtl patterns. */
if (virtuals_instantiated)
- argblock = plus_constant (stack_pointer_rtx, STACK_POINTER_OFFSET);
+ argblock = plus_constant (Pmode, stack_pointer_rtx,
+ STACK_POINTER_OFFSET);
else
argblock = virtual_outgoing_args_rtx;
}
@@ -3976,7 +3975,7 @@ emit_library_call_value_1 (int retval, rtx orgfun, rtx value,
enum machine_mode save_mode
= mode_for_size (size, MODE_INT, 1);
rtx adr
- = plus_constant (argblock,
+ = plus_constant (Pmode, argblock,
argvec[argnum].locate.offset.constant);
rtx stack_area
= gen_rtx_MEM (save_mode, memory_address (save_mode, adr));
@@ -4018,7 +4017,7 @@ emit_library_call_value_1 (int retval, rtx orgfun, rtx value,
/* Indicate argument access so that alias.c knows that these
values are live. */
if (argblock)
- use = plus_constant (argblock,
+ use = plus_constant (Pmode, argblock,
argvec[argnum].locate.offset.constant);
else
/* When arguments are pushed, trying to tell alias.c where
@@ -4244,7 +4243,7 @@ emit_library_call_value_1 (int retval, rtx orgfun, rtx value,
if (argvec[count].save_area)
{
enum machine_mode save_mode = GET_MODE (argvec[count].save_area);
- rtx adr = plus_constant (argblock,
+ rtx adr = plus_constant (Pmode, argblock,
argvec[count].locate.offset.constant);
rtx stack_area = gen_rtx_MEM (save_mode,
memory_address (save_mode, adr));
diff --git a/gcc/cfgcleanup.c b/gcc/cfgcleanup.c
index d06a9b1a903..e94e7e3fb41 100644
--- a/gcc/cfgcleanup.c
+++ b/gcc/cfgcleanup.c
@@ -2586,21 +2586,21 @@ try_optimize_cfg (int mode)
if (current_ir_type () == IR_RTL_CFGLAYOUT)
{
- if (b->il.rtl->footer
- && BARRIER_P (b->il.rtl->footer))
+ if (BB_FOOTER (b)
+ && BARRIER_P (BB_FOOTER (b)))
FOR_EACH_EDGE (e, ei, b->preds)
if ((e->flags & EDGE_FALLTHRU)
- && e->src->il.rtl->footer == NULL)
+ && BB_FOOTER (e->src) == NULL)
{
- if (b->il.rtl->footer)
+ if (BB_FOOTER (b))
{
- e->src->il.rtl->footer = b->il.rtl->footer;
- b->il.rtl->footer = NULL;
+ BB_FOOTER (e->src) = BB_FOOTER (b);
+ BB_FOOTER (b) = NULL;
}
else
{
start_sequence ();
- e->src->il.rtl->footer = emit_barrier ();
+ BB_FOOTER (e->src) = emit_barrier ();
end_sequence ();
}
}
diff --git a/gcc/cfgexpand.c b/gcc/cfgexpand.c
index 9de3e51c673..126b132eb05 100644
--- a/gcc/cfgexpand.c
+++ b/gcc/cfgexpand.c
@@ -873,7 +873,7 @@ expand_one_stack_var_at (tree decl, rtx base, unsigned base_align,
/* If this fails, we've overflowed the stack frame. Error nicely? */
gcc_assert (offset == trunc_int_for_mode (offset, Pmode));
- x = plus_constant (base, offset);
+ x = plus_constant (Pmode, base, offset);
x = gen_rtx_MEM (DECL_MODE (SSAVAR (decl)), x);
if (TREE_CODE (decl) != SSA_NAME)
@@ -1241,8 +1241,9 @@ expand_one_var (tree var, bool toplevel, bool really_expand)
if (really_expand)
expand_one_register_var (origvar);
}
- else if (!host_integerp (DECL_SIZE_UNIT (var), 1))
+ else if (! valid_constant_size_p (DECL_SIZE_UNIT (var)))
{
+ /* Reject variables which cover more than half of the address-space. */
if (really_expand)
{
error ("size of variable %q+D is too large", var);
@@ -2835,6 +2836,7 @@ expand_debug_expr (tree exp)
}
/* FALLTHROUGH */
case INDIRECT_REF:
+ inner_mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)));
op0 = expand_debug_expr (TREE_OPERAND (exp, 0));
if (!op0)
return NULL;
@@ -2852,7 +2854,7 @@ expand_debug_expr (tree exp)
if (!op1 || !CONST_INT_P (op1))
return NULL;
- op0 = plus_constant (op0, INTVAL (op1));
+ op0 = plus_constant (inner_mode, op0, INTVAL (op1));
}
if (POINTER_TYPE_P (TREE_TYPE (exp)))
@@ -3346,8 +3348,10 @@ expand_debug_expr (tree exp)
&& (bitoffset % BITS_PER_UNIT) == 0
&& bitsize > 0
&& bitsize == maxsize)
- return plus_constant (gen_rtx_DEBUG_IMPLICIT_PTR (mode, decl),
- bitoffset / BITS_PER_UNIT);
+ {
+ rtx base = gen_rtx_DEBUG_IMPLICIT_PTR (mode, decl);
+ return plus_constant (mode, base, bitoffset / BITS_PER_UNIT);
+ }
}
return NULL;
@@ -3729,7 +3733,8 @@ expand_gimple_basic_block (basic_block bb)
block to be in GIMPLE, instead of RTL. Therefore, we need to
access the BB sequence directly. */
stmts = bb_seq (bb);
- bb->il.gimple = NULL;
+ bb->il.gimple.seq = NULL;
+ bb->il.gimple.phi_nodes = NULL;
rtl_profile_for_bb (bb);
init_rtl_bb_info (bb);
bb->flags |= BB_RTL;
diff --git a/gcc/cfglayout.c b/gcc/cfglayout.c
index c6e1f8324d8..2a5448c7083 100644
--- a/gcc/cfglayout.c
+++ b/gcc/cfglayout.c
@@ -208,11 +208,11 @@ record_effective_endpoints (void)
rtx end;
if (PREV_INSN (BB_HEAD (bb)) && next_insn != BB_HEAD (bb))
- bb->il.rtl->header = unlink_insn_chain (next_insn,
+ BB_HEADER (bb) = unlink_insn_chain (next_insn,
PREV_INSN (BB_HEAD (bb)));
end = skip_insns_after_block (bb);
if (NEXT_INSN (BB_END (bb)) && BB_END (bb) != end)
- bb->il.rtl->footer = unlink_insn_chain (NEXT_INSN (BB_END (bb)), end);
+ BB_FOOTER (bb) = unlink_insn_chain (NEXT_INSN (BB_END (bb)), end);
next_insn = NEXT_INSN (BB_END (bb));
}
@@ -633,9 +633,8 @@ reemit_insn_block_notes (void)
/* Link the basic blocks in the correct order, compacting the basic
- block queue while at it. This also clears the visited flag on
- all basic blocks. If STAY_IN_CFGLAYOUT_MODE is false, this function
- also clears the basic block header and footer fields.
+ block queue while at it. If STAY_IN_CFGLAYOUT_MODE is false, this
+ function also clears the basic block header and footer fields.
This function is usually called after a pass (e.g. tracer) finishes
some transformations while in cfglayout mode. The required sequence
@@ -681,13 +680,12 @@ relink_block_chain (bool stay_in_cfglayout_mode)
prev_bb->next_bb = EXIT_BLOCK_PTR;
EXIT_BLOCK_PTR->prev_bb = prev_bb;
- /* Then, clean up the aux and visited fields. */
+ /* Then, clean up the aux fields. */
FOR_ALL_BB (bb)
{
bb->aux = NULL;
- bb->il.rtl->visited = 0;
if (!stay_in_cfglayout_mode)
- bb->il.rtl->header = bb->il.rtl->footer = NULL;
+ BB_HEADER (bb) = BB_FOOTER (bb) = NULL;
}
/* Maybe reset the original copy tables, they are not valid anymore
@@ -723,14 +721,14 @@ fixup_reorder_chain (void)
for (bb = ENTRY_BLOCK_PTR->next_bb; bb; bb = (basic_block) bb->aux)
{
- if (bb->il.rtl->header)
+ if (BB_HEADER (bb))
{
if (insn)
- NEXT_INSN (insn) = bb->il.rtl->header;
+ NEXT_INSN (insn) = BB_HEADER (bb);
else
- set_first_insn (bb->il.rtl->header);
- PREV_INSN (bb->il.rtl->header) = insn;
- insn = bb->il.rtl->header;
+ set_first_insn (BB_HEADER (bb));
+ PREV_INSN (BB_HEADER (bb)) = insn;
+ insn = BB_HEADER (bb);
while (NEXT_INSN (insn))
insn = NEXT_INSN (insn);
}
@@ -740,10 +738,10 @@ fixup_reorder_chain (void)
set_first_insn (BB_HEAD (bb));
PREV_INSN (BB_HEAD (bb)) = insn;
insn = BB_END (bb);
- if (bb->il.rtl->footer)
+ if (BB_FOOTER (bb))
{
- NEXT_INSN (insn) = bb->il.rtl->footer;
- PREV_INSN (bb->il.rtl->footer) = insn;
+ NEXT_INSN (insn) = BB_FOOTER (bb);
+ PREV_INSN (BB_FOOTER (bb)) = insn;
while (NEXT_INSN (insn))
insn = NEXT_INSN (insn);
}
@@ -799,7 +797,7 @@ fixup_reorder_chain (void)
{
gcc_assert (!onlyjump_p (bb_end_insn)
|| returnjump_p (bb_end_insn));
- bb->il.rtl->footer = emit_barrier_after (bb_end_insn);
+ BB_FOOTER (bb) = emit_barrier_after (bb_end_insn);
continue;
}
@@ -908,7 +906,6 @@ fixup_reorder_chain (void)
nb = force_nonfallthru_and_redirect (e_fall, e_fall->dest, ret_label);
if (nb)
{
- nb->il.rtl->visited = 1;
nb->aux = bb->aux;
bb->aux = nb;
/* Don't process this new block. */
@@ -1062,8 +1059,8 @@ fixup_fallthru_exit_predecessor (void)
bb = split_block (bb, NULL)->dest;
bb->aux = c->aux;
c->aux = bb;
- bb->il.rtl->footer = c->il.rtl->footer;
- c->il.rtl->footer = NULL;
+ BB_FOOTER (bb) = BB_FOOTER (c);
+ BB_FOOTER (c) = NULL;
}
while (c->aux != bb)
@@ -1272,24 +1269,24 @@ cfg_layout_duplicate_bb (basic_block bb)
EXIT_BLOCK_PTR->prev_bb);
BB_COPY_PARTITION (new_bb, bb);
- if (bb->il.rtl->header)
+ if (BB_HEADER (bb))
{
- insn = bb->il.rtl->header;
+ insn = BB_HEADER (bb);
while (NEXT_INSN (insn))
insn = NEXT_INSN (insn);
- insn = duplicate_insn_chain (bb->il.rtl->header, insn);
+ insn = duplicate_insn_chain (BB_HEADER (bb), insn);
if (insn)
- new_bb->il.rtl->header = unlink_insn_chain (insn, get_last_insn ());
+ BB_HEADER (new_bb) = unlink_insn_chain (insn, get_last_insn ());
}
- if (bb->il.rtl->footer)
+ if (BB_FOOTER (bb))
{
- insn = bb->il.rtl->footer;
+ insn = BB_FOOTER (bb);
while (NEXT_INSN (insn))
insn = NEXT_INSN (insn);
- insn = duplicate_insn_chain (bb->il.rtl->footer, insn);
+ insn = duplicate_insn_chain (BB_FOOTER (bb), insn);
if (insn)
- new_bb->il.rtl->footer = unlink_insn_chain (insn, get_last_insn ());
+ BB_FOOTER (new_bb) = unlink_insn_chain (insn, get_last_insn ());
}
return new_bb;
diff --git a/gcc/cfgrtl.c b/gcc/cfgrtl.c
index e3ffc9c656e..4e1ec8644d1 100644
--- a/gcc/cfgrtl.c
+++ b/gcc/cfgrtl.c
@@ -837,7 +837,7 @@ try_redirect_by_replacing_jump (edge e, basic_block target, bool in_cfglayout)
/* Selectively unlink whole insn chain. */
if (in_cfglayout)
{
- rtx insn = src->il.rtl->footer;
+ rtx insn = BB_FOOTER (src);
delete_insn_chain (kill_from, BB_END (src), false);
@@ -849,7 +849,7 @@ try_redirect_by_replacing_jump (edge e, basic_block target, bool in_cfglayout)
if (PREV_INSN (insn))
NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
else
- src->il.rtl->footer = NEXT_INSN (insn);
+ BB_FOOTER (src) = NEXT_INSN (insn);
if (NEXT_INSN (insn))
PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
}
@@ -1857,7 +1857,7 @@ rtl_verify_flow_info_1 (void)
err = 1;
}
- for (insn = bb->il.rtl->header; insn; insn = NEXT_INSN (insn))
+ for (insn = BB_HEADER (bb); insn; insn = NEXT_INSN (insn))
if (!BARRIER_P (insn)
&& BLOCK_FOR_INSN (insn) != NULL)
{
@@ -1865,7 +1865,7 @@ rtl_verify_flow_info_1 (void)
INSN_UID (insn), bb->index);
err = 1;
}
- for (insn = bb->il.rtl->footer; insn; insn = NEXT_INSN (insn))
+ for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
if (!BARRIER_P (insn)
&& BLOCK_FOR_INSN (insn) != NULL)
{
@@ -2597,8 +2597,8 @@ cfg_layout_split_block (basic_block bb, void *insnp)
rtx insn = (rtx) insnp;
basic_block new_bb = rtl_split_block (bb, insn);
- new_bb->il.rtl->footer = bb->il.rtl->footer;
- bb->il.rtl->footer = NULL;
+ BB_FOOTER (new_bb) = BB_FOOTER (bb);
+ BB_FOOTER (bb) = NULL;
return new_bb;
}
@@ -2703,24 +2703,24 @@ cfg_layout_delete_block (basic_block bb)
{
rtx insn, next, prev = PREV_INSN (BB_HEAD (bb)), *to, remaints;
- if (bb->il.rtl->header)
+ if (BB_HEADER (bb))
{
next = BB_HEAD (bb);
if (prev)
- NEXT_INSN (prev) = bb->il.rtl->header;
+ NEXT_INSN (prev) = BB_HEADER (bb);
else
- set_first_insn (bb->il.rtl->header);
- PREV_INSN (bb->il.rtl->header) = prev;
- insn = bb->il.rtl->header;
+ set_first_insn (BB_HEADER (bb));
+ PREV_INSN (BB_HEADER (bb)) = prev;
+ insn = BB_HEADER (bb);
while (NEXT_INSN (insn))
insn = NEXT_INSN (insn);
NEXT_INSN (insn) = next;
PREV_INSN (next) = insn;
}
next = NEXT_INSN (BB_END (bb));
- if (bb->il.rtl->footer)
+ if (BB_FOOTER (bb))
{
- insn = bb->il.rtl->footer;
+ insn = BB_FOOTER (bb);
while (insn)
{
if (BARRIER_P (insn))
@@ -2728,7 +2728,7 @@ cfg_layout_delete_block (basic_block bb)
if (PREV_INSN (insn))
NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
else
- bb->il.rtl->footer = NEXT_INSN (insn);
+ BB_FOOTER (bb) = NEXT_INSN (insn);
if (NEXT_INSN (insn))
PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
}
@@ -2736,11 +2736,11 @@ cfg_layout_delete_block (basic_block bb)
break;
insn = NEXT_INSN (insn);
}
- if (bb->il.rtl->footer)
+ if (BB_FOOTER (bb))
{
insn = BB_END (bb);
- NEXT_INSN (insn) = bb->il.rtl->footer;
- PREV_INSN (bb->il.rtl->footer) = insn;
+ NEXT_INSN (insn) = BB_FOOTER (bb);
+ PREV_INSN (BB_FOOTER (bb)) = insn;
while (NEXT_INSN (insn))
insn = NEXT_INSN (insn);
NEXT_INSN (insn) = next;
@@ -2751,7 +2751,7 @@ cfg_layout_delete_block (basic_block bb)
}
}
if (bb->next_bb != EXIT_BLOCK_PTR)
- to = &bb->next_bb->il.rtl->header;
+ to = &BB_HEADER (bb->next_bb);
else
to = &cfg_layout_function_footer;
@@ -2882,18 +2882,18 @@ cfg_layout_merge_blocks (basic_block a, basic_block b)
}
/* Possible line number notes should appear in between. */
- if (b->il.rtl->header)
+ if (BB_HEADER (b))
{
rtx first = BB_END (a), last;
- last = emit_insn_after_noloc (b->il.rtl->header, BB_END (a), a);
+ last = emit_insn_after_noloc (BB_HEADER (b), BB_END (a), a);
/* The above might add a BARRIER as BB_END, but as barriers
aren't valid parts of a bb, remove_insn doesn't update
BB_END if it is a barrier. So adjust BB_END here. */
while (BB_END (a) != first && BARRIER_P (BB_END (a)))
BB_END (a) = PREV_INSN (BB_END (a));
delete_insn_chain (NEXT_INSN (first), last, false);
- b->il.rtl->header = NULL;
+ BB_HEADER (b) = NULL;
}
/* In the case basic blocks are not adjacent, move them around. */
@@ -2924,20 +2924,20 @@ cfg_layout_merge_blocks (basic_block a, basic_block b)
df_bb_delete (b->index);
/* Possible tablejumps and barriers should appear after the block. */
- if (b->il.rtl->footer)
+ if (BB_FOOTER (b))
{
- if (!a->il.rtl->footer)
- a->il.rtl->footer = b->il.rtl->footer;
+ if (!BB_FOOTER (a))
+ BB_FOOTER (a) = BB_FOOTER (b);
else
{
- rtx last = a->il.rtl->footer;
+ rtx last = BB_FOOTER (a);
while (NEXT_INSN (last))
last = NEXT_INSN (last);
- NEXT_INSN (last) = b->il.rtl->footer;
- PREV_INSN (b->il.rtl->footer) = last;
+ NEXT_INSN (last) = BB_FOOTER (b);
+ PREV_INSN (BB_FOOTER (b)) = last;
}
- b->il.rtl->footer = NULL;
+ BB_FOOTER (b) = NULL;
}
/* If B was a forwarder block, propagate the locus on the edge. */
@@ -3211,8 +3211,9 @@ rtl_extract_cond_bb_edges (basic_block b, edge *branch_edge,
void
init_rtl_bb_info (basic_block bb)
{
- gcc_assert (!bb->il.rtl);
- bb->il.rtl = ggc_alloc_cleared_rtl_bb_info ();
+ gcc_assert (!bb->il.x.rtl);
+ bb->il.x.head_ = NULL;
+ bb->il.x.rtl = ggc_alloc_cleared_rtl_bb_info ();
}
/* Returns true if it is possible to remove edge E by redirecting
diff --git a/gcc/cgraph.c b/gcc/cgraph.c
index c765b3179b5..88ef1f14e93 100644
--- a/gcc/cgraph.c
+++ b/gcc/cgraph.c
@@ -288,7 +288,7 @@ cgraph_remove_edge_duplication_hook (struct cgraph_2edge_hook_list *entry)
}
/* Call all edge duplication hooks. */
-static void
+void
cgraph_call_edge_duplication_hooks (struct cgraph_edge *cs1,
struct cgraph_edge *cs2)
{
@@ -365,8 +365,8 @@ cgraph_allocate_node (void)
/* Allocate new callgraph node and insert it into basic data structures. */
-static struct cgraph_node *
-cgraph_create_node_1 (void)
+struct cgraph_node *
+cgraph_create_empty_node (void)
{
struct cgraph_node *node = cgraph_allocate_node ();
@@ -382,7 +382,7 @@ cgraph_create_node_1 (void)
struct cgraph_node *
cgraph_create_node (tree decl)
{
- struct cgraph_node *node = cgraph_create_node_1 ();
+ struct cgraph_node *node = cgraph_create_empty_node ();
gcc_assert (TREE_CODE (decl) == FUNCTION_DECL);
node->symbol.decl = decl;
@@ -620,97 +620,6 @@ cgraph_set_call_stmt (struct cgraph_edge *e, gimple new_stmt)
cgraph_add_edge_to_call_site_hash (e);
}
-/* Like cgraph_set_call_stmt but walk the clone tree and update all
- clones sharing the same function body. */
-
-void
-cgraph_set_call_stmt_including_clones (struct cgraph_node *orig,
- gimple old_stmt, gimple new_stmt)
-{
- struct cgraph_node *node;
- struct cgraph_edge *edge = cgraph_edge (orig, old_stmt);
-
- if (edge)
- cgraph_set_call_stmt (edge, new_stmt);
-
- node = orig->clones;
- if (node)
- while (node != orig)
- {
- struct cgraph_edge *edge = cgraph_edge (node, old_stmt);
- if (edge)
- cgraph_set_call_stmt (edge, new_stmt);
- if (node->clones)
- node = node->clones;
- else if (node->next_sibling_clone)
- node = node->next_sibling_clone;
- else
- {
- while (node != orig && !node->next_sibling_clone)
- node = node->clone_of;
- if (node != orig)
- node = node->next_sibling_clone;
- }
- }
-}
-
-/* Like cgraph_create_edge walk the clone tree and update all clones sharing
- same function body. If clones already have edge for OLD_STMT; only
- update the edge same way as cgraph_set_call_stmt_including_clones does.
-
- TODO: COUNT and LOOP_DEPTH should be properly distributed based on relative
- frequencies of the clones. */
-
-void
-cgraph_create_edge_including_clones (struct cgraph_node *orig,
- struct cgraph_node *callee,
- gimple old_stmt,
- gimple stmt, gcov_type count,
- int freq,
- cgraph_inline_failed_t reason)
-{
- struct cgraph_node *node;
- struct cgraph_edge *edge;
-
- if (!cgraph_edge (orig, stmt))
- {
- edge = cgraph_create_edge (orig, callee, stmt, count, freq);
- edge->inline_failed = reason;
- }
-
- node = orig->clones;
- if (node)
- while (node != orig)
- {
- struct cgraph_edge *edge = cgraph_edge (node, old_stmt);
-
- /* It is possible that clones already contain the edge while
- master didn't. Either we promoted indirect call into direct
- call in the clone or we are processing clones of unreachable
- master where edges has been removed. */
- if (edge)
- cgraph_set_call_stmt (edge, stmt);
- else if (!cgraph_edge (node, stmt))
- {
- edge = cgraph_create_edge (node, callee, stmt, count,
- freq);
- edge->inline_failed = reason;
- }
-
- if (node->clones)
- node = node->clones;
- else if (node->next_sibling_clone)
- node = node->next_sibling_clone;
- else
- {
- while (node != orig && !node->next_sibling_clone)
- node = node->clone_of;
- if (node != orig)
- node = node->next_sibling_clone;
- }
- }
-}
-
/* Allocate a cgraph_edge structure and fill it with data according to the
parameters of which only CALLEE can be NULL (when creating an indirect call
edge). */
@@ -970,6 +879,87 @@ cgraph_make_edge_direct (struct cgraph_edge *edge, struct cgraph_node *callee)
initialize_inline_failed (edge);
}
+/* If necessary, change the function declaration in the call statement
+ associated with E so that it corresponds to the edge callee. */
+
+gimple
+cgraph_redirect_edge_call_stmt_to_callee (struct cgraph_edge *e)
+{
+ tree decl = gimple_call_fndecl (e->call_stmt);
+ gimple new_stmt;
+ gimple_stmt_iterator gsi;
+#ifdef ENABLE_CHECKING
+ struct cgraph_node *node;
+#endif
+
+ if (e->indirect_unknown_callee
+ || decl == e->callee->symbol.decl)
+ return e->call_stmt;
+
+#ifdef ENABLE_CHECKING
+ if (decl)
+ {
+ node = cgraph_get_node (decl);
+ gcc_assert (!node || !node->clone.combined_args_to_skip);
+ }
+#endif
+
+ if (cgraph_dump_file)
+ {
+ fprintf (cgraph_dump_file, "updating call of %s/%i -> %s/%i: ",
+ xstrdup (cgraph_node_name (e->caller)), e->caller->uid,
+ xstrdup (cgraph_node_name (e->callee)), e->callee->uid);
+ print_gimple_stmt (cgraph_dump_file, e->call_stmt, 0, dump_flags);
+ if (e->callee->clone.combined_args_to_skip)
+ {
+ fprintf (cgraph_dump_file, " combined args to skip: ");
+ dump_bitmap (cgraph_dump_file,
+ e->callee->clone.combined_args_to_skip);
+ }
+ }
+
+ if (e->callee->clone.combined_args_to_skip)
+ {
+ int lp_nr;
+
+ new_stmt
+ = gimple_call_copy_skip_args (e->call_stmt,
+ e->callee->clone.combined_args_to_skip);
+ gimple_call_set_fndecl (new_stmt, e->callee->symbol.decl);
+
+ if (gimple_vdef (new_stmt)
+ && TREE_CODE (gimple_vdef (new_stmt)) == SSA_NAME)
+ SSA_NAME_DEF_STMT (gimple_vdef (new_stmt)) = new_stmt;
+
+ gsi = gsi_for_stmt (e->call_stmt);
+ gsi_replace (&gsi, new_stmt, false);
+ /* We need to defer cleaning EH info on the new statement to
+ fixup-cfg. We may not have dominator information at this point
+ and thus would end up with unreachable blocks and have no way
+ to communicate that we need to run CFG cleanup then. */
+ lp_nr = lookup_stmt_eh_lp (e->call_stmt);
+ if (lp_nr != 0)
+ {
+ remove_stmt_from_eh_lp (e->call_stmt);
+ add_stmt_to_eh_lp (new_stmt, lp_nr);
+ }
+ }
+ else
+ {
+ new_stmt = e->call_stmt;
+ gimple_call_set_fndecl (new_stmt, e->callee->symbol.decl);
+ update_stmt (new_stmt);
+ }
+
+ cgraph_set_call_stmt_including_clones (e->caller, e->call_stmt, new_stmt);
+
+ if (cgraph_dump_file)
+ {
+ fprintf (cgraph_dump_file, " updated to:");
+ print_gimple_stmt (cgraph_dump_file, e->call_stmt, 0, dump_flags);
+ }
+ return new_stmt;
+}
/* Update or remove the corresponding cgraph edge if a GIMPLE_CALL
OLD_STMT changed into NEW_STMT. OLD_CALL is gimple_call_fndecl
@@ -1172,95 +1162,10 @@ cgraph_release_function_body (struct cgraph_node *node)
/* If the node is abstract and needed, then do not clear DECL_INITIAL
of its associated function function declaration because it's
needed to emit debug info later. */
- if (!node->abstract_and_needed)
+ if (!node->abstract_and_needed && DECL_INITIAL (node->symbol.decl))
DECL_INITIAL (node->symbol.decl) = error_mark_node;
}
-/* NODE is being removed from symbol table; see if its entry can be replaced by
- other inline clone. */
-struct cgraph_node *
-cgraph_find_replacement_node (struct cgraph_node *node)
-{
- struct cgraph_node *next_inline_clone, *replacement;
-
- for (next_inline_clone = node->clones;
- next_inline_clone
- && next_inline_clone->symbol.decl != node->symbol.decl;
- next_inline_clone = next_inline_clone->next_sibling_clone)
- ;
-
- /* If there is inline clone of the node being removed, we need
- to put it into the position of removed node and reorganize all
- other clones to be based on it. */
- if (next_inline_clone)
- {
- struct cgraph_node *n;
- struct cgraph_node *new_clones;
-
- replacement = next_inline_clone;
-
- /* Unlink inline clone from the list of clones of removed node. */
- if (next_inline_clone->next_sibling_clone)
- next_inline_clone->next_sibling_clone->prev_sibling_clone
- = next_inline_clone->prev_sibling_clone;
- if (next_inline_clone->prev_sibling_clone)
- {
- gcc_assert (node->clones != next_inline_clone);
- next_inline_clone->prev_sibling_clone->next_sibling_clone
- = next_inline_clone->next_sibling_clone;
- }
- else
- {
- gcc_assert (node->clones == next_inline_clone);
- node->clones = next_inline_clone->next_sibling_clone;
- }
-
- new_clones = node->clones;
- node->clones = NULL;
-
- /* Copy clone info. */
- next_inline_clone->clone = node->clone;
-
- /* Now place it into clone tree at same level at NODE. */
- next_inline_clone->clone_of = node->clone_of;
- next_inline_clone->prev_sibling_clone = NULL;
- next_inline_clone->next_sibling_clone = NULL;
- if (node->clone_of)
- {
- if (node->clone_of->clones)
- node->clone_of->clones->prev_sibling_clone = next_inline_clone;
- next_inline_clone->next_sibling_clone = node->clone_of->clones;
- node->clone_of->clones = next_inline_clone;
- }
-
- /* Merge the clone list. */
- if (new_clones)
- {
- if (!next_inline_clone->clones)
- next_inline_clone->clones = new_clones;
- else
- {
- n = next_inline_clone->clones;
- while (n->next_sibling_clone)
- n = n->next_sibling_clone;
- n->next_sibling_clone = new_clones;
- new_clones->prev_sibling_clone = n;
- }
- }
-
- /* Update clone_of pointers. */
- n = new_clones;
- while (n)
- {
- n->clone_of = next_inline_clone;
- n = n->next_sibling_clone;
- }
- return replacement;
- }
- else
- return NULL;
-}
-
/* Remove the node from cgraph. */
void
@@ -1312,7 +1217,7 @@ cgraph_remove_node (struct cgraph_node *node)
}
else
{
- /* We are removing node with clones. this makes clones inconsistent,
+ /* We are removing node with clones. This makes clones inconsistent,
but assume they will be removed subsequently and just keep clone
tree intact. This can happen in unreachable function removal since
we remove unreachable functions in random order, not by bottom-up
@@ -1357,29 +1262,6 @@ cgraph_remove_node (struct cgraph_node *node)
free_nodes = node;
}
-/* Remove the node from cgraph and all inline clones inlined into it.
- Skip however removal of FORBIDDEN_NODE and return true if it needs to be
- removed. This allows to call the function from outer loop walking clone
- tree. */
-
-bool
-cgraph_remove_node_and_inline_clones (struct cgraph_node *node, struct cgraph_node *forbidden_node)
-{
- struct cgraph_edge *e, *next;
- bool found = false;
-
- if (node == forbidden_node)
- return true;
- for (e = node->callees; e; e = next)
- {
- next = e->next_callee;
- if (!e->inline_failed)
- found |= cgraph_remove_node_and_inline_clones (e->callee, forbidden_node);
- }
- cgraph_remove_node (node);
- return found;
-}
-
/* Likewise indicate that a node is having address taken. */
void
@@ -1624,291 +1506,6 @@ cgraph_function_possibly_inlined_p (tree decl)
return DECL_POSSIBLY_INLINED (decl);
}
-/* Create clone of E in the node N represented by CALL_EXPR the callgraph. */
-struct cgraph_edge *
-cgraph_clone_edge (struct cgraph_edge *e, struct cgraph_node *n,
- gimple call_stmt, unsigned stmt_uid, gcov_type count_scale,
- int freq_scale, bool update_original)
-{
- struct cgraph_edge *new_edge;
- gcov_type count = e->count * count_scale / REG_BR_PROB_BASE;
- gcov_type freq;
-
- /* We do not want to ignore loop nest after frequency drops to 0. */
- if (!freq_scale)
- freq_scale = 1;
- freq = e->frequency * (gcov_type) freq_scale / CGRAPH_FREQ_BASE;
- if (freq > CGRAPH_FREQ_MAX)
- freq = CGRAPH_FREQ_MAX;
-
- if (e->indirect_unknown_callee)
- {
- tree decl;
-
- if (call_stmt && (decl = gimple_call_fndecl (call_stmt)))
- {
- struct cgraph_node *callee = cgraph_get_node (decl);
- gcc_checking_assert (callee);
- new_edge = cgraph_create_edge (n, callee, call_stmt, count, freq);
- }
- else
- {
- new_edge = cgraph_create_indirect_edge (n, call_stmt,
- e->indirect_info->ecf_flags,
- count, freq);
- *new_edge->indirect_info = *e->indirect_info;
- }
- }
- else
- {
- new_edge = cgraph_create_edge (n, e->callee, call_stmt, count, freq);
- if (e->indirect_info)
- {
- new_edge->indirect_info
- = ggc_alloc_cleared_cgraph_indirect_call_info ();
- *new_edge->indirect_info = *e->indirect_info;
- }
- }
-
- new_edge->inline_failed = e->inline_failed;
- new_edge->indirect_inlining_edge = e->indirect_inlining_edge;
- new_edge->lto_stmt_uid = stmt_uid;
- /* Clone flags that depend on call_stmt availability manually. */
- new_edge->can_throw_external = e->can_throw_external;
- new_edge->call_stmt_cannot_inline_p = e->call_stmt_cannot_inline_p;
- if (update_original)
- {
- e->count -= new_edge->count;
- if (e->count < 0)
- e->count = 0;
- }
- cgraph_call_edge_duplication_hooks (e, new_edge);
- return new_edge;
-}
-
-
-/* Create node representing clone of N executed COUNT times. Decrease
- the execution counts from original node too.
- The new clone will have decl set to DECL that may or may not be the same
- as decl of N.
-
- When UPDATE_ORIGINAL is true, the counts are subtracted from the original
- function's profile to reflect the fact that part of execution is handled
- by node.
- When CALL_DUPLICATOIN_HOOK is true, the ipa passes are acknowledged about
- the new clone. Otherwise the caller is responsible for doing so later. */
-
-struct cgraph_node *
-cgraph_clone_node (struct cgraph_node *n, tree decl, gcov_type count, int freq,
- bool update_original,
- VEC(cgraph_edge_p,heap) *redirect_callers,
- bool call_duplication_hook)
-{
- struct cgraph_node *new_node = cgraph_create_node_1 ();
- struct cgraph_edge *e;
- gcov_type count_scale;
- unsigned i;
-
- new_node->symbol.decl = decl;
- symtab_register_node ((symtab_node)new_node);
- new_node->origin = n->origin;
- if (new_node->origin)
- {
- new_node->next_nested = new_node->origin->nested;
- new_node->origin->nested = new_node;
- }
- new_node->analyzed = n->analyzed;
- new_node->local = n->local;
- new_node->symbol.externally_visible = false;
- new_node->local.local = true;
- new_node->global = n->global;
- new_node->rtl = n->rtl;
- new_node->count = count;
- new_node->frequency = n->frequency;
- new_node->clone = n->clone;
- new_node->clone.tree_map = 0;
- if (n->count)
- {
- if (new_node->count > n->count)
- count_scale = REG_BR_PROB_BASE;
- else
- count_scale = new_node->count * REG_BR_PROB_BASE / n->count;
- }
- else
- count_scale = 0;
- if (update_original)
- {
- n->count -= count;
- if (n->count < 0)
- n->count = 0;
- }
-
- FOR_EACH_VEC_ELT (cgraph_edge_p, redirect_callers, i, e)
- {
- /* Redirect calls to the old version node to point to its new
- version. */
- cgraph_redirect_edge_callee (e, new_node);
- }
-
-
- for (e = n->callees;e; e=e->next_callee)
- cgraph_clone_edge (e, new_node, e->call_stmt, e->lto_stmt_uid,
- count_scale, freq, update_original);
-
- for (e = n->indirect_calls; e; e = e->next_callee)
- cgraph_clone_edge (e, new_node, e->call_stmt, e->lto_stmt_uid,
- count_scale, freq, update_original);
- ipa_clone_references ((symtab_node)new_node, &n->symbol.ref_list);
-
- new_node->next_sibling_clone = n->clones;
- if (n->clones)
- n->clones->prev_sibling_clone = new_node;
- n->clones = new_node;
- new_node->clone_of = n;
-
- if (call_duplication_hook)
- cgraph_call_node_duplication_hooks (n, new_node);
- return new_node;
-}
-
-/* Create a new name for clone of DECL, add SUFFIX. Returns an identifier. */
-
-static GTY(()) unsigned int clone_fn_id_num;
-
-tree
-clone_function_name (tree decl, const char *suffix)
-{
- tree name = DECL_ASSEMBLER_NAME (decl);
- size_t len = IDENTIFIER_LENGTH (name);
- char *tmp_name, *prefix;
-
- prefix = XALLOCAVEC (char, len + strlen (suffix) + 2);
- memcpy (prefix, IDENTIFIER_POINTER (name), len);
- strcpy (prefix + len + 1, suffix);
-#ifndef NO_DOT_IN_LABEL
- prefix[len] = '.';
-#elif !defined NO_DOLLAR_IN_LABEL
- prefix[len] = '$';
-#else
- prefix[len] = '_';
-#endif
- ASM_FORMAT_PRIVATE_NAME (tmp_name, prefix, clone_fn_id_num++);
- return get_identifier (tmp_name);
-}
-
-/* Create callgraph node clone with new declaration. The actual body will
- be copied later at compilation stage.
-
- TODO: after merging in ipa-sra use function call notes instead of args_to_skip
- bitmap interface.
- */
-struct cgraph_node *
-cgraph_create_virtual_clone (struct cgraph_node *old_node,
- VEC(cgraph_edge_p,heap) *redirect_callers,
- VEC(ipa_replace_map_p,gc) *tree_map,
- bitmap args_to_skip,
- const char * suffix)
-{
- tree old_decl = old_node->symbol.decl;
- struct cgraph_node *new_node = NULL;
- tree new_decl;
- size_t i;
- struct ipa_replace_map *map;
-
- if (!flag_wpa)
- gcc_checking_assert (tree_versionable_function_p (old_decl));
-
- gcc_assert (old_node->local.can_change_signature || !args_to_skip);
-
- /* Make a new FUNCTION_DECL tree node */
- if (!args_to_skip)
- new_decl = copy_node (old_decl);
- else
- new_decl = build_function_decl_skip_args (old_decl, args_to_skip, false);
- DECL_STRUCT_FUNCTION (new_decl) = NULL;
-
- /* Generate a new name for the new version. */
- DECL_NAME (new_decl) = clone_function_name (old_decl, suffix);
- SET_DECL_ASSEMBLER_NAME (new_decl, DECL_NAME (new_decl));
- SET_DECL_RTL (new_decl, NULL);
-
- new_node = cgraph_clone_node (old_node, new_decl, old_node->count,
- CGRAPH_FREQ_BASE, false,
- redirect_callers, false);
- /* Update the properties.
- Make clone visible only within this translation unit. Make sure
- that is not weak also.
- ??? We cannot use COMDAT linkage because there is no
- ABI support for this. */
- DECL_EXTERNAL (new_node->symbol.decl) = 0;
- if (DECL_ONE_ONLY (old_decl))
- DECL_SECTION_NAME (new_node->symbol.decl) = NULL;
- DECL_COMDAT_GROUP (new_node->symbol.decl) = 0;
- TREE_PUBLIC (new_node->symbol.decl) = 0;
- DECL_COMDAT (new_node->symbol.decl) = 0;
- DECL_WEAK (new_node->symbol.decl) = 0;
- DECL_STATIC_CONSTRUCTOR (new_node->symbol.decl) = 0;
- DECL_STATIC_DESTRUCTOR (new_node->symbol.decl) = 0;
- new_node->clone.tree_map = tree_map;
- new_node->clone.args_to_skip = args_to_skip;
- FOR_EACH_VEC_ELT (ipa_replace_map_p, tree_map, i, map)
- {
- tree var = map->new_tree;
- symtab_node ref_node;
-
- STRIP_NOPS (var);
- if (TREE_CODE (var) != ADDR_EXPR)
- continue;
- var = get_base_var (var);
- if (!var)
- continue;
- if (TREE_CODE (var) != FUNCTION_DECL
- && TREE_CODE (var) != VAR_DECL)
- continue;
-
- /* Record references of the future statement initializing the constant
- argument. */
- ref_node = symtab_get_node (var);
- gcc_checking_assert (ref_node);
- ipa_record_reference ((symtab_node)new_node, (symtab_node)ref_node,
- IPA_REF_ADDR, NULL);
- }
- if (!args_to_skip)
- new_node->clone.combined_args_to_skip = old_node->clone.combined_args_to_skip;
- else if (old_node->clone.combined_args_to_skip)
- {
- int newi = 0, oldi = 0;
- tree arg;
- bitmap new_args_to_skip = BITMAP_GGC_ALLOC ();
- struct cgraph_node *orig_node;
- for (orig_node = old_node; orig_node->clone_of; orig_node = orig_node->clone_of)
- ;
- for (arg = DECL_ARGUMENTS (orig_node->symbol.decl);
- arg; arg = DECL_CHAIN (arg), oldi++)
- {
- if (bitmap_bit_p (old_node->clone.combined_args_to_skip, oldi))
- {
- bitmap_set_bit (new_args_to_skip, oldi);
- continue;
- }
- if (bitmap_bit_p (args_to_skip, newi))
- bitmap_set_bit (new_args_to_skip, oldi);
- newi++;
- }
- new_node->clone.combined_args_to_skip = new_args_to_skip;
- }
- else
- new_node->clone.combined_args_to_skip = args_to_skip;
- new_node->symbol.externally_visible = 0;
- new_node->local.local = 1;
- new_node->lowered = true;
-
- cgraph_call_node_duplication_hooks (old_node, new_node);
-
-
- return new_node;
-}
-
/* NODE is no longer nested function; update cgraph accordingly. */
void
cgraph_unnest_node (struct cgraph_node *node)
diff --git a/gcc/cgraph.h b/gcc/cgraph.h
index eefb2f4e2e8..a4c23b35ec5 100644
--- a/gcc/cgraph.h
+++ b/gcc/cgraph.h
@@ -500,8 +500,6 @@ void dump_cgraph_node (FILE *, struct cgraph_node *);
void debug_cgraph_node (struct cgraph_node *);
void cgraph_remove_edge (struct cgraph_edge *);
void cgraph_remove_node (struct cgraph_node *);
-struct cgraph_node *cgraph_find_replacement_node (struct cgraph_node *);
-bool cgraph_remove_node_and_inline_clones (struct cgraph_node *, struct cgraph_node *);
void cgraph_release_function_body (struct cgraph_node *);
void cgraph_node_remove_callees (struct cgraph_node *node);
struct cgraph_edge *cgraph_create_edge (struct cgraph_node *,
@@ -511,6 +509,7 @@ struct cgraph_edge *cgraph_create_indirect_edge (struct cgraph_node *, gimple,
int, gcov_type, int);
struct cgraph_indirect_call_info *cgraph_allocate_init_indirect_info (void);
struct cgraph_node * cgraph_create_node (tree);
+struct cgraph_node * cgraph_create_empty_node (void);
struct cgraph_node * cgraph_get_create_node (tree);
struct cgraph_node * cgraph_same_body_alias (struct cgraph_node *, tree, tree);
struct cgraph_node * cgraph_add_thunk (struct cgraph_node *, tree, tree, bool, HOST_WIDE_INT,
@@ -518,24 +517,15 @@ struct cgraph_node * cgraph_add_thunk (struct cgraph_node *, tree, tree, bool, H
struct cgraph_node *cgraph_node_for_asm (tree);
struct cgraph_edge *cgraph_edge (struct cgraph_node *, gimple);
void cgraph_set_call_stmt (struct cgraph_edge *, gimple);
-void cgraph_set_call_stmt_including_clones (struct cgraph_node *, gimple, gimple);
-void cgraph_create_edge_including_clones (struct cgraph_node *,
- struct cgraph_node *,
- gimple, gimple, gcov_type, int,
- cgraph_inline_failed_t);
void cgraph_update_edges_for_call_stmt (gimple, tree, gimple);
struct cgraph_local_info *cgraph_local_info (tree);
struct cgraph_global_info *cgraph_global_info (tree);
struct cgraph_rtl_info *cgraph_rtl_info (tree);
-struct cgraph_edge * cgraph_clone_edge (struct cgraph_edge *,
- struct cgraph_node *, gimple,
- unsigned, gcov_type, int, bool);
-struct cgraph_node * cgraph_clone_node (struct cgraph_node *, tree, gcov_type,
- int, bool, VEC(cgraph_edge_p,heap) *,
- bool);
struct cgraph_node *cgraph_create_function_alias (tree, tree);
-void cgraph_call_node_duplication_hooks (struct cgraph_node *node1,
- struct cgraph_node *node2);
+void cgraph_call_node_duplication_hooks (struct cgraph_node *,
+ struct cgraph_node *);
+void cgraph_call_edge_duplication_hooks (struct cgraph_edge *,
+ struct cgraph_edge *);
void cgraph_redirect_edge_callee (struct cgraph_edge *, struct cgraph_node *);
void cgraph_make_edge_direct (struct cgraph_edge *, struct cgraph_node *);
@@ -547,16 +537,10 @@ void cgraph_unnest_node (struct cgraph_node *);
enum availability cgraph_function_body_availability (struct cgraph_node *);
void cgraph_add_new_function (tree, bool);
const char* cgraph_inline_failed_string (cgraph_inline_failed_t);
-struct cgraph_node * cgraph_create_virtual_clone (struct cgraph_node *old_node,
- VEC(cgraph_edge_p,heap)*,
- VEC(ipa_replace_map_p,gc)* tree_map,
- bitmap args_to_skip,
- const char *clone_name);
void cgraph_set_nothrow_flag (struct cgraph_node *, bool);
void cgraph_set_const_flag (struct cgraph_node *, bool, bool);
void cgraph_set_pure_flag (struct cgraph_node *, bool, bool);
-tree clone_function_name (tree decl, const char *);
bool cgraph_node_cannot_return (struct cgraph_node *);
bool cgraph_edge_cannot_lead_to_return (struct cgraph_edge *);
bool cgraph_will_be_removed_from_program_if_no_direct_calls
@@ -610,6 +594,32 @@ void cgraph_finalize_function (tree, bool);
void finalize_compilation_unit (void);
void compile (void);
void init_cgraph (void);
+bool cgraph_process_new_functions (void);
+void cgraph_process_same_body_aliases (void);
+void fixup_same_cpp_alias_visibility (symtab_node node, symtab_node target, tree alias);
+
+/* In cgraphclones.c */
+
+struct cgraph_edge * cgraph_clone_edge (struct cgraph_edge *,
+ struct cgraph_node *, gimple,
+ unsigned, gcov_type, int, bool);
+struct cgraph_node * cgraph_clone_node (struct cgraph_node *, tree, gcov_type,
+ int, bool, VEC(cgraph_edge_p,heap) *,
+ bool);
+tree clone_function_name (tree decl, const char *);
+struct cgraph_node * cgraph_create_virtual_clone (struct cgraph_node *old_node,
+ VEC(cgraph_edge_p,heap)*,
+ VEC(ipa_replace_map_p,gc)* tree_map,
+ bitmap args_to_skip,
+ const char *clone_name);
+struct cgraph_node *cgraph_find_replacement_node (struct cgraph_node *);
+bool cgraph_remove_node_and_inline_clones (struct cgraph_node *, struct cgraph_node *);
+void cgraph_set_call_stmt_including_clones (struct cgraph_node *, gimple, gimple);
+void cgraph_create_edge_including_clones (struct cgraph_node *,
+ struct cgraph_node *,
+ gimple, gimple, gcov_type, int,
+ cgraph_inline_failed_t);
+void cgraph_materialize_all_clones (void);
struct cgraph_node * cgraph_copy_node_for_versioning (struct cgraph_node *,
tree, VEC(cgraph_edge_p,heap)*, bitmap);
struct cgraph_node *cgraph_function_versioning (struct cgraph_node *,
@@ -619,10 +629,6 @@ struct cgraph_node *cgraph_function_versioning (struct cgraph_node *,
basic_block, const char *);
void tree_function_versioning (tree, tree, VEC (ipa_replace_map_p,gc)*,
bool, bitmap, bool, bitmap, basic_block);
-bool cgraph_process_new_functions (void);
-void cgraph_process_same_body_aliases (void);
-void fixup_same_cpp_alias_visibility (symtab_node node, symtab_node target, tree alias);
-
/* In cgraphbuild.c */
unsigned int rebuild_cgraph_edges (void);
@@ -631,7 +637,7 @@ int compute_call_stmt_bb_frequency (tree, basic_block bb);
void record_references_in_initializer (tree, bool);
/* In ipa.c */
-bool cgraph_remove_unreachable_nodes (bool, FILE *);
+bool symtab_remove_unreachable_nodes (bool, FILE *);
cgraph_node_set cgraph_node_set_new (void);
cgraph_node_set_iterator cgraph_node_set_find (cgraph_node_set,
struct cgraph_node *);
@@ -1120,6 +1126,7 @@ varpool_can_remove_if_no_refs (struct varpool_node *node)
return (!node->symbol.force_output && !node->symbol.used_from_other_partition
&& (DECL_COMDAT (node->symbol.decl)
|| !node->symbol.externally_visible
+ || DECL_HAS_VALUE_EXPR_P (node->symbol.decl)
|| DECL_EXTERNAL (node->symbol.decl)));
}
diff --git a/gcc/cgraphbuild.c b/gcc/cgraphbuild.c
index a847980c663..ea5351342c0 100644
--- a/gcc/cgraphbuild.c
+++ b/gcc/cgraphbuild.c
@@ -348,7 +348,7 @@ build_cgraph_edges (void)
IPA_REF_ADDR, stmt);
}
}
- for (gsi = gsi_start (phi_nodes (bb)); !gsi_end_p (gsi); gsi_next (&gsi))
+ for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
walk_stmt_load_store_addr_ops (gsi_stmt (gsi), node,
mark_load, mark_store, mark_address);
}
@@ -356,7 +356,8 @@ build_cgraph_edges (void)
/* Look for initializers of constant variables and private statics. */
FOR_EACH_LOCAL_DECL (cfun, ix, decl)
if (TREE_CODE (decl) == VAR_DECL
- && (TREE_STATIC (decl) && !DECL_EXTERNAL (decl)))
+ && (TREE_STATIC (decl) && !DECL_EXTERNAL (decl))
+ && !DECL_HAS_VALUE_EXPR_P (decl))
varpool_finalize_decl (decl);
record_eh_tables (node, cfun);
@@ -440,7 +441,7 @@ rebuild_cgraph_edges (void)
mark_store, mark_address);
}
- for (gsi = gsi_start (phi_nodes (bb)); !gsi_end_p (gsi); gsi_next (&gsi))
+ for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
walk_stmt_load_store_addr_ops (gsi_stmt (gsi), node,
mark_load, mark_store, mark_address);
}
@@ -474,7 +475,7 @@ cgraph_rebuild_references (void)
mark_store, mark_address);
}
- for (gsi = gsi_start (phi_nodes (bb)); !gsi_end_p (gsi); gsi_next (&gsi))
+ for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
walk_stmt_load_store_addr_ops (gsi_stmt (gsi), node,
mark_load, mark_store, mark_address);
}
diff --git a/gcc/cgraphclones.c b/gcc/cgraphclones.c
new file mode 100644
index 00000000000..7a6fb642413
--- /dev/null
+++ b/gcc/cgraphclones.c
@@ -0,0 +1,876 @@
+/* Callgraph clones
+ Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+ 2011, 2012 Free Software Foundation, Inc.
+ Contributed by Jan Hubicka
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* This module provides facilities for cloning functions, i.e. creating
+ new functions based on existing functions with simple modifications,
+ such as replacement of parameters.
+
+ To allow whole program optimization without actual presence of function
+ bodies, an additional infrastructure is provided for so-called virtual
+ clones
+
+ A virtual clone in the callgraph is a function that has no
+ associated body, just a description of how to create its body based
+ on a different function (which itself may be a virtual clone).
+
+ The description of function modifications includes adjustments to
+ the function's signature (which allows, for example, removing or
+ adding function arguments), substitutions to perform on the
+ function body, and, for inlined functions, a pointer to the
+ function that it will be inlined into.
+
+ It is also possible to redirect any edge of the callgraph from a
+ function to its virtual clone. This implies updating of the call
+ site to adjust for the new function signature.
+
+ Most of the transformations performed by inter-procedural
+ optimizations can be represented via virtual clones. For
+ instance, a constant propagation pass can produce a virtual clone
+ of the function which replaces one of its arguments by a
+ constant. The inliner can represent its decisions by producing a
+ clone of a function whose body will be later integrated into
+ a given function.
+
+ Using virtual clones, the program can be easily updated
+ during the Execute stage, solving most of pass interactions
+ problems that would otherwise occur during Transform.
+
+ Virtual clones are later materialized in the LTRANS stage and
+ turned into real functions. Passes executed after the virtual
+ clone were introduced also perform their Transform stage
+ on new functions, so for a pass there is no significant
+ difference between operating on a real function or a virtual
+ clone introduced before its Execute stage.
+
+ Optimization passes then work on virtual clones introduced before
+ their Execute stage as if they were real functions. The
+ only difference is that clones are not visible during the
+ Generate Summary stage. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "tree.h"
+#include "output.h"
+#include "rtl.h"
+#include "tree-flow.h"
+#include "tree-inline.h"
+#include "langhooks.h"
+#include "pointer-set.h"
+#include "toplev.h"
+#include "flags.h"
+#include "ggc.h"
+#include "debug.h"
+#include "target.h"
+#include "cgraph.h"
+#include "diagnostic.h"
+#include "timevar.h"
+#include "params.h"
+#include "fibheap.h"
+#include "intl.h"
+#include "function.h"
+#include "ipa-prop.h"
+#include "gimple.h"
+#include "tree-iterator.h"
+#include "tree-pass.h"
+#include "tree-dump.h"
+#include "gimple-pretty-print.h"
+#include "output.h"
+#include "coverage.h"
+#include "plugin.h"
+#include "ipa-inline.h"
+#include "ipa-utils.h"
+#include "lto-streamer.h"
+#include "except.h"
+
+/* Create a clone of edge E in the callgraph node N, represented by CALL_STMT. */
+struct cgraph_edge *
+cgraph_clone_edge (struct cgraph_edge *e, struct cgraph_node *n,
+ gimple call_stmt, unsigned stmt_uid, gcov_type count_scale,
+ int freq_scale, bool update_original)
+{
+ struct cgraph_edge *new_edge;
+ gcov_type count = e->count * count_scale / REG_BR_PROB_BASE;
+ gcov_type freq;
+
+ /* We do not want to ignore loop nest after frequency drops to 0. */
+ if (!freq_scale)
+ freq_scale = 1;
+ freq = e->frequency * (gcov_type) freq_scale / CGRAPH_FREQ_BASE;
+ if (freq > CGRAPH_FREQ_MAX)
+ freq = CGRAPH_FREQ_MAX;
+
+ if (e->indirect_unknown_callee)
+ {
+ tree decl;
+
+ if (call_stmt && (decl = gimple_call_fndecl (call_stmt)))
+ {
+ struct cgraph_node *callee = cgraph_get_node (decl);
+ gcc_checking_assert (callee);
+ new_edge = cgraph_create_edge (n, callee, call_stmt, count, freq);
+ }
+ else
+ {
+ new_edge = cgraph_create_indirect_edge (n, call_stmt,
+ e->indirect_info->ecf_flags,
+ count, freq);
+ *new_edge->indirect_info = *e->indirect_info;
+ }
+ }
+ else
+ {
+ new_edge = cgraph_create_edge (n, e->callee, call_stmt, count, freq);
+ if (e->indirect_info)
+ {
+ new_edge->indirect_info
+ = ggc_alloc_cleared_cgraph_indirect_call_info ();
+ *new_edge->indirect_info = *e->indirect_info;
+ }
+ }
+
+ new_edge->inline_failed = e->inline_failed;
+ new_edge->indirect_inlining_edge = e->indirect_inlining_edge;
+ new_edge->lto_stmt_uid = stmt_uid;
+ /* Clone flags that depend on call_stmt availability manually. */
+ new_edge->can_throw_external = e->can_throw_external;
+ new_edge->call_stmt_cannot_inline_p = e->call_stmt_cannot_inline_p;
+ if (update_original)
+ {
+ e->count -= new_edge->count;
+ if (e->count < 0)
+ e->count = 0;
+ }
+ cgraph_call_edge_duplication_hooks (e, new_edge);
+ return new_edge;
+}
+
+
+/* Create node representing clone of N executed COUNT times. Decrease
+ the execution counts from original node too.
+ The new clone will have decl set to DECL that may or may not be the same
+ as decl of N.
+
+ When UPDATE_ORIGINAL is true, the counts are subtracted from the original
+ function's profile to reflect the fact that part of execution is handled
+ by node.
+ When CALL_DUPLICATION_HOOK is true, the IPA passes are notified about
+ the new clone. Otherwise the caller is responsible for doing so later. */
+
+struct cgraph_node *
+cgraph_clone_node (struct cgraph_node *n, tree decl, gcov_type count, int freq,
+ bool update_original,
+ VEC(cgraph_edge_p,heap) *redirect_callers,
+ bool call_duplication_hook)
+{
+ struct cgraph_node *new_node = cgraph_create_empty_node ();
+ struct cgraph_edge *e;
+ gcov_type count_scale;
+ unsigned i;
+
+ new_node->symbol.decl = decl;
+ symtab_register_node ((symtab_node)new_node);
+ new_node->origin = n->origin;
+ if (new_node->origin)
+ {
+ new_node->next_nested = new_node->origin->nested;
+ new_node->origin->nested = new_node;
+ }
+ new_node->analyzed = n->analyzed;
+ new_node->local = n->local;
+ new_node->symbol.externally_visible = false;
+ new_node->local.local = true;
+ new_node->global = n->global;
+ new_node->rtl = n->rtl;
+ new_node->count = count;
+ new_node->frequency = n->frequency;
+ new_node->clone = n->clone;
+ new_node->clone.tree_map = 0;
+ if (n->count)
+ {
+ if (new_node->count > n->count)
+ count_scale = REG_BR_PROB_BASE;
+ else
+ count_scale = new_node->count * REG_BR_PROB_BASE / n->count;
+ }
+ else
+ count_scale = 0;
+ if (update_original)
+ {
+ n->count -= count;
+ if (n->count < 0)
+ n->count = 0;
+ }
+
+ FOR_EACH_VEC_ELT (cgraph_edge_p, redirect_callers, i, e)
+ {
+ /* Redirect calls to the old version node to point to its new
+ version. */
+ cgraph_redirect_edge_callee (e, new_node);
+ }
+
+
+ for (e = n->callees;e; e=e->next_callee)
+ cgraph_clone_edge (e, new_node, e->call_stmt, e->lto_stmt_uid,
+ count_scale, freq, update_original);
+
+ for (e = n->indirect_calls; e; e = e->next_callee)
+ cgraph_clone_edge (e, new_node, e->call_stmt, e->lto_stmt_uid,
+ count_scale, freq, update_original);
+ ipa_clone_references ((symtab_node)new_node, &n->symbol.ref_list);
+
+ new_node->next_sibling_clone = n->clones;
+ if (n->clones)
+ n->clones->prev_sibling_clone = new_node;
+ n->clones = new_node;
+ new_node->clone_of = n;
+
+ if (call_duplication_hook)
+ cgraph_call_node_duplication_hooks (n, new_node);
+ return new_node;
+}
+
+/* Create a new name for clone of DECL, add SUFFIX. Returns an identifier. */
+
+static GTY(()) unsigned int clone_fn_id_num;
+
+tree
+clone_function_name (tree decl, const char *suffix)
+{
+ tree name = DECL_ASSEMBLER_NAME (decl);
+ size_t len = IDENTIFIER_LENGTH (name);
+ char *tmp_name, *prefix;
+
+ prefix = XALLOCAVEC (char, len + strlen (suffix) + 2);
+ memcpy (prefix, IDENTIFIER_POINTER (name), len);
+ strcpy (prefix + len + 1, suffix);
+#ifndef NO_DOT_IN_LABEL
+ prefix[len] = '.';
+#elif !defined NO_DOLLAR_IN_LABEL
+ prefix[len] = '$';
+#else
+ prefix[len] = '_';
+#endif
+ ASM_FORMAT_PRIVATE_NAME (tmp_name, prefix, clone_fn_id_num++);
+ return get_identifier (tmp_name);
+}
+
+/* Create callgraph node clone with new declaration. The actual body will
+ be copied later at compilation stage.
+
+ TODO: after merging in ipa-sra use function call notes instead of args_to_skip
+ bitmap interface.
+ */
+struct cgraph_node *
+cgraph_create_virtual_clone (struct cgraph_node *old_node,
+ VEC(cgraph_edge_p,heap) *redirect_callers,
+ VEC(ipa_replace_map_p,gc) *tree_map,
+ bitmap args_to_skip,
+ const char * suffix)
+{
+ tree old_decl = old_node->symbol.decl;
+ struct cgraph_node *new_node = NULL;
+ tree new_decl;
+ size_t i;
+ struct ipa_replace_map *map;
+
+ if (!flag_wpa)
+ gcc_checking_assert (tree_versionable_function_p (old_decl));
+
+ gcc_assert (old_node->local.can_change_signature || !args_to_skip);
+
+ /* Make a new FUNCTION_DECL tree node */
+ if (!args_to_skip)
+ new_decl = copy_node (old_decl);
+ else
+ new_decl = build_function_decl_skip_args (old_decl, args_to_skip, false);
+ DECL_STRUCT_FUNCTION (new_decl) = NULL;
+
+ /* Generate a new name for the new version. */
+ DECL_NAME (new_decl) = clone_function_name (old_decl, suffix);
+ SET_DECL_ASSEMBLER_NAME (new_decl, DECL_NAME (new_decl));
+ SET_DECL_RTL (new_decl, NULL);
+
+ new_node = cgraph_clone_node (old_node, new_decl, old_node->count,
+ CGRAPH_FREQ_BASE, false,
+ redirect_callers, false);
+ /* Update the properties.
+ Make clone visible only within this translation unit. Make sure
+ that is not weak also.
+ ??? We cannot use COMDAT linkage because there is no
+ ABI support for this. */
+ DECL_EXTERNAL (new_node->symbol.decl) = 0;
+ if (DECL_ONE_ONLY (old_decl))
+ DECL_SECTION_NAME (new_node->symbol.decl) = NULL;
+ DECL_COMDAT_GROUP (new_node->symbol.decl) = 0;
+ TREE_PUBLIC (new_node->symbol.decl) = 0;
+ DECL_COMDAT (new_node->symbol.decl) = 0;
+ DECL_WEAK (new_node->symbol.decl) = 0;
+ DECL_STATIC_CONSTRUCTOR (new_node->symbol.decl) = 0;
+ DECL_STATIC_DESTRUCTOR (new_node->symbol.decl) = 0;
+ new_node->clone.tree_map = tree_map;
+ new_node->clone.args_to_skip = args_to_skip;
+ FOR_EACH_VEC_ELT (ipa_replace_map_p, tree_map, i, map)
+ {
+ tree var = map->new_tree;
+ symtab_node ref_node;
+
+ STRIP_NOPS (var);
+ if (TREE_CODE (var) != ADDR_EXPR)
+ continue;
+ var = get_base_var (var);
+ if (!var)
+ continue;
+ if (TREE_CODE (var) != FUNCTION_DECL
+ && TREE_CODE (var) != VAR_DECL)
+ continue;
+
+ /* Record references of the future statement initializing the constant
+ argument. */
+ ref_node = symtab_get_node (var);
+ gcc_checking_assert (ref_node);
+ ipa_record_reference ((symtab_node)new_node, (symtab_node)ref_node,
+ IPA_REF_ADDR, NULL);
+ }
+ if (!args_to_skip)
+ new_node->clone.combined_args_to_skip = old_node->clone.combined_args_to_skip;
+ else if (old_node->clone.combined_args_to_skip)
+ {
+ int newi = 0, oldi = 0;
+ tree arg;
+ bitmap new_args_to_skip = BITMAP_GGC_ALLOC ();
+ struct cgraph_node *orig_node;
+ for (orig_node = old_node; orig_node->clone_of; orig_node = orig_node->clone_of)
+ ;
+ for (arg = DECL_ARGUMENTS (orig_node->symbol.decl);
+ arg; arg = DECL_CHAIN (arg), oldi++)
+ {
+ if (bitmap_bit_p (old_node->clone.combined_args_to_skip, oldi))
+ {
+ bitmap_set_bit (new_args_to_skip, oldi);
+ continue;
+ }
+ if (bitmap_bit_p (args_to_skip, newi))
+ bitmap_set_bit (new_args_to_skip, oldi);
+ newi++;
+ }
+ new_node->clone.combined_args_to_skip = new_args_to_skip;
+ }
+ else
+ new_node->clone.combined_args_to_skip = args_to_skip;
+ new_node->symbol.externally_visible = 0;
+ new_node->local.local = 1;
+ new_node->lowered = true;
+
+ cgraph_call_node_duplication_hooks (old_node, new_node);
+
+
+ return new_node;
+}
+
+/* NODE is being removed from symbol table; see if its entry can be replaced by
+ other inline clone. */
+struct cgraph_node *
+cgraph_find_replacement_node (struct cgraph_node *node)
+{
+ struct cgraph_node *next_inline_clone, *replacement;
+
+ for (next_inline_clone = node->clones;
+ next_inline_clone
+ && next_inline_clone->symbol.decl != node->symbol.decl;
+ next_inline_clone = next_inline_clone->next_sibling_clone)
+ ;
+
+ /* If there is inline clone of the node being removed, we need
+ to put it into the position of removed node and reorganize all
+ other clones to be based on it. */
+ if (next_inline_clone)
+ {
+ struct cgraph_node *n;
+ struct cgraph_node *new_clones;
+
+ replacement = next_inline_clone;
+
+ /* Unlink inline clone from the list of clones of removed node. */
+ if (next_inline_clone->next_sibling_clone)
+ next_inline_clone->next_sibling_clone->prev_sibling_clone
+ = next_inline_clone->prev_sibling_clone;
+ if (next_inline_clone->prev_sibling_clone)
+ {
+ gcc_assert (node->clones != next_inline_clone);
+ next_inline_clone->prev_sibling_clone->next_sibling_clone
+ = next_inline_clone->next_sibling_clone;
+ }
+ else
+ {
+ gcc_assert (node->clones == next_inline_clone);
+ node->clones = next_inline_clone->next_sibling_clone;
+ }
+
+ new_clones = node->clones;
+ node->clones = NULL;
+
+ /* Copy clone info. */
+ next_inline_clone->clone = node->clone;
+
+ /* Now place it into clone tree at same level at NODE. */
+ next_inline_clone->clone_of = node->clone_of;
+ next_inline_clone->prev_sibling_clone = NULL;
+ next_inline_clone->next_sibling_clone = NULL;
+ if (node->clone_of)
+ {
+ if (node->clone_of->clones)
+ node->clone_of->clones->prev_sibling_clone = next_inline_clone;
+ next_inline_clone->next_sibling_clone = node->clone_of->clones;
+ node->clone_of->clones = next_inline_clone;
+ }
+
+ /* Merge the clone list. */
+ if (new_clones)
+ {
+ if (!next_inline_clone->clones)
+ next_inline_clone->clones = new_clones;
+ else
+ {
+ n = next_inline_clone->clones;
+ while (n->next_sibling_clone)
+ n = n->next_sibling_clone;
+ n->next_sibling_clone = new_clones;
+ new_clones->prev_sibling_clone = n;
+ }
+ }
+
+ /* Update clone_of pointers. */
+ n = new_clones;
+ while (n)
+ {
+ n->clone_of = next_inline_clone;
+ n = n->next_sibling_clone;
+ }
+ return replacement;
+ }
+ else
+ return NULL;
+}
+
+/* Like cgraph_set_call_stmt but walk the clone tree and update all
+ clones sharing the same function body. */
+
+void
+cgraph_set_call_stmt_including_clones (struct cgraph_node *orig,
+ gimple old_stmt, gimple new_stmt)
+{
+ struct cgraph_node *node;
+ struct cgraph_edge *edge = cgraph_edge (orig, old_stmt);
+
+ if (edge)
+ cgraph_set_call_stmt (edge, new_stmt);
+
+ node = orig->clones;
+ if (node)
+ while (node != orig)
+ {
+ struct cgraph_edge *edge = cgraph_edge (node, old_stmt);
+ if (edge)
+ cgraph_set_call_stmt (edge, new_stmt);
+ if (node->clones)
+ node = node->clones;
+ else if (node->next_sibling_clone)
+ node = node->next_sibling_clone;
+ else
+ {
+ while (node != orig && !node->next_sibling_clone)
+ node = node->clone_of;
+ if (node != orig)
+ node = node->next_sibling_clone;
+ }
+ }
+}
+
+/* Like cgraph_create_edge walk the clone tree and update all clones sharing
+ same function body. If clones already have edge for OLD_STMT; only
+ update the edge same way as cgraph_set_call_stmt_including_clones does.
+
+ TODO: COUNT and LOOP_DEPTH should be properly distributed based on relative
+ frequencies of the clones. */
+
+void
+cgraph_create_edge_including_clones (struct cgraph_node *orig,
+ struct cgraph_node *callee,
+ gimple old_stmt,
+ gimple stmt, gcov_type count,
+ int freq,
+ cgraph_inline_failed_t reason)
+{
+ struct cgraph_node *node;
+ struct cgraph_edge *edge;
+
+ if (!cgraph_edge (orig, stmt))
+ {
+ edge = cgraph_create_edge (orig, callee, stmt, count, freq);
+ edge->inline_failed = reason;
+ }
+
+ node = orig->clones;
+ if (node)
+ while (node != orig)
+ {
+ struct cgraph_edge *edge = cgraph_edge (node, old_stmt);
+
+ /* It is possible that clones already contain the edge while
+ master didn't. Either we promoted indirect call into direct
+ call in the clone or we are processing clones of unreachable
+ master where edges have been removed. */
+ if (edge)
+ cgraph_set_call_stmt (edge, stmt);
+ else if (!cgraph_edge (node, stmt))
+ {
+ edge = cgraph_create_edge (node, callee, stmt, count,
+ freq);
+ edge->inline_failed = reason;
+ }
+
+ if (node->clones)
+ node = node->clones;
+ else if (node->next_sibling_clone)
+ node = node->next_sibling_clone;
+ else
+ {
+ while (node != orig && !node->next_sibling_clone)
+ node = node->clone_of;
+ if (node != orig)
+ node = node->next_sibling_clone;
+ }
+ }
+}
+
+/* Remove the node from cgraph and all inline clones inlined into it.
+ However, skip removal of FORBIDDEN_NODE and return true if it needs to be
+ removed. This allows calling the function from an outer loop walking the clone
+ tree. */
+
+bool
+cgraph_remove_node_and_inline_clones (struct cgraph_node *node, struct cgraph_node *forbidden_node)
+{
+ struct cgraph_edge *e, *next;
+ bool found = false;
+
+ if (node == forbidden_node)
+ return true;
+ for (e = node->callees; e; e = next)
+ {
+ next = e->next_callee;
+ if (!e->inline_failed)
+ found |= cgraph_remove_node_and_inline_clones (e->callee, forbidden_node);
+ }
+ cgraph_remove_node (node);
+ return found;
+}
+
+/* The edges representing the callers of the NEW_VERSION node were
+ fixed by cgraph_function_versioning (), now the call_expr in their
+ respective tree code should be updated to call the NEW_VERSION. */
+
+static void
+update_call_expr (struct cgraph_node *new_version)
+{
+ struct cgraph_edge *e;
+
+ gcc_assert (new_version);
+
+ /* Update the call expr on the edges to call the new version. */
+ for (e = new_version->callers; e; e = e->next_caller)
+ {
+ struct function *inner_function = DECL_STRUCT_FUNCTION (e->caller->symbol.decl);
+ gimple_call_set_fndecl (e->call_stmt, new_version->symbol.decl);
+ maybe_clean_eh_stmt_fn (inner_function, e->call_stmt);
+ }
+}
+
+
+/* Create a new cgraph node which is the new version of
+ OLD_VERSION node. REDIRECT_CALLERS holds the callers
+ edges which should be redirected to point to
+ NEW_VERSION. ALL the callees edges of OLD_VERSION
+ are cloned to the new version node. Return the new
+ version node.
+
+ If non-NULL BLOCK_TO_COPY determine what basic blocks
+ was copied to prevent duplications of calls that are dead
+ in the clone. */
+
+struct cgraph_node *
+cgraph_copy_node_for_versioning (struct cgraph_node *old_version,
+ tree new_decl,
+ VEC(cgraph_edge_p,heap) *redirect_callers,
+ bitmap bbs_to_copy)
+ {
+ struct cgraph_node *new_version;
+ struct cgraph_edge *e;
+ unsigned i;
+
+ gcc_assert (old_version);
+
+ new_version = cgraph_create_node (new_decl);
+
+ new_version->analyzed = old_version->analyzed;
+ new_version->local = old_version->local;
+ new_version->symbol.externally_visible = false;
+ new_version->local.local = old_version->analyzed;
+ new_version->global = old_version->global;
+ new_version->rtl = old_version->rtl;
+ new_version->count = old_version->count;
+
+ for (e = old_version->callees; e; e=e->next_callee)
+ if (!bbs_to_copy
+ || bitmap_bit_p (bbs_to_copy, gimple_bb (e->call_stmt)->index))
+ cgraph_clone_edge (e, new_version, e->call_stmt,
+ e->lto_stmt_uid, REG_BR_PROB_BASE,
+ CGRAPH_FREQ_BASE,
+ true);
+ for (e = old_version->indirect_calls; e; e=e->next_callee)
+ if (!bbs_to_copy
+ || bitmap_bit_p (bbs_to_copy, gimple_bb (e->call_stmt)->index))
+ cgraph_clone_edge (e, new_version, e->call_stmt,
+ e->lto_stmt_uid, REG_BR_PROB_BASE,
+ CGRAPH_FREQ_BASE,
+ true);
+ FOR_EACH_VEC_ELT (cgraph_edge_p, redirect_callers, i, e)
+ {
+ /* Redirect calls to the old version node to point to its new
+ version. */
+ cgraph_redirect_edge_callee (e, new_version);
+ }
+
+ cgraph_call_node_duplication_hooks (old_version, new_version);
+
+ return new_version;
+ }
+
+/* Perform function versioning.
+ Function versioning includes copying of the tree and
+ a callgraph update (creating a new cgraph node and updating
+ its callees and callers).
+
+ REDIRECT_CALLERS varray includes the edges to be redirected
+ to the new version.
+
+ TREE_MAP is a mapping of tree nodes we want to replace with
+ new ones (according to results of prior analysis).
+ OLD_VERSION_NODE is the node that is versioned.
+
+ If non-NULL ARGS_TO_SKIP determine function parameters to remove
+ from new version.
+ If SKIP_RETURN is true, the new version will return void.
+ If non-NULL BLOCK_TO_COPY determine what basic blocks to copy.
+ If non_NULL NEW_ENTRY determine new entry BB of the clone.
+
+ Return the new version's cgraph node. */
+
+struct cgraph_node *
+cgraph_function_versioning (struct cgraph_node *old_version_node,
+ VEC(cgraph_edge_p,heap) *redirect_callers,
+ VEC (ipa_replace_map_p,gc)* tree_map,
+ bitmap args_to_skip,
+ bool skip_return,
+ bitmap bbs_to_copy,
+ basic_block new_entry_block,
+ const char *clone_name)
+{
+ tree old_decl = old_version_node->symbol.decl;
+ struct cgraph_node *new_version_node = NULL;
+ tree new_decl;
+
+ if (!tree_versionable_function_p (old_decl))
+ return NULL;
+
+ gcc_assert (old_version_node->local.can_change_signature || !args_to_skip);
+
+ /* Make a new FUNCTION_DECL tree node for the new version. */
+ if (!args_to_skip && !skip_return)
+ new_decl = copy_node (old_decl);
+ else
+ new_decl
+ = build_function_decl_skip_args (old_decl, args_to_skip, skip_return);
+
+ /* Generate a new name for the new version. */
+ DECL_NAME (new_decl) = clone_function_name (old_decl, clone_name);
+ SET_DECL_ASSEMBLER_NAME (new_decl, DECL_NAME (new_decl));
+ SET_DECL_RTL (new_decl, NULL);
+
+ /* When the old decl was a con-/destructor make sure the clone isn't. */
+ DECL_STATIC_CONSTRUCTOR(new_decl) = 0;
+ DECL_STATIC_DESTRUCTOR(new_decl) = 0;
+
+ /* Create the new version's call-graph node.
+ and update the edges of the new node. */
+ new_version_node =
+ cgraph_copy_node_for_versioning (old_version_node, new_decl,
+ redirect_callers, bbs_to_copy);
+
+ /* Copy the OLD_VERSION_NODE function tree to the new version. */
+ tree_function_versioning (old_decl, new_decl, tree_map, false, args_to_skip,
+ skip_return, bbs_to_copy, new_entry_block);
+
+ /* Update the new version's properties.
+ Make The new version visible only within this translation unit. Make sure
+ that is not weak also.
+ ??? We cannot use COMDAT linkage because there is no
+ ABI support for this. */
+ symtab_make_decl_local (new_version_node->symbol.decl);
+ DECL_VIRTUAL_P (new_version_node->symbol.decl) = 0;
+ new_version_node->symbol.externally_visible = 0;
+ new_version_node->local.local = 1;
+ new_version_node->lowered = true;
+
+ /* Update the call_expr on the edges to call the new version node. */
+ update_call_expr (new_version_node);
+
+ cgraph_call_function_insertion_hooks (new_version_node);
+ return new_version_node;
+}
+
+/* Given virtual clone, turn it into actual clone. */
+
+static void
+cgraph_materialize_clone (struct cgraph_node *node)
+{
+ bitmap_obstack_initialize (NULL);
+ node->former_clone_of = node->clone_of->symbol.decl;
+ if (node->clone_of->former_clone_of)
+ node->former_clone_of = node->clone_of->former_clone_of;
+ /* Copy the OLD_VERSION_NODE function tree to the new version. */
+ tree_function_versioning (node->clone_of->symbol.decl, node->symbol.decl,
+ node->clone.tree_map, true,
+ node->clone.args_to_skip, false,
+ NULL, NULL);
+ if (cgraph_dump_file)
+ {
+ dump_function_to_file (node->clone_of->symbol.decl, cgraph_dump_file, dump_flags);
+ dump_function_to_file (node->symbol.decl, cgraph_dump_file, dump_flags);
+ }
+
+ /* Function is no longer clone. */
+ if (node->next_sibling_clone)
+ node->next_sibling_clone->prev_sibling_clone = node->prev_sibling_clone;
+ if (node->prev_sibling_clone)
+ node->prev_sibling_clone->next_sibling_clone = node->next_sibling_clone;
+ else
+ node->clone_of->clones = node->next_sibling_clone;
+ node->next_sibling_clone = NULL;
+ node->prev_sibling_clone = NULL;
+ if (!node->clone_of->analyzed && !node->clone_of->clones)
+ {
+ cgraph_release_function_body (node->clone_of);
+ cgraph_node_remove_callees (node->clone_of);
+ ipa_remove_all_references (&node->clone_of->symbol.ref_list);
+ }
+ node->clone_of = NULL;
+ bitmap_obstack_release (NULL);
+}
+
+/* Once all functions from compilation unit are in memory, produce all clones
+ and update all calls. We might also do this on demand if we don't want to
+ bring all functions to memory prior compilation, but current WHOPR
+ implementation does that and it is a bit easier to keep everything right in
+ this order. */
+
+void
+cgraph_materialize_all_clones (void)
+{
+ struct cgraph_node *node;
+ bool stabilized = false;
+
+ if (cgraph_dump_file)
+ fprintf (cgraph_dump_file, "Materializing clones\n");
+#ifdef ENABLE_CHECKING
+ verify_cgraph ();
+#endif
+
+ /* We can also do topological order, but number of iterations should be
+ bounded by number of IPA passes since single IPA pass is probably not
+ going to create clones of clones it created itself. */
+ while (!stabilized)
+ {
+ stabilized = true;
+ FOR_EACH_FUNCTION (node)
+ {
+ if (node->clone_of && node->symbol.decl != node->clone_of->symbol.decl
+ && !gimple_has_body_p (node->symbol.decl))
+ {
+ if (gimple_has_body_p (node->clone_of->symbol.decl))
+ {
+ if (cgraph_dump_file)
+ {
+ fprintf (cgraph_dump_file, "cloning %s to %s\n",
+ xstrdup (cgraph_node_name (node->clone_of)),
+ xstrdup (cgraph_node_name (node)));
+ if (node->clone.tree_map)
+ {
+ unsigned int i;
+ fprintf (cgraph_dump_file, " replace map: ");
+ for (i = 0; i < VEC_length (ipa_replace_map_p,
+ node->clone.tree_map);
+ i++)
+ {
+ struct ipa_replace_map *replace_info;
+ replace_info = VEC_index (ipa_replace_map_p,
+ node->clone.tree_map,
+ i);
+ print_generic_expr (cgraph_dump_file, replace_info->old_tree, 0);
+ fprintf (cgraph_dump_file, " -> ");
+ print_generic_expr (cgraph_dump_file, replace_info->new_tree, 0);
+ fprintf (cgraph_dump_file, "%s%s;",
+ replace_info->replace_p ? "(replace)":"",
+ replace_info->ref_p ? "(ref)":"");
+ }
+ fprintf (cgraph_dump_file, "\n");
+ }
+ if (node->clone.args_to_skip)
+ {
+ fprintf (cgraph_dump_file, " args_to_skip: ");
+ dump_bitmap (cgraph_dump_file, node->clone.args_to_skip);
+ }
+ if (node->clone.args_to_skip)
+ {
+ fprintf (cgraph_dump_file, " combined_args_to_skip:");
+ dump_bitmap (cgraph_dump_file, node->clone.combined_args_to_skip);
+ }
+ }
+ cgraph_materialize_clone (node);
+ stabilized = false;
+ }
+ }
+ }
+ }
+ FOR_EACH_FUNCTION (node)
+ if (!node->analyzed && node->callees)
+ cgraph_node_remove_callees (node);
+ if (cgraph_dump_file)
+ fprintf (cgraph_dump_file, "Materialization Call site updates done.\n");
+#ifdef ENABLE_CHECKING
+ verify_cgraph ();
+#endif
+ symtab_remove_unreachable_nodes (false, cgraph_dump_file);
+}
+
+#include "gt-cgraphclones.h"
diff --git a/gcc/cgraphunit.c b/gcc/cgraphunit.c
index ee69afa0e8c..52c69b04f16 100644
--- a/gcc/cgraphunit.c
+++ b/gcc/cgraphunit.c
@@ -646,7 +646,7 @@ cgraph_analyze_function (struct cgraph_node *node)
function we lower it, which will require gimplified nested
functions, so we can end up here with an already gimplified
body. */
- if (!gimple_body (decl))
+ if (!gimple_has_body_p (decl))
gimplify_function_tree (decl);
dump_function (TDI_generic, decl);
@@ -834,6 +834,10 @@ varpool_finalize_decl (tree decl)
enqueue_node ((symtab_node)node);
if (cgraph_state >= CGRAPH_STATE_IPA_SSA)
varpool_analyze_node (node);
+ /* Some frontends produce various interface variables after compilation
+ finished. */
+ if (cgraph_state == CGRAPH_STATE_FINISHED)
+ varpool_assemble_decl (node);
}
/* Discover all functions and variables that are trivially needed, analyze
@@ -1832,7 +1836,7 @@ ipa_passes (void)
because TODO is run before the subpasses. It is important to remove
the unreachable functions to save works at IPA level and to get LTO
symbol tables right. */
- cgraph_remove_unreachable_nodes (true, cgraph_dump_file);
+ symtab_remove_unreachable_nodes (true, cgraph_dump_file);
/* If pass_all_early_optimizations was not scheduled, the state of
the cgraph will not be properly updated. Update it now. */
@@ -1917,373 +1921,6 @@ init_cgraph (void)
cgraph_dump_file = dump_begin (TDI_cgraph, NULL);
}
-/* The edges representing the callers of the NEW_VERSION node were
- fixed by cgraph_function_versioning (), now the call_expr in their
- respective tree code should be updated to call the NEW_VERSION. */
-
-static void
-update_call_expr (struct cgraph_node *new_version)
-{
- struct cgraph_edge *e;
-
- gcc_assert (new_version);
-
- /* Update the call expr on the edges to call the new version. */
- for (e = new_version->callers; e; e = e->next_caller)
- {
- struct function *inner_function = DECL_STRUCT_FUNCTION (e->caller->symbol.decl);
- gimple_call_set_fndecl (e->call_stmt, new_version->symbol.decl);
- maybe_clean_eh_stmt_fn (inner_function, e->call_stmt);
- }
-}
-
-
-/* Create a new cgraph node which is the new version of
- OLD_VERSION node. REDIRECT_CALLERS holds the callers
- edges which should be redirected to point to
- NEW_VERSION. ALL the callees edges of OLD_VERSION
- are cloned to the new version node. Return the new
- version node.
-
- If non-NULL BLOCK_TO_COPY determine what basic blocks
- was copied to prevent duplications of calls that are dead
- in the clone. */
-
-struct cgraph_node *
-cgraph_copy_node_for_versioning (struct cgraph_node *old_version,
- tree new_decl,
- VEC(cgraph_edge_p,heap) *redirect_callers,
- bitmap bbs_to_copy)
- {
- struct cgraph_node *new_version;
- struct cgraph_edge *e;
- unsigned i;
-
- gcc_assert (old_version);
-
- new_version = cgraph_create_node (new_decl);
-
- new_version->analyzed = old_version->analyzed;
- new_version->local = old_version->local;
- new_version->symbol.externally_visible = false;
- new_version->local.local = old_version->analyzed;
- new_version->global = old_version->global;
- new_version->rtl = old_version->rtl;
- new_version->count = old_version->count;
-
- for (e = old_version->callees; e; e=e->next_callee)
- if (!bbs_to_copy
- || bitmap_bit_p (bbs_to_copy, gimple_bb (e->call_stmt)->index))
- cgraph_clone_edge (e, new_version, e->call_stmt,
- e->lto_stmt_uid, REG_BR_PROB_BASE,
- CGRAPH_FREQ_BASE,
- true);
- for (e = old_version->indirect_calls; e; e=e->next_callee)
- if (!bbs_to_copy
- || bitmap_bit_p (bbs_to_copy, gimple_bb (e->call_stmt)->index))
- cgraph_clone_edge (e, new_version, e->call_stmt,
- e->lto_stmt_uid, REG_BR_PROB_BASE,
- CGRAPH_FREQ_BASE,
- true);
- FOR_EACH_VEC_ELT (cgraph_edge_p, redirect_callers, i, e)
- {
- /* Redirect calls to the old version node to point to its new
- version. */
- cgraph_redirect_edge_callee (e, new_version);
- }
-
- cgraph_call_node_duplication_hooks (old_version, new_version);
-
- return new_version;
- }
-
- /* Perform function versioning.
- Function versioning includes copying of the tree and
- a callgraph update (creating a new cgraph node and updating
- its callees and callers).
-
- REDIRECT_CALLERS varray includes the edges to be redirected
- to the new version.
-
- TREE_MAP is a mapping of tree nodes we want to replace with
- new ones (according to results of prior analysis).
- OLD_VERSION_NODE is the node that is versioned.
-
- If non-NULL ARGS_TO_SKIP determine function parameters to remove
- from new version.
- If SKIP_RETURN is true, the new version will return void.
- If non-NULL BLOCK_TO_COPY determine what basic blocks to copy.
- If non_NULL NEW_ENTRY determine new entry BB of the clone.
-
- Return the new version's cgraph node. */
-
-struct cgraph_node *
-cgraph_function_versioning (struct cgraph_node *old_version_node,
- VEC(cgraph_edge_p,heap) *redirect_callers,
- VEC (ipa_replace_map_p,gc)* tree_map,
- bitmap args_to_skip,
- bool skip_return,
- bitmap bbs_to_copy,
- basic_block new_entry_block,
- const char *clone_name)
-{
- tree old_decl = old_version_node->symbol.decl;
- struct cgraph_node *new_version_node = NULL;
- tree new_decl;
-
- if (!tree_versionable_function_p (old_decl))
- return NULL;
-
- gcc_assert (old_version_node->local.can_change_signature || !args_to_skip);
-
- /* Make a new FUNCTION_DECL tree node for the new version. */
- if (!args_to_skip && !skip_return)
- new_decl = copy_node (old_decl);
- else
- new_decl
- = build_function_decl_skip_args (old_decl, args_to_skip, skip_return);
-
- /* Generate a new name for the new version. */
- DECL_NAME (new_decl) = clone_function_name (old_decl, clone_name);
- SET_DECL_ASSEMBLER_NAME (new_decl, DECL_NAME (new_decl));
- SET_DECL_RTL (new_decl, NULL);
-
- /* When the old decl was a con-/destructor make sure the clone isn't. */
- DECL_STATIC_CONSTRUCTOR(new_decl) = 0;
- DECL_STATIC_DESTRUCTOR(new_decl) = 0;
-
- /* Create the new version's call-graph node.
- and update the edges of the new node. */
- new_version_node =
- cgraph_copy_node_for_versioning (old_version_node, new_decl,
- redirect_callers, bbs_to_copy);
-
- /* Copy the OLD_VERSION_NODE function tree to the new version. */
- tree_function_versioning (old_decl, new_decl, tree_map, false, args_to_skip,
- skip_return, bbs_to_copy, new_entry_block);
-
- /* Update the new version's properties.
- Make The new version visible only within this translation unit. Make sure
- that is not weak also.
- ??? We cannot use COMDAT linkage because there is no
- ABI support for this. */
- symtab_make_decl_local (new_version_node->symbol.decl);
- DECL_VIRTUAL_P (new_version_node->symbol.decl) = 0;
- new_version_node->symbol.externally_visible = 0;
- new_version_node->local.local = 1;
- new_version_node->lowered = true;
-
- /* Update the call_expr on the edges to call the new version node. */
- update_call_expr (new_version_node);
-
- cgraph_call_function_insertion_hooks (new_version_node);
- return new_version_node;
-}
-
-/* Given virtual clone, turn it into actual clone. */
-static void
-cgraph_materialize_clone (struct cgraph_node *node)
-{
- bitmap_obstack_initialize (NULL);
- node->former_clone_of = node->clone_of->symbol.decl;
- if (node->clone_of->former_clone_of)
- node->former_clone_of = node->clone_of->former_clone_of;
- /* Copy the OLD_VERSION_NODE function tree to the new version. */
- tree_function_versioning (node->clone_of->symbol.decl, node->symbol.decl,
- node->clone.tree_map, true,
- node->clone.args_to_skip, false,
- NULL, NULL);
- if (cgraph_dump_file)
- {
- dump_function_to_file (node->clone_of->symbol.decl, cgraph_dump_file, dump_flags);
- dump_function_to_file (node->symbol.decl, cgraph_dump_file, dump_flags);
- }
-
- /* Function is no longer clone. */
- if (node->next_sibling_clone)
- node->next_sibling_clone->prev_sibling_clone = node->prev_sibling_clone;
- if (node->prev_sibling_clone)
- node->prev_sibling_clone->next_sibling_clone = node->next_sibling_clone;
- else
- node->clone_of->clones = node->next_sibling_clone;
- node->next_sibling_clone = NULL;
- node->prev_sibling_clone = NULL;
- if (!node->clone_of->analyzed && !node->clone_of->clones)
- {
- cgraph_release_function_body (node->clone_of);
- cgraph_node_remove_callees (node->clone_of);
- ipa_remove_all_references (&node->clone_of->symbol.ref_list);
- }
- node->clone_of = NULL;
- bitmap_obstack_release (NULL);
-}
-
-/* If necessary, change the function declaration in the call statement
- associated with E so that it corresponds to the edge callee. */
-
-gimple
-cgraph_redirect_edge_call_stmt_to_callee (struct cgraph_edge *e)
-{
- tree decl = gimple_call_fndecl (e->call_stmt);
- gimple new_stmt;
- gimple_stmt_iterator gsi;
-#ifdef ENABLE_CHECKING
- struct cgraph_node *node;
-#endif
-
- if (e->indirect_unknown_callee
- || decl == e->callee->symbol.decl)
- return e->call_stmt;
-
-#ifdef ENABLE_CHECKING
- if (decl)
- {
- node = cgraph_get_node (decl);
- gcc_assert (!node || !node->clone.combined_args_to_skip);
- }
-#endif
-
- if (cgraph_dump_file)
- {
- fprintf (cgraph_dump_file, "updating call of %s/%i -> %s/%i: ",
- xstrdup (cgraph_node_name (e->caller)), e->caller->uid,
- xstrdup (cgraph_node_name (e->callee)), e->callee->uid);
- print_gimple_stmt (cgraph_dump_file, e->call_stmt, 0, dump_flags);
- if (e->callee->clone.combined_args_to_skip)
- {
- fprintf (cgraph_dump_file, " combined args to skip: ");
- dump_bitmap (cgraph_dump_file,
- e->callee->clone.combined_args_to_skip);
- }
- }
-
- if (e->callee->clone.combined_args_to_skip)
- {
- int lp_nr;
-
- new_stmt
- = gimple_call_copy_skip_args (e->call_stmt,
- e->callee->clone.combined_args_to_skip);
- gimple_call_set_fndecl (new_stmt, e->callee->symbol.decl);
-
- if (gimple_vdef (new_stmt)
- && TREE_CODE (gimple_vdef (new_stmt)) == SSA_NAME)
- SSA_NAME_DEF_STMT (gimple_vdef (new_stmt)) = new_stmt;
-
- gsi = gsi_for_stmt (e->call_stmt);
- gsi_replace (&gsi, new_stmt, false);
- /* We need to defer cleaning EH info on the new statement to
- fixup-cfg. We may not have dominator information at this point
- and thus would end up with unreachable blocks and have no way
- to communicate that we need to run CFG cleanup then. */
- lp_nr = lookup_stmt_eh_lp (e->call_stmt);
- if (lp_nr != 0)
- {
- remove_stmt_from_eh_lp (e->call_stmt);
- add_stmt_to_eh_lp (new_stmt, lp_nr);
- }
- }
- else
- {
- new_stmt = e->call_stmt;
- gimple_call_set_fndecl (new_stmt, e->callee->symbol.decl);
- update_stmt (new_stmt);
- }
-
- cgraph_set_call_stmt_including_clones (e->caller, e->call_stmt, new_stmt);
-
- if (cgraph_dump_file)
- {
- fprintf (cgraph_dump_file, " updated to:");
- print_gimple_stmt (cgraph_dump_file, e->call_stmt, 0, dump_flags);
- }
- return new_stmt;
-}
-
-/* Once all functions from compilation unit are in memory, produce all clones
- and update all calls. We might also do this on demand if we don't want to
- bring all functions to memory prior compilation, but current WHOPR
- implementation does that and it is is bit easier to keep everything right in
- this order. */
-static void
-cgraph_materialize_all_clones (void)
-{
- struct cgraph_node *node;
- bool stabilized = false;
-
- if (cgraph_dump_file)
- fprintf (cgraph_dump_file, "Materializing clones\n");
-#ifdef ENABLE_CHECKING
- verify_cgraph ();
-#endif
-
- /* We can also do topological order, but number of iterations should be
- bounded by number of IPA passes since single IPA pass is probably not
- going to create clones of clones it created itself. */
- while (!stabilized)
- {
- stabilized = true;
- FOR_EACH_FUNCTION (node)
- {
- if (node->clone_of && node->symbol.decl != node->clone_of->symbol.decl
- && !gimple_has_body_p (node->symbol.decl))
- {
- if (gimple_has_body_p (node->clone_of->symbol.decl))
- {
- if (cgraph_dump_file)
- {
- fprintf (cgraph_dump_file, "cloning %s to %s\n",
- xstrdup (cgraph_node_name (node->clone_of)),
- xstrdup (cgraph_node_name (node)));
- if (node->clone.tree_map)
- {
- unsigned int i;
- fprintf (cgraph_dump_file, " replace map: ");
- for (i = 0; i < VEC_length (ipa_replace_map_p,
- node->clone.tree_map);
- i++)
- {
- struct ipa_replace_map *replace_info;
- replace_info = VEC_index (ipa_replace_map_p,
- node->clone.tree_map,
- i);
- print_generic_expr (cgraph_dump_file, replace_info->old_tree, 0);
- fprintf (cgraph_dump_file, " -> ");
- print_generic_expr (cgraph_dump_file, replace_info->new_tree, 0);
- fprintf (cgraph_dump_file, "%s%s;",
- replace_info->replace_p ? "(replace)":"",
- replace_info->ref_p ? "(ref)":"");
- }
- fprintf (cgraph_dump_file, "\n");
- }
- if (node->clone.args_to_skip)
- {
- fprintf (cgraph_dump_file, " args_to_skip: ");
- dump_bitmap (cgraph_dump_file, node->clone.args_to_skip);
- }
- if (node->clone.args_to_skip)
- {
- fprintf (cgraph_dump_file, " combined_args_to_skip:");
- dump_bitmap (cgraph_dump_file, node->clone.combined_args_to_skip);
- }
- }
- cgraph_materialize_clone (node);
- stabilized = false;
- }
- }
- }
- }
- FOR_EACH_FUNCTION (node)
- if (!node->analyzed && node->callees)
- cgraph_node_remove_callees (node);
- if (cgraph_dump_file)
- fprintf (cgraph_dump_file, "Materialization Call site updates done.\n");
-#ifdef ENABLE_CHECKING
- verify_cgraph ();
-#endif
- cgraph_remove_unreachable_nodes (false, cgraph_dump_file);
-}
-
/* Perform simple optimizations based on callgraph. */
@@ -2325,7 +1962,7 @@ compile (void)
/* This pass remove bodies of extern inline functions we never inlined.
Do this later so other IPA passes see what is really going on. */
- cgraph_remove_unreachable_nodes (false, dump_file);
+ symtab_remove_unreachable_nodes (false, dump_file);
cgraph_global_info_ready = true;
if (cgraph_dump_file)
{
@@ -2350,7 +1987,7 @@ compile (void)
cgraph_materialize_all_clones ();
bitmap_obstack_initialize (NULL);
execute_ipa_pass_list (all_late_ipa_passes);
- cgraph_remove_unreachable_nodes (true, dump_file);
+ symtab_remove_unreachable_nodes (true, dump_file);
#ifdef ENABLE_CHECKING
verify_symtab ();
#endif
diff --git a/gcc/collect2.c b/gcc/collect2.c
index a52e95a64e2..deed052af2f 100644
--- a/gcc/collect2.c
+++ b/gcc/collect2.c
@@ -237,6 +237,12 @@ static const char *target_system_root = TARGET_SYSTEM_ROOT;
static const char *target_system_root = "";
#endif
+/* Whether we may unlink the output file, which should be set as soon as we
+ know we have successfully produced it. This is typically useful to prevent
+ blindly attempting to unlink a read-only output that the target linker
+ would leave untouched. */
+bool may_unlink_output_file = false;
+
/* Structure to hold all the directories in which to search for files to
execute. */
@@ -1018,7 +1024,7 @@ int
main (int argc, char **argv)
{
static const char *const ld_suffix = "ld";
- static const char *const plugin_ld_suffix = PLUGIN_LD;
+ static const char *const plugin_ld_suffix = PLUGIN_LD_SUFFIX;
static const char *const real_ld_suffix = "real-ld";
static const char *const collect_ld_suffix = "collect-ld";
static const char *const nm_suffix = "nm";
@@ -2095,15 +2101,22 @@ fork_execute (const char *prog, char **argv)
do_wait (prog, pex);
}
-/* Unlink a file unless we are debugging. */
+/* Unlink FILE unless we are debugging or this is the output_file
+ and we may not unlink it. */
static void
maybe_unlink (const char *file)
{
- if (!debug)
- unlink_if_ordinary (file);
- else
- notice ("[Leaving %s]\n", file);
+ if (debug)
+ {
+ notice ("[Leaving %s]\n", file);
+ return;
+ }
+
+ if (file == output_file && !may_unlink_output_file)
+ return;
+
+ unlink_if_ordinary (file);
}
/* Call maybe_unlink on the NULL-terminated list, FILE_LIST. */
diff --git a/gcc/collect2.h b/gcc/collect2.h
index e18892d3a55..d55198dce23 100644
--- a/gcc/collect2.h
+++ b/gcc/collect2.h
@@ -40,6 +40,7 @@ extern const char *c_file_name;
extern struct obstack temporary_obstack;
extern char *temporary_firstobj;
extern bool vflag, debug;
+extern bool may_unlink_output_file;
extern void notice_translated (const char *, ...) ATTRIBUTE_PRINTF_1;
extern void notice (const char *, ...) ATTRIBUTE_PRINTF_1;
diff --git a/gcc/combine-stack-adj.c b/gcc/combine-stack-adj.c
index 6b6f74b4b25..27aca85de38 100644
--- a/gcc/combine-stack-adj.c
+++ b/gcc/combine-stack-adj.c
@@ -214,7 +214,8 @@ try_apply_stack_adjustment (rtx insn, struct csa_reflist *reflist,
for (ml = reflist; ml ; ml = ml->next)
{
- rtx new_addr = plus_constant (stack_pointer_rtx, ml->sp_offset - delta);
+ rtx new_addr = plus_constant (Pmode, stack_pointer_rtx,
+ ml->sp_offset - delta);
rtx new_val;
if (MEM_P (*ml->ref))
diff --git a/gcc/combine.c b/gcc/combine.c
index 7eaaf476c6e..d23ecfcbccd 100644
--- a/gcc/combine.c
+++ b/gcc/combine.c
@@ -2536,13 +2536,13 @@ update_cfg_for_uncondjump (rtx insn)
single_succ_edge (bb)->flags |= EDGE_FALLTHRU;
/* Remove barriers from the footer if there are any. */
- for (insn = bb->il.rtl->footer; insn; insn = NEXT_INSN (insn))
+ for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
if (BARRIER_P (insn))
{
if (PREV_INSN (insn))
NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
else
- bb->il.rtl->footer = NEXT_INSN (insn);
+ BB_FOOTER (bb) = NEXT_INSN (insn);
if (NEXT_INSN (insn))
PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
}
@@ -4611,8 +4611,7 @@ find_split_point (rtx *loc, rtx insn, bool set_src)
if (GET_CODE (XEXP (x, 0)) == CONST
|| GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
{
- enum machine_mode address_mode
- = targetm.addr_space.address_mode (MEM_ADDR_SPACE (x));
+ enum machine_mode address_mode = get_address_mode (x);
SUBST (XEXP (x, 0),
gen_rtx_LO_SUM (address_mode,
@@ -5874,7 +5873,7 @@ combine_simplify_rtx (rtx x, enum machine_mode op0_mode, int in_dest,
== GET_MODE_PRECISION (mode)))
{
op0 = expand_compound_operation (op0);
- return plus_constant (gen_lowpart (mode, op0), 1);
+ return plus_constant (mode, gen_lowpart (mode, op0), 1);
}
/* If STORE_FLAG_VALUE is -1, we have cases similar to
@@ -5923,7 +5922,7 @@ combine_simplify_rtx (rtx x, enum machine_mode op0_mode, int in_dest,
&& nonzero_bits (op0, mode) == 1)
{
op0 = expand_compound_operation (op0);
- return plus_constant (gen_lowpart (mode, op0), -1);
+ return plus_constant (mode, gen_lowpart (mode, op0), -1);
}
/* If STORE_FLAG_VALUE says to just test the sign bit and X has just
@@ -8306,7 +8305,7 @@ force_to_mode (rtx x, enum machine_mode mode, unsigned HOST_WIDE_INT mask,
&& exact_log2 (- smask) >= 0
&& (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
&& (INTVAL (XEXP (x, 1)) & ~smask) != 0)
- return force_to_mode (plus_constant (XEXP (x, 0),
+ return force_to_mode (plus_constant (GET_MODE (x), XEXP (x, 0),
(INTVAL (XEXP (x, 1)) & smask)),
mode, smask, next_select);
}
diff --git a/gcc/common.opt b/gcc/common.opt
index ca1bc685aac..bf78a7411f6 100644
--- a/gcc/common.opt
+++ b/gcc/common.opt
@@ -1560,7 +1560,7 @@ Common Joined RejectNegative UInteger Var(flag_lto_compression_level) Init(-1)
-flto-compression-level=<number> Use zlib compression level <number> for IL
flto-report
-Common Report Var(flag_lto_report) Init(0) Optimization
+Common Report Var(flag_lto_report) Init(0)
Report various link-time optimization statistics
fmath-errno
diff --git a/gcc/common/config/s390/s390-common.c b/gcc/common/config/s390/s390-common.c
index 98b5c283aaa..4d364283ae3 100644
--- a/gcc/common/config/s390/s390-common.c
+++ b/gcc/common/config/s390/s390-common.c
@@ -51,6 +51,12 @@ static const struct default_options s390_option_optimization_table[] =
{
{ OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 },
+ /* Enable -fsched-pressure using -fsched-pressure-algorithm=model
+ by default when optimizing. */
+ { OPT_LEVELS_1_PLUS, OPT_fsched_pressure, NULL, 1 },
+ { OPT_LEVELS_1_PLUS, OPT_fsched_pressure_algorithm_,
+ NULL, SCHED_PRESSURE_MODEL },
+
/* ??? There are apparently still problems with -fcaller-saves. */
{ OPT_LEVELS_ALL, OPT_fcaller_saves, NULL, 0 },
diff --git a/gcc/config.in b/gcc/config.in
index 8806012ffed..441e0819edd 100644
--- a/gcc/config.in
+++ b/gcc/config.in
@@ -333,6 +333,12 @@
#endif
+/* Define if your assembler supports HLE prefixes. */
+#ifndef USED_FOR_TARGET
+#undef HAVE_AS_IX86_HLE
+#endif
+
+
/* Define if your assembler supports the .quad directive. */
#ifndef USED_FOR_TARGET
#undef HAVE_AS_IX86_QUAD
@@ -1688,7 +1694,7 @@
/* Specify plugin linker */
#ifndef USED_FOR_TARGET
-#undef PLUGIN_LD
+#undef PLUGIN_LD_SUFFIX
#endif
diff --git a/gcc/config/alpha/alpha.c b/gcc/config/alpha/alpha.c
index c52fc503b56..174abc2f559 100644
--- a/gcc/config/alpha/alpha.c
+++ b/gcc/config/alpha/alpha.c
@@ -988,7 +988,7 @@ alpha_legitimize_address_1 (rtx x, rtx scratch, enum machine_mode mode)
(!can_create_pseudo_p () ? scratch : NULL_RTX),
1, OPTAB_LIB_WIDEN);
- return plus_constant (x, low);
+ return plus_constant (Pmode, x, low);
}
}
@@ -1388,7 +1388,7 @@ get_unaligned_address (rtx ref)
if (GET_CODE (base) == PLUS)
offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
- return plus_constant (base, offset);
+ return plus_constant (Pmode, base, offset);
}
/* Compute a value X, such that X & 7 == (ADDR + OFS) & 7.
@@ -3233,21 +3233,22 @@ alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
tmp = change_address (mem, DImode,
gen_rtx_AND (DImode,
- plus_constant (mema, ofs),
+ plus_constant (DImode, mema, ofs),
GEN_INT (-8)));
set_mem_alias_set (tmp, 0);
emit_move_insn (meml, tmp);
tmp = change_address (mem, DImode,
gen_rtx_AND (DImode,
- plus_constant (mema, ofs + size - 1),
+ plus_constant (DImode, mema,
+ ofs + size - 1),
GEN_INT (-8)));
set_mem_alias_set (tmp, 0);
emit_move_insn (memh, tmp);
if (sign && size == 2)
{
- emit_move_insn (addr, plus_constant (mema, ofs+2));
+ emit_move_insn (addr, plus_constant (Pmode, mema, ofs+2));
emit_insn (gen_extql (extl, meml, addr));
emit_insn (gen_extqh (exth, memh, addr));
@@ -3261,7 +3262,7 @@ alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
}
else
{
- emit_move_insn (addr, plus_constant (mema, ofs));
+ emit_move_insn (addr, plus_constant (Pmode, mema, ofs));
emit_insn (gen_extxl (extl, meml, GEN_INT (size*8), addr));
switch ((int) size)
{
@@ -3333,20 +3334,21 @@ alpha_expand_unaligned_store (rtx dst, rtx src,
meml = change_address (dst, DImode,
gen_rtx_AND (DImode,
- plus_constant (dsta, ofs),
+ plus_constant (DImode, dsta, ofs),
GEN_INT (-8)));
set_mem_alias_set (meml, 0);
memh = change_address (dst, DImode,
gen_rtx_AND (DImode,
- plus_constant (dsta, ofs + size - 1),
+ plus_constant (DImode, dsta,
+ ofs + size - 1),
GEN_INT (-8)));
set_mem_alias_set (memh, 0);
emit_move_insn (dsth, memh);
emit_move_insn (dstl, meml);
- addr = copy_addr_to_reg (plus_constant (dsta, ofs));
+ addr = copy_addr_to_reg (plus_constant (Pmode, dsta, ofs));
if (src != CONST0_RTX (GET_MODE (src)))
{
@@ -3436,7 +3438,7 @@ alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
{
tmp = change_address (smem, DImode,
gen_rtx_AND (DImode,
- plus_constant (smema, 8*i),
+ plus_constant (DImode, smema, 8*i),
im8));
set_mem_alias_set (tmp, 0);
emit_move_insn (data_regs[i], tmp);
@@ -3444,7 +3446,8 @@ alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
tmp = change_address (smem, DImode,
gen_rtx_AND (DImode,
- plus_constant (smema, 8*words - 1),
+ plus_constant (DImode, smema,
+ 8*words - 1),
im8));
set_mem_alias_set (tmp, 0);
emit_move_insn (data_regs[words], tmp);
@@ -3504,8 +3507,9 @@ alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
st_addr_2 = change_address (dmem, DImode,
gen_rtx_AND (DImode,
- plus_constant (dmema, words*8 - 1),
- im8));
+ plus_constant (DImode, dmema,
+ words*8 - 1),
+ im8));
set_mem_alias_set (st_addr_2, 0);
st_addr_1 = change_address (dmem, DImode,
@@ -3551,7 +3555,8 @@ alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
{
rtx tmp = change_address (dmem, DImode,
gen_rtx_AND (DImode,
- plus_constant (dmema, i*8),
+ plus_constant (DImode,
+ dmema, i*8),
im8));
set_mem_alias_set (tmp, 0);
emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
@@ -3961,7 +3966,8 @@ alpha_expand_block_clear (rtx operands[])
rtx mem
= change_address (orig_dst, DImode,
gen_rtx_AND (DImode,
- plus_constant (orig_dsta, ofs + i*8),
+ plus_constant (DImode, orig_dsta,
+ ofs + i*8),
GEN_INT (-8)));
set_mem_alias_set (mem, 0);
emit_move_insn (mem, const0_rtx);
@@ -4718,8 +4724,8 @@ alpha_init_machine_status (void)
void
alpha_expand_builtin_establish_vms_condition_handler (rtx target, rtx handler)
{
- rtx handler_slot_address
- = plus_constant (hard_frame_pointer_rtx, VMS_COND_HANDLER_FP_OFFSET);
+ rtx handler_slot_address = plus_constant (Pmode, hard_frame_pointer_rtx,
+ VMS_COND_HANDLER_FP_OFFSET);
rtx handler_slot
= gen_rtx_MEM (DImode, handler_slot_address);
@@ -6055,7 +6061,7 @@ alpha_setup_incoming_varargs (cumulative_args_t pcum, enum machine_mode mode,
if (cfun->va_list_fpr_size & 1)
{
tmp = gen_rtx_MEM (BLKmode,
- plus_constant (virtual_incoming_args_rtx,
+ plus_constant (Pmode, virtual_incoming_args_rtx,
(cum + 6) * UNITS_PER_WORD));
MEM_NOTRAP_P (tmp) = 1;
set_mem_alias_set (tmp, set);
@@ -6065,7 +6071,7 @@ alpha_setup_incoming_varargs (cumulative_args_t pcum, enum machine_mode mode,
if (cfun->va_list_fpr_size & 2)
{
tmp = gen_rtx_MEM (BLKmode,
- plus_constant (virtual_incoming_args_rtx,
+ plus_constant (Pmode, virtual_incoming_args_rtx,
cum * UNITS_PER_WORD));
MEM_NOTRAP_P (tmp) = 1;
set_mem_alias_set (tmp, set);
@@ -7514,7 +7520,7 @@ emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
{
rtx addr, mem, insn;
- addr = plus_constant (base_reg, base_ofs);
+ addr = plus_constant (Pmode, base_reg, base_ofs);
mem = gen_frame_mem (DImode, addr);
insn = emit_move_insn (mem, value);
@@ -7524,7 +7530,8 @@ emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
{
if (frame_bias)
{
- addr = plus_constant (stack_pointer_rtx, frame_bias + base_ofs);
+ addr = plus_constant (Pmode, stack_pointer_rtx,
+ frame_bias + base_ofs);
mem = gen_rtx_MEM (DImode, addr);
}
@@ -7679,7 +7686,8 @@ alpha_expand_prologue (void)
if ((leftover > 4096 && sa_size == 0) || flag_stack_check)
{
- rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
+ rtx last = gen_rtx_MEM (DImode,
+ plus_constant (Pmode, ptr, -leftover));
MEM_VOLATILE_P (last) = 1;
emit_move_insn (last, const0_rtx);
}
@@ -7710,7 +7718,7 @@ alpha_expand_prologue (void)
RTX_FRAME_RELATED_P (seq) = 1;
add_reg_note (seq, REG_FRAME_RELATED_EXPR,
gen_rtx_SET (VOIDmode, stack_pointer_rtx,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode, stack_pointer_rtx,
-frame_size)));
}
@@ -7791,7 +7799,7 @@ alpha_expand_prologue (void)
rtx seq
= emit_move_insn (stack_pointer_rtx,
plus_constant
- (hard_frame_pointer_rtx,
+ (Pmode, hard_frame_pointer_rtx,
- (ALPHA_ROUND
(crtl->outgoing_args_size))));
@@ -8087,14 +8095,14 @@ alpha_expand_epilogue (void)
bias = reg_offset, reg_offset = 0;
sa_reg = gen_rtx_REG (DImode, 22);
- sa_reg_exp = plus_constant (stack_pointer_rtx, bias);
+ sa_reg_exp = plus_constant (Pmode, stack_pointer_rtx, bias);
emit_move_insn (sa_reg, sa_reg_exp);
}
/* Restore registers in order, excepting a true frame pointer. */
- mem = gen_frame_mem (DImode, plus_constant (sa_reg, reg_offset));
+ mem = gen_frame_mem (DImode, plus_constant (Pmode, sa_reg, reg_offset));
reg = gen_rtx_REG (DImode, REG_RA);
emit_move_insn (reg, mem);
cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
@@ -8110,7 +8118,8 @@ alpha_expand_epilogue (void)
else
{
mem = gen_frame_mem (DImode,
- plus_constant (sa_reg, reg_offset));
+ plus_constant (Pmode, sa_reg,
+ reg_offset));
reg = gen_rtx_REG (DImode, i);
emit_move_insn (reg, mem);
cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
@@ -8122,7 +8131,8 @@ alpha_expand_epilogue (void)
for (i = 0; i < 31; ++i)
if (fmask & (1UL << i))
{
- mem = gen_frame_mem (DFmode, plus_constant (sa_reg, reg_offset));
+ mem = gen_frame_mem (DFmode, plus_constant (Pmode, sa_reg,
+ reg_offset));
reg = gen_rtx_REG (DFmode, i+32);
emit_move_insn (reg, mem);
cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
@@ -8150,7 +8160,7 @@ alpha_expand_epilogue (void)
{
int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
- sp_adj2 = plus_constant (sp_adj1, frame_size - low);
+ sp_adj2 = plus_constant (Pmode, sp_adj1, frame_size - low);
if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
sp_adj1 = sa_reg;
else
@@ -8180,7 +8190,8 @@ alpha_expand_epilogue (void)
if (fp_is_frame_pointer)
{
emit_insn (gen_blockage ());
- mem = gen_frame_mem (DImode, plus_constant (sa_reg, fp_offset));
+ mem = gen_frame_mem (DImode, plus_constant (Pmode, sa_reg,
+ fp_offset));
emit_move_insn (hard_frame_pointer_rtx, mem);
cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
hard_frame_pointer_rtx, cfa_restores);
@@ -9509,7 +9520,7 @@ alpha_use_linkage (rtx func, bool lflag, bool rflag)
al->rkind = rflag ? KIND_CODEADDR : KIND_LINKAGE;
if (lflag)
- return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
+ return gen_rtx_MEM (Pmode, plus_constant (Pmode, al->linkage, 8));
else
return al->linkage;
}
diff --git a/gcc/config/alpha/alpha.h b/gcc/config/alpha/alpha.h
index 194e9a257e5..8520ea82f45 100644
--- a/gcc/config/alpha/alpha.h
+++ b/gcc/config/alpha/alpha.h
@@ -767,7 +767,7 @@ extern int alpha_memory_latency;
#define EH_RETURN_DATA_REGNO(N) ((N) < 4 ? (N) + 16 : INVALID_REGNUM)
#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (Pmode, 28)
#define EH_RETURN_HANDLER_RTX \
- gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx, \
+ gen_rtx_MEM (Pmode, plus_constant (Pmode, stack_pointer_rtx, \
crtl->outgoing_args_size))
/* Addressing modes, and classification of registers for them. */
diff --git a/gcc/config/alpha/alpha.md b/gcc/config/alpha/alpha.md
index d090634bb4d..21c4d2e0554 100644
--- a/gcc/config/alpha/alpha.md
+++ b/gcc/config/alpha/alpha.md
@@ -1666,7 +1666,7 @@
;; (match_dup 4)))]
;; "
;;{
-;; operands[6] = plus_constant (operands[3],
+;; operands[6] = plus_constant (DImode, operands[3],
;; INTVAL (operands[2]) / BITS_PER_UNIT);
;; operands[7] = GEN_INT (- INTVAL (operands[2]) / BITS_PER_UNIT);
;;}")
@@ -3971,7 +3971,8 @@
else
{
emit_move_insn (gen_rtx_REG (Pmode, 26),
- gen_rtx_MEM (Pmode, plus_constant (operands[0], 8)));
+ gen_rtx_MEM (Pmode, plus_constant (Pmode,
+ operands[0], 8)));
operands[2] = operands[0];
}
@@ -4046,7 +4047,8 @@
else
{
emit_move_insn (gen_rtx_REG (Pmode, 26),
- gen_rtx_MEM (Pmode, plus_constant (operands[1], 8)));
+ gen_rtx_MEM (Pmode, plus_constant (Pmode,
+ operands[1], 8)));
operands[3] = operands[1];
}
})
@@ -4344,6 +4346,15 @@
"call_pal 0x86"
[(set_attr "type" "callpal")])
+(define_expand "clear_cache"
+ [(match_operand:DI 0 "") ; region start
+ (match_operand:DI 1 "")] ; region end
+ ""
+{
+ emit_insn (gen_imb ());
+ DONE;
+})
+
;; BUGCHK is documented common to OSF/1 and VMS PALcode.
(define_insn "trap"
[(trap_if (const_int 1) (const_int 0))]
@@ -5572,7 +5583,7 @@
[(set (match_dup 1) (match_operand:DI 0 "const_int_operand" ""))]
""
{
- operands[1] = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx,
+ operands[1] = gen_rtx_MEM (DImode, plus_constant (Pmode, stack_pointer_rtx,
INTVAL (operands[0])));
MEM_VOLATILE_P (operands[1]) = 1;
@@ -5794,8 +5805,8 @@
{
/* The elements of the buffer are, in order: */
rtx fp = gen_rtx_MEM (Pmode, operands[0]);
- rtx lab = gen_rtx_MEM (Pmode, plus_constant (operands[0], 8));
- rtx stack = gen_rtx_MEM (Pmode, plus_constant (operands[0], 16));
+ rtx lab = gen_rtx_MEM (Pmode, plus_constant (Pmode, operands[0], 8));
+ rtx stack = gen_rtx_MEM (Pmode, plus_constant (Pmode, operands[0], 16));
rtx pv = gen_rtx_REG (Pmode, 27);
/* This bit is the same as expand_builtin_longjmp. */
diff --git a/gcc/config/alpha/elf.h b/gcc/config/alpha/elf.h
index 70be527f79a..a7d04f276cc 100644
--- a/gcc/config/alpha/elf.h
+++ b/gcc/config/alpha/elf.h
@@ -67,21 +67,6 @@ do { \
ASM_OUTPUT_ALIGNED_LOCAL (FILE, NAME, SIZE, ALIGN); \
} while (0)
-/* The biggest alignment supported by ELF in bits. 32-bit ELF
- supports section alignment up to (0x80000000 * 8), while
- 64-bit ELF supports (0x8000000000000000 * 8). If this macro
- is not defined, the default is the largest alignment supported
- by 32-bit ELF and representable on a 32-bit host. Use this
- macro to limit the alignment which can be specified using
- the `__attribute__ ((aligned (N)))' construct.
-
- This value is really 2^63. Since gcc figures the alignment in bits,
- we could only potentially get to 2^60 on suitable hosts. Due to other
- considerations in varasm, we must restrict this to what fits in an int. */
-
-#undef MAX_OFILE_ALIGNMENT
-#define MAX_OFILE_ALIGNMENT (((unsigned int) 1 << 28) * 8)
-
#undef BSS_SECTION_ASM_OP
#define BSS_SECTION_ASM_OP "\t.section\t.bss"
#undef SBSS_SECTION_ASM_OP
diff --git a/gcc/config/alpha/vms.h b/gcc/config/alpha/vms.h
index 8caec548ef8..6f90122fef3 100644
--- a/gcc/config/alpha/vms.h
+++ b/gcc/config/alpha/vms.h
@@ -206,7 +206,7 @@ typedef struct {int num_args; enum avms_arg_type atypes[6];} avms_arg_info;
#undef EH_RETURN_HANDLER_RTX
#define EH_RETURN_HANDLER_RTX \
- gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx, 8))
+ gen_rtx_MEM (Pmode, plus_constant (Pmode, stack_pointer_rtx, 8))
#define LINK_EH_SPEC "vms-dwarf2eh.o%s "
#define LINK_GCC_C_SEQUENCE_SPEC "%G"
@@ -257,7 +257,15 @@ typedef struct {int num_args; enum avms_arg_type atypes[6];} avms_arg_info;
#undef ASM_FINAL_SPEC
/* The VMS convention is to always provide minimal debug info
- for a traceback unless specifically overridden. */
+ for a traceback unless specifically overridden.
+
+ Because ASM_OUTPUT_ADDR_DIFF_ELT is not defined for alpha-vms,
+ jump tables cannot be output for PIC code, because you can't put
+ an absolute address in a readonly section. Putting the table in
+ a writable section is a security hole. Therefore, we unset the
+ flag_jump_tables flag, forcing switch statements to be expanded
+ using decision trees. There are probably other ways to address
+ this issue, but using a decision tree is clearly safe. */
#undef SUBTARGET_OVERRIDE_OPTIONS
#define SUBTARGET_OVERRIDE_OPTIONS \
@@ -268,6 +276,8 @@ do { \
write_symbols = VMS_DEBUG; \
debug_info_level = DINFO_LEVEL_TERSE; \
} \
+ if (flag_pic) \
+ flag_jump_tables = 0; \
} while (0)
#undef LINK_SPEC
diff --git a/gcc/config/arm/arm-protos.h b/gcc/config/arm/arm-protos.h
index 53c2aef257e..cb74d707c21 100644
--- a/gcc/config/arm/arm-protos.h
+++ b/gcc/config/arm/arm-protos.h
@@ -250,4 +250,6 @@ extern int vfp3_const_double_for_fract_bits (rtx);
extern void arm_expand_vec_perm (rtx target, rtx op0, rtx op1, rtx sel);
extern bool arm_expand_vec_perm_const (rtx target, rtx op0, rtx op1, rtx sel);
+extern bool arm_autoinc_modes_ok_p (enum machine_mode, enum arm_auto_incmodes);
+
#endif /* ! GCC_ARM_PROTOS_H */
diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c
index 68350b158da..2c62c518e67 100644
--- a/gcc/config/arm/arm.c
+++ b/gcc/config/arm/arm.c
@@ -2226,7 +2226,7 @@ arm_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
a_tramp = XEXP (m_tramp, 0);
emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__clear_cache"),
LCT_NORMAL, VOIDmode, 2, a_tramp, Pmode,
- plus_constant (a_tramp, TRAMPOLINE_SIZE), Pmode);
+ plus_constant (Pmode, a_tramp, TRAMPOLINE_SIZE), Pmode);
}
/* Thumb trampolines should be entered in thumb mode, so set
@@ -5458,7 +5458,7 @@ legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
}
if (GET_CODE (offset) == CONST_INT)
- return plus_constant (base, INTVAL (offset));
+ return plus_constant (Pmode, base, INTVAL (offset));
}
if (GET_MODE_SIZE (mode) > 4
@@ -5575,7 +5575,7 @@ arm_load_pic_register (unsigned long saved_regs ATTRIBUTE_UNUSED)
/* On the ARM the PC register contains 'dot + 8' at the time of the
addition, on the Thumb it is 'dot + 4'. */
- pic_rtx = plus_constant (l1, TARGET_ARM ? 8 : 4);
+ pic_rtx = plus_constant (Pmode, l1, TARGET_ARM ? 8 : 4);
pic_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, pic_rtx),
UNSPEC_GOTSYM_OFF);
pic_rtx = gen_rtx_CONST (Pmode, pic_rtx);
@@ -5623,7 +5623,7 @@ arm_pic_static_addr (rtx orig, rtx reg)
/* On the ARM the PC register contains 'dot + 8' at the time of the
addition, on the Thumb it is 'dot + 4'. */
- offset_rtx = plus_constant (l1, TARGET_ARM ? 8 : 4);
+ offset_rtx = plus_constant (Pmode, l1, TARGET_ARM ? 8 : 4);
offset_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, orig, offset_rtx),
UNSPEC_SYMBOL_OFFSET);
offset_rtx = gen_rtx_CONST (Pmode, offset_rtx);
@@ -6513,9 +6513,9 @@ arm_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
}
base_reg = gen_reg_rtx (SImode);
- val = force_operand (plus_constant (xop0, n), NULL_RTX);
+ val = force_operand (plus_constant (Pmode, xop0, n), NULL_RTX);
emit_move_insn (base_reg, val);
- x = plus_constant (base_reg, low_n);
+ x = plus_constant (Pmode, base_reg, low_n);
}
else if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
x = gen_rtx_PLUS (SImode, xop0, xop1);
@@ -6563,7 +6563,7 @@ arm_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
index -= mask;
}
base_reg = force_reg (SImode, GEN_INT (base));
- x = plus_constant (base_reg, index);
+ x = plus_constant (Pmode, base_reg, index);
}
if (flag_pic)
@@ -6612,9 +6612,9 @@ thumb_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
else
delta = offset & (~31 * GET_MODE_SIZE (mode));
- xop0 = force_operand (plus_constant (xop0, offset - delta),
+ xop0 = force_operand (plus_constant (Pmode, xop0, offset - delta),
NULL_RTX);
- x = plus_constant (xop0, delta);
+ x = plus_constant (Pmode, xop0, delta);
}
else if (offset < 0 && offset > -256)
/* Small negative offsets are best done with a subtract before the
@@ -10781,7 +10781,7 @@ arm_gen_load_multiple_1 (int count, int *regs, rtx *mems, rtx basereg,
emit_move_insn (gen_rtx_REG (SImode, regs[i]), mems[i]);
if (wback_offset != 0)
- emit_move_insn (basereg, plus_constant (basereg, wback_offset));
+ emit_move_insn (basereg, plus_constant (Pmode, basereg, wback_offset));
seq = get_insns ();
end_sequence ();
@@ -10795,7 +10795,7 @@ arm_gen_load_multiple_1 (int count, int *regs, rtx *mems, rtx basereg,
{
XVECEXP (result, 0, 0)
= gen_rtx_SET (VOIDmode, basereg,
- plus_constant (basereg, wback_offset));
+ plus_constant (Pmode, basereg, wback_offset));
i = 1;
count++;
}
@@ -10833,7 +10833,7 @@ arm_gen_store_multiple_1 (int count, int *regs, rtx *mems, rtx basereg,
emit_move_insn (mems[i], gen_rtx_REG (SImode, regs[i]));
if (wback_offset != 0)
- emit_move_insn (basereg, plus_constant (basereg, wback_offset));
+ emit_move_insn (basereg, plus_constant (Pmode, basereg, wback_offset));
seq = get_insns ();
end_sequence ();
@@ -10847,7 +10847,7 @@ arm_gen_store_multiple_1 (int count, int *regs, rtx *mems, rtx basereg,
{
XVECEXP (result, 0, 0)
= gen_rtx_SET (VOIDmode, basereg,
- plus_constant (basereg, wback_offset));
+ plus_constant (Pmode, basereg, wback_offset));
i = 1;
count++;
}
@@ -10889,7 +10889,7 @@ arm_gen_multiple_op (bool is_load, int *regs, int count, rtx basereg,
for (i = 0; i < count; i++)
{
- rtx addr = plus_constant (basereg, i * 4);
+ rtx addr = plus_constant (Pmode, basereg, i * 4);
mems[i] = adjust_automodify_address_nv (basemem, SImode, addr, offset);
offset += 4;
}
@@ -10978,7 +10978,7 @@ gen_ldm_seq (rtx *operands, int nops, bool sort_regs)
for (i = 0; i < nops; i++)
{
- addr = plus_constant (base_reg_rtx, offset + i * 4);
+ addr = plus_constant (Pmode, base_reg_rtx, offset + i * 4);
mems[i] = adjust_automodify_address_nv (operands[nops + mem_order[i]],
SImode, addr, 0);
}
@@ -11028,11 +11028,11 @@ gen_stm_seq (rtx *operands, int nops)
offset = 0;
}
- addr = plus_constant (base_reg_rtx, offset);
+ addr = plus_constant (Pmode, base_reg_rtx, offset);
for (i = 0; i < nops; i++)
{
- addr = plus_constant (base_reg_rtx, offset + i * 4);
+ addr = plus_constant (Pmode, base_reg_rtx, offset + i * 4);
mems[i] = adjust_automodify_address_nv (operands[nops + mem_order[i]],
SImode, addr, 0);
}
@@ -11144,11 +11144,11 @@ gen_const_stm_seq (rtx *operands, int nops)
offset = 0;
}
- addr = plus_constant (base_reg_rtx, offset);
+ addr = plus_constant (Pmode, base_reg_rtx, offset);
for (i = 0; i < nops; i++)
{
- addr = plus_constant (base_reg_rtx, offset + i * 4);
+ addr = plus_constant (Pmode, base_reg_rtx, offset + i * 4);
mems[i] = adjust_automodify_address_nv (operands[nops + mem_order[i]],
SImode, addr, 0);
}
@@ -11220,8 +11220,8 @@ arm_block_move_unaligned_straight (rtx dstbase, rtx srcbase,
{
for (j = 0; j < interleave_factor; j++)
{
- addr = plus_constant (src, srcoffset + j * UNITS_PER_WORD
- - src_autoinc);
+ addr = plus_constant (Pmode, src, (srcoffset + j * UNITS_PER_WORD
+ - src_autoinc));
mem = adjust_automodify_address (srcbase, SImode, addr,
srcoffset + j * UNITS_PER_WORD);
emit_insn (gen_unaligned_loadsi (regs[j], mem));
@@ -11240,8 +11240,8 @@ arm_block_move_unaligned_straight (rtx dstbase, rtx srcbase,
{
for (j = 0; j < interleave_factor; j++)
{
- addr = plus_constant (dst, dstoffset + j * UNITS_PER_WORD
- - dst_autoinc);
+ addr = plus_constant (Pmode, dst, (dstoffset + j * UNITS_PER_WORD
+ - dst_autoinc));
mem = adjust_automodify_address (dstbase, SImode, addr,
dstoffset + j * UNITS_PER_WORD);
emit_insn (gen_unaligned_storesi (mem, regs[j]));
@@ -11269,7 +11269,7 @@ arm_block_move_unaligned_straight (rtx dstbase, rtx srcbase,
{
for (j = 0; j < words; j++)
{
- addr = plus_constant (src,
+ addr = plus_constant (Pmode, src,
srcoffset + j * UNITS_PER_WORD - src_autoinc);
mem = adjust_automodify_address (srcbase, SImode, addr,
srcoffset + j * UNITS_PER_WORD);
@@ -11288,7 +11288,7 @@ arm_block_move_unaligned_straight (rtx dstbase, rtx srcbase,
{
for (j = 0; j < words; j++)
{
- addr = plus_constant (dst,
+ addr = plus_constant (Pmode, dst,
dstoffset + j * UNITS_PER_WORD - dst_autoinc);
mem = adjust_automodify_address (dstbase, SImode, addr,
dstoffset + j * UNITS_PER_WORD);
@@ -11307,7 +11307,7 @@ arm_block_move_unaligned_straight (rtx dstbase, rtx srcbase,
{
halfword_tmp = gen_reg_rtx (SImode);
- addr = plus_constant (src, srcoffset - src_autoinc);
+ addr = plus_constant (Pmode, src, srcoffset - src_autoinc);
mem = adjust_automodify_address (srcbase, HImode, addr, srcoffset);
emit_insn (gen_unaligned_loadhiu (halfword_tmp, mem));
@@ -11315,7 +11315,7 @@ arm_block_move_unaligned_straight (rtx dstbase, rtx srcbase,
byte, depending on interleave factor. */
if (interleave_factor == 1)
{
- addr = plus_constant (dst, dstoffset - dst_autoinc);
+ addr = plus_constant (Pmode, dst, dstoffset - dst_autoinc);
mem = adjust_automodify_address (dstbase, HImode, addr, dstoffset);
emit_insn (gen_unaligned_storehi (mem,
gen_lowpart (HImode, halfword_tmp)));
@@ -11335,13 +11335,13 @@ arm_block_move_unaligned_straight (rtx dstbase, rtx srcbase,
{
byte_tmp = gen_reg_rtx (SImode);
- addr = plus_constant (src, srcoffset - src_autoinc);
+ addr = plus_constant (Pmode, src, srcoffset - src_autoinc);
mem = adjust_automodify_address (srcbase, QImode, addr, srcoffset);
emit_move_insn (gen_lowpart (QImode, byte_tmp), mem);
if (interleave_factor == 1)
{
- addr = plus_constant (dst, dstoffset - dst_autoinc);
+ addr = plus_constant (Pmode, dst, dstoffset - dst_autoinc);
mem = adjust_automodify_address (dstbase, QImode, addr, dstoffset);
emit_move_insn (mem, gen_lowpart (QImode, byte_tmp));
byte_tmp = NULL;
@@ -11356,7 +11356,7 @@ arm_block_move_unaligned_straight (rtx dstbase, rtx srcbase,
if (halfword_tmp)
{
- addr = plus_constant (dst, dstoffset - dst_autoinc);
+ addr = plus_constant (Pmode, dst, dstoffset - dst_autoinc);
mem = adjust_automodify_address (dstbase, HImode, addr, dstoffset);
emit_insn (gen_unaligned_storehi (mem,
gen_lowpart (HImode, halfword_tmp)));
@@ -11367,7 +11367,7 @@ arm_block_move_unaligned_straight (rtx dstbase, rtx srcbase,
if (byte_tmp)
{
- addr = plus_constant (dst, dstoffset - dst_autoinc);
+ addr = plus_constant (Pmode, dst, dstoffset - dst_autoinc);
mem = adjust_automodify_address (dstbase, QImode, addr, dstoffset);
emit_move_insn (mem, gen_lowpart (QImode, byte_tmp));
dstoffset++;
@@ -11433,8 +11433,8 @@ arm_block_move_unaligned_loop (rtx dest, rtx src, HOST_WIDE_INT length,
interleave_factor);
/* Move on to the next block. */
- emit_move_insn (src_reg, plus_constant (src_reg, bytes_per_iter));
- emit_move_insn (dest_reg, plus_constant (dest_reg, bytes_per_iter));
+ emit_move_insn (src_reg, plus_constant (Pmode, src_reg, bytes_per_iter));
+ emit_move_insn (dest_reg, plus_constant (Pmode, dest_reg, bytes_per_iter));
/* Emit the loop condition. */
test = gen_rtx_NE (VOIDmode, src_reg, final_src);
@@ -11595,7 +11595,8 @@ arm_gen_movmemqi (rtx *operands)
while (last_bytes)
{
mem = adjust_automodify_address (dstbase, QImode,
- plus_constant (dst, last_bytes - 1),
+ plus_constant (Pmode, dst,
+ last_bytes - 1),
dstoffset + last_bytes - 1);
emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
@@ -11964,6 +11965,9 @@ arm_select_cc_mode (enum rtx_code op, rtx x, rtx y)
}
}
+ if (GET_MODE_CLASS (GET_MODE (x)) == MODE_CC)
+ return GET_MODE (x);
+
return CCmode;
}
@@ -12117,11 +12121,11 @@ arm_reload_in_hi (rtx *operands)
emit_insn (gen_zero_extendqisi2 (scratch,
gen_rtx_MEM (QImode,
- plus_constant (base,
+ plus_constant (Pmode, base,
offset))));
emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
gen_rtx_MEM (QImode,
- plus_constant (base,
+ plus_constant (Pmode, base,
offset + 1))));
if (!BYTES_BIG_ENDIAN)
emit_set_insn (gen_rtx_SUBREG (SImode, operands[0], 0),
@@ -12281,23 +12285,27 @@ arm_reload_out_hi (rtx *operands)
if (BYTES_BIG_ENDIAN)
{
emit_insn (gen_movqi (gen_rtx_MEM (QImode,
- plus_constant (base, offset + 1)),
+ plus_constant (Pmode, base,
+ offset + 1)),
gen_lowpart (QImode, outval)));
emit_insn (gen_lshrsi3 (scratch,
gen_rtx_SUBREG (SImode, outval, 0),
GEN_INT (8)));
- emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
+ emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (Pmode, base,
+ offset)),
gen_lowpart (QImode, scratch)));
}
else
{
- emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
+ emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (Pmode, base,
+ offset)),
gen_lowpart (QImode, outval)));
emit_insn (gen_lshrsi3 (scratch,
gen_rtx_SUBREG (SImode, outval, 0),
GEN_INT (8)));
emit_insn (gen_movqi (gen_rtx_MEM (QImode,
- plus_constant (base, offset + 1)),
+ plus_constant (Pmode, base,
+ offset + 1)),
gen_lowpart (QImode, scratch)));
}
}
@@ -13811,7 +13819,8 @@ arm_reorg (void)
if (GET_CODE (this_fix->insn) != BARRIER)
{
rtx addr
- = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
+ = plus_constant (Pmode,
+ gen_rtx_LABEL_REF (VOIDmode,
minipool_vector_label),
this_fix->minipool->offset);
*this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
@@ -14022,7 +14031,7 @@ vfp_emit_fstmd (int base_reg, int count)
gen_rtx_PRE_MODIFY (Pmode,
stack_pointer_rtx,
plus_constant
- (stack_pointer_rtx,
+ (Pmode, stack_pointer_rtx,
- (count * 8)))
),
gen_rtx_UNSPEC (BLKmode,
@@ -14030,7 +14039,7 @@ vfp_emit_fstmd (int base_reg, int count)
UNSPEC_PUSH_MULT));
tmp = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
- plus_constant (stack_pointer_rtx, -(count * 8)));
+ plus_constant (Pmode, stack_pointer_rtx, -(count * 8)));
RTX_FRAME_RELATED_P (tmp) = 1;
XVECEXP (dwarf, 0, 0) = tmp;
@@ -14048,7 +14057,8 @@ vfp_emit_fstmd (int base_reg, int count)
tmp = gen_rtx_SET (VOIDmode,
gen_frame_mem (DFmode,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode,
+ stack_pointer_rtx,
i * 8)),
reg);
RTX_FRAME_RELATED_P (tmp) = 1;
@@ -16535,7 +16545,7 @@ emit_multi_reg_push (unsigned long mask)
gen_rtx_PRE_MODIFY (Pmode,
stack_pointer_rtx,
plus_constant
- (stack_pointer_rtx,
+ (Pmode, stack_pointer_rtx,
-4 * num_regs))
),
gen_rtx_UNSPEC (BLKmode,
@@ -16570,7 +16580,7 @@ emit_multi_reg_push (unsigned long mask)
= gen_rtx_SET (VOIDmode,
gen_frame_mem
(SImode,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode, stack_pointer_rtx,
4 * j)),
reg);
RTX_FRAME_RELATED_P (tmp) = 1;
@@ -16585,7 +16595,7 @@ emit_multi_reg_push (unsigned long mask)
tmp = gen_rtx_SET (VOIDmode,
stack_pointer_rtx,
- plus_constant (stack_pointer_rtx, -4 * num_regs));
+ plus_constant (Pmode, stack_pointer_rtx, -4 * num_regs));
RTX_FRAME_RELATED_P (tmp) = 1;
XVECEXP (dwarf, 0, 0) = tmp;
@@ -16628,7 +16638,7 @@ emit_sfm (int base_reg, int count)
gen_rtx_PRE_MODIFY (Pmode,
stack_pointer_rtx,
plus_constant
- (stack_pointer_rtx,
+ (Pmode, stack_pointer_rtx,
-12 * count))
),
gen_rtx_UNSPEC (BLKmode,
@@ -16646,7 +16656,8 @@ emit_sfm (int base_reg, int count)
tmp = gen_rtx_SET (VOIDmode,
gen_frame_mem (XFmode,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode,
+ stack_pointer_rtx,
i * 12)),
reg);
RTX_FRAME_RELATED_P (tmp) = 1;
@@ -16655,7 +16666,7 @@ emit_sfm (int base_reg, int count)
tmp = gen_rtx_SET (VOIDmode,
stack_pointer_rtx,
- plus_constant (stack_pointer_rtx, -12 * count));
+ plus_constant (Pmode, stack_pointer_rtx, -12 * count));
RTX_FRAME_RELATED_P (tmp) = 1;
XVECEXP (dwarf, 0, 0) = tmp;
@@ -17127,7 +17138,7 @@ thumb_set_frame_pointer (arm_stack_offsets *offsets)
stack_pointer_rtx));
}
dwarf = gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
- plus_constant (stack_pointer_rtx, amount));
+ plus_constant (Pmode, stack_pointer_rtx, amount));
RTX_FRAME_RELATED_P (dwarf) = 1;
add_reg_note (insn, REG_FRAME_RELATED_EXPR, dwarf);
}
@@ -17258,7 +17269,7 @@ arm_expand_prologue (void)
/* Just tell the dwarf backend that we adjusted SP. */
dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode, stack_pointer_rtx,
-fp_offset));
RTX_FRAME_RELATED_P (insn) = 1;
add_reg_note (insn, REG_FRAME_RELATED_EXPR, dwarf);
@@ -17286,7 +17297,8 @@ arm_expand_prologue (void)
}
insn = emit_set_insn (ip_rtx,
- plus_constant (stack_pointer_rtx, fp_offset));
+ plus_constant (Pmode, stack_pointer_rtx,
+ fp_offset));
RTX_FRAME_RELATED_P (insn) = 1;
}
@@ -17315,7 +17327,7 @@ arm_expand_prologue (void)
{
rtx lr = gen_rtx_REG (SImode, LR_REGNUM);
- emit_set_insn (lr, plus_constant (lr, -4));
+ emit_set_insn (lr, plus_constant (SImode, lr, -4));
}
if (live_regs_mask)
@@ -17365,7 +17377,7 @@ arm_expand_prologue (void)
insn = gen_rtx_REG (SImode, 3);
else /* if (crtl->args.pretend_args_size == 0) */
{
- insn = plus_constant (hard_frame_pointer_rtx, 4);
+ insn = plus_constant (Pmode, hard_frame_pointer_rtx, 4);
insn = gen_frame_mem (SImode, insn);
}
emit_set_insn (ip_rtx, insn);
@@ -21592,7 +21604,7 @@ thumb1_emit_multi_reg_push (unsigned long mask, unsigned long real_regs)
par[i] = tmp;
}
- tmp = plus_constant (stack_pointer_rtx, -4 * i);
+ tmp = plus_constant (Pmode, stack_pointer_rtx, -4 * i);
tmp = gen_rtx_PRE_MODIFY (Pmode, stack_pointer_rtx, tmp);
tmp = gen_frame_mem (BLKmode, tmp);
tmp = gen_rtx_SET (VOIDmode, tmp, par[0]);
@@ -21602,7 +21614,7 @@ thumb1_emit_multi_reg_push (unsigned long mask, unsigned long real_regs)
insn = emit_insn (tmp);
/* Always build the stack adjustment note for unwind info. */
- tmp = plus_constant (stack_pointer_rtx, -4 * i);
+ tmp = plus_constant (Pmode, stack_pointer_rtx, -4 * i);
tmp = gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp);
par[0] = tmp;
@@ -21612,7 +21624,7 @@ thumb1_emit_multi_reg_push (unsigned long mask, unsigned long real_regs)
regno = ctz_hwi (real_regs);
reg = gen_rtx_REG (SImode, regno);
- tmp = plus_constant (stack_pointer_rtx, j * 4);
+ tmp = plus_constant (Pmode, stack_pointer_rtx, j * 4);
tmp = gen_frame_mem (SImode, tmp);
tmp = gen_rtx_SET (VOIDmode, tmp, reg);
RTX_FRAME_RELATED_P (tmp) = 1;
@@ -22556,7 +22568,7 @@ thumb1_expand_prologue (void)
x = GEN_INT (offset + 16 + crtl->args.pretend_args_size);
emit_insn (gen_addsi3 (work_reg, stack_pointer_rtx, x));
- x = plus_constant (stack_pointer_rtx, offset + 4);
+ x = plus_constant (Pmode, stack_pointer_rtx, offset + 4);
x = gen_frame_mem (SImode, x);
emit_move_insn (x, work_reg);
@@ -22570,13 +22582,13 @@ thumb1_expand_prologue (void)
x = gen_rtx_REG (SImode, PC_REGNUM);
emit_move_insn (work_reg, x);
- x = plus_constant (stack_pointer_rtx, offset + 12);
+ x = plus_constant (Pmode, stack_pointer_rtx, offset + 12);
x = gen_frame_mem (SImode, x);
emit_move_insn (x, work_reg);
emit_move_insn (work_reg, arm_hfp_rtx);
- x = plus_constant (stack_pointer_rtx, offset);
+ x = plus_constant (Pmode, stack_pointer_rtx, offset);
x = gen_frame_mem (SImode, x);
emit_move_insn (x, work_reg);
}
@@ -22584,14 +22596,14 @@ thumb1_expand_prologue (void)
{
emit_move_insn (work_reg, arm_hfp_rtx);
- x = plus_constant (stack_pointer_rtx, offset);
+ x = plus_constant (Pmode, stack_pointer_rtx, offset);
x = gen_frame_mem (SImode, x);
emit_move_insn (x, work_reg);
x = gen_rtx_REG (SImode, PC_REGNUM);
emit_move_insn (work_reg, x);
- x = plus_constant (stack_pointer_rtx, offset + 12);
+ x = plus_constant (Pmode, stack_pointer_rtx, offset + 12);
x = gen_frame_mem (SImode, x);
emit_move_insn (x, work_reg);
}
@@ -22599,7 +22611,7 @@ thumb1_expand_prologue (void)
x = gen_rtx_REG (SImode, LR_REGNUM);
emit_move_insn (work_reg, x);
- x = plus_constant (stack_pointer_rtx, offset + 8);
+ x = plus_constant (Pmode, stack_pointer_rtx, offset + 8);
x = gen_frame_mem (SImode, x);
emit_move_insn (x, work_reg);
@@ -22733,7 +22745,7 @@ thumb1_expand_prologue (void)
stack_pointer_rtx, reg));
dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode, stack_pointer_rtx,
-amount));
add_reg_note (insn, REG_FRAME_RELATED_EXPR, dwarf);
RTX_FRAME_RELATED_P (insn) = 1;
@@ -23084,8 +23096,10 @@ thumb_expand_movmemqi (rtx *operands)
{
rtx reg = gen_reg_rtx (HImode);
emit_insn (gen_movhi (reg, gen_rtx_MEM (HImode,
- plus_constant (in, offset))));
- emit_insn (gen_movhi (gen_rtx_MEM (HImode, plus_constant (out, offset)),
+ plus_constant (Pmode, in,
+ offset))));
+ emit_insn (gen_movhi (gen_rtx_MEM (HImode, plus_constant (Pmode, out,
+ offset)),
reg));
len -= 2;
offset += 2;
@@ -23095,8 +23109,10 @@ thumb_expand_movmemqi (rtx *operands)
{
rtx reg = gen_reg_rtx (QImode);
emit_insn (gen_movqi (reg, gen_rtx_MEM (QImode,
- plus_constant (in, offset))));
- emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (out, offset)),
+ plus_constant (Pmode, in,
+ offset))));
+ emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (Pmode, out,
+ offset)),
reg));
}
}
@@ -23881,7 +23897,7 @@ arm_set_return_address (rtx source, rtx scratch)
else
{
if (frame_pointer_needed)
- addr = plus_constant(hard_frame_pointer_rtx, -4);
+ addr = plus_constant (Pmode, hard_frame_pointer_rtx, -4);
else
{
/* LR will be the first saved register. */
@@ -23898,7 +23914,7 @@ arm_set_return_address (rtx source, rtx scratch)
else
addr = stack_pointer_rtx;
- addr = plus_constant (addr, delta);
+ addr = plus_constant (Pmode, addr, delta);
}
emit_move_insn (gen_frame_mem (Pmode, addr), source);
}
@@ -23950,7 +23966,7 @@ thumb_set_return_address (rtx source, rtx scratch)
addr = scratch;
}
else
- addr = plus_constant (addr, delta);
+ addr = plus_constant (Pmode, addr, delta);
emit_move_insn (gen_frame_mem (Pmode, addr), source);
}
@@ -25870,5 +25886,51 @@ arm_vectorize_vec_perm_const_ok (enum machine_mode vmode,
return ret;
}
-
+bool
+arm_autoinc_modes_ok_p (enum machine_mode mode, enum arm_auto_incmodes code)
+{
+ /* If we are soft float and we do not have ldrd
+ then all auto increment forms are ok. */
+ if (TARGET_SOFT_FLOAT && (TARGET_LDRD || GET_MODE_SIZE (mode) <= 4))
+ return true;
+
+ switch (code)
+ {
+ /* Post increment and Pre Decrement are supported for all
+ instruction forms except for vector forms. */
+ case ARM_POST_INC:
+ case ARM_PRE_DEC:
+ if (VECTOR_MODE_P (mode))
+ {
+ if (code != ARM_PRE_DEC)
+ return true;
+ else
+ return false;
+ }
+
+ return true;
+
+ case ARM_POST_DEC:
+ case ARM_PRE_INC:
+ /* Without LDRD and mode size greater than
+ word size, there is no point in auto-incrementing
+ because ldm and stm will not have these forms. */
+ if (!TARGET_LDRD && GET_MODE_SIZE (mode) > 4)
+ return false;
+
+ /* Vector and floating point modes do not support
+ these auto increment forms. */
+ if (FLOAT_MODE_P (mode) || VECTOR_MODE_P (mode))
+ return false;
+
+ return true;
+
+ default:
+ return false;
+
+ }
+
+ return false;
+}
+
#include "gt-arm.h"
diff --git a/gcc/config/arm/arm.h b/gcc/config/arm/arm.h
index c6b4cc09a46..f4204e4857a 100644
--- a/gcc/config/arm/arm.h
+++ b/gcc/config/arm/arm.h
@@ -1613,6 +1613,30 @@ typedef struct
#define HAVE_PRE_MODIFY_REG TARGET_32BIT
#define HAVE_POST_MODIFY_REG TARGET_32BIT
+enum arm_auto_incmodes
+ {
+ ARM_POST_INC,
+ ARM_PRE_INC,
+ ARM_POST_DEC,
+ ARM_PRE_DEC
+ };
+
+#define ARM_AUTOINC_VALID_FOR_MODE_P(mode, code) \
+ (TARGET_32BIT && arm_autoinc_modes_ok_p (mode, code))
+#define USE_LOAD_POST_INCREMENT(mode) \
+ ARM_AUTOINC_VALID_FOR_MODE_P(mode, ARM_POST_INC)
+#define USE_LOAD_PRE_INCREMENT(mode) \
+ ARM_AUTOINC_VALID_FOR_MODE_P(mode, ARM_PRE_INC)
+#define USE_LOAD_POST_DECREMENT(mode) \
+ ARM_AUTOINC_VALID_FOR_MODE_P(mode, ARM_POST_DEC)
+#define USE_LOAD_PRE_DECREMENT(mode) \
+ ARM_AUTOINC_VALID_FOR_MODE_P(mode, ARM_PRE_DEC)
+
+#define USE_STORE_PRE_DECREMENT(mode) USE_LOAD_PRE_DECREMENT(mode)
+#define USE_STORE_PRE_INCREMENT(mode) USE_LOAD_PRE_INCREMENT(mode)
+#define USE_STORE_POST_DECREMENT(mode) USE_LOAD_POST_DECREMENT(mode)
+#define USE_STORE_POST_INCREMENT(mode) USE_LOAD_POST_INCREMENT(mode)
+
/* Macros to check register numbers against specific register classes. */
/* These assume that REGNO is a hard or pseudo reg number.
diff --git a/gcc/config/arm/arm.md b/gcc/config/arm/arm.md
index 0103c2bae5e..b1ad3bf34eb 100644
--- a/gcc/config/arm/arm.md
+++ b/gcc/config/arm/arm.md
@@ -4929,7 +4929,8 @@
rtx addr = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
mem1 = change_address (operands[1], QImode, addr);
- mem2 = change_address (operands[1], QImode, plus_constant (addr, 1));
+ mem2 = change_address (operands[1], QImode,
+ plus_constant (Pmode, addr, 1));
operands[0] = gen_lowpart (SImode, operands[0]);
operands[1] = mem1;
operands[2] = gen_reg_rtx (SImode);
@@ -5445,7 +5446,7 @@
return thumb_load_double_from_address (operands);
case 6:
operands[2] = gen_rtx_MEM (SImode,
- plus_constant (XEXP (operands[0], 0), 4));
+ plus_constant (Pmode, XEXP (operands[0], 0), 4));
output_asm_insn (\"str\\t%1, %0\;str\\t%H1, %2\", operands);
return \"\";
case 7:
@@ -6223,7 +6224,8 @@
rtx addr = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
mem1 = change_address (operands[1], QImode, addr);
- mem2 = change_address (operands[1], QImode, plus_constant (addr, 1));
+ mem2 = change_address (operands[1], QImode,
+ plus_constant (Pmode, addr, 1));
operands[0] = gen_lowpart (SImode, operands[0]);
operands[1] = mem1;
operands[2] = gen_reg_rtx (SImode);
@@ -6746,7 +6748,8 @@
return thumb_load_double_from_address (operands);
case 4:
operands[2] = gen_rtx_MEM (SImode,
- plus_constant (XEXP (operands[0], 0), 4));
+ plus_constant (Pmode,
+ XEXP (operands[0], 0), 4));
output_asm_insn (\"str\\t%1, %0\;str\\t%H1, %2\", operands);
return \"\";
case 5:
@@ -8756,7 +8759,7 @@
rtx reg = XEXP (XVECEXP (par, 0, i), 0);
if (size != 0)
- emit_move_insn (addr, plus_constant (addr, size));
+ emit_move_insn (addr, plus_constant (Pmode, addr, size));
mem = change_address (mem, GET_MODE (reg), NULL);
if (REGNO (reg) == R0_REGNUM)
@@ -8803,7 +8806,7 @@
rtx reg = SET_DEST (XVECEXP (operands[1], 0, i));
if (size != 0)
- emit_move_insn (addr, plus_constant (addr, size));
+ emit_move_insn (addr, plus_constant (Pmode, addr, size));
mem = change_address (mem, GET_MODE (reg), NULL);
if (REGNO (reg) == R0_REGNUM)
diff --git a/gcc/config/avr/avr-devices.c b/gcc/config/avr/avr-devices.c
index 41688c82553..47cfefd8612 100644
--- a/gcc/config/avr/avr-devices.c
+++ b/gcc/config/avr/avr-devices.c
@@ -55,6 +55,51 @@ avr_arch_types[] =
{ 0, 1, 1, 1, 1, 1, 1, 1, 1, 0x2000, 0, "107", "avrxmega7" }
};
+const struct arch_info_s
+avr_texinfo[] =
+{
+ { ARCH_AVR1,
+ "This ISA is implemented by the minimal AVR core and supported "
+ "for assembler only." },
+ { ARCH_AVR2,
+ "``Classic'' devices with up to 8@tie{}KiB of program memory." },
+ { ARCH_AVR25,
+ "``Classic'' devices with up to 8@tie{}KiB of program memory and with "
+ "the @code{MOVW} instruction." },
+ { ARCH_AVR3,
+ "``Classic'' devices with 16@tie{}KiB up to 64@tie{}KiB of "
+ " program memory." },
+ { ARCH_AVR31,
+ "``Classic'' devices with 128@tie{}KiB of program memory." },
+ { ARCH_AVR35,
+ "``Classic'' devices with 16@tie{}KiB up to 64@tie{}KiB of "
+ "program memory and with the @code{MOVW} instruction." },
+ { ARCH_AVR4,
+ "``Enhanced'' devices with up to 8@tie{}KiB of program memory." },
+ { ARCH_AVR5,
+ "``Enhanced'' devices with 16@tie{}KiB up to 64@tie{}KiB of "
+ "program memory." },
+ { ARCH_AVR51,
+ "``Enhanced'' devices with 128@tie{}KiB of program memory." },
+ { ARCH_AVR6,
+ "``Enhanced'' devices with 3-byte PC, i.e.@: with more than 128@tie{}KiB "
+ "of program memory." },
+ { ARCH_AVRXMEGA2,
+ "``XMEGA'' devices with more than 8@tie{}KiB and up to 64@tie{}KiB "
+ "of program memory." },
+ { ARCH_AVRXMEGA4,
+ "``XMEGA'' devices with more than 64@tie{}KiB and up to 128@tie{}KiB "
+ "of program memory." },
+ { ARCH_AVRXMEGA5,
+ "``XMEGA'' devices with more than 64@tie{}KiB and up to 128@tie{}KiB "
+ "of program memory and more than 64@tie{}KiB of RAM." },
+ { ARCH_AVRXMEGA6,
+ "``XMEGA'' devices with more than 128@tie{}KiB of program memory." },
+ { ARCH_AVRXMEGA7,
+ "``XMEGA'' devices with more than 128@tie{}KiB of program memory "
+ "and more than 64@tie{}KiB of RAM." }
+};
+
const struct mcu_type_s avr_mcu_types[] = {
#define AVR_MCU(NAME,ARCH,MACRO,SHORT_SP,ERRATA_SKIP,DATA_SEC,N_FLASH,LIB_NAME)\
{ NAME, ARCH, MACRO, SHORT_SP, ERRATA_SKIP, DATA_SEC, N_FLASH, LIB_NAME },
diff --git a/gcc/config/avr/avr-protos.h b/gcc/config/avr/avr-protos.h
index 158a7be5b3e..fa1462ce886 100644
--- a/gcc/config/avr/avr-protos.h
+++ b/gcc/config/avr/avr-protos.h
@@ -26,7 +26,6 @@ extern int function_arg_regno_p (int r);
extern void avr_cpu_cpp_builtins (struct cpp_reader * pfile);
extern enum reg_class avr_regno_reg_class (int r);
extern void asm_globalize_label (FILE *file, const char *name);
-extern void avr_asm_declare_function_name (FILE *, const char *, tree);
extern void order_regs_for_local_alloc (void);
extern int avr_initial_elimination_offset (int from, int to);
extern int avr_simple_epilogue (void);
diff --git a/gcc/config/avr/avr.c b/gcc/config/avr/avr.c
index af00aeea07a..5b28096d799 100644
--- a/gcc/config/avr/avr.c
+++ b/gcc/config/avr/avr.c
@@ -138,12 +138,6 @@ static const char* out_movqi_mr_r (rtx, rtx[], int*);
static const char* out_movhi_mr_r (rtx, rtx[], int*);
static const char* out_movsi_mr_r (rtx, rtx[], int*);
-static int avr_naked_function_p (tree);
-static int interrupt_function_p (tree);
-static int signal_function_p (tree);
-static int avr_OS_task_function_p (tree);
-static int avr_OS_main_function_p (tree);
-static int avr_regs_to_save (HARD_REG_SET *);
static int get_sequence_length (rtx insns);
static int sequent_regs_live (void);
static const char *ptrreg_to_str (int);
@@ -491,7 +485,7 @@ avr_naked_function_p (tree func)
by the "interrupt" attribute. */
static int
-interrupt_function_p (tree func)
+avr_interrupt_function_p (tree func)
{
return avr_lookup_function_attribute1 (func, "interrupt");
}
@@ -500,7 +494,7 @@ interrupt_function_p (tree func)
by the "signal" attribute. */
static int
-signal_function_p (tree func)
+avr_signal_function_p (tree func)
{
return avr_lookup_function_attribute1 (func, "signal");
}
@@ -522,6 +516,80 @@ avr_OS_main_function_p (tree func)
}
+/* Implement `TARGET_SET_CURRENT_FUNCTION'. */
+/* Sanity cheching for above function attributes. */
+
+static void
+avr_set_current_function (tree decl)
+{
+ location_t loc;
+ const char *isr;
+
+ if (decl == NULL_TREE
+ || current_function_decl == NULL_TREE
+ || current_function_decl == error_mark_node
+ || cfun->machine->attributes_checked_p)
+ return;
+
+ loc = DECL_SOURCE_LOCATION (decl);
+
+ cfun->machine->is_naked = avr_naked_function_p (decl);
+ cfun->machine->is_signal = avr_signal_function_p (decl);
+ cfun->machine->is_interrupt = avr_interrupt_function_p (decl);
+ cfun->machine->is_OS_task = avr_OS_task_function_p (decl);
+ cfun->machine->is_OS_main = avr_OS_main_function_p (decl);
+
+ isr = cfun->machine->is_interrupt ? "interrupt" : "signal";
+
+ /* Too much attributes make no sense as they request conflicting features. */
+
+ if (cfun->machine->is_OS_task + cfun->machine->is_OS_main
+ + (cfun->machine->is_signal || cfun->machine->is_interrupt) > 1)
+ error_at (loc, "function attributes %qs, %qs and %qs are mutually"
+ " exclusive", "OS_task", "OS_main", isr);
+
+ /* 'naked' will hide effects of 'OS_task' and 'OS_main'. */
+
+ if (cfun->machine->is_naked
+ && (cfun->machine->is_OS_task || cfun->machine->is_OS_main))
+ warning_at (loc, OPT_Wattributes, "function attributes %qs and %qs have"
+ " no effect on %qs function", "OS_task", "OS_main", "naked");
+
+ if (cfun->machine->is_interrupt || cfun->machine->is_signal)
+ {
+ tree args = TYPE_ARG_TYPES (TREE_TYPE (decl));
+ tree ret = TREE_TYPE (TREE_TYPE (decl));
+ const char *name = IDENTIFIER_POINTER (DECL_NAME (decl));
+
+ /* Silently ignore 'signal' if 'interrupt' is present. AVR-LibC startet
+ using this when it switched from SIGNAL and INTERRUPT to ISR. */
+
+ if (cfun->machine->is_interrupt)
+ cfun->machine->is_signal = 0;
+
+ /* Interrupt handlers must be void __vector (void) functions. */
+
+ if (args && TREE_CODE (TREE_VALUE (args)) != VOID_TYPE)
+ error_at (loc, "%qs function cannot have arguments", isr);
+
+ if (TREE_CODE (ret) != VOID_TYPE)
+ error_at (loc, "%qs function cannot return a value", isr);
+
+ /* If the function has the 'signal' or 'interrupt' attribute, ensure
+ that the name of the function is "__vector_NN" so as to catch
+ when the user misspells the vector name. */
+
+ if (!STR_PREFIX_P (name, "__vector"))
+ warning_at (loc, 0, "%qs appears to be a misspelled %s handler",
+ name, isr);
+ }
+
+ /* Avoid the above diagnosis to be printed more than once. */
+
+ cfun->machine->attributes_checked_p = 1;
+}
+
+
/* Implement `ACCUMULATE_OUTGOING_ARGS'. */
int
@@ -570,8 +638,7 @@ static int
avr_regs_to_save (HARD_REG_SET *set)
{
int reg, count;
- int int_or_sig_p = (interrupt_function_p (current_function_decl)
- || signal_function_p (current_function_decl));
+ int int_or_sig_p = cfun->machine->is_interrupt || cfun->machine->is_signal;
if (set)
CLEAR_HARD_REG_SET (*set);
@@ -683,9 +750,9 @@ avr_simple_epilogue (void)
&& get_frame_size () == 0
&& avr_outgoing_args_size() == 0
&& avr_regs_to_save (NULL) == 0
- && ! interrupt_function_p (current_function_decl)
- && ! signal_function_p (current_function_decl)
- && ! avr_naked_function_p (current_function_decl)
+ && ! cfun->machine->is_interrupt
+ && ! cfun->machine->is_signal
+ && ! cfun->machine->is_naked
&& ! TREE_THIS_VOLATILE (current_function_decl));
}
@@ -770,7 +837,7 @@ avr_incoming_return_addr_rtx (void)
{
/* The return address is at the top of the stack. Note that the push
was via post-decrement, which means the actual address is off by one. */
- return gen_frame_mem (HImode, plus_constant (stack_pointer_rtx, 1));
+ return gen_frame_mem (HImode, plus_constant (Pmode, stack_pointer_rtx, 1));
}
/* Helper for expand_prologue. Emit a push of a byte register. */
@@ -866,7 +933,7 @@ avr_prologue_setup_frame (HOST_WIDE_INT size, HARD_REG_SET set)
gen_rtx_SET (VOIDmode, (frame_pointer_needed
? frame_pointer_rtx
: stack_pointer_rtx),
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode, stack_pointer_rtx,
-(size + live_seq))));
/* Note that live_seq always contains r28+r29, but the other
@@ -880,7 +947,8 @@ avr_prologue_setup_frame (HOST_WIDE_INT size, HARD_REG_SET set)
{
rtx m, r;
- m = gen_rtx_MEM (QImode, plus_constant (stack_pointer_rtx, offset));
+ m = gen_rtx_MEM (QImode, plus_constant (Pmode, stack_pointer_rtx,
+ offset));
r = gen_rtx_REG (QImode, reg);
add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (VOIDmode, m, r));
}
@@ -995,13 +1063,15 @@ avr_prologue_setup_frame (HOST_WIDE_INT size, HARD_REG_SET set)
gen_rtx_SET (VOIDmode, fp, stack_pointer_rtx));
}
- insn = emit_move_insn (my_fp, plus_constant (my_fp, -size));
+ insn = emit_move_insn (my_fp, plus_constant (GET_MODE (my_fp),
+ my_fp, -size));
if (frame_pointer_needed)
{
RTX_FRAME_RELATED_P (insn) = 1;
add_reg_note (insn, REG_CFA_ADJUST_CFA,
gen_rtx_SET (VOIDmode, fp,
- plus_constant (fp, -size_cfa)));
+ plus_constant (Pmode, fp,
+ -size_cfa)));
}
/* Copy to stack pointer. Note that since we've already
@@ -1028,7 +1098,8 @@ avr_prologue_setup_frame (HOST_WIDE_INT size, HARD_REG_SET set)
RTX_FRAME_RELATED_P (insn) = 1;
add_reg_note (insn, REG_CFA_ADJUST_CFA,
gen_rtx_SET (VOIDmode, stack_pointer_rtx,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode,
+ stack_pointer_rtx,
-size_cfa)));
}
@@ -1047,11 +1118,13 @@ avr_prologue_setup_frame (HOST_WIDE_INT size, HARD_REG_SET set)
start_sequence ();
insn = emit_move_insn (stack_pointer_rtx,
- plus_constant (stack_pointer_rtx, -size));
+ plus_constant (Pmode, stack_pointer_rtx,
+ -size));
RTX_FRAME_RELATED_P (insn) = 1;
add_reg_note (insn, REG_CFA_ADJUST_CFA,
gen_rtx_SET (VOIDmode, stack_pointer_rtx,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode,
+ stack_pointer_rtx,
-size_cfa)));
if (frame_pointer_needed)
{
@@ -1090,12 +1163,6 @@ expand_prologue (void)
size = get_frame_size() + avr_outgoing_args_size();
- /* Init cfun->machine. */
- cfun->machine->is_naked = avr_naked_function_p (current_function_decl);
- cfun->machine->is_interrupt = interrupt_function_p (current_function_decl);
- cfun->machine->is_signal = signal_function_p (current_function_decl);
- cfun->machine->is_OS_task = avr_OS_task_function_p (current_function_decl);
- cfun->machine->is_OS_main = avr_OS_main_function_p (current_function_decl);
cfun->machine->stack_usage = 0;
/* Prologue: naked. */
@@ -1273,7 +1340,7 @@ expand_epilogue (bool sibcall_p)
if (size)
{
emit_move_insn (frame_pointer_rtx,
- plus_constant (frame_pointer_rtx, size));
+ plus_constant (Pmode, frame_pointer_rtx, size));
}
emit_insn (gen_epilogue_restores (gen_int_mode (live_seq, HImode)));
@@ -1319,7 +1386,7 @@ expand_epilogue (bool sibcall_p)
if (!frame_pointer_needed)
emit_move_insn (fp, stack_pointer_rtx);
- emit_move_insn (my_fp, plus_constant (my_fp, size));
+ emit_move_insn (my_fp, plus_constant (GET_MODE (my_fp), my_fp, size));
/* Copy to stack pointer. */
@@ -1344,7 +1411,7 @@ expand_epilogue (bool sibcall_p)
start_sequence ();
emit_move_insn (stack_pointer_rtx,
- plus_constant (stack_pointer_rtx, size));
+ plus_constant (Pmode, stack_pointer_rtx, size));
sp_plus_insns = get_insns ();
end_sequence ();
@@ -2452,17 +2519,17 @@ avr_function_ok_for_sibcall (tree decl_callee, tree exp_callee)
/* Ensure that caller and callee have compatible epilogues */
- if (interrupt_function_p (current_function_decl)
- || signal_function_p (current_function_decl)
+ if (cfun->machine->is_interrupt
+ || cfun->machine->is_signal
+ || cfun->machine->is_naked
|| avr_naked_function_p (decl_callee)
- || avr_naked_function_p (current_function_decl)
/* FIXME: For OS_task and OS_main, we are over-conservative.
This is due to missing documentation of these attributes
and what they actually should do and should not do. */
|| (avr_OS_task_function_p (decl_callee)
- != avr_OS_task_function_p (current_function_decl))
+ != cfun->machine->is_OS_task)
|| (avr_OS_main_function_p (decl_callee)
- != avr_OS_main_function_p (current_function_decl)))
+ != cfun->machine->is_OS_main))
{
return false;
}
@@ -6594,7 +6661,7 @@ avr_const_address_lo16 (rtx x)
const char *name = XSTR (XEXP (XEXP (x, 0), 0), 0);
lo16 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
- lo16 = gen_rtx_CONST (Pmode, plus_constant (lo16, offset));
+ lo16 = gen_rtx_CONST (Pmode, plus_constant (Pmode, lo16, offset));
return lo16;
}
@@ -6650,40 +6717,6 @@ avr_assemble_integer (rtx x, unsigned int size, int aligned_p)
}
-/* Worker function for ASM_DECLARE_FUNCTION_NAME. */
-
-void
-avr_asm_declare_function_name (FILE *file, const char *name, tree decl)
-{
-
- /* If the function has the 'signal' or 'interrupt' attribute, test to
- make sure that the name of the function is "__vector_NN" so as to
- catch when the user misspells the interrupt vector name. */
-
- if (cfun->machine->is_interrupt)
- {
- if (!STR_PREFIX_P (name, "__vector"))
- {
- warning_at (DECL_SOURCE_LOCATION (decl), 0,
- "%qs appears to be a misspelled interrupt handler",
- name);
- }
- }
- else if (cfun->machine->is_signal)
- {
- if (!STR_PREFIX_P (name, "__vector"))
- {
- warning_at (DECL_SOURCE_LOCATION (decl), 0,
- "%qs appears to be a misspelled signal handler",
- name);
- }
- }
-
- ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
- ASM_OUTPUT_LABEL (file, name);
-}
-
-
/* Return value is nonzero if pseudos that have been
assigned to registers of class CLASS would likely be spilled
because registers of CLASS are needed for spill registers. */
@@ -10859,6 +10892,9 @@ avr_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED, tree *arg,
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE avr_function_arg_advance
+#undef TARGET_SET_CURRENT_FUNCTION
+#define TARGET_SET_CURRENT_FUNCTION avr_set_current_function
+
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY avr_return_in_memory
diff --git a/gcc/config/avr/avr.h b/gcc/config/avr/avr.h
index 591e21dd5b9..dfbd071d192 100644
--- a/gcc/config/avr/avr.h
+++ b/gcc/config/avr/avr.h
@@ -133,6 +133,14 @@ struct mcu_type_s {
const char *const library_name;
};
+struct arch_info_s {
+ /* Architecture ID. */
+ enum avr_arch arch;
+
+ /* texinfo source to describe the architecture. */
+ const char *texinfo;
+};
+
/* Preprocessor macros to define depending on MCU type. */
extern const char *avr_extra_arch_macro;
extern const struct base_arch_s *avr_current_arch;
@@ -699,6 +707,10 @@ struct GTY(()) machine_function
/* 'true' if a callee might be tail called */
int sibcall_fails;
+
+ /* 'true' if the above is_foo predicates are sanity-checked to avoid
+ multiple diagnoses for the same function. */
+ int attributes_checked_p;
};
/* AVR does not round pushes, but the existance of this macro is
diff --git a/gcc/config/avr/elf.h b/gcc/config/avr/elf.h
index ebda5dd11e0..6d79dc38cb8 100644
--- a/gcc/config/avr/elf.h
+++ b/gcc/config/avr/elf.h
@@ -32,11 +32,6 @@
#undef STRING_LIMIT
#define STRING_LIMIT ((unsigned) 64)
-/* Take care of `signal' and `interrupt' attributes. */
-#undef ASM_DECLARE_FUNCTION_NAME
-#define ASM_DECLARE_FUNCTION_NAME(FILE, NAME, DECL) \
- avr_asm_declare_function_name ((FILE), (NAME), (DECL))
-
/* Output alignment 2**1 for jump tables. */
#undef ASM_OUTPUT_BEFORE_CASE_LABEL
#define ASM_OUTPUT_BEFORE_CASE_LABEL(FILE, PREFIX, NUM, TABLE) \
diff --git a/gcc/config/avr/gen-avr-mmcu-texi.c b/gcc/config/avr/gen-avr-mmcu-texi.c
new file mode 100644
index 00000000000..0bbd3a30b71
--- /dev/null
+++ b/gcc/config/avr/gen-avr-mmcu-texi.c
@@ -0,0 +1,73 @@
+/* Copyright (C) 2012
+ Free Software Foundation, Inc.
+ Contributed by Georg-Johann Lay (avr@gjlay.de)
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include "avr-devices.c"
+
+int main (void)
+{
+ enum avr_arch arch = 0;
+ unsigned i, first = 1;
+ const struct mcu_type_s *mcu;
+
+ printf ("@c Copyright (C) 2012 Free Software Foundation, Inc.\n");
+ printf ("@c This is part of the GCC manual.\n");
+ printf ("@c For copying conditions, see the file "
+ "gcc/doc/include/fdl.texi.\n\n");
+
+ printf ("@c This file is generated automatically using\n");
+ printf ("@c gcc/config/avr/gen-avr-mmcu-texi.c from:\n");
+ printf ("@c gcc/config/avr/avr-devices.c\n");
+ printf ("@c gcc/config/avr/avr-mcus.def\n\n");
+
+ printf ("@c Please do not edit manually.\n\n");
+
+ printf ("@table @code\n\n");
+
+ for (mcu = avr_mcu_types; mcu->name; mcu++)
+ {
+ if (mcu->macro == NULL)
+ {
+ arch = mcu->arch;
+
+ for (i = 0; i < sizeof (avr_texinfo) / sizeof (*avr_texinfo); i++)
+ {
+ if (arch == avr_texinfo[i].arch)
+ {
+ if (mcu != avr_mcu_types)
+ printf (".\n\n");
+ printf ("@item %s\n%s\n", mcu->name, avr_texinfo[i].texinfo);
+ printf ("@*@var{mcu}@tie{}=");
+ first = 1;
+ break;
+ }
+ }
+ }
+ else if (arch == (enum avr_arch) mcu->arch)
+ {
+ printf ("%s @code{%s}", first ? "" : ",", mcu->name);
+ first = 0;
+ }
+ }
+
+ printf (".\n\n");
+ printf ("@end table\n");
+
+ return EXIT_SUCCESS;
+}
diff --git a/gcc/config/avr/t-avr b/gcc/config/avr/t-avr
index 99638333204..24cdd92590a 100644
--- a/gcc/config/avr/t-avr
+++ b/gcc/config/avr/t-avr
@@ -43,6 +43,26 @@ AVR_MCUS = $(srcdir)/config/avr/avr-mcus.def
$(srcdir)/config/avr/avr-tables.opt: $(srcdir)/config/avr/genopt.sh $(AVR_MCUS)
$(SHELL) $< $(AVR_MCUS) > $@
+gen-avr-mmcu-texi$(build_exeext): $(srcdir)/config/avr/gen-avr-mmcu-texi.c \
+ $(TM_H) $(AVR_MCUS) $(srcdir)/config/avr/avr-devices.c
+ $(CC) $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $< -o $@
+
+avr-devices.o: s-avr-mmcu-texi
+
+s-avr-mmcu-texi: gen-avr-mmcu-texi$(build_exeext)
+ $(RUN_GEN) $< | sed -e 's:\r::g' > avr-mmcu.texi
+ @if cmp -s $(srcdir)/doc/avr-mmcu.texi avr-mmcu.texi; then \
+ $(STAMP) $@; \
+ else \
+ echo >&2 ; \
+ echo "***" >&2 ; \
+ echo "*** Verify that you have permission to grant a" >&2 ; \
+ echo "*** GFDL license for all new text in" >&2 ; \
+ echo "*** avr-mmcu.texi, then copy it to $(srcdir)/doc/avr-mmcu.texi" >&2 ; \
+ echo "***" >&2 ; \
+ false; \
+ fi
+
# MULTILIB_OPTIONS
# MULTILIB_DIRNAMES
# MULTILIB_EXCEPTIONS
diff --git a/gcc/config/bfin/bfin.c b/gcc/config/bfin/bfin.c
index 577f091a473..1342c568fd0 100644
--- a/gcc/config/bfin/bfin.c
+++ b/gcc/config/bfin/bfin.c
@@ -580,7 +580,8 @@ setup_incoming_varargs (cumulative_args_t cum,
for (i = get_cumulative_args (cum)->words + 1; i < max_arg_registers; i++)
{
mem = gen_rtx_MEM (Pmode,
- plus_constant (arg_pointer_rtx, (i * UNITS_PER_WORD)));
+ plus_constant (Pmode, arg_pointer_rtx,
+ (i * UNITS_PER_WORD)));
emit_move_insn (mem, gen_rtx_REG (Pmode, i));
}
@@ -1050,7 +1051,8 @@ bfin_load_pic_reg (rtx dest)
return pic_offset_table_rtx;
if (global_options_set.x_bfin_library_id)
- addr = plus_constant (pic_offset_table_rtx, -4 - bfin_library_id * 4);
+ addr = plus_constant (Pmode, pic_offset_table_rtx,
+ -4 - bfin_library_id * 4);
else
addr = gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
@@ -1111,7 +1113,7 @@ bfin_expand_prologue (void)
}
else
{
- rtx limit = plus_constant (lim, offset);
+ rtx limit = plus_constant (Pmode, lim, offset);
emit_move_insn (p2reg, limit);
lim = p2reg;
}
@@ -1883,7 +1885,7 @@ bfin_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
if (TARGET_FDPIC)
{
- rtx a = force_reg (Pmode, plus_constant (XEXP (m_tramp, 0), 8));
+ rtx a = force_reg (Pmode, plus_constant (Pmode, XEXP (m_tramp, 0), 8));
mem = adjust_address (m_tramp, Pmode, 0);
emit_move_insn (mem, a);
i = 8;
@@ -2077,7 +2079,7 @@ bfin_expand_call (rtx retval, rtx fnaddr, rtx callarg1, rtx cookie, int sibcall)
picreg = gen_reg_rtx (SImode);
emit_insn (gen_load_funcdescsi (picreg,
- plus_constant (addr, 4)));
+ plus_constant (Pmode, addr, 4)));
}
nelts++;
@@ -4942,7 +4944,8 @@ bfin_output_mi_thunk (FILE *file ATTRIBUTE_UNUSED,
output_asm_insn ("%2 = r0; %2 = [%2];", xops);
/* Adjust the this parameter. */
- xops[0] = gen_rtx_MEM (Pmode, plus_constant (p2tmp, vcall_offset));
+ xops[0] = gen_rtx_MEM (Pmode, plus_constant (Pmode, p2tmp,
+ vcall_offset));
if (!memory_operand (xops[0], Pmode))
{
rtx tmp2 = gen_rtx_REG (Pmode, REG_P1);
diff --git a/gcc/config/bfin/bfin.h b/gcc/config/bfin/bfin.h
index cc3b14f504b..03759bffdea 100644
--- a/gcc/config/bfin/bfin.h
+++ b/gcc/config/bfin/bfin.h
@@ -784,7 +784,8 @@ typedef struct {
#define EH_RETURN_DATA_REGNO(N) ((N) < 2 ? (N) : INVALID_REGNUM)
#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (Pmode, REG_P2)
#define EH_RETURN_HANDLER_RTX \
- gen_frame_mem (Pmode, plus_constant (frame_pointer_rtx, UNITS_PER_WORD))
+ gen_frame_mem (Pmode, plus_constant (Pmode, frame_pointer_rtx, \
+ UNITS_PER_WORD))
/* Addressing Modes */
diff --git a/gcc/config/c6x/c6x.c b/gcc/config/c6x/c6x.c
index 4cb4ffb4c4c..8a368892bb2 100644
--- a/gcc/config/c6x/c6x.c
+++ b/gcc/config/c6x/c6x.c
@@ -735,7 +735,8 @@ c6x_initialize_trampoline (rtx tramp, tree fndecl, rtx cxt)
tramp = XEXP (tramp, 0);
emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__gnu_clear_cache"),
LCT_NORMAL, VOIDmode, 2, tramp, Pmode,
- plus_constant (tramp, TRAMPOLINE_SIZE), Pmode);
+ plus_constant (Pmode, tramp, TRAMPOLINE_SIZE),
+ Pmode);
#endif
}
@@ -822,7 +823,8 @@ c6x_output_mi_thunk (FILE *file ATTRIBUTE_UNUSED,
output_asm_insn ("ldw .d1t1 %3, %2", xops);
/* Adjust the this parameter. */
- xops[0] = gen_rtx_MEM (Pmode, plus_constant (a0tmp, vcall_offset));
+ xops[0] = gen_rtx_MEM (Pmode, plus_constant (Pmode, a0tmp,
+ vcall_offset));
if (!memory_operand (xops[0], Pmode))
{
rtx tmp2 = gen_rtx_REG (Pmode, REG_A1);
diff --git a/gcc/config/cr16/cr16.c b/gcc/config/cr16/cr16.c
index 65968f8cda7..852c808f571 100644
--- a/gcc/config/cr16/cr16.c
+++ b/gcc/config/cr16/cr16.c
@@ -1851,7 +1851,7 @@ cr16_create_dwarf_for_multi_push (rtx insn)
tmp = gen_rtx_SET (VOIDmode,
gen_frame_mem (mode,
plus_constant
- (stack_pointer_rtx,
+ (Pmode, stack_pointer_rtx,
total_push_bytes - offset)),
reg);
RTX_FRAME_RELATED_P (tmp) = 1;
diff --git a/gcc/config/cr16/cr16.h b/gcc/config/cr16/cr16.h
index 01577ca5448..54794e1fd27 100644
--- a/gcc/config/cr16/cr16.h
+++ b/gcc/config/cr16/cr16.h
@@ -238,7 +238,7 @@ while (0)
#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (Pmode, 4)
#define EH_RETURN_HANDLER_RTX \
- gen_rtx_MEM (Pmode, plus_constant (arg_pointer_rtx, -4))
+ gen_rtx_MEM (Pmode, plus_constant (Pmode, arg_pointer_rtx, -4))
#define INCOMING_RETURN_ADDR_RTX gen_rtx_RA
diff --git a/gcc/config/cris/cris.c b/gcc/config/cris/cris.c
index de9e26984ea..349f6019e3e 100644
--- a/gcc/config/cris/cris.c
+++ b/gcc/config/cris/cris.c
@@ -1181,7 +1181,7 @@ cris_return_addr_rtx (int count, rtx frameaddr ATTRIBUTE_UNUSED)
present). Apparently we can't eliminate from the frame-pointer in
that direction, so use the incoming args (maybe pretended) pointer. */
return count == 0
- ? gen_rtx_MEM (Pmode, plus_constant (virtual_incoming_args_rtx, -4))
+ ? gen_rtx_MEM (Pmode, plus_constant (Pmode, virtual_incoming_args_rtx, -4))
: NULL_RTX;
}
@@ -1695,6 +1695,7 @@ cris_normal_notice_update_cc (rtx exp, rtx insn)
&& (REGNO (SET_SRC (exp))
> CRIS_LAST_GENERAL_REGISTER))
|| (TARGET_V32
+ && REG_P (SET_DEST (exp))
&& satisfies_constraint_I (SET_SRC (exp))))
{
/* There's no CC0 change for this case. Just check
@@ -2815,14 +2816,14 @@ cris_split_movdx (rtx *operands)
operand_subword (dest, reverse, TRUE, mode),
change_address
(src, SImode,
- plus_constant (addr,
+ plus_constant (Pmode, addr,
reverse * UNITS_PER_WORD))));
emit_insn (gen_rtx_SET
(VOIDmode,
operand_subword (dest, ! reverse, TRUE, mode),
change_address
(src, SImode,
- plus_constant (addr,
+ plus_constant (Pmode, addr,
(! reverse) *
UNITS_PER_WORD))));
}
@@ -2882,7 +2883,7 @@ cris_split_movdx (rtx *operands)
emit_insn (gen_rtx_SET
(VOIDmode,
change_address (dest, SImode,
- plus_constant (addr,
+ plus_constant (Pmode, addr,
UNITS_PER_WORD)),
operand_subword (src, 1, TRUE, mode)));
}
@@ -2954,7 +2955,8 @@ cris_expand_prologue (void)
{
insn = emit_insn (gen_rtx_SET (VOIDmode,
stack_pointer_rtx,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode,
+ stack_pointer_rtx,
-4)));
/* FIXME: When dwarf2 frame output and unless asynchronous
exceptions, make dwarf2 bundle together all stack
@@ -2982,7 +2984,7 @@ cris_expand_prologue (void)
{
insn = emit_insn (gen_rtx_SET (VOIDmode,
stack_pointer_rtx,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode, stack_pointer_rtx,
-4 - pretend)));
pretend = 0;
RTX_FRAME_RELATED_P (insn) = 1;
@@ -2999,7 +3001,7 @@ cris_expand_prologue (void)
{
insn = emit_insn (gen_rtx_SET (VOIDmode,
stack_pointer_rtx,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode, stack_pointer_rtx,
-4 - pretend)));
pretend = 0;
RTX_FRAME_RELATED_P (insn) = 1;
@@ -3052,7 +3054,7 @@ cris_expand_prologue (void)
{
mem
= gen_rtx_MEM (SImode,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode, stack_pointer_rtx,
-(n_saved * 4 + size)));
set_mem_alias_set (mem, get_frame_alias_set ());
insn
@@ -3065,7 +3067,7 @@ cris_expand_prologue (void)
insn
= gen_rtx_SET (VOIDmode,
stack_pointer_rtx,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode, stack_pointer_rtx,
-(n_saved * 4 + size)));
insn = emit_insn (insn);
RTX_FRAME_RELATED_P (insn) = 1;
@@ -3083,7 +3085,8 @@ cris_expand_prologue (void)
insn = emit_insn (gen_rtx_SET (VOIDmode,
stack_pointer_rtx,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode,
+ stack_pointer_rtx,
-4 - size)));
RTX_FRAME_RELATED_P (insn) = 1;
@@ -3113,7 +3116,7 @@ cris_expand_prologue (void)
{
mem
= gen_rtx_MEM (SImode,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode, stack_pointer_rtx,
-(n_saved * 4 + size)));
set_mem_alias_set (mem, get_frame_alias_set ());
insn = cris_emit_movem_store (mem, GEN_INT (n_saved),
@@ -3124,7 +3127,7 @@ cris_expand_prologue (void)
insn
= gen_rtx_SET (VOIDmode,
stack_pointer_rtx,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode, stack_pointer_rtx,
-(n_saved * 4 + size)));
insn = emit_insn (insn);
RTX_FRAME_RELATED_P (insn) = 1;
@@ -3140,7 +3143,8 @@ cris_expand_prologue (void)
{
insn = emit_insn (gen_rtx_SET (VOIDmode,
stack_pointer_rtx,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode,
+ stack_pointer_rtx,
-cfoa_size)));
RTX_FRAME_RELATED_P (insn) = 1;
framesize += cfoa_size;
@@ -3150,7 +3154,8 @@ cris_expand_prologue (void)
{
insn = emit_insn (gen_rtx_SET (VOIDmode,
stack_pointer_rtx,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode,
+ stack_pointer_rtx,
-(cfoa_size + size))));
RTX_FRAME_RELATED_P (insn) = 1;
framesize += size + cfoa_size;
@@ -3248,7 +3253,7 @@ cris_expand_epilogue (void)
the saved registers. We have to adjust for that. */
emit_insn (gen_rtx_SET (VOIDmode,
stack_pointer_rtx,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode, stack_pointer_rtx,
argspace_offset)));
/* Make sure we only do this once. */
argspace_offset = 0;
@@ -3274,7 +3279,7 @@ cris_expand_epilogue (void)
{
emit_insn (gen_rtx_SET (VOIDmode,
stack_pointer_rtx,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode, stack_pointer_rtx,
argspace_offset)));
argspace_offset = 0;
}
@@ -3333,7 +3338,7 @@ cris_expand_epilogue (void)
emit_insn (gen_rtx_SET (VOIDmode,
stack_pointer_rtx,
- plus_constant (stack_pointer_rtx, size)));
+ plus_constant (Pmode, stack_pointer_rtx, size)));
}
/* If this function has no pushed register parameters
@@ -3395,7 +3400,8 @@ cris_expand_epilogue (void)
emit_insn (gen_rtx_SET (VOIDmode,
stack_pointer_rtx,
- plus_constant (stack_pointer_rtx, pretend)));
+ plus_constant (Pmode, stack_pointer_rtx,
+ pretend)));
}
/* Perform the "physical" unwinding that the EH machinery calculated. */
@@ -3443,7 +3449,8 @@ cris_gen_movem_load (rtx src, rtx nregs_rtx, int nprefix)
if (GET_CODE (XEXP (src, 0)) == POST_INC)
{
RTVEC_ELT (vec, nprefix + 1)
- = gen_rtx_SET (VOIDmode, srcreg, plus_constant (srcreg, nregs * 4));
+ = gen_rtx_SET (VOIDmode, srcreg,
+ plus_constant (Pmode, srcreg, nregs * 4));
eltno++;
}
@@ -3514,7 +3521,8 @@ cris_emit_movem_store (rtx dest, rtx nregs_rtx, int increment,
RTVEC_ELT (vec, 0) = mov;
RTVEC_ELT (vec, 1) = gen_rtx_SET (VOIDmode, destreg,
- plus_constant (destreg, increment));
+ plus_constant (Pmode, destreg,
+ increment));
if (frame_related)
{
RTX_FRAME_RELATED_P (mov) = 1;
@@ -3527,7 +3535,7 @@ cris_emit_movem_store (rtx dest, rtx nregs_rtx, int increment,
RTVEC_ELT (vec, 0)
= gen_rtx_SET (VOIDmode,
replace_equiv_address (dest,
- plus_constant (destreg,
+ plus_constant (Pmode, destreg,
increment)),
gen_rtx_REG (SImode, regno));
regno += regno_inc;
@@ -3542,7 +3550,7 @@ cris_emit_movem_store (rtx dest, rtx nregs_rtx, int increment,
{
RTVEC_ELT (vec, 1)
= gen_rtx_SET (VOIDmode, destreg,
- plus_constant (destreg,
+ plus_constant (Pmode, destreg,
increment != 0
? increment : nregs * 4));
eltno++;
@@ -4143,7 +4151,7 @@ cris_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
if (TARGET_V32)
{
mem = adjust_address (m_tramp, SImode, 6);
- emit_move_insn (mem, plus_constant (tramp, 38));
+ emit_move_insn (mem, plus_constant (Pmode, tramp, 38));
mem = adjust_address (m_tramp, SImode, 22);
emit_move_insn (mem, chain_value);
mem = adjust_address (m_tramp, SImode, 28);
diff --git a/gcc/config/cris/cris.md b/gcc/config/cris/cris.md
index 92657d12750..b4ead76f38b 100644
--- a/gcc/config/cris/cris.md
+++ b/gcc/config/cris/cris.md
@@ -976,7 +976,7 @@
tem = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, sym),
CRIS_UNSPEC_PCREL);
if (offs != 0)
- tem = plus_constant (tem, offs);
+ tem = plus_constant (Pmode, tem, offs);
rm = rn;
emit_move_insn (rm, gen_rtx_CONST (Pmode, tem));
}
@@ -988,7 +988,7 @@
tem = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, sym),
CRIS_UNSPEC_GOTREL);
if (offs != 0)
- tem = plus_constant (tem, offs);
+ tem = plus_constant (Pmode, tem, offs);
rm = gen_reg_rtx (Pmode);
emit_move_insn (rm, gen_rtx_CONST (Pmode, tem));
if (expand_binop (Pmode, add_optab, rm, pic_offset_table_rtx,
@@ -3868,7 +3868,7 @@
(use (label_ref (match_operand 3 "" "")))])]
""
{
- operands[2] = plus_constant (operands[2], 1);
+ operands[2] = plus_constant (SImode, operands[2], 1);
operands[5] = gen_reg_rtx (SImode);
operands[6] = gen_reg_rtx (SImode);
operands[7] = gen_reg_rtx (SImode);
@@ -3903,7 +3903,7 @@
rtx xlabel = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
for (i = 5; i <= 10; i++)
operands[i] = gen_reg_rtx (SImode);
- operands[2] = plus_constant (operands[2], 1);
+ operands[2] = plus_constant (SImode, operands[2], 1);
/* Don't forget to decorate labels too, for PIC. */
operands[11] = flag_pic
diff --git a/gcc/config/darwin.c b/gcc/config/darwin.c
index 15efaa3d8a7..10cbdc39a3f 100644
--- a/gcc/config/darwin.c
+++ b/gcc/config/darwin.c
@@ -687,7 +687,7 @@ machopic_indirect_data_reference (rtx orig, rtx reg)
orig = machopic_indirect_data_reference (XEXP (orig, 1),
(base == reg ? 0 : reg));
if (MACHOPIC_INDIRECT && (GET_CODE (orig) == CONST_INT))
- result = plus_constant (base, INTVAL (orig));
+ result = plus_constant (Pmode, base, INTVAL (orig));
else
result = gen_rtx_PLUS (Pmode, base, orig);
@@ -972,7 +972,7 @@ machopic_legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
Pmode, (base == reg ? 0 : reg));
if (GET_CODE (orig) == CONST_INT)
{
- pic_ref = plus_constant (base, INTVAL (orig));
+ pic_ref = plus_constant (Pmode, base, INTVAL (orig));
is_complex = 1;
}
else
diff --git a/gcc/config/epiphany/epiphany.c b/gcc/config/epiphany/epiphany.c
index 422fe2fe795..f1a8db76353 100644
--- a/gcc/config/epiphany/epiphany.c
+++ b/gcc/config/epiphany/epiphany.c
@@ -1555,7 +1555,8 @@ epiphany_emit_save_restore (int min, int limit, rtx addr, int epilogue_p)
if (current_frame_info.first_slot_size > UNITS_PER_WORD)
{
mode = DImode;
- addr = plus_constant (addr, - (HOST_WIDE_INT) UNITS_PER_WORD);
+ addr = plus_constant (Pmode, addr,
+ - (HOST_WIDE_INT) UNITS_PER_WORD);
}
if (i-- < min || !epilogue_p)
goto next_slot;
@@ -1588,7 +1589,8 @@ epiphany_emit_save_restore (int min, int limit, rtx addr, int epilogue_p)
{
mode = DImode;
i++;
- addr = plus_constant (addr, - (HOST_WIDE_INT) UNITS_PER_WORD);
+ addr = plus_constant (Pmode, addr,
+ - (HOST_WIDE_INT) UNITS_PER_WORD);
}
/* If it fits in the following stack slot pair, that's fine, too. */
else if (GET_CODE (addr) == PLUS && (stack_offset & 7) == 4
@@ -1603,7 +1605,8 @@ epiphany_emit_save_restore (int min, int limit, rtx addr, int epilogue_p)
skipped_mem = gen_mem (mode, addr);
mode = DImode;
i++;
- addr = plus_constant (addr, - (HOST_WIDE_INT) 2 * UNITS_PER_WORD);
+ addr = plus_constant (Pmode, addr,
+ - (HOST_WIDE_INT) 2 * UNITS_PER_WORD);
}
}
reg = gen_rtx_REG (mode, n);
@@ -1621,7 +1624,7 @@ epiphany_emit_save_restore (int min, int limit, rtx addr, int epilogue_p)
continue;
}
next_slot:
- addr = plus_constant (addr, - (HOST_WIDE_INT) UNITS_PER_WORD);
+ addr = plus_constant (Pmode, addr, -(HOST_WIDE_INT) UNITS_PER_WORD);
stack_offset -= GET_MODE_SIZE (mode);
}
}
@@ -1646,7 +1649,7 @@ epiphany_expand_prologue (void)
if (interrupt_p)
{
- addr = plus_constant (stack_pointer_rtx,
+ addr = plus_constant (Pmode, stack_pointer_rtx,
- (HOST_WIDE_INT) 2 * UNITS_PER_WORD);
if (!lookup_attribute ("forwarder_section",
DECL_ATTRIBUTES (current_function_decl))
@@ -1663,13 +1666,13 @@ epiphany_expand_prologue (void)
frame_insn (gen_stack_adjust_add (off, mem));
if (!epiphany_uninterruptible_p (current_function_decl))
emit_insn (gen_gie ());
- addr = plus_constant (stack_pointer_rtx,
+ addr = plus_constant (Pmode, stack_pointer_rtx,
current_frame_info.first_slot_offset
- (HOST_WIDE_INT) 3 * UNITS_PER_WORD);
}
else
{
- addr = plus_constant (stack_pointer_rtx,
+ addr = plus_constant (Pmode, stack_pointer_rtx,
epiphany_stack_offset
- (HOST_WIDE_INT) UNITS_PER_WORD);
epiphany_emit_save_restore (0, current_frame_info.small_threshold,
@@ -1689,7 +1692,8 @@ epiphany_expand_prologue (void)
(gen_frame_mem (mode, stack_pointer_rtx),
gen_rtx_REG (mode, current_frame_info.first_slot),
off, mem));
- addr = plus_constant (addr, current_frame_info.first_slot_offset);
+ addr = plus_constant (Pmode, addr,
+ current_frame_info.first_slot_offset);
}
}
epiphany_emit_save_restore (current_frame_info.small_threshold,
@@ -1718,7 +1722,7 @@ epiphany_expand_prologue (void)
else if (current_frame_info.last_slot_offset)
{
mem = gen_frame_mem (BLKmode,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode, stack_pointer_rtx,
current_frame_info.last_slot_offset));
off = GEN_INT (-current_frame_info.last_slot_offset);
if (!SIMM11 (INTVAL (off)))
@@ -1797,7 +1801,7 @@ epiphany_expand_epilogue (int sibcall_p)
restore_offset = (interrupt_p
? - 3 * UNITS_PER_WORD
: epiphany_stack_offset - (HOST_WIDE_INT) UNITS_PER_WORD);
- addr = plus_constant (stack_pointer_rtx,
+ addr = plus_constant (Pmode, stack_pointer_rtx,
(current_frame_info.first_slot_offset
+ restore_offset));
epiphany_emit_save_restore (current_frame_info.small_threshold,
@@ -1832,12 +1836,12 @@ epiphany_expand_epilogue (int sibcall_p)
gen_rtx_REG (SImode, GPR_0));
emit_move_insn (gen_rtx_REG (word_mode, IRET_REGNUM),
gen_rtx_REG (SImode, GPR_0+1));
- addr = plus_constant (stack_pointer_rtx,
+ addr = plus_constant (Pmode, stack_pointer_rtx,
- (HOST_WIDE_INT) 2 * UNITS_PER_WORD);
emit_move_insn (gen_rtx_REG (DImode, GPR_0),
gen_frame_mem (DImode, addr));
}
- addr = plus_constant (stack_pointer_rtx,
+ addr = plus_constant (Pmode, stack_pointer_rtx,
epiphany_stack_offset - (HOST_WIDE_INT) UNITS_PER_WORD);
epiphany_emit_save_restore (0, current_frame_info.small_threshold, addr, 1);
if (!sibcall_p)
@@ -2181,19 +2185,19 @@ epiphany_trampoline_init (rtx tramp_mem, tree fndecl, rtx cxt)
rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
rtx tramp = force_reg (Pmode, XEXP (tramp_mem, 0));
- emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 0)),
+ emit_move_insn (gen_rtx_MEM (SImode, plus_constant (Pmode, tramp, 0)),
gen_rtx_IOR (SImode, GEN_INT (0x4002000b),
EPIPHANY_LOW_RTX (fnaddr)));
- emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
+ emit_move_insn (gen_rtx_MEM (SImode, plus_constant (Pmode, tramp, 4)),
gen_rtx_IOR (SImode, GEN_INT (0x5002000b),
EPIPHANY_HIGH_RTX (fnaddr)));
- emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
+ emit_move_insn (gen_rtx_MEM (SImode, plus_constant (Pmode, tramp, 8)),
gen_rtx_IOR (SImode, GEN_INT (0x2002800b),
EPIPHANY_LOW_RTX (cxt)));
- emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
+ emit_move_insn (gen_rtx_MEM (SImode, plus_constant (Pmode, tramp, 12)),
gen_rtx_IOR (SImode, GEN_INT (0x3002800b),
EPIPHANY_HIGH_RTX (cxt)));
- emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 16)),
+ emit_move_insn (gen_rtx_MEM (SImode, plus_constant (Pmode, tramp, 16)),
GEN_INT (0x0802014f));
}
diff --git a/gcc/config/epiphany/epiphany.md b/gcc/config/epiphany/epiphany.md
index b192153ccf4..22863e8150e 100644
--- a/gcc/config/epiphany/epiphany.md
+++ b/gcc/config/epiphany/epiphany.md
@@ -265,7 +265,7 @@
rtx addr
= (frame_pointer_needed ? hard_frame_pointer_rtx : stack_pointer_rtx);
- addr = plus_constant (addr, MACHINE_FUNCTION (cfun)->lr_slot_offset);
+ addr = plus_constant (Pmode, addr, MACHINE_FUNCTION (cfun)->lr_slot_offset);
operands[1] = gen_frame_mem (SImode, addr);
})
@@ -373,12 +373,12 @@
if (post_modify_operand (operands[0], <MODE>mode))
operands[2]
= change_address (operands[2], VOIDmode,
- plus_constant (XEXP (XEXP (operands[0], 0), 0),
+ plus_constant (Pmode, XEXP (XEXP (operands[0], 0), 0),
UNITS_PER_WORD));
if (post_modify_operand (operands[1], <MODE>mode))
operands[3]
= change_address (operands[3], VOIDmode,
- plus_constant (XEXP (XEXP (operands[1], 0), 0),
+ plus_constant (Pmode, XEXP (XEXP (operands[1], 0), 0),
UNITS_PER_WORD));
}
[(set_attr "type" "move,move,load,store")
diff --git a/gcc/config/fr30/fr30.c b/gcc/config/fr30/fr30.c
index e6a3712a3f9..edb8dc4669b 100644
--- a/gcc/config/fr30/fr30.c
+++ b/gcc/config/fr30/fr30.c
@@ -936,7 +936,8 @@ fr30_move_double (rtx * operands)
emit_insn (gen_rtx_SET (VOIDmode, dest0,
adjust_address (src, SImode, 0)));
emit_insn (gen_rtx_SET (SImode, dest1,
- plus_constant (dest1, UNITS_PER_WORD)));
+ plus_constant (SImode, dest1,
+ UNITS_PER_WORD)));
new_mem = gen_rtx_MEM (SImode, dest1);
MEM_COPY_ATTRIBUTES (new_mem, src);
diff --git a/gcc/config/frv/frv.c b/gcc/config/frv/frv.c
index 1354d374396..ace9e437118 100644
--- a/gcc/config/frv/frv.c
+++ b/gcc/config/frv/frv.c
@@ -1586,7 +1586,7 @@ frv_dwarf_store (rtx reg, int offset)
{
rtx set = gen_rtx_SET (VOIDmode,
gen_rtx_MEM (GET_MODE (reg),
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode, stack_pointer_rtx,
offset)),
reg);
RTX_FRAME_RELATED_P (set) = 1;
@@ -1821,9 +1821,9 @@ frv_expand_prologue (void)
/* ASM_SRC and DWARF_SRC both point to the frame header. ASM_SRC is
based on ACCESSOR.BASE but DWARF_SRC is always based on the stack
pointer. */
- rtx asm_src = plus_constant (accessor.base,
+ rtx asm_src = plus_constant (Pmode, accessor.base,
fp_offset - accessor.base_offset);
- rtx dwarf_src = plus_constant (sp, fp_offset);
+ rtx dwarf_src = plus_constant (Pmode, sp, fp_offset);
/* Store the old frame pointer at (sp + FP_OFFSET). */
frv_frame_access (&accessor, fp, fp_offset);
@@ -2272,8 +2272,8 @@ frv_expand_block_move (rtx operands[])
}
else
{
- src_addr = plus_constant (src_reg, offset);
- dest_addr = plus_constant (dest_reg, offset);
+ src_addr = plus_constant (Pmode, src_reg, offset);
+ dest_addr = plus_constant (Pmode, dest_reg, offset);
}
/* Generate the appropriate load and store, saving the stores
@@ -2357,7 +2357,7 @@ frv_expand_block_clear (rtx operands[])
/* Calculate the correct offset for src/dest. */
dest_addr = ((offset == 0)
? dest_reg
- : plus_constant (dest_reg, offset));
+ : plus_constant (Pmode, dest_reg, offset));
/* Generate the appropriate store of gr0. */
if (bytes >= 4 && align >= 4)
@@ -2471,7 +2471,7 @@ frv_return_addr_rtx (int count, rtx frame)
if (count != 0)
return const0_rtx;
cfun->machine->frame_needed = 1;
- return gen_rtx_MEM (Pmode, plus_constant (frame, 8));
+ return gen_rtx_MEM (Pmode, plus_constant (Pmode, frame, 8));
}
/* Given a memory reference MEMREF, interpret the referenced memory as
@@ -2489,7 +2489,8 @@ frv_index_memory (rtx memref, enum machine_mode mode, int index)
if (GET_CODE (base) == PRE_MODIFY)
base = XEXP (base, 0);
return change_address (memref, mode,
- plus_constant (base, index * GET_MODE_SIZE (mode)));
+ plus_constant (Pmode, base,
+ index * GET_MODE_SIZE (mode)));
}
@@ -3741,7 +3742,8 @@ static void
frv_output_const_unspec (FILE *stream, const struct frv_unspec *unspec)
{
fprintf (stream, "#%s(", unspec_got_name (unspec->reloc));
- output_addr_const (stream, plus_constant (unspec->symbol, unspec->offset));
+ output_addr_const (stream, plus_constant (Pmode, unspec->symbol,
+ unspec->offset));
fputs (")", stream);
}
@@ -3756,7 +3758,7 @@ frv_find_base_term (rtx x)
if (frv_const_unspec_p (x, &unspec)
&& frv_small_data_reloc_p (unspec.symbol, unspec.reloc))
- return plus_constant (unspec.symbol, unspec.offset);
+ return plus_constant (Pmode, unspec.symbol, unspec.offset);
return x;
}
@@ -9647,7 +9649,7 @@ frv_output_dwarf_dtprel (FILE *file, int size, rtx x)
fputs ("\t.picptr\ttlsmoff(", file);
/* We want the unbiased TLS offset, so add the bias to the
expression, such that the implicit biasing cancels out. */
- output_addr_const (file, plus_constant (x, TLS_BIAS));
+ output_addr_const (file, plus_constant (Pmode, x, TLS_BIAS));
fputs (")", file);
}
diff --git a/gcc/config/h8300/h8300.c b/gcc/config/h8300/h8300.c
index 7eaaf202f00..f165fdf66f6 100644
--- a/gcc/config/h8300/h8300.c
+++ b/gcc/config/h8300/h8300.c
@@ -678,12 +678,13 @@ h8300_push_pop (int regno, int nregs, bool pop_p, bool return_p)
/* Register REGNO + NREGS - 1 is popped first. Before the
stack adjustment, its slot is at address @sp. */
lhs = gen_rtx_REG (SImode, regno + j);
- rhs = gen_rtx_MEM (SImode, plus_constant (sp, (nregs - j - 1) * 4));
+ rhs = gen_rtx_MEM (SImode, plus_constant (Pmode, sp,
+ (nregs - j - 1) * 4));
}
else
{
/* Register REGNO is pushed first and will be stored at @(-4,sp). */
- lhs = gen_rtx_MEM (SImode, plus_constant (sp, (j + 1) * -4));
+ lhs = gen_rtx_MEM (SImode, plus_constant (Pmode, sp, (j + 1) * -4));
rhs = gen_rtx_REG (SImode, regno + j);
}
RTVEC_ELT (vec, i + j) = gen_rtx_SET (VOIDmode, lhs, rhs);
@@ -2002,7 +2003,8 @@ h8300_return_addr_rtx (int count, rtx frame)
else
ret = gen_rtx_MEM (Pmode,
memory_address (Pmode,
- plus_constant (frame, UNITS_PER_WORD)));
+ plus_constant (Pmode, frame,
+ UNITS_PER_WORD)));
set_mem_alias_set (ret, get_frame_alias_set ());
return ret;
}
@@ -2719,17 +2721,17 @@ h8300_swap_into_er6 (rtx addr)
rtx insn = push (HARD_FRAME_POINTER_REGNUM);
if (frame_pointer_needed)
add_reg_note (insn, REG_CFA_DEF_CFA,
- plus_constant (gen_rtx_MEM (Pmode, stack_pointer_rtx),
+ plus_constant (Pmode, gen_rtx_MEM (Pmode, stack_pointer_rtx),
2 * UNITS_PER_WORD));
else
add_reg_note (insn, REG_CFA_ADJUST_CFA,
gen_rtx_SET (VOIDmode, stack_pointer_rtx,
- plus_constant (stack_pointer_rtx, 4)));
+ plus_constant (Pmode, stack_pointer_rtx, 4)));
emit_move_insn (hard_frame_pointer_rtx, addr);
if (REGNO (addr) == SP_REG)
emit_move_insn (hard_frame_pointer_rtx,
- plus_constant (hard_frame_pointer_rtx,
+ plus_constant (Pmode, hard_frame_pointer_rtx,
GET_MODE_SIZE (word_mode)));
}
@@ -2748,11 +2750,12 @@ h8300_swap_out_of_er6 (rtx addr)
RTX_FRAME_RELATED_P (insn) = 1;
if (frame_pointer_needed)
add_reg_note (insn, REG_CFA_DEF_CFA,
- plus_constant (hard_frame_pointer_rtx, 2 * UNITS_PER_WORD));
+ plus_constant (Pmode, hard_frame_pointer_rtx,
+ 2 * UNITS_PER_WORD));
else
add_reg_note (insn, REG_CFA_ADJUST_CFA,
gen_rtx_SET (VOIDmode, stack_pointer_rtx,
- plus_constant (stack_pointer_rtx, -4)));
+ plus_constant (Pmode, stack_pointer_rtx, -4)));
}
/* Return the length of mov instruction. */
diff --git a/gcc/config/i386/cpuid.h b/gcc/config/i386/cpuid.h
index 6696b7a7303..a9d25c565cf 100644
--- a/gcc/config/i386/cpuid.h
+++ b/gcc/config/i386/cpuid.h
@@ -66,6 +66,7 @@
/* Extended Features (%eax == 7) */
#define bit_FSGSBASE (1 << 0)
#define bit_BMI (1 << 3)
+#define bit_HLE (1 << 4)
#define bit_AVX2 (1 << 5)
#define bit_BMI2 (1 << 8)
#define bit_RTM (1 << 11)
diff --git a/gcc/config/i386/driver-i386.c b/gcc/config/i386/driver-i386.c
index 09de555bc6e..8fe7ab828b4 100644
--- a/gcc/config/i386/driver-i386.c
+++ b/gcc/config/i386/driver-i386.c
@@ -397,6 +397,7 @@ const char *host_detect_local_cpu (int argc, const char **argv)
unsigned int has_pclmul = 0, has_abm = 0, has_lwp = 0;
unsigned int has_fma = 0, has_fma4 = 0, has_xop = 0;
unsigned int has_bmi = 0, has_bmi2 = 0, has_tbm = 0, has_lzcnt = 0;
+ unsigned int has_hle = 0;
bool arch;
@@ -456,6 +457,7 @@ const char *host_detect_local_cpu (int argc, const char **argv)
__cpuid_count (7, 0, eax, ebx, ecx, edx);
has_bmi = ebx & bit_BMI;
+ has_hle = ebx & bit_HLE;
has_avx2 = ebx & bit_AVX2;
has_bmi2 = ebx & bit_BMI2;
}
@@ -472,6 +474,8 @@ const char *host_detect_local_cpu (int argc, const char **argv)
has_abm = ecx & bit_ABM;
has_lwp = ecx & bit_LWP;
has_fma4 = ecx & bit_FMA4;
+ if (vendor == SIG_AMD && has_fma4 && has_fma)
+ has_fma4 = 0;
has_xop = ecx & bit_XOP;
has_tbm = ecx & bit_TBM;
has_lzcnt = ecx & bit_LZCNT;
@@ -726,10 +730,12 @@ const char *host_detect_local_cpu (int argc, const char **argv)
const char *sse4_2 = has_sse4_2 ? " -msse4.2" : " -mno-sse4.2";
const char *sse4_1 = has_sse4_1 ? " -msse4.1" : " -mno-sse4.1";
const char *lzcnt = has_lzcnt ? " -mlzcnt" : " -mno-lzcnt";
+ const char *hle = has_hle ? " -mhle" : " -mno-hle";
options = concat (options, cx16, sahf, movbe, ase, pclmul,
popcnt, abm, lwp, fma, fma4, xop, bmi, bmi2,
- tbm, avx, avx2, sse4_2, sse4_1, lzcnt, NULL);
+ tbm, avx, avx2, sse4_2, sse4_1, lzcnt,
+ hle, NULL);
}
done:
diff --git a/gcc/config/i386/i386-c.c b/gcc/config/i386/i386-c.c
index 49fd4d92ca8..23427bf034f 100644
--- a/gcc/config/i386/i386-c.c
+++ b/gcc/config/i386/i386-c.c
@@ -1,5 +1,5 @@
/* Subroutines used for macro/preprocessor support on the ia-32.
- Copyright (C) 2008, 2009, 2010
+ Copyright (C) 2008, 2009, 2010, 2011, 2012
Free Software Foundation, Inc.
This file is part of GCC.
@@ -396,6 +396,9 @@ ix86_target_macros (void)
builtin_define_std ("i386");
}
+ cpp_define_formatted (parse_in, "__ATOMIC_HLE_ACQUIRE=%d", IX86_HLE_ACQUIRE);
+ cpp_define_formatted (parse_in, "__ATOMIC_HLE_RELEASE=%d", IX86_HLE_RELEASE);
+
ix86_target_macros_internal (ix86_isa_flags,
ix86_arch,
ix86_tune,
diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index 903683e3500..f09b2bb3289 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -2190,7 +2190,7 @@ unsigned char ix86_arch_features[X86_ARCH_LAST];
/* Feature tests against the various architecture variations, used to create
ix86_arch_features based on the processor mask. */
static unsigned int initial_ix86_arch_features[X86_ARCH_LAST] = {
- /* X86_ARCH_CMOVE: Conditional move was added for pentiumpro. */
+ /* X86_ARCH_CMOV: Conditional move was added for pentiumpro. */
~(m_386 | m_486 | m_PENT | m_K6),
/* X86_ARCH_CMPXCHG: Compare and exchange was added for 80486. */
@@ -2679,6 +2679,7 @@ ix86_target_string (HOST_WIDE_INT isa, int flags, const char *arch,
{ "-mbmi", OPTION_MASK_ISA_BMI },
{ "-mbmi2", OPTION_MASK_ISA_BMI2 },
{ "-mlzcnt", OPTION_MASK_ISA_LZCNT },
+ { "-mhle", OPTION_MASK_ISA_HLE },
{ "-mtbm", OPTION_MASK_ISA_TBM },
{ "-mpopcnt", OPTION_MASK_ISA_POPCNT },
{ "-mmovbe", OPTION_MASK_ISA_MOVBE },
@@ -2954,6 +2955,7 @@ ix86_option_override_internal (bool main_args_p)
#define PTA_AVX2 (HOST_WIDE_INT_1 << 30)
#define PTA_BMI2 (HOST_WIDE_INT_1 << 31)
#define PTA_RTM (HOST_WIDE_INT_1 << 32)
+#define PTA_HLE (HOST_WIDE_INT_1 << 33)
/* if this reaches 64, need to widen struct pta flags below */
static struct pta
@@ -3012,7 +3014,7 @@ ix86_option_override_internal (bool main_args_p)
| PTA_SSSE3 | PTA_SSE4_1 | PTA_SSE4_2 | PTA_AVX | PTA_AVX2
| PTA_CX16 | PTA_POPCNT | PTA_AES | PTA_PCLMUL | PTA_FSGSBASE
| PTA_RDRND | PTA_F16C | PTA_BMI | PTA_BMI2 | PTA_LZCNT
- | PTA_FMA | PTA_MOVBE | PTA_RTM},
+ | PTA_FMA | PTA_MOVBE | PTA_RTM | PTA_HLE},
{"atom", PROCESSOR_ATOM, CPU_ATOM,
PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
| PTA_SSSE3 | PTA_CX16 | PTA_MOVBE},
@@ -3075,9 +3077,10 @@ ix86_option_override_internal (bool main_args_p)
PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
| PTA_SSSE3 | PTA_SSE4A |PTA_ABM | PTA_CX16},
{"generic32", PROCESSOR_GENERIC32, CPU_PENTIUMPRO,
- 0 /* flags are only used for -march switch. */ },
+ PTA_HLE /* flags are only used for -march switch. */ },
{"generic64", PROCESSOR_GENERIC64, CPU_GENERIC64,
- PTA_64BIT /* flags are only used for -march switch. */ },
+ PTA_64BIT
+ | PTA_HLE /* flags are only used for -march switch. */ },
};
/* -mrecip options. */
@@ -3430,6 +3433,9 @@ ix86_option_override_internal (bool main_args_p)
if (processor_alias_table[i].flags & PTA_RTM
&& !(ix86_isa_flags_explicit & OPTION_MASK_ISA_RTM))
ix86_isa_flags |= OPTION_MASK_ISA_RTM;
+ if (processor_alias_table[i].flags & PTA_HLE
+ && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_HLE))
+ ix86_isa_flags |= OPTION_MASK_ISA_HLE;
if (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE))
x86_prefetch_sse = true;
@@ -3498,7 +3504,7 @@ ix86_option_override_internal (bool main_args_p)
-mtune (rather than -march) points us to a processor that has them.
However, the VIA C3 gives a SIGILL, so we only do that for i686 and
higher processors. */
- if (TARGET_CMOVE
+ if (TARGET_CMOV
&& (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE)))
x86_prefetch_sse = true;
break;
@@ -3774,12 +3780,6 @@ ix86_option_override_internal (bool main_args_p)
target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
}
- /* For sane SSE instruction set generation we need fcomi instruction.
- It is safe to enable all CMOVE instructions. Also, RDRAND intrinsic
- expands to a sequence that includes conditional move. */
- if (TARGET_SSE || TARGET_RDRND)
- TARGET_CMOVE = 1;
-
/* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
{
char *p;
@@ -4251,6 +4251,7 @@ ix86_valid_target_attribute_inner_p (tree args, char *p_strings[],
IX86_ATTR_ISA ("rdrnd", OPT_mrdrnd),
IX86_ATTR_ISA ("f16c", OPT_mf16c),
IX86_ATTR_ISA ("rtm", OPT_mrtm),
+ IX86_ATTR_ISA ("hle", OPT_mhle),
/* enum options */
IX86_ATTR_ENUM ("fpmath=", OPT_mfpmath_),
@@ -7698,7 +7699,7 @@ setup_incoming_varargs_64 (CUMULATIVE_ARGS *cum)
for (i = cum->regno; i < max; i++)
{
mem = gen_rtx_MEM (word_mode,
- plus_constant (save_area, i * UNITS_PER_WORD));
+ plus_constant (Pmode, save_area, i * UNITS_PER_WORD));
MEM_NOTRAP_P (mem) = 1;
set_mem_alias_set (mem, set);
emit_move_insn (mem,
@@ -7734,7 +7735,8 @@ setup_incoming_varargs_64 (CUMULATIVE_ARGS *cum)
for (i = cum->sse_regno; i < max; ++i)
{
- mem = plus_constant (save_area, i * 16 + ix86_varargs_gpr_size);
+ mem = plus_constant (Pmode, save_area,
+ i * 16 + ix86_varargs_gpr_size);
mem = gen_rtx_MEM (smode, mem);
MEM_NOTRAP_P (mem) = 1;
set_mem_alias_set (mem, set);
@@ -7763,7 +7765,7 @@ setup_incoming_varargs_ms_64 (CUMULATIVE_ARGS *cum)
rtx reg, mem;
mem = gen_rtx_MEM (Pmode,
- plus_constant (virtual_incoming_args_rtx,
+ plus_constant (Pmode, virtual_incoming_args_rtx,
i * UNITS_PER_WORD));
MEM_NOTRAP_P (mem) = 1;
set_mem_alias_set (mem, set);
@@ -8431,20 +8433,16 @@ standard_sse_constant_opcode (rtx insn, rtx x)
switch (get_attr_mode (insn))
{
case MODE_TI:
- if (!TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
- return "%vpxor\t%0, %d0";
+ return "%vpxor\t%0, %d0";
case MODE_V2DF:
- if (!TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
- return "%vxorpd\t%0, %d0";
+ return "%vxorpd\t%0, %d0";
case MODE_V4SF:
return "%vxorps\t%0, %d0";
case MODE_OI:
- if (!TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
- return "vpxor\t%x0, %x0, %x0";
+ return "vpxor\t%x0, %x0, %x0";
case MODE_V4DF:
- if (!TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
- return "vxorpd\t%x0, %x0, %x0";
+ return "vxorpd\t%x0, %x0, %x0";
case MODE_V8SF:
return "vxorps\t%x0, %x0, %x0";
@@ -8612,6 +8610,7 @@ ix86_code_end (void)
NULL_TREE, void_type_node);
TREE_PUBLIC (decl) = 1;
TREE_STATIC (decl) = 1;
+ DECL_IGNORED_P (decl) = 1;
#if TARGET_MACHO
if (TARGET_MACHO)
@@ -9231,7 +9230,7 @@ choose_baseaddr (HOST_WIDE_INT cfa_offset)
}
gcc_assert (base_reg != NULL);
- return plus_constant (base_reg, base_offset);
+ return plus_constant (Pmode, base_reg, base_offset);
}
/* Emit code to save registers in the prologue. */
@@ -9286,7 +9285,7 @@ ix86_emit_save_reg_using_mov (enum machine_mode mode, unsigned int regno,
the re-aligned stack frame, which provides us with a copy
of the CFA that will last past the prologue. Install it. */
gcc_checking_assert (cfun->machine->fs.fp_valid);
- addr = plus_constant (hard_frame_pointer_rtx,
+ addr = plus_constant (Pmode, hard_frame_pointer_rtx,
cfun->machine->fs.fp_offset - cfa_offset);
mem = gen_rtx_MEM (mode, addr);
add_reg_note (insn, REG_CFA_DEF_CFA, mem);
@@ -9296,7 +9295,7 @@ ix86_emit_save_reg_using_mov (enum machine_mode mode, unsigned int regno,
/* The frame pointer is a stable reference within the
aligned frame. Use it. */
gcc_checking_assert (cfun->machine->fs.fp_valid);
- addr = plus_constant (hard_frame_pointer_rtx,
+ addr = plus_constant (Pmode, hard_frame_pointer_rtx,
cfun->machine->fs.fp_offset - cfa_offset);
mem = gen_rtx_MEM (mode, addr);
add_reg_note (insn, REG_CFA_EXPRESSION,
@@ -9309,7 +9308,8 @@ ix86_emit_save_reg_using_mov (enum machine_mode mode, unsigned int regno,
use by the unwind info. */
else if (base != m->fs.cfa_reg)
{
- addr = plus_constant (m->fs.cfa_reg, m->fs.cfa_offset - cfa_offset);
+ addr = plus_constant (Pmode, m->fs.cfa_reg,
+ m->fs.cfa_offset - cfa_offset);
mem = gen_rtx_MEM (mode, addr);
add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (VOIDmode, mem, reg));
}
@@ -9755,7 +9755,8 @@ ix86_adjust_stack_and_probe (const HOST_WIDE_INT size)
adjust = PROBE_INTERVAL;
emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
- plus_constant (stack_pointer_rtx, -adjust)));
+ plus_constant (Pmode, stack_pointer_rtx,
+ -adjust)));
emit_stack_probe (stack_pointer_rtx);
}
@@ -9765,12 +9766,13 @@ ix86_adjust_stack_and_probe (const HOST_WIDE_INT size)
adjust = size + PROBE_INTERVAL - i;
emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
- plus_constant (stack_pointer_rtx, -adjust)));
+ plus_constant (Pmode, stack_pointer_rtx,
+ -adjust)));
emit_stack_probe (stack_pointer_rtx);
/* Adjust back to account for the additional first interval. */
last = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode, stack_pointer_rtx,
PROBE_INTERVAL + dope)));
}
@@ -9796,7 +9798,7 @@ ix86_adjust_stack_and_probe (const HOST_WIDE_INT size)
/* SP = SP_0 + PROBE_INTERVAL. */
emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode, stack_pointer_rtx,
- (PROBE_INTERVAL + dope))));
/* LAST_ADDR = SP_0 + PROBE_INTERVAL + ROUNDED_SIZE. */
@@ -9826,14 +9828,14 @@ ix86_adjust_stack_and_probe (const HOST_WIDE_INT size)
if (size != rounded_size)
{
emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode, stack_pointer_rtx,
rounded_size - size)));
emit_stack_probe (stack_pointer_rtx);
}
/* Adjust back to account for the additional first interval. */
last = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode, stack_pointer_rtx,
PROBE_INTERVAL + dope)));
release_scratch_register_on_entry (&sr);
@@ -9849,10 +9851,10 @@ ix86_adjust_stack_and_probe (const HOST_WIDE_INT size)
rtx expr = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (2));
XVECEXP (expr, 0, 0)
= gen_rtx_SET (VOIDmode, stack_pointer_rtx,
- plus_constant (stack_pointer_rtx, -size));
+ plus_constant (Pmode, stack_pointer_rtx, -size));
XVECEXP (expr, 0, 1)
= gen_rtx_SET (VOIDmode, stack_pointer_rtx,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode, stack_pointer_rtx,
PROBE_INTERVAL + dope + size));
add_reg_note (last, REG_FRAME_RELATED_EXPR, expr);
RTX_FRAME_RELATED_P (last) = 1;
@@ -9921,9 +9923,11 @@ ix86_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
it exceeds SIZE. If only one probe is needed, this will not
generate any code. Then probe at FIRST + SIZE. */
for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
- emit_stack_probe (plus_constant (stack_pointer_rtx, -(first + i)));
+ emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
+ -(first + i)));
- emit_stack_probe (plus_constant (stack_pointer_rtx, -(first + size)));
+ emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
+ -(first + size)));
}
/* Otherwise, do the same as above, but in a loop. Note that we must be
@@ -9971,7 +9975,8 @@ ix86_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
that SIZE is equal to ROUNDED_SIZE. */
if (size != rounded_size)
- emit_stack_probe (plus_constant (gen_rtx_PLUS (Pmode,
+ emit_stack_probe (plus_constant (Pmode,
+ gen_rtx_PLUS (Pmode,
stack_pointer_rtx,
sr.reg),
rounded_size - size));
@@ -10224,7 +10229,7 @@ ix86_expand_prologue (void)
/* We don't want to interpret this push insn as a register save,
only as a stack adjustment. The real copy of the register as
a save will be done later, if needed. */
- t = plus_constant (stack_pointer_rtx, -UNITS_PER_WORD);
+ t = plus_constant (Pmode, stack_pointer_rtx, -UNITS_PER_WORD);
t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
add_reg_note (insn, REG_CFA_ADJUST_CFA, t);
RTX_FRAME_RELATED_P (insn) = 1;
@@ -10245,7 +10250,7 @@ ix86_expand_prologue (void)
}
/* Grab the argument pointer. */
- t = plus_constant (stack_pointer_rtx, m->fs.sp_offset);
+ t = plus_constant (Pmode, stack_pointer_rtx, m->fs.sp_offset);
insn = emit_insn (gen_rtx_SET (VOIDmode, crtl->drap_reg, t));
RTX_FRAME_RELATED_P (insn) = 1;
m->fs.cfa_reg = crtl->drap_reg;
@@ -10261,7 +10266,7 @@ ix86_expand_prologue (void)
address can be reached via (argp - 1) slot. This is needed
to implement macro RETURN_ADDR_RTX and intrinsic function
expand_builtin_return_addr etc. */
- t = plus_constant (crtl->drap_reg, -UNITS_PER_WORD);
+ t = plus_constant (Pmode, crtl->drap_reg, -UNITS_PER_WORD);
t = gen_frame_mem (word_mode, t);
insn = emit_insn (gen_push (t));
RTX_FRAME_RELATED_P (insn) = 1;
@@ -10451,7 +10456,7 @@ ix86_expand_prologue (void)
RTX_FRAME_RELATED_P (insn) = 1;
add_reg_note (insn, REG_FRAME_RELATED_EXPR,
gen_rtx_SET (VOIDmode, stack_pointer_rtx,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode, stack_pointer_rtx,
-allocate)));
}
m->fs.sp_offset += allocate;
@@ -10606,7 +10611,7 @@ ix86_emit_restore_reg_using_pop (rtx reg)
if (m->fs.cfa_reg == stack_pointer_rtx)
{
- rtx x = plus_constant (stack_pointer_rtx, UNITS_PER_WORD);
+ rtx x = plus_constant (Pmode, stack_pointer_rtx, UNITS_PER_WORD);
x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
add_reg_note (insn, REG_CFA_ADJUST_CFA, x);
RTX_FRAME_RELATED_P (insn) = 1;
@@ -10668,7 +10673,8 @@ ix86_emit_leave (void)
m->fs.cfa_offset = m->fs.sp_offset;
add_reg_note (insn, REG_CFA_DEF_CFA,
- plus_constant (stack_pointer_rtx, m->fs.sp_offset));
+ plus_constant (Pmode, stack_pointer_rtx,
+ m->fs.sp_offset));
RTX_FRAME_RELATED_P (insn) = 1;
}
ix86_add_cfa_restore_note (insn, hard_frame_pointer_rtx,
@@ -10881,7 +10887,7 @@ ix86_expand_epilogue (int style)
if (frame_pointer_needed)
{
t = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
- t = plus_constant (t, m->fs.fp_offset - UNITS_PER_WORD);
+ t = plus_constant (Pmode, t, m->fs.fp_offset - UNITS_PER_WORD);
emit_insn (gen_rtx_SET (VOIDmode, sa, t));
t = gen_frame_mem (Pmode, hard_frame_pointer_rtx);
@@ -10896,7 +10902,7 @@ ix86_expand_epilogue (int style)
bother resetting the CFA to the SP for the duration of
the return insn. */
add_reg_note (insn, REG_CFA_DEF_CFA,
- plus_constant (sa, UNITS_PER_WORD));
+ plus_constant (Pmode, sa, UNITS_PER_WORD));
ix86_add_queued_cfa_restore_notes (insn);
add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx);
RTX_FRAME_RELATED_P (insn) = 1;
@@ -10911,7 +10917,7 @@ ix86_expand_epilogue (int style)
else
{
t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
- t = plus_constant (t, m->fs.sp_offset - UNITS_PER_WORD);
+ t = plus_constant (Pmode, t, m->fs.sp_offset - UNITS_PER_WORD);
insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, t));
ix86_add_queued_cfa_restore_notes (insn);
@@ -10920,7 +10926,7 @@ ix86_expand_epilogue (int style)
{
m->fs.cfa_offset = UNITS_PER_WORD;
add_reg_note (insn, REG_CFA_DEF_CFA,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode, stack_pointer_rtx,
UNITS_PER_WORD));
RTX_FRAME_RELATED_P (insn) = 1;
}
@@ -12585,7 +12591,7 @@ legitimize_pic_address (rtx orig, rtx reg)
base == reg ? NULL_RTX : reg);
if (CONST_INT_P (new_rtx))
- new_rtx = plus_constant (base, INTVAL (new_rtx));
+ new_rtx = plus_constant (Pmode, base, INTVAL (new_rtx));
else
{
if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
@@ -13105,7 +13111,8 @@ ix86_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
x = gen_rtx_PLUS (Pmode,
gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
XEXP (XEXP (XEXP (x, 0), 1), 0)),
- plus_constant (other, INTVAL (constant)));
+ plus_constant (Pmode, other,
+ INTVAL (constant)));
}
}
@@ -14340,6 +14347,24 @@ ix86_print_operand (FILE *file, rtx x, int code)
x = adjust_address_nv (x, DImode, 8);
break;
+ case 'K':
+ gcc_assert (CONST_INT_P (x));
+
+ if (INTVAL (x) & IX86_HLE_ACQUIRE)
+#ifdef HAVE_AS_IX86_HLE
+ fputs ("xacquire ", file);
+#else
+ fputs ("\n" ASM_BYTE "0xf2\n\t", file);
+#endif
+ else if (INTVAL (x) & IX86_HLE_RELEASE)
+#ifdef HAVE_AS_IX86_HLE
+ fputs ("xrelease ", file);
+#else
+ fputs ("\n" ASM_BYTE "0xf3\n\t", file);
+#endif
+ /* We do not want to print value of the operand. */
+ return;
+
case '+':
{
rtx x;
@@ -15878,60 +15903,19 @@ ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
op0 = operands[0];
op1 = operands[1];
- if (TARGET_AVX)
+ if (TARGET_AVX
+ && GET_MODE_SIZE (mode) == 32)
{
switch (GET_MODE_CLASS (mode))
{
case MODE_VECTOR_INT:
case MODE_INT:
- switch (GET_MODE_SIZE (mode))
- {
- case 16:
- if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
- {
- op0 = gen_lowpart (V4SFmode, op0);
- op1 = gen_lowpart (V4SFmode, op1);
- emit_insn (gen_sse_movups (op0, op1));
- }
- else
- {
- op0 = gen_lowpart (V16QImode, op0);
- op1 = gen_lowpart (V16QImode, op1);
- emit_insn (gen_sse2_movdqu (op0, op1));
- }
- break;
- case 32:
- op0 = gen_lowpart (V32QImode, op0);
- op1 = gen_lowpart (V32QImode, op1);
- ix86_avx256_split_vector_move_misalign (op0, op1);
- break;
- default:
- gcc_unreachable ();
- }
- break;
+ op0 = gen_lowpart (V32QImode, op0);
+ op1 = gen_lowpart (V32QImode, op1);
+ /* FALLTHRU */
+
case MODE_VECTOR_FLOAT:
- switch (mode)
- {
- case V4SFmode:
- emit_insn (gen_sse_movups (op0, op1));
- break;
- case V2DFmode:
- if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
- {
- op0 = gen_lowpart (V4SFmode, op0);
- op1 = gen_lowpart (V4SFmode, op1);
- emit_insn (gen_sse_movups (op0, op1));
- }
- else
- emit_insn (gen_sse2_movupd (op0, op1));
- break;
- case V8SFmode:
- case V4DFmode:
- ix86_avx256_split_vector_move_misalign (op0, op1);
- break;
- default:
- gcc_unreachable ();
- }
+ ix86_avx256_split_vector_move_misalign (op0, op1);
break;
default:
@@ -15943,16 +15927,6 @@ ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
if (MEM_P (op1))
{
- /* If we're optimizing for size, movups is the smallest. */
- if (optimize_insn_for_size_p ()
- || TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
- {
- op0 = gen_lowpart (V4SFmode, op0);
- op1 = gen_lowpart (V4SFmode, op1);
- emit_insn (gen_sse_movups (op0, op1));
- return;
- }
-
/* ??? If we have typed data, then it would appear that using
movdqu is the only way to get unaligned data loaded with
integer type. */
@@ -15960,16 +15934,19 @@ ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
{
op0 = gen_lowpart (V16QImode, op0);
op1 = gen_lowpart (V16QImode, op1);
+ /* We will eventually emit movups based on insn attributes. */
emit_insn (gen_sse2_movdqu (op0, op1));
- return;
}
-
- if (TARGET_SSE2 && mode == V2DFmode)
+ else if (TARGET_SSE2 && mode == V2DFmode)
{
rtx zero;
- if (TARGET_SSE_UNALIGNED_LOAD_OPTIMAL)
+ if (TARGET_AVX
+ || TARGET_SSE_UNALIGNED_LOAD_OPTIMAL
+ || TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL
+ || optimize_function_for_size_p (cfun))
{
+ /* We will eventually emit movups based on insn attributes. */
emit_insn (gen_sse2_movupd (op0, op1));
return;
}
@@ -16001,7 +15978,10 @@ ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
}
else
{
- if (TARGET_SSE_UNALIGNED_LOAD_OPTIMAL)
+ if (TARGET_AVX
+ || TARGET_SSE_UNALIGNED_LOAD_OPTIMAL
+ || TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL
+ || optimize_function_for_size_p (cfun))
{
op0 = gen_lowpart (V4SFmode, op0);
op1 = gen_lowpart (V4SFmode, op1);
@@ -16016,6 +15996,7 @@ ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
if (mode != V4SFmode)
op0 = gen_lowpart (V4SFmode, op0);
+
m = adjust_address (op1, V2SFmode, 0);
emit_insn (gen_sse_loadlps (op0, op0, m));
m = adjust_address (op1, V2SFmode, 8);
@@ -16024,30 +16005,20 @@ ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
}
else if (MEM_P (op0))
{
- /* If we're optimizing for size, movups is the smallest. */
- if (optimize_insn_for_size_p ()
- || TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
- {
- op0 = gen_lowpart (V4SFmode, op0);
- op1 = gen_lowpart (V4SFmode, op1);
- emit_insn (gen_sse_movups (op0, op1));
- return;
- }
-
- /* ??? Similar to above, only less clear
- because of typeless stores. */
- if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
- && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
+ if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
{
op0 = gen_lowpart (V16QImode, op0);
op1 = gen_lowpart (V16QImode, op1);
+ /* We will eventually emit movups based on insn attributes. */
emit_insn (gen_sse2_movdqu (op0, op1));
- return;
}
-
- if (TARGET_SSE2 && mode == V2DFmode)
+ else if (TARGET_SSE2 && mode == V2DFmode)
{
- if (TARGET_SSE_UNALIGNED_STORE_OPTIMAL)
+ if (TARGET_AVX
+ || TARGET_SSE_UNALIGNED_STORE_OPTIMAL
+ || TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL
+ || optimize_function_for_size_p (cfun))
+ /* We will eventually emit movups based on insn attributes. */
emit_insn (gen_sse2_movupd (op0, op1));
else
{
@@ -16062,7 +16033,10 @@ ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
if (mode != V4SFmode)
op1 = gen_lowpart (V4SFmode, op1);
- if (TARGET_SSE_UNALIGNED_STORE_OPTIMAL)
+ if (TARGET_AVX
+ || TARGET_SSE_UNALIGNED_STORE_OPTIMAL
+ || TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL
+ || optimize_function_for_size_p (cfun))
{
op0 = gen_lowpart (V4SFmode, op0);
emit_insn (gen_sse_movups (op0, op1));
@@ -18806,6 +18780,11 @@ ix86_expand_int_movcc (rtx operands[])
rtx op0 = XEXP (operands[1], 0);
rtx op1 = XEXP (operands[1], 1);
+ if (GET_MODE (op0) == TImode
+ || (GET_MODE (op0) == DImode
+ && !TARGET_64BIT))
+ return false;
+
start_sequence ();
compare_op = ix86_expand_compare (code, op0, op1);
compare_seq = get_insns ();
@@ -20660,7 +20639,7 @@ ix86_split_long_move (rtx operands[])
/* Compensate for the stack decrement by 4. */
if (!TARGET_64BIT && nparts == 3
&& mode == XFmode && TARGET_128BIT_LONG_DOUBLE)
- src_base = plus_constant (src_base, 4);
+ src_base = plus_constant (Pmode, src_base, 4);
/* src_base refers to the stack pointer and is
automatically decreased by emitted push. */
@@ -20724,7 +20703,7 @@ ix86_split_long_move (rtx operands[])
part[1][0] = replace_equiv_address (part[1][0], base);
for (i = 1; i < nparts; i++)
{
- tmp = plus_constant (base, UNITS_PER_WORD * i);
+ tmp = plus_constant (Pmode, base, UNITS_PER_WORD * i);
part[1][i] = replace_equiv_address (part[1][i], tmp);
}
}
@@ -22340,7 +22319,8 @@ ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp,
sufficiently aligned, maintain aliasing info accurately. */
dst = expand_constant_movmem_prologue (dst, &src, destreg, srcreg,
desired_align, align_bytes);
- count_exp = plus_constant (count_exp, -align_bytes);
+ count_exp = plus_constant (counter_mode (count_exp),
+ count_exp, -align_bytes);
count -= align_bytes;
}
if (need_zero_guard
@@ -22732,7 +22712,8 @@ ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp,
sufficiently aligned, maintain aliasing info accurately. */
dst = expand_constant_setmem_prologue (dst, destreg, promoted_val,
desired_align, align_bytes);
- count_exp = plus_constant (count_exp, -align_bytes);
+ count_exp = plus_constant (counter_mode (count_exp),
+ count_exp, -align_bytes);
count -= align_bytes;
}
if (need_zero_guard
@@ -24439,7 +24420,8 @@ ix86_static_chain (const_tree fndecl, bool incoming_p)
if (fndecl == current_function_decl)
ix86_static_chain_on_stack = true;
return gen_frame_mem (SImode,
- plus_constant (arg_pointer_rtx, -8));
+ plus_constant (Pmode,
+ arg_pointer_rtx, -8));
}
regno = SI_REG;
}
@@ -24561,7 +24543,7 @@ ix86_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
(call-saved) register static chain; this push is 1 byte. */
offset += 5;
disp = expand_binop (SImode, sub_optab, fnaddr,
- plus_constant (XEXP (m_tramp, 0),
+ plus_constant (Pmode, XEXP (m_tramp, 0),
offset - (MEM_P (chain) ? 1 : 0)),
NULL_RTX, 1, OPTAB_DIRECT);
emit_move_insn (mem, disp);
@@ -27679,7 +27661,7 @@ ix86_init_mmx_sse_builtins (void)
}
/* This builds the processor_model struct type defined in
- libgcc/config/i386/i386-cpuinfo.c */
+ libgcc/config/i386/cpuinfo.c */
static tree
build_processor_model_struct (void)
@@ -27739,7 +27721,7 @@ make_var_decl (tree type, const char *name)
}
/* FNDECL is a __builtin_cpu_is or a __builtin_cpu_supports call that is folded
- into an integer defined in libgcc/config/i386/i386-cpuinfo.c */
+ into an integer defined in libgcc/config/i386/cpuinfo.c */
static tree
fold_builtin_cpu (tree fndecl, tree *args)
@@ -27749,8 +27731,7 @@ fold_builtin_cpu (tree fndecl, tree *args)
DECL_FUNCTION_CODE (fndecl);
tree param_string_cst = NULL;
- /* This is the order of bit-fields in __processor_features in
- i386-cpuinfo.c */
+ /* This is the order of bit-fields in __processor_features in cpuinfo.c */
enum processor_features
{
F_CMOV = 0,
@@ -27768,7 +27749,7 @@ fold_builtin_cpu (tree fndecl, tree *args)
};
/* These are the values for vendor types and cpu types and subtypes
- in i386-cpuinfo.c. Cpu types and subtypes should be subtracted by
+ in cpuinfo.c. Cpu types and subtypes should be subtracted by
the corresponding start value. */
enum processor_model
{
@@ -31824,6 +31805,52 @@ ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
return false;
}
+/* Return the cost of moving between two registers of mode MODE. */
+
+static int
+ix86_set_reg_reg_cost (enum machine_mode mode)
+{
+ unsigned int units = UNITS_PER_WORD;
+
+ switch (GET_MODE_CLASS (mode))
+ {
+ default:
+ break;
+
+ case MODE_CC:
+ units = GET_MODE_SIZE (CCmode);
+ break;
+
+ case MODE_FLOAT:
+ if ((TARGET_SSE2 && mode == TFmode)
+ || (TARGET_80387 && mode == XFmode)
+ || ((TARGET_80387 || TARGET_SSE2) && mode == DFmode)
+ || ((TARGET_80387 || TARGET_SSE) && mode == SFmode))
+ units = GET_MODE_SIZE (mode);
+ break;
+
+ case MODE_COMPLEX_FLOAT:
+ if ((TARGET_SSE2 && mode == TCmode)
+ || (TARGET_80387 && mode == XCmode)
+ || ((TARGET_80387 || TARGET_SSE2) && mode == DCmode)
+ || ((TARGET_80387 || TARGET_SSE) && mode == SCmode))
+ units = GET_MODE_SIZE (mode);
+ break;
+
+ case MODE_VECTOR_INT:
+ case MODE_VECTOR_FLOAT:
+ if ((TARGET_AVX && VALID_AVX256_REG_MODE (mode))
+ || (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
+ || (TARGET_SSE && VALID_SSE_REG_MODE (mode))
+ || (TARGET_MMX && VALID_MMX_REG_MODE (mode)))
+ units = GET_MODE_SIZE (mode);
+ }
+
+ /* Return the cost of moving between two registers of mode MODE,
+ assuming that the move will be in pieces of at most UNITS bytes. */
+ return COSTS_N_INSNS ((GET_MODE_SIZE (mode) + units - 1) / units);
+}
+
/* Compute a (partial) cost for rtx X. Return true if the complete
cost has been computed, and false if subexpressions should be
scanned. In either case, *TOTAL contains the cost result. */
@@ -31838,6 +31865,15 @@ ix86_rtx_costs (rtx x, int code, int outer_code_i, int opno, int *total,
switch (code)
{
+ case SET:
+ if (register_operand (SET_DEST (x), VOIDmode)
+ && reg_or_0_operand (SET_SRC (x), VOIDmode))
+ {
+ *total = ix86_set_reg_reg_cost (GET_MODE (SET_DEST (x)));
+ return true;
+ }
+ return false;
+
case CONST_INT:
case CONST:
case LABEL_REF:
@@ -32542,7 +32578,7 @@ x86_this_parameter (tree function)
regno = CX_REG;
if (aggr)
return gen_rtx_MEM (SImode,
- plus_constant (stack_pointer_rtx, 4));
+ plus_constant (Pmode, stack_pointer_rtx, 4));
}
else
{
@@ -32552,13 +32588,15 @@ x86_this_parameter (tree function)
regno = DX_REG;
if (nregs == 1)
return gen_rtx_MEM (SImode,
- plus_constant (stack_pointer_rtx, 4));
+ plus_constant (Pmode,
+ stack_pointer_rtx, 4));
}
}
return gen_rtx_REG (SImode, regno);
}
- return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, aggr ? 8 : 4));
+ return gen_rtx_MEM (SImode, plus_constant (Pmode, stack_pointer_rtx,
+ aggr ? 8 : 4));
}
/* Determine whether x86_output_mi_thunk can succeed. */
@@ -32660,7 +32698,7 @@ x86_output_mi_thunk (FILE *file,
emit_move_insn (tmp, this_mem);
/* Adjust the this parameter. */
- vcall_addr = plus_constant (tmp, vcall_offset);
+ vcall_addr = plus_constant (Pmode, tmp, vcall_offset);
if (TARGET_64BIT
&& !ix86_legitimate_address_p (ptr_mode, vcall_addr, true))
{
@@ -39203,7 +39241,7 @@ do_dispatch (rtx insn, int mode)
static bool
has_dispatch (rtx insn, int action)
{
- if ((ix86_tune == PROCESSOR_BDVER1 || ix86_tune == PROCESSOR_BDVER2)
+ if ((TARGET_BDVER1 || TARGET_BDVER2)
&& flag_dispatch_scheduler)
switch (action)
{
@@ -39302,6 +39340,38 @@ ix86_autovectorize_vector_sizes (void)
return (TARGET_AVX && !TARGET_PREFER_AVX128) ? 32 | 16 : 0;
}
+/* Validate target specific memory model bits in VAL. */
+
+static unsigned HOST_WIDE_INT
+ix86_memmodel_check (unsigned HOST_WIDE_INT val)
+{
+ unsigned HOST_WIDE_INT model = val & MEMMODEL_MASK;
+ unsigned HOST_WIDE_INT strong;
+
+ if (val & ~(unsigned HOST_WIDE_INT)(IX86_HLE_ACQUIRE|IX86_HLE_RELEASE
+ |MEMMODEL_MASK)
+ || ((val & IX86_HLE_ACQUIRE) && (val & IX86_HLE_RELEASE)))
+ {
+ warning (OPT_Winvalid_memory_model,
+ "Unknown architecture specific memory model");
+ return MEMMODEL_SEQ_CST;
+ }
+ strong = (model == MEMMODEL_ACQ_REL || model == MEMMODEL_SEQ_CST);
+ if (val & IX86_HLE_ACQUIRE && !(model == MEMMODEL_ACQUIRE || strong))
+ {
+ warning (OPT_Winvalid_memory_model,
+ "HLE_ACQUIRE not used with ACQUIRE or stronger memory model");
+ return MEMMODEL_SEQ_CST | IX86_HLE_ACQUIRE;
+ }
+ if (val & IX86_HLE_RELEASE && !(model == MEMMODEL_RELEASE || strong))
+ {
+ warning (OPT_Winvalid_memory_model,
+ "HLE_RELEASE not used with RELEASE or stronger memory model");
+ return MEMMODEL_SEQ_CST | IX86_HLE_RELEASE;
+ }
+ return val;
+}
+
/* Initialize the GCC target structure. */
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY ix86_return_in_memory
@@ -39401,6 +39471,9 @@ ix86_autovectorize_vector_sizes (void)
#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
+#undef TARGET_MEMMODEL_CHECK
+#define TARGET_MEMMODEL_CHECK ix86_memmodel_check
+
#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
diff --git a/gcc/config/i386/i386.h b/gcc/config/i386/i386.h
index 8942ea86edf..ddb36459e93 100644
--- a/gcc/config/i386/i386.h
+++ b/gcc/config/i386/i386.h
@@ -75,6 +75,7 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
#define TARGET_RDRND OPTION_ISA_RDRND
#define TARGET_F16C OPTION_ISA_F16C
#define TARGET_RTM OPTION_ISA_RTM
+#define TARGET_HLE OPTION_ISA_HLE
#define TARGET_LP64 OPTION_ABI_64
#define TARGET_X32 OPTION_ABI_X32
@@ -429,7 +430,7 @@ extern unsigned char ix86_tune_features[X86_TUNE_LAST];
/* Feature tests against the various architecture variations. */
enum ix86_arch_indices {
- X86_ARCH_CMOVE, /* || TARGET_SSE */
+ X86_ARCH_CMOV,
X86_ARCH_CMPXCHG,
X86_ARCH_CMPXCHG8B,
X86_ARCH_XADD,
@@ -440,12 +441,17 @@ enum ix86_arch_indices {
extern unsigned char ix86_arch_features[X86_ARCH_LAST];
-#define TARGET_CMOVE ix86_arch_features[X86_ARCH_CMOVE]
+#define TARGET_CMOV ix86_arch_features[X86_ARCH_CMOV]
#define TARGET_CMPXCHG ix86_arch_features[X86_ARCH_CMPXCHG]
#define TARGET_CMPXCHG8B ix86_arch_features[X86_ARCH_CMPXCHG8B]
#define TARGET_XADD ix86_arch_features[X86_ARCH_XADD]
#define TARGET_BSWAP ix86_arch_features[X86_ARCH_BSWAP]
+/* For sane SSE instruction set generation we need fcomi instruction.
+ It is safe to enable all CMOVE instructions. Also, RDRAND intrinsic
+ expands to a sequence that includes conditional move. */
+#define TARGET_CMOVE (TARGET_CMOV || TARGET_SSE || TARGET_RDRND)
+
#define TARGET_FISTTP (TARGET_SSE3 && TARGET_80387)
extern int x86_prefetch_sse;
@@ -1906,8 +1912,9 @@ extern int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER];
/* After the prologue, RA is at -4(AP) in the current frame. */
#define RETURN_ADDR_RTX(COUNT, FRAME) \
((COUNT) == 0 \
- ? gen_rtx_MEM (Pmode, plus_constant (arg_pointer_rtx, -UNITS_PER_WORD)) \
- : gen_rtx_MEM (Pmode, plus_constant (FRAME, UNITS_PER_WORD)))
+ ? gen_rtx_MEM (Pmode, plus_constant (Pmode, arg_pointer_rtx, \
+ -UNITS_PER_WORD)) \
+ : gen_rtx_MEM (Pmode, plus_constant (Pmode, FRAME, UNITS_PER_WORD)))
/* PC is dbx register 8; let's use that column for RA. */
#define DWARF_FRAME_RETURN_COLUMN (TARGET_64BIT ? 16 : 8)
@@ -2344,6 +2351,9 @@ extern void debug_dispatch_window (int);
#define TARGET_RECIP_VEC_DIV ((recip_mask & RECIP_MASK_VEC_DIV) != 0)
#define TARGET_RECIP_VEC_SQRT ((recip_mask & RECIP_MASK_VEC_SQRT) != 0)
+#define IX86_HLE_ACQUIRE (1 << 16)
+#define IX86_HLE_RELEASE (1 << 17)
+
/*
Local variables:
version-control: t
diff --git a/gcc/config/i386/i386.md b/gcc/config/i386/i386.md
index ea77c203b49..cce78b5c535 100644
--- a/gcc/config/i386/i386.md
+++ b/gcc/config/i386/i386.md
@@ -58,6 +58,7 @@
;; X -- don't print any sort of PIC '@' suffix for a symbol.
;; & -- print some in-use local-dynamic symbol name.
;; H -- print a memory address offset by 8; used for sse high-parts
+;; K -- print HLE lock prefix
;; Y -- print condition for XOP pcom* instruction.
;; + -- print a branch hint as 'cs' or 'ds' prefix
;; ; -- print a semicolon (after prefixes due to bug in older gas).
@@ -1809,8 +1810,8 @@
(set_attr "length_immediate" "1")])
(define_insn "*movoi_internal_avx"
- [(set (match_operand:OI 0 "nonimmediate_operand" "=x,x,m")
- (match_operand:OI 1 "vector_move_operand" "C,xm,x"))]
+ [(set (match_operand:OI 0 "nonimmediate_operand" "=x,x ,m")
+ (match_operand:OI 1 "vector_move_operand" "C ,xm,x"))]
"TARGET_AVX && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
{
switch (which_alternative)
@@ -1821,20 +1822,37 @@
case 2:
if (misaligned_operand (operands[0], OImode)
|| misaligned_operand (operands[1], OImode))
- return "vmovdqu\t{%1, %0|%0, %1}";
+ {
+ if (get_attr_mode (insn) == MODE_V8SF)
+ return "vmovups\t{%1, %0|%0, %1}";
+ else
+ return "vmovdqu\t{%1, %0|%0, %1}";
+ }
else
- return "vmovdqa\t{%1, %0|%0, %1}";
+ {
+ if (get_attr_mode (insn) == MODE_V8SF)
+ return "vmovaps\t{%1, %0|%0, %1}";
+ else
+ return "vmovdqa\t{%1, %0|%0, %1}";
+ }
default:
gcc_unreachable ();
}
}
[(set_attr "type" "sselog1,ssemov,ssemov")
(set_attr "prefix" "vex")
- (set_attr "mode" "OI")])
+ (set (attr "mode")
+ (cond [(match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL")
+ (const_string "V8SF")
+ (and (eq_attr "alternative" "2")
+ (match_test "TARGET_SSE_TYPELESS_STORES"))
+ (const_string "V8SF")
+ ]
+ (const_string "OI")))])
(define_insn "*movti_internal_rex64"
- [(set (match_operand:TI 0 "nonimmediate_operand" "=!r,o,x,x,xm")
- (match_operand:TI 1 "general_operand" "riFo,riF,C,xm,x"))]
+ [(set (match_operand:TI 0 "nonimmediate_operand" "=!r ,o ,x,x ,m")
+ (match_operand:TI 1 "general_operand" "riFo,riF,C,xm,x"))]
"TARGET_64BIT && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
{
switch (which_alternative)
@@ -1870,18 +1888,19 @@
[(set_attr "type" "*,*,sselog1,ssemov,ssemov")
(set_attr "prefix" "*,*,maybe_vex,maybe_vex,maybe_vex")
(set (attr "mode")
- (cond [(eq_attr "alternative" "2,3")
- (if_then_else
- (match_test "optimize_function_for_size_p (cfun)")
- (const_string "V4SF")
- (const_string "TI"))
- (eq_attr "alternative" "4")
- (if_then_else
- (ior (match_test "TARGET_SSE_TYPELESS_STORES")
- (match_test "optimize_function_for_size_p (cfun)"))
- (const_string "V4SF")
- (const_string "TI"))]
- (const_string "DI")))])
+ (cond [(eq_attr "alternative" "0,1")
+ (const_string "DI")
+ (match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL")
+ (const_string "V4SF")
+ (and (eq_attr "alternative" "4")
+ (match_test "TARGET_SSE_TYPELESS_STORES"))
+ (const_string "V4SF")
+ (match_test "TARGET_AVX")
+ (const_string "TI")
+ (match_test "optimize_function_for_size_p (cfun)")
+ (const_string "V4SF")
+ ]
+ (const_string "TI")))])
(define_split
[(set (match_operand:TI 0 "nonimmediate_operand")
@@ -1892,8 +1911,8 @@
"ix86_split_long_move (operands); DONE;")
(define_insn "*movti_internal_sse"
- [(set (match_operand:TI 0 "nonimmediate_operand" "=x,x,m")
- (match_operand:TI 1 "vector_move_operand" "C,xm,x"))]
+ [(set (match_operand:TI 0 "nonimmediate_operand" "=x,x ,m")
+ (match_operand:TI 1 "vector_move_operand" "C ,xm,x"))]
"TARGET_SSE && !TARGET_64BIT
&& !(MEM_P (operands[0]) && MEM_P (operands[1]))"
{
@@ -1927,12 +1946,17 @@
[(set_attr "type" "sselog1,ssemov,ssemov")
(set_attr "prefix" "maybe_vex")
(set (attr "mode")
- (cond [(ior (not (match_test "TARGET_SSE2"))
- (match_test "optimize_function_for_size_p (cfun)"))
+ (cond [(match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL")
(const_string "V4SF")
(and (eq_attr "alternative" "2")
(match_test "TARGET_SSE_TYPELESS_STORES"))
- (const_string "V4SF")]
+ (const_string "V4SF")
+ (match_test "TARGET_AVX")
+ (const_string "TI")
+ (ior (not (match_test "TARGET_SSE2"))
+ (match_test "optimize_function_for_size_p (cfun)"))
+ (const_string "V4SF")
+ ]
(const_string "TI")))])
(define_insn "*movdi_internal_rex64"
@@ -1951,8 +1975,11 @@
return "movdq2q\t{%1, %0|%0, %1}";
case TYPE_SSEMOV:
- if (get_attr_mode (insn) == MODE_TI)
+ if (get_attr_mode (insn) == MODE_V4SF)
+ return "%vmovaps\t{%1, %0|%0, %1}";
+ else if (get_attr_mode (insn) == MODE_TI)
return "%vmovdqa\t{%1, %0|%0, %1}";
+
/* Handle broken assemblers that require movd instead of movq. */
if (GENERAL_REG_P (operands[0]) || GENERAL_REG_P (operands[1]))
return "%vmovd\t{%1, %0|%0, %1}";
@@ -2029,7 +2056,20 @@
(if_then_else (eq_attr "alternative" "10,11,12,13,14,15")
(const_string "maybe_vex")
(const_string "orig")))
- (set_attr "mode" "SI,DI,DI,DI,SI,DI,DI,DI,DI,DI,TI,DI,TI,DI,DI,DI,DI,DI")])
+ (set (attr "mode")
+ (cond [(eq_attr "alternative" "0,4")
+ (const_string "SI")
+ (eq_attr "alternative" "10,12")
+ (cond [(match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL")
+ (const_string "V4SF")
+ (match_test "TARGET_AVX")
+ (const_string "TI")
+ (match_test "optimize_function_for_size_p (cfun)")
+ (const_string "V4SF")
+ ]
+ (const_string "TI"))
+ ]
+ (const_string "DI")))])
;; Reload patterns to support multi-word load/store
;; with non-offsetable address.
@@ -2123,7 +2163,7 @@
case MODE_DI:
return "%vmovq\t{%1, %0|%0, %1}";
case MODE_V4SF:
- return "movaps\t{%1, %0|%0, %1}";
+ return "%vmovaps\t{%1, %0|%0, %1}";
case MODE_V2SF:
return "movlps\t{%1, %0|%0, %1}";
default:
@@ -2170,7 +2210,22 @@
(if_then_else (eq_attr "alternative" "5,6,7,8")
(const_string "maybe_vex")
(const_string "orig")))
- (set_attr "mode" "DI,DI,DI,DI,DI,TI,DI,TI,DI,V4SF,V2SF,V4SF,V2SF,DI,DI")])
+ (set (attr "mode")
+ (cond [(eq_attr "alternative" "9,11")
+ (const_string "V4SF")
+ (eq_attr "alternative" "10,12")
+ (const_string "V2SF")
+ (eq_attr "alternative" "5,7")
+ (cond [(match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL")
+ (const_string "V4SF")
+ (match_test "TARGET_AVX")
+ (const_string "TI")
+ (match_test "optimize_function_for_size_p (cfun)")
+ (const_string "V4SF")
+ ]
+ (const_string "TI"))
+ ]
+ (const_string "DI")))])
(define_split
[(set (match_operand:DI 0 "nonimmediate_operand")
@@ -2252,10 +2307,15 @@
(cond [(eq_attr "alternative" "2,3")
(const_string "DI")
(eq_attr "alternative" "6,7")
- (if_then_else
- (not (match_test "TARGET_SSE2"))
- (const_string "V4SF")
- (const_string "TI"))
+ (cond [(match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL")
+ (const_string "V4SF")
+ (match_test "TARGET_AVX")
+ (const_string "TI")
+ (ior (not (match_test "TARGET_SSE2"))
+ (match_test "optimize_function_for_size_p (cfun)"))
+ (const_string "V4SF")
+ ]
+ (const_string "TI"))
(and (eq_attr "alternative" "8,9,10,11")
(not (match_test "TARGET_SSE2")))
(const_string "SF")
@@ -2405,7 +2465,7 @@
(set_attr "memory" "load")
(set_attr "mode" "<MODE>")])
-(define_insn "*swap<mode>"
+(define_insn "swap<mode>"
[(set (match_operand:SWI48 0 "register_operand" "+r")
(match_operand:SWI48 1 "register_operand" "+r"))
(set (match_dup 1)
@@ -2812,8 +2872,8 @@
"ix86_expand_move (<MODE>mode, operands); DONE;")
(define_insn "*movtf_internal"
- [(set (match_operand:TF 0 "nonimmediate_operand" "=x,m,x,?*r ,!o")
- (match_operand:TF 1 "general_operand" "xm,x,C,*roF,F*r"))]
+ [(set (match_operand:TF 0 "nonimmediate_operand" "=x,x ,m,?*r ,!o")
+ (match_operand:TF 1 "general_operand" "C ,xm,x,*roF,F*r"))]
"TARGET_SSE2
&& !(MEM_P (operands[0]) && MEM_P (operands[1]))
&& (!can_create_pseudo_p ()
@@ -2828,7 +2888,9 @@
switch (which_alternative)
{
case 0:
+ return standard_sse_constant_opcode (insn, operands[1]);
case 1:
+ case 2:
/* Handle misaligned load/store since we
don't have movmisaligntf pattern. */
if (misaligned_operand (operands[0], TFmode)
@@ -2847,9 +2909,6 @@
return "%vmovdqa\t{%1, %0|%0, %1}";
}
- case 2:
- return standard_sse_constant_opcode (insn, operands[1]);
-
case 3:
case 4:
return "#";
@@ -2858,21 +2917,22 @@
gcc_unreachable ();
}
}
- [(set_attr "type" "ssemov,ssemov,sselog1,*,*")
+ [(set_attr "type" "sselog1,ssemov,ssemov,*,*")
(set_attr "prefix" "maybe_vex,maybe_vex,maybe_vex,*,*")
(set (attr "mode")
- (cond [(eq_attr "alternative" "0,2")
- (if_then_else
- (match_test "optimize_function_for_size_p (cfun)")
- (const_string "V4SF")
- (const_string "TI"))
- (eq_attr "alternative" "1")
- (if_then_else
- (ior (match_test "TARGET_SSE_TYPELESS_STORES")
- (match_test "optimize_function_for_size_p (cfun)"))
- (const_string "V4SF")
- (const_string "TI"))]
- (const_string "DI")))])
+ (cond [(eq_attr "alternative" "3,4")
+ (const_string "DI")
+ (match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL")
+ (const_string "V4SF")
+ (and (eq_attr "alternative" "2")
+ (match_test "TARGET_SSE_TYPELESS_STORES"))
+ (const_string "V4SF")
+ (match_test "TARGET_AVX")
+ (const_string "TI")
+ (match_test "optimize_function_for_size_p (cfun)")
+ (const_string "V4SF")
+ ]
+ (const_string "TI")))])
;; Possible store forwarding (partial memory) stall in alternative 4.
(define_insn "*movxf_internal"
@@ -2952,8 +3012,7 @@
switch (get_attr_mode (insn))
{
case MODE_V2DF:
- if (!TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
- return "%vmovapd\t{%1, %0|%0, %1}";
+ return "%vmovapd\t{%1, %0|%0, %1}";
case MODE_V4SF:
return "%vmovaps\t{%1, %0|%0, %1}";
@@ -3015,9 +3074,11 @@
(eq_attr "alternative" "3,4,5,6,11,12")
(const_string "DI")
- /* xorps is one byte shorter. */
+ /* xorps is one byte shorter for !TARGET_AVX. */
(eq_attr "alternative" "7")
- (cond [(match_test "optimize_function_for_size_p (cfun)")
+ (cond [(match_test "TARGET_AVX")
+ (const_string "V2DF")
+ (match_test "optimize_function_for_size_p (cfun)")
(const_string "V4SF")
(match_test "TARGET_SSE_LOAD0_BY_PXOR")
(const_string "TI")
@@ -3028,13 +3089,16 @@
whole SSE registers use APD move to break dependency
chains, otherwise use short move to avoid extra work.
- movaps encodes one byte shorter. */
+ movaps encodes one byte shorter for !TARGET_AVX. */
(eq_attr "alternative" "8")
- (cond
- [(match_test "optimize_function_for_size_p (cfun)")
- (const_string "V4SF")
- (match_test "TARGET_SSE_PARTIAL_REG_DEPENDENCY")
- (const_string "V2DF")
+ (cond [(match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL")
+ (const_string "V4SF")
+ (match_test "TARGET_SSE_PARTIAL_REG_DEPENDENCY")
+ (const_string "V2DF")
+ (match_test "TARGET_AVX")
+ (const_string "DF")
+ (match_test "optimize_function_for_size_p (cfun)")
+ (const_string "V4SF")
]
(const_string "DF"))
/* For architectures resolving dependencies on register
@@ -3093,8 +3157,7 @@
switch (get_attr_mode (insn))
{
case MODE_V2DF:
- if (!TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
- return "%vmovapd\t{%1, %0|%0, %1}";
+ return "%vmovapd\t{%1, %0|%0, %1}";
case MODE_V4SF:
return "%vmovaps\t{%1, %0|%0, %1}";
@@ -3150,9 +3213,11 @@
(const_string "V4SF")
(const_string "V2SF"))
- /* xorps is one byte shorter. */
+ /* xorps is one byte shorter for !TARGET_AVX. */
(eq_attr "alternative" "5,9")
- (cond [(match_test "optimize_function_for_size_p (cfun)")
+ (cond [(match_test "TARGET_AVX")
+ (const_string "V2DF")
+ (match_test "optimize_function_for_size_p (cfun)")
(const_string "V4SF")
(match_test "TARGET_SSE_LOAD0_BY_PXOR")
(const_string "TI")
@@ -3163,15 +3228,19 @@
whole SSE registers use APD move to break dependency
chains, otherwise use short move to avoid extra work.
- movaps encodes one byte shorter. */
+ movaps encodes one byte shorter for !TARGET_AVX. */
(eq_attr "alternative" "6,10")
- (cond
- [(match_test "optimize_function_for_size_p (cfun)")
- (const_string "V4SF")
- (match_test "TARGET_SSE_PARTIAL_REG_DEPENDENCY")
- (const_string "V2DF")
+ (cond [(match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL")
+ (const_string "V4SF")
+ (match_test "TARGET_SSE_PARTIAL_REG_DEPENDENCY")
+ (const_string "V2DF")
+ (match_test "TARGET_AVX")
+ (const_string "DF")
+ (match_test "optimize_function_for_size_p (cfun)")
+ (const_string "V4SF")
]
(const_string "DF"))
+
/* For architectures resolving dependencies on register
parts we may avoid extra work to zero out upper part
of register. */
@@ -3261,12 +3330,16 @@
(cond [(eq_attr "alternative" "3,4,9,10")
(const_string "SI")
(eq_attr "alternative" "5")
- (if_then_else
- (and (and (match_test "TARGET_SSE_LOAD0_BY_PXOR")
- (match_test "TARGET_SSE2"))
- (not (match_test "optimize_function_for_size_p (cfun)")))
- (const_string "TI")
- (const_string "V4SF"))
+ (cond [(match_test "TARGET_AVX")
+ (const_string "V4SF")
+ (ior (not (match_test "TARGET_SSE2"))
+ (match_test "optimize_function_for_size_p (cfun)"))
+ (const_string "V4SF")
+ (match_test "TARGET_SSE_LOAD0_BY_PXOR")
+ (const_string "TI")
+ ]
+ (const_string "V4SF"))
+
/* For architectures resolving dependencies on
whole SSE registers use APS move to break dependency
chains, otherwise use short move to avoid extra work.
@@ -6348,7 +6421,7 @@
operands[2] = GEN_INT (1 << INTVAL (operands[2]));
- pat = plus_constant (gen_rtx_MULT (mode, operands[1], operands[2]),
+ pat = plus_constant (mode, gen_rtx_MULT (mode, operands[1], operands[2]),
INTVAL (operands[3]));
emit_insn (gen_rtx_SET (VOIDmode, operands[0], pat));
@@ -11963,7 +12036,7 @@
be returning into someone else's stack frame, one word below the
stack address we wish to restore. */
tmp = gen_rtx_PLUS (Pmode, arg_pointer_rtx, sa);
- tmp = plus_constant (tmp, -UNITS_PER_WORD);
+ tmp = plus_constant (Pmode, tmp, -UNITS_PER_WORD);
tmp = gen_rtx_MEM (Pmode, tmp);
emit_move_insn (tmp, ra);
@@ -12111,9 +12184,26 @@
(set (match_operand:SWI48 0 "register_operand" "=r")
(ctz:SWI48 (match_dup 1)))]
""
- "bsf{<imodesuffix>}\t{%1, %0|%0, %1}"
+{
+ if (TARGET_BMI)
+ return "tzcnt{<imodesuffix>}\t{%1, %0|%0, %1}";
+ else if (optimize_function_for_size_p (cfun))
+ ;
+ else if (TARGET_GENERIC)
+ /* tzcnt expands to rep;bsf and we can use it even if !TARGET_BMI. */
+ return "rep; bsf{<imodesuffix>}\t{%1, %0|%0, %1}";
+
+ return "bsf{<imodesuffix>}\t{%1, %0|%0, %1}";
+}
[(set_attr "type" "alu1")
(set_attr "prefix_0f" "1")
+ (set (attr "prefix_rep")
+ (if_then_else
+ (ior (match_test "TARGET_BMI")
+ (and (not (match_test "optimize_function_for_size_p (cfun)"))
+ (match_test "TARGET_GENERIC")))
+ (const_string "1")
+ (const_string "0")))
(set_attr "mode" "<MODE>")])
(define_insn "ctz<mode>2"
@@ -12124,12 +12214,23 @@
{
if (TARGET_BMI)
return "tzcnt{<imodesuffix>}\t{%1, %0|%0, %1}";
- else
- return "bsf{<imodesuffix>}\t{%1, %0|%0, %1}";
+ else if (optimize_function_for_size_p (cfun))
+ ;
+ else if (TARGET_GENERIC)
+ /* tzcnt expands to rep;bsf and we can use it even if !TARGET_BMI. */
+ return "rep; bsf{<imodesuffix>}\t{%1, %0|%0, %1}";
+
+ return "bsf{<imodesuffix>}\t{%1, %0|%0, %1}";
}
[(set_attr "type" "alu1")
(set_attr "prefix_0f" "1")
- (set (attr "prefix_rep") (symbol_ref "TARGET_BMI"))
+ (set (attr "prefix_rep")
+ (if_then_else
+ (ior (match_test "TARGET_BMI")
+ (and (not (match_test "optimize_function_for_size_p (cfun)"))
+ (match_test "TARGET_GENERIC")))
+ (const_string "1")
+ (const_string "0")))
(set_attr "mode" "<MODE>")])
(define_expand "clz<mode>2"
@@ -12486,12 +12587,70 @@
(set_attr "type" "bitmanip")
(set_attr "mode" "SI")])
-(define_expand "bswap<mode>2"
- [(set (match_operand:SWI48 0 "register_operand")
- (bswap:SWI48 (match_operand:SWI48 1 "register_operand")))]
+(define_expand "bswapdi2"
+ [(set (match_operand:DI 0 "register_operand")
+ (bswap:DI (match_operand:DI 1 "nonimmediate_operand")))]
""
{
- if (<MODE>mode == SImode && !(TARGET_BSWAP || TARGET_MOVBE))
+ if (TARGET_64BIT && !TARGET_MOVBE)
+ operands[1] = force_reg (DImode, operands[1]);
+})
+
+(define_insn_and_split "*bswapdi2_doubleword"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,m")
+ (bswap:DI
+ (match_operand:DI 1 "nonimmediate_operand" "0,m,r")))]
+ "!TARGET_64BIT
+ && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 2)
+ (bswap:SI (match_dup 1)))
+ (set (match_dup 0)
+ (bswap:SI (match_dup 3)))]
+{
+ split_double_mode (DImode, &operands[0], 2, &operands[0], &operands[2]);
+
+ if (REG_P (operands[0]) && REG_P (operands[1]))
+ {
+ emit_insn (gen_swapsi (operands[0], operands[2]));
+ emit_insn (gen_bswapsi2 (operands[0], operands[0]));
+ emit_insn (gen_bswapsi2 (operands[2], operands[2]));
+ DONE;
+ }
+
+ if (!TARGET_MOVBE)
+ {
+ if (MEM_P (operands[0]))
+ {
+ emit_insn (gen_bswapsi2 (operands[3], operands[3]));
+ emit_insn (gen_bswapsi2 (operands[1], operands[1]));
+
+ emit_move_insn (operands[0], operands[3]);
+ emit_move_insn (operands[2], operands[1]);
+ }
+ if (MEM_P (operands[1]))
+ {
+ emit_move_insn (operands[2], operands[1]);
+ emit_move_insn (operands[0], operands[3]);
+
+ emit_insn (gen_bswapsi2 (operands[2], operands[2]));
+ emit_insn (gen_bswapsi2 (operands[0], operands[0]));
+ }
+ DONE;
+ }
+})
+
+(define_expand "bswapsi2"
+ [(set (match_operand:SI 0 "register_operand")
+ (bswap:SI (match_operand:SI 1 "nonimmediate_operand")))]
+ ""
+{
+ if (TARGET_MOVBE)
+ ;
+ else if (TARGET_BSWAP)
+ operands[1] = force_reg (SImode, operands[1]);
+ else
{
rtx x = operands[0];
@@ -12518,7 +12677,7 @@
(set_attr "prefix_extra" "*,1,1")
(set_attr "mode" "<MODE>")])
-(define_insn "*bswap<mode>2_1"
+(define_insn "*bswap<mode>2"
[(set (match_operand:SWI48 0 "register_operand" "=r")
(bswap:SWI48 (match_operand:SWI48 1 "register_operand" "0")))]
"TARGET_BSWAP"
@@ -18320,7 +18479,7 @@
{
emit_insn (gen_xtest_1 ());
- ix86_expand_setcc (operands[0], EQ,
+ ix86_expand_setcc (operands[0], NE,
gen_rtx_REG (CCZmode, FLAGS_REG), const0_rtx);
DONE;
})
diff --git a/gcc/config/i386/i386.opt b/gcc/config/i386/i386.opt
index bf50aed47a8..db34e1fc073 100644
--- a/gcc/config/i386/i386.opt
+++ b/gcc/config/i386/i386.opt
@@ -528,6 +528,10 @@ mlzcnt
Target Report Mask(ISA_LZCNT) Var(ix86_isa_flags) Save
Support LZCNT built-in function and code generation
+mhle
+Target Report Mask(ISA_HLE) Var(ix86_isa_flags) Save
+Support Hardware Lock Elision prefixes
+
mtbm
Target Report Mask(ISA_TBM) Var(ix86_isa_flags) Save
Support TBM built-in functions and code generation
diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md
index d270c634ae0..d4b3daafccc 100644
--- a/gcc/config/i386/sse.md
+++ b/gcc/config/i386/sse.md
@@ -337,6 +337,16 @@
(V8SF "V4SF") (V4DF "V2DF")
(V4SF "V2SF")])
+;; Mapping of vector modes ti packed single mode of the same size
+(define_mode_attr ssePSmode
+ [(V32QI "V8SF") (V16QI "V4SF")
+ (V16HI "V8SF") (V8HI "V4SF")
+ (V8SI "V8SF") (V4SI "V4SF")
+ (V4DI "V8SF") (V2DI "V4SF")
+ (V2TI "V8SF") (V1TI "V4SF")
+ (V8SF "V8SF") (V4SF "V4SF")
+ (V4DF "V8SF") (V2DF "V4SF")])
+
;; Mapping of vector modes back to the scalar modes
(define_mode_attr ssescalarmode
[(V32QI "QI") (V16HI "HI") (V8SI "SI") (V4DI "DI")
@@ -420,7 +430,7 @@
})
(define_insn "*mov<mode>_internal"
- [(set (match_operand:V16 0 "nonimmediate_operand" "=x,x ,m")
+ [(set (match_operand:V16 0 "nonimmediate_operand" "=x,x ,m")
(match_operand:V16 1 "nonimmediate_or_sse_const_operand" "C ,xm,x"))]
"TARGET_SSE
&& (register_operand (operands[0], <MODE>mode)
@@ -449,8 +459,6 @@
&& (misaligned_operand (operands[0], <MODE>mode)
|| misaligned_operand (operands[1], <MODE>mode)))
return "vmovupd\t{%1, %0|%0, %1}";
- else if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
- return "%vmovaps\t{%1, %0|%0, %1}";
else
return "%vmovapd\t{%1, %0|%0, %1}";
@@ -460,8 +468,6 @@
&& (misaligned_operand (operands[0], <MODE>mode)
|| misaligned_operand (operands[1], <MODE>mode)))
return "vmovdqu\t{%1, %0|%0, %1}";
- else if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
- return "%vmovaps\t{%1, %0|%0, %1}";
else
return "%vmovdqa\t{%1, %0|%0, %1}";
@@ -475,19 +481,21 @@
[(set_attr "type" "sselog1,ssemov,ssemov")
(set_attr "prefix" "maybe_vex")
(set (attr "mode")
- (cond [(match_test "TARGET_AVX")
+ (cond [(match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL")
+ (const_string "<ssePSmode>")
+ (and (eq_attr "alternative" "2")
+ (match_test "TARGET_SSE_TYPELESS_STORES"))
+ (const_string "<ssePSmode>")
+ (match_test "TARGET_AVX")
(const_string "<sseinsnmode>")
- (ior (ior (match_test "optimize_function_for_size_p (cfun)")
- (not (match_test "TARGET_SSE2")))
- (and (eq_attr "alternative" "2")
- (match_test "TARGET_SSE_TYPELESS_STORES")))
+ (ior (not (match_test "TARGET_SSE2"))
+ (match_test "optimize_function_for_size_p (cfun)"))
(const_string "V4SF")
- (eq (const_string "<MODE>mode") (const_string "V4SFmode"))
- (const_string "V4SF")
- (eq (const_string "<MODE>mode") (const_string "V2DFmode"))
- (const_string "V2DF")
+ (and (eq_attr "alternative" "0")
+ (match_test "TARGET_SSE_LOAD0_BY_PXOR"))
+ (const_string "TI")
]
- (const_string "TI")))])
+ (const_string "<sseinsnmode>")))])
(define_insn "sse2_movq128"
[(set (match_operand:V2DI 0 "register_operand" "=x")
@@ -597,11 +605,31 @@
[(match_operand:VF 1 "nonimmediate_operand" "xm,x")]
UNSPEC_MOVU))]
"TARGET_SSE && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
- "%vmovu<ssemodesuffix>\t{%1, %0|%0, %1}"
+{
+ switch (get_attr_mode (insn))
+ {
+ case MODE_V8SF:
+ case MODE_V4SF:
+ return "%vmovups\t{%1, %0|%0, %1}";
+ default:
+ return "%vmovu<ssemodesuffix>\t{%1, %0|%0, %1}";
+ }
+}
[(set_attr "type" "ssemov")
(set_attr "movu" "1")
(set_attr "prefix" "maybe_vex")
- (set_attr "mode" "<MODE>")])
+ (set (attr "mode")
+ (cond [(match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL")
+ (const_string "<ssePSmode>")
+ (and (eq_attr "alternative" "1")
+ (match_test "TARGET_SSE_TYPELESS_STORES"))
+ (const_string "<ssePSmode>")
+ (match_test "TARGET_AVX")
+ (const_string "<MODE>")
+ (match_test "optimize_function_for_size_p (cfun)")
+ (const_string "V4SF")
+ ]
+ (const_string "<MODE>")))])
(define_expand "<sse2>_movdqu<avxsizesuffix>"
[(set (match_operand:VI1 0 "nonimmediate_operand")
@@ -618,7 +646,16 @@
(unspec:VI1 [(match_operand:VI1 1 "nonimmediate_operand" "xm,x")]
UNSPEC_MOVU))]
"TARGET_SSE2 && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
- "%vmovdqu\t{%1, %0|%0, %1}"
+{
+ switch (get_attr_mode (insn))
+ {
+ case MODE_V8SF:
+ case MODE_V4SF:
+ return "%vmovups\t{%1, %0|%0, %1}";
+ default:
+ return "%vmovdqu\t{%1, %0|%0, %1}";
+ }
+}
[(set_attr "type" "ssemov")
(set_attr "movu" "1")
(set (attr "prefix_data16")
@@ -627,7 +664,18 @@
(const_string "*")
(const_string "1")))
(set_attr "prefix" "maybe_vex")
- (set_attr "mode" "<sseinsnmode>")])
+ (set (attr "mode")
+ (cond [(match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL")
+ (const_string "<ssePSmode>")
+ (and (eq_attr "alternative" "1")
+ (match_test "TARGET_SSE_TYPELESS_STORES"))
+ (const_string "<ssePSmode>")
+ (match_test "TARGET_AVX")
+ (const_string "<sseinsnmode>")
+ (match_test "optimize_function_for_size_p (cfun)")
+ (const_string "V4SF")
+ ]
+ (const_string "<sseinsnmode>")))])
(define_insn "<sse3>_lddqu<avxsizesuffix>"
[(set (match_operand:VI1 0 "register_operand" "=x")
@@ -5706,11 +5754,15 @@
if (TARGET_XOP)
{
+ rtx t3 = gen_reg_rtx (V2DImode);
+
emit_insn (gen_sse2_pshufd_1 (t1, op1, GEN_INT (0), GEN_INT (2),
GEN_INT (1), GEN_INT (3)));
emit_insn (gen_sse2_pshufd_1 (t2, op2, GEN_INT (0), GEN_INT (2),
GEN_INT (1), GEN_INT (3)));
- emit_insn (gen_xop_mulv2div2di3_high (operands[0], t1, t2));
+ emit_move_insn (t3, CONST0_RTX (V2DImode));
+
+ emit_insn (gen_xop_pmacsdqh (operands[0], t1, t2, t3));
DONE;
}
@@ -5735,11 +5787,15 @@
if (TARGET_XOP)
{
+ rtx t3 = gen_reg_rtx (V2DImode);
+
emit_insn (gen_sse2_pshufd_1 (t1, op1, GEN_INT (0), GEN_INT (2),
GEN_INT (1), GEN_INT (3)));
emit_insn (gen_sse2_pshufd_1 (t2, op2, GEN_INT (0), GEN_INT (2),
GEN_INT (1), GEN_INT (3)));
- emit_insn (gen_xop_mulv2div2di3_low (operands[0], t1, t2));
+ emit_move_insn (t3, CONST0_RTX (V2DImode));
+
+ emit_insn (gen_xop_pmacsdql (operands[0], t1, t2, t3));
DONE;
}
@@ -9750,11 +9806,11 @@
(sign_extend:V2DI
(vec_select:V2SI
(match_operand:V4SI 1 "nonimmediate_operand" "%x")
- (parallel [(const_int 1) (const_int 3)])))
+ (parallel [(const_int 0) (const_int 2)])))
(sign_extend:V2DI
(vec_select:V2SI
(match_operand:V4SI 2 "nonimmediate_operand" "xm")
- (parallel [(const_int 1) (const_int 3)]))))
+ (parallel [(const_int 0) (const_int 2)]))))
(match_operand:V2DI 3 "nonimmediate_operand" "x")))]
"TARGET_XOP"
"vp<macs>dql\t{%3, %2, %1, %0|%0, %1, %2, %3}"
@@ -9768,93 +9824,17 @@
(sign_extend:V2DI
(vec_select:V2SI
(match_operand:V4SI 1 "nonimmediate_operand" "%x")
- (parallel [(const_int 0) (const_int 2)])))
+ (parallel [(const_int 1) (const_int 3)])))
(sign_extend:V2DI
(vec_select:V2SI
(match_operand:V4SI 2 "nonimmediate_operand" "xm")
- (parallel [(const_int 0) (const_int 2)]))))
+ (parallel [(const_int 1) (const_int 3)]))))
(match_operand:V2DI 3 "nonimmediate_operand" "x")))]
"TARGET_XOP"
"vp<macs>dqh\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "TI")])
-;; We don't have a straight 32-bit parallel multiply and extend on XOP, so
-;; fake it with a multiply/add. In general, we expect the define_split to
-;; occur before register allocation, so we have to handle the corner case where
-;; the target is the same as operands 1/2
-(define_insn_and_split "xop_mulv2div2di3_low"
- [(set (match_operand:V2DI 0 "register_operand" "=&x")
- (mult:V2DI
- (sign_extend:V2DI
- (vec_select:V2SI
- (match_operand:V4SI 1 "register_operand" "%x")
- (parallel [(const_int 1) (const_int 3)])))
- (sign_extend:V2DI
- (vec_select:V2SI
- (match_operand:V4SI 2 "nonimmediate_operand" "xm")
- (parallel [(const_int 1) (const_int 3)])))))]
- "TARGET_XOP"
- "#"
- "&& reload_completed"
- [(set (match_dup 0)
- (match_dup 3))
- (set (match_dup 0)
- (plus:V2DI
- (mult:V2DI
- (sign_extend:V2DI
- (vec_select:V2SI
- (match_dup 1)
- (parallel [(const_int 1) (const_int 3)])))
- (sign_extend:V2DI
- (vec_select:V2SI
- (match_dup 2)
- (parallel [(const_int 1) (const_int 3)]))))
- (match_dup 0)))]
-{
- operands[3] = CONST0_RTX (V2DImode);
-}
- [(set_attr "type" "ssemul")
- (set_attr "mode" "TI")])
-
-;; We don't have a straight 32-bit parallel multiply and extend on XOP, so
-;; fake it with a multiply/add. In general, we expect the define_split to
-;; occur before register allocation, so we have to handle the corner case where
-;; the target is the same as either operands[1] or operands[2]
-(define_insn_and_split "xop_mulv2div2di3_high"
- [(set (match_operand:V2DI 0 "register_operand" "=&x")
- (mult:V2DI
- (sign_extend:V2DI
- (vec_select:V2SI
- (match_operand:V4SI 1 "register_operand" "%x")
- (parallel [(const_int 0) (const_int 2)])))
- (sign_extend:V2DI
- (vec_select:V2SI
- (match_operand:V4SI 2 "nonimmediate_operand" "xm")
- (parallel [(const_int 0) (const_int 2)])))))]
- "TARGET_XOP"
- "#"
- "&& reload_completed"
- [(set (match_dup 0)
- (match_dup 3))
- (set (match_dup 0)
- (plus:V2DI
- (mult:V2DI
- (sign_extend:V2DI
- (vec_select:V2SI
- (match_dup 1)
- (parallel [(const_int 0) (const_int 2)])))
- (sign_extend:V2DI
- (vec_select:V2SI
- (match_dup 2)
- (parallel [(const_int 0) (const_int 2)]))))
- (match_dup 0)))]
-{
- operands[3] = CONST0_RTX (V2DImode);
-}
- [(set_attr "type" "ssemul")
- (set_attr "mode" "TI")])
-
;; XOP parallel integer multiply/add instructions for the intrinisics
(define_insn "xop_p<macs>wd"
[(set (match_operand:V4SI 0 "register_operand" "=x")
diff --git a/gcc/config/i386/sync.md b/gcc/config/i386/sync.md
index faf65ba8a23..e02a949631e 100644
--- a/gcc/config/i386/sync.md
+++ b/gcc/config/i386/sync.md
@@ -315,8 +315,9 @@
(match_operand:SI 7 "const_int_operand")] ;; failure model
"TARGET_CMPXCHG"
{
- emit_insn (gen_atomic_compare_and_swap_single<mode>
- (operands[1], operands[2], operands[3], operands[4]));
+ emit_insn
+ (gen_atomic_compare_and_swap_single<mode>
+ (operands[1], operands[2], operands[3], operands[4], operands[6]));
ix86_expand_setcc (operands[0], EQ, gen_rtx_REG (CCZmode, FLAGS_REG),
const0_rtx);
DONE;
@@ -344,8 +345,9 @@
{
if (<MODE>mode == DImode && TARGET_64BIT)
{
- emit_insn (gen_atomic_compare_and_swap_singledi
- (operands[1], operands[2], operands[3], operands[4]));
+ emit_insn
+ (gen_atomic_compare_and_swap_singledi
+ (operands[1], operands[2], operands[3], operands[4], operands[6]));
}
else
{
@@ -370,7 +372,7 @@
mem = replace_equiv_address (mem, force_reg (Pmode, XEXP (mem, 0)));
emit_insn (gen_atomic_compare_and_swap_double<mode>
- (lo_o, hi_o, mem, lo_e, hi_e, lo_n, hi_n));
+ (lo_o, hi_o, mem, lo_e, hi_e, lo_n, hi_n, operands[6]));
}
ix86_expand_setcc (operands[0], EQ, gen_rtx_REG (CCZmode, FLAGS_REG),
const0_rtx);
@@ -382,14 +384,15 @@
(unspec_volatile:SWI
[(match_operand:SWI 1 "memory_operand" "+m")
(match_operand:SWI 2 "register_operand" "0")
- (match_operand:SWI 3 "register_operand" "<r>")]
+ (match_operand:SWI 3 "register_operand" "<r>")
+ (match_operand:SI 4 "const_int_operand")]
UNSPECV_CMPXCHG_1))
(set (match_dup 1)
(unspec_volatile:SWI [(const_int 0)] UNSPECV_CMPXCHG_2))
(set (reg:CCZ FLAGS_REG)
(unspec_volatile:CCZ [(const_int 0)] UNSPECV_CMPXCHG_3))]
"TARGET_CMPXCHG"
- "lock{%;} cmpxchg{<imodesuffix>}\t{%3, %1|%1, %3}")
+ "lock{%;} %K4cmpxchg{<imodesuffix>}\t{%3, %1|%1, %3}")
;; For double-word compare and swap, we are obliged to play tricks with
;; the input newval (op5:op6) because the Intel register numbering does
@@ -403,7 +406,8 @@
(match_operand:<DCASHMODE> 3 "register_operand" "0")
(match_operand:<DCASHMODE> 4 "register_operand" "1")
(match_operand:<DCASHMODE> 5 "register_operand" "b")
- (match_operand:<DCASHMODE> 6 "register_operand" "c")]
+ (match_operand:<DCASHMODE> 6 "register_operand" "c")
+ (match_operand:SI 7 "const_int_operand")]
UNSPECV_CMPXCHG_1))
(set (match_operand:<DCASHMODE> 1 "register_operand" "=d")
(unspec_volatile:<DCASHMODE> [(const_int 0)] UNSPECV_CMPXCHG_2))
@@ -412,7 +416,7 @@
(set (reg:CCZ FLAGS_REG)
(unspec_volatile:CCZ [(const_int 0)] UNSPECV_CMPXCHG_4))]
""
- "lock{%;} cmpxchg<doublemodesuffix>b\t%2")
+ "lock{%;} %K7cmpxchg<doublemodesuffix>b\t%2")
;; Theoretically we'd like to use constraint "r" (any reg) for op5,
;; but that includes ecx. If op5 and op6 are the same (like when
@@ -430,7 +434,8 @@
(match_operand:SI 3 "register_operand" "0")
(match_operand:SI 4 "register_operand" "1")
(match_operand:SI 5 "register_operand" "SD")
- (match_operand:SI 6 "register_operand" "c")]
+ (match_operand:SI 6 "register_operand" "c")
+ (match_operand:SI 7 "const_int_operand")]
UNSPECV_CMPXCHG_1))
(set (match_operand:SI 1 "register_operand" "=d")
(unspec_volatile:SI [(const_int 0)] UNSPECV_CMPXCHG_2))
@@ -439,7 +444,7 @@
(set (reg:CCZ FLAGS_REG)
(unspec_volatile:CCZ [(const_int 0)] UNSPECV_CMPXCHG_4))]
"!TARGET_64BIT && TARGET_CMPXCHG8B && flag_pic"
- "xchg{l}\t%%ebx, %5\;lock{%;} cmpxchg8b\t%2\;xchg{l}\t%%ebx, %5")
+ "xchg{l}\t%%ebx, %5\;lock{%;} %K7cmpxchg8b\t%2\;xchg{l}\t%%ebx, %5")
;; For operand 2 nonmemory_operand predicate is used instead of
;; register_operand to allow combiner to better optimize atomic
@@ -455,7 +460,7 @@
(match_operand:SWI 2 "nonmemory_operand" "0")))
(clobber (reg:CC FLAGS_REG))]
"TARGET_XADD"
- "lock{%;} xadd{<imodesuffix>}\t{%0, %1|%1, %0}")
+ "lock{%;} %K3xadd{<imodesuffix>}\t{%0, %1|%1, %0}")
;; This peephole2 and following insn optimize
;; __sync_fetch_and_add (x, -N) == N into just lock {add,sub,inc,dec}
@@ -526,7 +531,7 @@
(set (match_dup 1)
(match_operand:SWI 2 "register_operand" "0"))] ;; input
""
- "xchg{<imodesuffix>}\t{%1, %0|%0, %1}")
+ "%K3xchg{<imodesuffix>}\t{%1, %0|%0, %1}")
(define_insn "atomic_add<mode>"
[(set (match_operand:SWI 0 "memory_operand" "+m")
@@ -541,15 +546,15 @@
if (TARGET_USE_INCDEC)
{
if (operands[1] == const1_rtx)
- return "lock{%;} inc{<imodesuffix>}\t%0";
+ return "lock{%;} %K2inc{<imodesuffix>}\t%0";
if (operands[1] == constm1_rtx)
- return "lock{%;} dec{<imodesuffix>}\t%0";
+ return "lock{%;} %K2dec{<imodesuffix>}\t%0";
}
if (x86_maybe_negate_const_int (&operands[1], <MODE>mode))
- return "lock{%;} sub{<imodesuffix>}\t{%1, %0|%0, %1}";
+ return "lock{%;} %K2sub{<imodesuffix>}\t{%1, %0|%0, %1}";
- return "lock{%;} add{<imodesuffix>}\t{%1, %0|%0, %1}";
+ return "lock{%;} %K2add{<imodesuffix>}\t{%1, %0|%0, %1}";
})
(define_insn "atomic_sub<mode>"
@@ -565,15 +570,15 @@
if (TARGET_USE_INCDEC)
{
if (operands[1] == const1_rtx)
- return "lock{%;} dec{<imodesuffix>}\t%0";
+ return "lock{%;} %K2dec{<imodesuffix>}\t%0";
if (operands[1] == constm1_rtx)
- return "lock{%;} inc{<imodesuffix>}\t%0";
+ return "lock{%;} %K2inc{<imodesuffix>}\t%0";
}
if (x86_maybe_negate_const_int (&operands[1], <MODE>mode))
- return "lock{%;} add{<imodesuffix>}\t{%1, %0|%0, %1}";
+ return "lock{%;} %K2add{<imodesuffix>}\t{%1, %0|%0, %1}";
- return "lock{%;} sub{<imodesuffix>}\t{%1, %0|%0, %1}";
+ return "lock{%;} %K2sub{<imodesuffix>}\t{%1, %0|%0, %1}";
})
(define_insn "atomic_<logic><mode>"
@@ -585,4 +590,4 @@
UNSPECV_LOCK))
(clobber (reg:CC FLAGS_REG))]
""
- "lock{%;} <logic>{<imodesuffix>}\t{%1, %0|%0, %1}")
+ "lock{%;} %K2<logic>{<imodesuffix>}\t{%1, %0|%0, %1}")
diff --git a/gcc/config/ia64/hpux.h b/gcc/config/ia64/hpux.h
index edbf339fbf1..ad106b4dee2 100644
--- a/gcc/config/ia64/hpux.h
+++ b/gcc/config/ia64/hpux.h
@@ -228,3 +228,10 @@ do { \
#define TARGET_ASM_FUNCTION_SECTION ia64_hpux_function_section
#define TARGET_POSIX_IO
+
+/* Define this to be nonzero if static stack checking is supported. */
+#define STACK_CHECK_STATIC_BUILTIN 1
+
+/* Minimum amount of stack required to recover from an anticipated stack
+ overflow detection. */
+#define STACK_CHECK_PROTECT (24 * 1024)
diff --git a/gcc/config/ia64/ia64-protos.h b/gcc/config/ia64/ia64-protos.h
index f7bd4c60240..458b1201c94 100644
--- a/gcc/config/ia64/ia64-protos.h
+++ b/gcc/config/ia64/ia64-protos.h
@@ -61,6 +61,7 @@ extern int ia64_hard_regno_rename_ok (int, int);
extern enum reg_class ia64_secondary_reload_class (enum reg_class,
enum machine_mode, rtx);
extern const char *get_bundle_name (int);
+extern const char *output_probe_stack_range (rtx, rtx);
extern void ia64_expand_vec_perm_even_odd (rtx, rtx, rtx, int);
extern bool ia64_expand_vec_perm_const (rtx op[4]);
diff --git a/gcc/config/ia64/ia64.c b/gcc/config/ia64/ia64.c
index c6595064606..ccffa37fd87 100644
--- a/gcc/config/ia64/ia64.c
+++ b/gcc/config/ia64/ia64.c
@@ -47,6 +47,7 @@ along with GCC; see the file COPYING3. If not see
#include "timevar.h"
#include "target.h"
#include "target-def.h"
+#include "common/common-target.h"
#include "tm_p.h"
#include "hashtab.h"
#include "langhooks.h"
@@ -272,6 +273,7 @@ static int get_template (state_t, int);
static rtx get_next_important_insn (rtx, rtx);
static bool important_for_bundling_p (rtx);
+static bool unknown_for_bundling_p (rtx);
static void bundling (FILE *, int, rtx, rtx);
static void ia64_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
@@ -1104,7 +1106,7 @@ ia64_expand_load_address (rtx dest, rtx src)
if (lo != 0)
{
addend = lo;
- src = plus_constant (XEXP (XEXP (src, 0), 0), hi);
+ src = plus_constant (Pmode, XEXP (XEXP (src, 0), 0), hi);
}
}
@@ -1213,7 +1215,7 @@ ia64_expand_tls_address (enum tls_model tls_kind, rtx op0, rtx op1,
addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
addend_hi = addend - addend_lo;
- op1 = plus_constant (op1, addend_hi);
+ op1 = plus_constant (Pmode, op1, addend_hi);
addend = addend_lo;
tmp = gen_reg_rtx (Pmode);
@@ -1290,7 +1292,7 @@ ia64_expand_move (rtx op0, rtx op1)
if (addend_lo != 0)
{
- op1 = plus_constant (sym, addend_hi);
+ op1 = plus_constant (mode, sym, addend_hi);
addend = addend_lo;
}
else
@@ -1441,7 +1443,8 @@ ia64_split_tmode (rtx out[2], rtx in, bool reversed, bool dead)
(in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
out[1] = adjust_automodify_address
(in, DImode,
- gen_rtx_POST_MODIFY (Pmode, base, plus_constant (base, -24)),
+ gen_rtx_POST_MODIFY (Pmode, base,
+ plus_constant (Pmode, base, -24)),
8);
break;
@@ -2671,6 +2674,10 @@ ia64_compute_frame_size (HOST_WIDE_INT size)
if (cfun->machine->ia64_eh_epilogue_bsp)
mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_bsp, NULL);
+ /* Static stack checking uses r2 and r3. */
+ if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
+ current_frame_info.gr_used_mask |= 0xc;
+
/* Find the size of the register stack frame. We have only 80 local
registers, because we reserve 8 for the inputs and 8 for the
outputs. */
@@ -3190,7 +3197,8 @@ do_spill (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off,
add_reg_note (insn, REG_CFA_OFFSET,
gen_rtx_SET (VOIDmode,
gen_rtx_MEM (GET_MODE (reg),
- plus_constant (base, off)),
+ plus_constant (Pmode,
+ base, off)),
frame_reg));
}
}
@@ -3228,6 +3236,213 @@ gen_fr_restore_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
return gen_fr_restore (dest, src);
}
+#define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
+
+/* See Table 6.2 of the IA-64 Software Developer Manual, Volume 2. */
+#define BACKING_STORE_SIZE(N) ((N) > 0 ? ((N) + (N)/63 + 1) * 8 : 0)
+
+/* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
+ inclusive. These are offsets from the current stack pointer. SOL is the
+ size of local registers. ??? This clobbers r2 and r3. */
+
+static void
+ia64_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size, int sol)
+{
+ /* On the IA-64 there is a second stack in memory, namely the Backing Store
+ of the Register Stack Engine. We also need to probe it after checking
+ that the 2 stacks don't overlap. */
+ const int bs_size = BACKING_STORE_SIZE (sol);
+ rtx r2 = gen_rtx_REG (Pmode, GR_REG (2));
+ rtx r3 = gen_rtx_REG (Pmode, GR_REG (3));
+
+ /* Detect collision of the 2 stacks if necessary. */
+ if (bs_size > 0 || size > 0)
+ {
+ rtx p6 = gen_rtx_REG (BImode, PR_REG (6));
+
+ emit_insn (gen_bsp_value (r3));
+ emit_move_insn (r2, GEN_INT (-(first + size)));
+
+ /* Compare current value of BSP and SP registers. */
+ emit_insn (gen_rtx_SET (VOIDmode, p6,
+ gen_rtx_fmt_ee (LTU, BImode,
+ r3, stack_pointer_rtx)));
+
+ /* Compute the address of the probe for the Backing Store (which grows
+ towards higher addresses). We probe only at the first offset of
+ the next page because some OS (eg Linux/ia64) only extend the
+ backing store when this specific address is hit (but generate a SEGV
+ on other address). Page size is the worst case (4KB). The reserve
+ size is at least 4096 - (96 + 2) * 8 = 3312 bytes, which is enough.
+ Also compute the address of the last probe for the memory stack
+ (which grows towards lower addresses). */
+ emit_insn (gen_rtx_SET (VOIDmode, r3, plus_constant (r3, 4095)));
+ emit_insn (gen_rtx_SET (VOIDmode, r2,
+ gen_rtx_PLUS (Pmode, stack_pointer_rtx, r2)));
+
+ /* Compare them and raise SEGV if the former has topped the latter. */
+ emit_insn (gen_rtx_COND_EXEC (VOIDmode,
+ gen_rtx_fmt_ee (NE, VOIDmode, p6,
+ const0_rtx),
+ gen_rtx_SET (VOIDmode, p6,
+ gen_rtx_fmt_ee (GEU, BImode,
+ r3, r2))));
+ emit_insn (gen_rtx_SET (VOIDmode,
+ gen_rtx_ZERO_EXTRACT (DImode, r3, GEN_INT (12),
+ const0_rtx),
+ const0_rtx));
+ emit_insn (gen_rtx_COND_EXEC (VOIDmode,
+ gen_rtx_fmt_ee (NE, VOIDmode, p6,
+ const0_rtx),
+ gen_rtx_TRAP_IF (VOIDmode, const1_rtx,
+ GEN_INT (11))));
+ }
+
+ /* Probe the Backing Store if necessary. */
+ if (bs_size > 0)
+ emit_stack_probe (r3);
+
+ /* Probe the memory stack if necessary. */
+ if (size == 0)
+ ;
+
+ /* See if we have a constant small number of probes to generate. If so,
+ that's the easy case. */
+ else if (size <= PROBE_INTERVAL)
+ emit_stack_probe (r2);
+
+ /* The run-time loop is made up of 8 insns in the generic case while this
+ compile-time loop is made up of 5+2*(n-2) insns for n # of intervals. */
+ else if (size <= 4 * PROBE_INTERVAL)
+ {
+ HOST_WIDE_INT i;
+
+ emit_move_insn (r2, GEN_INT (-(first + PROBE_INTERVAL)));
+ emit_insn (gen_rtx_SET (VOIDmode, r2,
+ gen_rtx_PLUS (Pmode, stack_pointer_rtx, r2)));
+ emit_stack_probe (r2);
+
+ /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 2 until
+ it exceeds SIZE. If only two probes are needed, this will not
+ generate any code. Then probe at FIRST + SIZE. */
+ for (i = 2 * PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
+ {
+ emit_insn (gen_rtx_SET (VOIDmode, r2,
+ plus_constant (r2, -PROBE_INTERVAL)));
+ emit_stack_probe (r2);
+ }
+
+ emit_insn (gen_rtx_SET (VOIDmode, r2,
+ plus_constant (r2,
+ (i - PROBE_INTERVAL) - size)));
+ emit_stack_probe (r2);
+ }
+
+ /* Otherwise, do the same as above, but in a loop. Note that we must be
+ extra careful with variables wrapping around because we might be at
+ the very top (or the very bottom) of the address space and we have
+ to be able to handle this case properly; in particular, we use an
+ equality test for the loop condition. */
+ else
+ {
+ HOST_WIDE_INT rounded_size;
+
+ emit_move_insn (r2, GEN_INT (-first));
+
+
+ /* Step 1: round SIZE to the previous multiple of the interval. */
+
+ rounded_size = size & -PROBE_INTERVAL;
+
+
+ /* Step 2: compute initial and final value of the loop counter. */
+
+ /* TEST_ADDR = SP + FIRST. */
+ emit_insn (gen_rtx_SET (VOIDmode, r2,
+ gen_rtx_PLUS (Pmode, stack_pointer_rtx, r2)));
+
+ /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
+ if (rounded_size > (1 << 21))
+ {
+ emit_move_insn (r3, GEN_INT (-rounded_size));
+ emit_insn (gen_rtx_SET (VOIDmode, r3, gen_rtx_PLUS (Pmode, r2, r3)));
+ }
+ else
+ emit_insn (gen_rtx_SET (VOIDmode, r3,
+ gen_rtx_PLUS (Pmode, r2,
+ GEN_INT (-rounded_size))));
+
+
+ /* Step 3: the loop
+
+ while (TEST_ADDR != LAST_ADDR)
+ {
+ TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
+ probe at TEST_ADDR
+ }
+
+ probes at FIRST + N * PROBE_INTERVAL for values of N from 1
+ until it is equal to ROUNDED_SIZE. */
+
+ emit_insn (gen_probe_stack_range (r2, r2, r3));
+
+
+ /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
+ that SIZE is equal to ROUNDED_SIZE. */
+
+ /* TEMP = SIZE - ROUNDED_SIZE. */
+ if (size != rounded_size)
+ {
+ emit_insn (gen_rtx_SET (VOIDmode, r2,
+ plus_constant (r2, rounded_size - size)));
+ emit_stack_probe (r2);
+ }
+ }
+
+ /* Make sure nothing is scheduled before we are done. */
+ emit_insn (gen_blockage ());
+}
+
+/* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
+ absolute addresses. */
+
+const char *
+output_probe_stack_range (rtx reg1, rtx reg2)
+{
+ static int labelno = 0;
+ char loop_lab[32], end_lab[32];
+ rtx xops[3];
+
+ ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
+ ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
+
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
+
+ /* Jump to END_LAB if TEST_ADDR == LAST_ADDR. */
+ xops[0] = reg1;
+ xops[1] = reg2;
+ xops[2] = gen_rtx_REG (BImode, PR_REG (6));
+ output_asm_insn ("cmp.eq %2, %I2 = %0, %1", xops);
+ fprintf (asm_out_file, "\t(%s) br.cond.dpnt ", reg_names [REGNO (xops[2])]);
+ assemble_name_raw (asm_out_file, end_lab);
+ fputc ('\n', asm_out_file);
+
+ /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
+ xops[1] = GEN_INT (-PROBE_INTERVAL);
+ output_asm_insn ("addl %0 = %1, %0", xops);
+ fputs ("\t;;\n", asm_out_file);
+
+ /* Probe at TEST_ADDR and branch. */
+ output_asm_insn ("probe.w.fault %0, 0", xops);
+ fprintf (asm_out_file, "\tbr ");
+ assemble_name_raw (asm_out_file, loop_lab);
+ fputc ('\n', asm_out_file);
+
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
+
+ return "";
+}
+
/* Called after register allocation to add any instructions needed for the
prologue. Using a prologue insn is favored compared to putting all of the
instructions in output_function_prologue(), since it allows the scheduler
@@ -3263,6 +3478,12 @@ ia64_expand_prologue (void)
if (flag_stack_usage_info)
current_function_static_stack_size = current_frame_info.total_size;
+ if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
+ ia64_emit_probe_stack_range (STACK_CHECK_PROTECT,
+ current_frame_info.total_size,
+ current_frame_info.n_input_regs
+ + current_frame_info.n_local_regs);
+
if (dump_file)
{
fprintf (dump_file, "ia64 frame related registers "
@@ -4253,7 +4474,7 @@ ia64_trampoline_init (rtx m_tramp, tree fndecl, rtx static_chain)
emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
m_tramp = adjust_automodify_address (m_tramp, VOIDmode, NULL, 8);
- emit_move_insn (m_tramp, force_reg (Pmode, plus_constant (addr, 16)));
+ emit_move_insn (m_tramp, force_reg (Pmode, plus_constant (Pmode, addr, 16)));
emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
m_tramp = adjust_automodify_address (m_tramp, VOIDmode, NULL, 8);
@@ -6527,6 +6748,7 @@ rtx_needs_barrier (rtx x, struct reg_flags flags, int pred)
return 1;
case UNSPECV_SET_BSP:
+ case UNSPECV_PROBE_STACK_RANGE:
need_barrier = 1;
break;
@@ -6537,6 +6759,10 @@ rtx_needs_barrier (rtx x, struct reg_flags flags, int pred)
case UNSPECV_PSAC_NORMAL:
return 0;
+ case UNSPECV_PROBE_STACK_ADDRESS:
+ need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
+ break;
+
default:
gcc_unreachable ();
}
@@ -6698,10 +6924,7 @@ group_barrier_needed (rtx insn)
gcc_unreachable ();
}
- if (first_instruction && INSN_P (insn)
- && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
- && GET_CODE (PATTERN (insn)) != USE
- && GET_CODE (PATTERN (insn)) != CLOBBER)
+ if (first_instruction && important_for_bundling_p (insn))
{
need_barrier = 0;
first_instruction = 0;
@@ -7395,8 +7618,7 @@ ia64_dfa_new_cycle (FILE *dump, int verbose, rtx insn, int last_clock,
&& scheduled_good_insn (last_scheduled_insn))))
|| (last_scheduled_insn
&& (GET_CODE (last_scheduled_insn) == CALL_INSN
- || GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
- || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)))
+ || unknown_for_bundling_p (last_scheduled_insn))))
{
init_insn_group_barriers ();
@@ -7421,8 +7643,7 @@ ia64_dfa_new_cycle (FILE *dump, int verbose, rtx insn, int last_clock,
if (last_scheduled_insn)
{
- if (GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
- || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)
+ if (unknown_for_bundling_p (last_scheduled_insn))
state_reset (curr_state);
else
{
@@ -8538,8 +8759,7 @@ issue_nops_and_insn (struct bundle_state *originator, int before_nops_num,
if (!try_issue_insn (curr_state, insn))
return;
curr_state->accumulated_insns_num++;
- gcc_assert (GET_CODE (PATTERN (insn)) != ASM_INPUT
- && asm_noperands (PATTERN (insn)) < 0);
+ gcc_assert (!unknown_for_bundling_p (insn));
if (ia64_safe_type (insn) == TYPE_L)
curr_state->accumulated_insns_num++;
@@ -8565,8 +8785,7 @@ issue_nops_and_insn (struct bundle_state *originator, int before_nops_num,
if (!try_issue_insn (curr_state, insn))
return;
curr_state->accumulated_insns_num++;
- if (GET_CODE (PATTERN (insn)) == ASM_INPUT
- || asm_noperands (PATTERN (insn)) >= 0)
+ if (unknown_for_bundling_p (insn))
{
/* Finish bundle containing asm insn. */
curr_state->after_nops_num
@@ -8700,6 +8919,7 @@ get_template (state_t state, int pos)
}
/* True when INSN is important for bundling. */
+
static bool
important_for_bundling_p (rtx insn)
{
@@ -8721,6 +8941,17 @@ get_next_important_insn (rtx insn, rtx tail)
return NULL_RTX;
}
+/* True when INSN is unknown, but important, for bundling. */
+
+static bool
+unknown_for_bundling_p (rtx insn)
+{
+ return (INSN_P (insn)
+ && ia64_safe_itanium_class (insn) == ITANIUM_CLASS_UNKNOWN
+ && GET_CODE (PATTERN (insn)) != USE
+ && GET_CODE (PATTERN (insn)) != CLOBBER);
+}
+
/* Add a bundle selector TEMPLATE0 before INSN. */
static void
@@ -8848,19 +9079,14 @@ bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
insn != tail;
insn = NEXT_INSN (insn))
if (INSN_P (insn)
- && (ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
- || GET_CODE (PATTERN (insn)) == USE
- || GET_CODE (PATTERN (insn)) == CLOBBER)
+ && !important_for_bundling_p (insn)
&& GET_MODE (insn) == TImode)
{
PUT_MODE (insn, VOIDmode);
for (next_insn = NEXT_INSN (insn);
next_insn != tail;
next_insn = NEXT_INSN (next_insn))
- if (INSN_P (next_insn)
- && ia64_safe_itanium_class (next_insn) != ITANIUM_CLASS_IGNORE
- && GET_CODE (PATTERN (next_insn)) != USE
- && GET_CODE (PATTERN (next_insn)) != CLOBBER
+ if (important_for_bundling_p (next_insn)
&& INSN_CODE (next_insn) != CODE_FOR_insn_group_barrier)
{
PUT_MODE (next_insn, TImode);
@@ -8872,10 +9098,7 @@ bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
insn != NULL_RTX;
insn = next_insn)
{
- gcc_assert (INSN_P (insn)
- && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
- && GET_CODE (PATTERN (insn)) != USE
- && GET_CODE (PATTERN (insn)) != CLOBBER);
+ gcc_assert (important_for_bundling_p (insn));
type = ia64_safe_type (insn);
next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
insn_num++;
@@ -8892,7 +9115,7 @@ bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
only_bundle_end_p
= (next_insn != NULL_RTX
&& INSN_CODE (insn) == CODE_FOR_insn_group_barrier
- && ia64_safe_type (next_insn) == TYPE_UNKNOWN);
+ && unknown_for_bundling_p (next_insn));
/* We may fill up the current bundle if it is the cycle end
without a group barrier. */
bundle_end_p
@@ -8976,8 +9199,7 @@ bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
curr_state = curr_state->originator)
{
insn = curr_state->insn;
- asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
- || asm_noperands (PATTERN (insn)) >= 0);
+ asm_p = unknown_for_bundling_p (insn);
insn_num++;
if (verbose >= 2 && dump)
{
@@ -9053,8 +9275,7 @@ bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
/* Move the position backward in the window. Group barrier has
no slot. Asm insn takes all bundle. */
if (INSN_CODE (insn) != CODE_FOR_insn_group_barrier
- && GET_CODE (PATTERN (insn)) != ASM_INPUT
- && asm_noperands (PATTERN (insn)) < 0)
+ && !unknown_for_bundling_p (insn))
pos--;
/* Long insn takes 2 slots. */
if (ia64_safe_type (insn) == TYPE_L)
@@ -9062,8 +9283,7 @@ bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
gcc_assert (pos >= 0);
if (pos % 3 == 0
&& INSN_CODE (insn) != CODE_FOR_insn_group_barrier
- && GET_CODE (PATTERN (insn)) != ASM_INPUT
- && asm_noperands (PATTERN (insn)) < 0)
+ && !unknown_for_bundling_p (insn))
{
/* The current insn is at the bundle start: emit the
template. */
@@ -9137,8 +9357,7 @@ bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
if (recog_memoized (insn) == CODE_FOR_insn_group_barrier
&& !start_bundle && !end_bundle
&& next_insn
- && GET_CODE (PATTERN (next_insn)) != ASM_INPUT
- && asm_noperands (PATTERN (next_insn)) < 0)
+ && !unknown_for_bundling_p (next_insn))
num--;
start_bundle = false;
@@ -9268,8 +9487,7 @@ final_emit_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
&& important_for_bundling_p (insn))
seen_good_insn = 1;
need_barrier_p = (GET_CODE (insn) == CALL_INSN
- || GET_CODE (PATTERN (insn)) == ASM_INPUT
- || asm_noperands (PATTERN (insn)) >= 0);
+ || unknown_for_bundling_p (insn));
}
}
}
diff --git a/gcc/config/ia64/ia64.md b/gcc/config/ia64/ia64.md
index 349da7ba996..aa5e78636ea 100644
--- a/gcc/config/ia64/ia64.md
+++ b/gcc/config/ia64/ia64.md
@@ -105,6 +105,8 @@
UNSPECV_PSAC_NORMAL
UNSPECV_SETJMP_RECEIVER
UNSPECV_GOTO_RECEIVER
+ UNSPECV_PROBE_STACK_ADDRESS
+ UNSPECV_PROBE_STACK_RANGE
])
(include "predicates.md")
@@ -5182,6 +5184,26 @@
"mov %0 = ip"
[(set_attr "itanium_class" "frbr")])
+;;
+;; Stack checking
+
+(define_insn "probe_stack_address"
+ [(unspec_volatile [(match_operand:DI 0 "register_operand" "r")]
+ UNSPECV_PROBE_STACK_ADDRESS)]
+ ""
+ "probe.w.fault %0, 0"
+[(set_attr "itanium_class" "chk_s_i")])
+
+(define_insn "probe_stack_range"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec_volatile:DI [(match_operand:DI 1 "register_operand" "0")
+ (match_operand:DI 2 "register_operand" "r")]
+ UNSPECV_PROBE_STACK_RANGE))]
+ ""
+ "* return output_probe_stack_range (operands[0], operands[2]);"
+ [(set_attr "itanium_class" "unknown")
+ (set_attr "predicable" "no")])
+
;; Vector operations
(include "vect.md")
;; Atomic operations
diff --git a/gcc/config/ia64/linux.h b/gcc/config/ia64/linux.h
index 00b0ddba1a0..0e3b9be459c 100644
--- a/gcc/config/ia64/linux.h
+++ b/gcc/config/ia64/linux.h
@@ -86,3 +86,6 @@ do { \
#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS ia64_soft_fp_init_libfuncs
+
+/* Define this to be nonzero if static stack checking is supported. */
+#define STACK_CHECK_STATIC_BUILTIN 1
diff --git a/gcc/config/iq2000/iq2000.c b/gcc/config/iq2000/iq2000.c
index 7d8630e6bbb..e75eb2f806a 100644
--- a/gcc/config/iq2000/iq2000.c
+++ b/gcc/config/iq2000/iq2000.c
@@ -1412,7 +1412,7 @@ iq2000_va_start (tree valist, rtx nextarg)
/* Everything is in the GPR save area, or in the overflow
area which is contiguous with it. */
- nextarg = plus_constant (nextarg, - gpr_save_area_size);
+ nextarg = plus_constant (Pmode, nextarg, - gpr_save_area_size);
std_expand_builtin_va_start (valist, nextarg);
}
@@ -1783,7 +1783,7 @@ iq2000_annotate_frame_insn (rtx insn, rtx dwarf_pattern)
static void
iq2000_emit_frame_related_store (rtx mem, rtx reg, HOST_WIDE_INT offset)
{
- rtx dwarf_address = plus_constant (stack_pointer_rtx, offset);
+ rtx dwarf_address = plus_constant (Pmode, stack_pointer_rtx, offset);
rtx dwarf_mem = gen_rtx_MEM (GET_MODE (reg), dwarf_address);
iq2000_annotate_frame_insn (emit_move_insn (mem, reg),
@@ -2031,7 +2031,8 @@ iq2000_expand_prologue (void)
adjustment_rtx));
dwarf_pattern = gen_rtx_SET (Pmode, stack_pointer_rtx,
- plus_constant (stack_pointer_rtx, -tsize));
+ plus_constant (Pmode, stack_pointer_rtx,
+ -tsize));
iq2000_annotate_frame_insn (insn, dwarf_pattern);
@@ -2120,7 +2121,7 @@ iq2000_expand_eh_return (rtx address)
HOST_WIDE_INT gp_offset = cfun->machine->gp_sp_offset;
rtx scratch;
- scratch = plus_constant (stack_pointer_rtx, gp_offset);
+ scratch = plus_constant (Pmode, stack_pointer_rtx, gp_offset);
emit_move_insn (gen_rtx_MEM (GET_MODE (address), scratch), address);
}
@@ -2873,9 +2874,9 @@ iq2000_setup_incoming_varargs (cumulative_args_t cum_v,
if (cum->arg_words < MAX_ARGS_IN_REGISTERS - iq2000_off)
{
rtx ptr, mem;
- ptr = plus_constant (virtual_incoming_args_rtx,
- - (iq2000_save_gp_regs
- * UNITS_PER_WORD));
+ ptr = plus_constant (Pmode, virtual_incoming_args_rtx,
+ - (iq2000_save_gp_regs
+ * UNITS_PER_WORD));
mem = gen_rtx_MEM (BLKmode, ptr);
move_block_from_reg
(cum->arg_words + GP_ARG_FIRST + iq2000_off,
@@ -3165,7 +3166,7 @@ iq2000_print_operand (FILE *file, rtx op, int letter)
else if (code == MEM)
{
if (letter == 'D')
- output_address (plus_constant (XEXP (op, 0), 4));
+ output_address (plus_constant (Pmode, XEXP (op, 0), 4));
else
output_address (XEXP (op, 0));
}
@@ -3273,7 +3274,7 @@ iq2000_legitimize_address (rtx xinsn, rtx old_x ATTRIBUTE_UNUSED,
ptr_reg,
gen_rtx_PLUS (Pmode, xplus0, int_reg)));
- return plus_constant (ptr_reg, INTVAL (xplus1) & 0x7fff);
+ return plus_constant (Pmode, ptr_reg, INTVAL (xplus1) & 0x7fff);
}
}
diff --git a/gcc/config/lm32/lm32.c b/gcc/config/lm32/lm32.c
index 0d0ee5f9a9e..2e1cf445428 100644
--- a/gcc/config/lm32/lm32.c
+++ b/gcc/config/lm32/lm32.c
@@ -715,7 +715,7 @@ lm32_setup_incoming_varargs (cumulative_args_t cum_v, enum machine_mode mode,
rtx regblock;
regblock = gen_rtx_MEM (BLKmode,
- plus_constant (arg_pointer_rtx,
+ plus_constant (Pmode, arg_pointer_rtx,
FIRST_PARM_OFFSET (0)));
move_block_from_reg (first_reg_offset, regblock, size);
diff --git a/gcc/config/m32c/bitops.md b/gcc/config/m32c/bitops.md
index 3c8e8427b28..060362de71e 100644
--- a/gcc/config/m32c/bitops.md
+++ b/gcc/config/m32c/bitops.md
@@ -43,11 +43,11 @@
[(set (match_operand:QI 0 "memsym_operand" "+Si")
(ior:QI (subreg:QI (ashift:HI (const_int 1)
(subreg:QI (match_operand:HI 1 "a_qi_operand" "Raa") 0)) 0)
- (match_operand:QI 2 "memsym_operand" "0")))]
+ (match_dup 0)))]
"TARGET_A16"
"bset\t%0[%1]"
[(set_attr "flags" "n")]
- )
+ )
(define_insn "bset_hi"
[(set (zero_extract:HI (match_operand:QI 0 "memsym_operand" "+Si")
diff --git a/gcc/config/m32c/m32c.c b/gcc/config/m32c/m32c.c
index 57586474d9c..79b03fa0650 100644
--- a/gcc/config/m32c/m32c.c
+++ b/gcc/config/m32c/m32c.c
@@ -1178,7 +1178,8 @@ m32c_return_addr_rtx (int count)
}
ra_mem =
- gen_rtx_MEM (mode, plus_constant (gen_rtx_REG (Pmode, FP_REGNO), offset));
+ gen_rtx_MEM (mode, plus_constant (Pmode, gen_rtx_REG (Pmode, FP_REGNO),
+ offset));
return copy_to_mode_reg (mode, ra_mem);
}
@@ -4178,7 +4179,8 @@ m32c_expand_insv (rtx *operands)
&& GET_CODE (op0) == MEM)
{
/* We are little endian. */
- rtx new_mem = gen_rtx_MEM (QImode, plus_constant (XEXP (op0, 0), 1));
+ rtx new_mem = gen_rtx_MEM (QImode, plus_constant (Pmode,
+ XEXP (op0, 0), 1));
MEM_COPY_ATTRIBUTES (new_mem, op0);
mask >>= 8;
}
diff --git a/gcc/config/m32r/m32r.c b/gcc/config/m32r/m32r.c
index 97636703602..b27a3a5bb89 100644
--- a/gcc/config/m32r/m32r.c
+++ b/gcc/config/m32r/m32r.c
@@ -1293,7 +1293,7 @@ m32r_setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
rtx regblock;
regblock = gen_frame_mem (BLKmode,
- plus_constant (arg_pointer_rtx,
+ plus_constant (Pmode, arg_pointer_rtx,
FIRST_PARM_OFFSET (0)));
set_mem_alias_set (regblock, get_varargs_alias_set ());
move_block_from_reg (first_reg_offset, regblock, size);
@@ -1984,7 +1984,7 @@ m32r_legitimize_pic_address (rtx orig, rtx reg)
if (CONST_INT_P (offset))
{
if (INT16_P (INTVAL (offset)))
- return plus_constant (base, INTVAL (offset));
+ return plus_constant (Pmode, base, INTVAL (offset));
else
{
gcc_assert (! reload_in_progress && ! reload_completed);
@@ -2087,9 +2087,9 @@ m32r_print_operand (FILE * file, rtx x, int code)
currently necessary, but keep it around. */
if (GET_CODE (XEXP (x, 0)) == PRE_INC
|| GET_CODE (XEXP (x, 0)) == PRE_DEC)
- output_address (plus_constant (XEXP (XEXP (x, 0), 0), 4));
+ output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 4));
else
- output_address (plus_constant (XEXP (x, 0), 4));
+ output_address (plus_constant (Pmode, XEXP (x, 0), 4));
fputc (')', file);
}
else
@@ -2327,7 +2327,8 @@ m32r_print_operand_address (FILE * file, rtx addr)
fputs ("sda(", file);
else
fputs ("low(", file);
- output_addr_const (file, plus_constant (XEXP (base, 1), offset));
+ output_addr_const (file, plus_constant (Pmode, XEXP (base, 1),
+ offset));
fputs ("),", file);
fputs (reg_names[REGNO (XEXP (base, 0))], file);
}
diff --git a/gcc/config/m68k/linux.h b/gcc/config/m68k/linux.h
index 325faf73a75..8f5b5057bf9 100644
--- a/gcc/config/m68k/linux.h
+++ b/gcc/config/m68k/linux.h
@@ -190,7 +190,8 @@ along with GCC; see the file COPYING3. If not see
#define FINALIZE_TRAMPOLINE(TRAMP) \
emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__clear_cache"), \
LCT_NORMAL, VOIDmode, 2, TRAMP, Pmode, \
- plus_constant (TRAMP, TRAMPOLINE_SIZE), Pmode);
+ plus_constant (Pmode, TRAMP, TRAMPOLINE_SIZE), \
+ Pmode);
/* Clear the instruction cache from `beg' to `end'. This makes an
inline system call to SYS_cacheflush. The arguments are as
diff --git a/gcc/config/m68k/m68k-protos.h b/gcc/config/m68k/m68k-protos.h
index df1888628cc..c77958812dc 100644
--- a/gcc/config/m68k/m68k-protos.h
+++ b/gcc/config/m68k/m68k-protos.h
@@ -81,7 +81,6 @@ extern enum attr_opx_type m68k_sched_attr_opx_type (rtx, int);
extern enum attr_opy_type m68k_sched_attr_opy_type (rtx, int);
extern enum attr_size m68k_sched_attr_size (rtx);
extern enum attr_op_mem m68k_sched_attr_op_mem (rtx);
-extern enum attr_type m68k_sched_branch_type (rtx);
#endif /* HAVE_ATTR_cpu */
#endif /* RTX_CODE */
diff --git a/gcc/config/m68k/m68k.c b/gcc/config/m68k/m68k.c
index 6fbd7001666..c6b2ce3aff3 100644
--- a/gcc/config/m68k/m68k.c
+++ b/gcc/config/m68k/m68k.c
@@ -916,16 +916,17 @@ m68k_emit_movem (rtx base, HOST_WIDE_INT offset,
if (adjust_stack_p)
{
- src = plus_constant (base, (count
- * GET_MODE_SIZE (mode)
- * (HOST_WIDE_INT) (store_p ? -1 : 1)));
+ src = plus_constant (Pmode, base,
+ (count
+ * GET_MODE_SIZE (mode)
+ * (HOST_WIDE_INT) (store_p ? -1 : 1)));
XVECEXP (body, 0, i++) = gen_rtx_SET (VOIDmode, base, src);
}
for (; mask != 0; mask >>= 1, regno++)
if (mask & 1)
{
- addr = plus_constant (base, offset);
+ addr = plus_constant (Pmode, base, offset);
operands[!store_p] = gen_frame_mem (mode, addr);
operands[store_p] = gen_rtx_REG (mode, regno);
XVECEXP (body, 0, i++)
@@ -971,7 +972,7 @@ m68k_expand_prologue (void)
if (crtl->limit_stack
&& GET_CODE (stack_limit_rtx) == SYMBOL_REF)
{
- limit = plus_constant (stack_limit_rtx, current_frame.size + 4);
+ limit = plus_constant (Pmode, stack_limit_rtx, current_frame.size + 4);
if (!m68k_legitimate_constant_p (Pmode, limit))
{
emit_move_insn (gen_rtx_REG (Pmode, D0_REG), limit);
@@ -1205,12 +1206,12 @@ m68k_expand_epilogue (bool sibcall_p)
/* Generate the address -OFFSET(%fp,%a1.l). */
addr = gen_rtx_REG (Pmode, A1_REG);
addr = gen_rtx_PLUS (Pmode, addr, frame_pointer_rtx);
- addr = plus_constant (addr, -offset);
+ addr = plus_constant (Pmode, addr, -offset);
}
else if (restore_from_sp)
addr = gen_rtx_POST_INC (Pmode, stack_pointer_rtx);
else
- addr = plus_constant (frame_pointer_rtx, -offset);
+ addr = plus_constant (Pmode, frame_pointer_rtx, -offset);
emit_move_insn (gen_rtx_REG (SImode, D0_REG + i),
gen_frame_mem (SImode, addr));
offset -= GET_MODE_SIZE (SImode);
@@ -2450,7 +2451,7 @@ legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
base == reg ? 0 : reg);
if (GET_CODE (orig) == CONST_INT)
- pic_ref = plus_constant (base, INTVAL (orig));
+ pic_ref = plus_constant (Pmode, base, INTVAL (orig));
else
pic_ref = gen_rtx_PLUS (Pmode, base, orig);
}
@@ -5035,7 +5036,8 @@ m68k_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
reload_completed = 1;
/* The "this" pointer is stored at 4(%sp). */
- this_slot = gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx, 4));
+ this_slot = gen_rtx_MEM (Pmode, plus_constant (Pmode,
+ stack_pointer_rtx, 4));
/* Add DELTA to THIS. */
if (delta != 0)
@@ -5060,7 +5062,7 @@ m68k_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
/* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
- addr = plus_constant (tmp, vcall_offset);
+ addr = plus_constant (Pmode, tmp, vcall_offset);
if (!m68k_legitimate_address_p (Pmode, addr, true))
{
emit_insn (gen_rtx_SET (VOIDmode, tmp, addr));
@@ -5880,26 +5882,6 @@ m68k_sched_attr_op_mem (rtx insn)
return OP_MEM_I1;
}
-/* Jump instructions types. Indexed by INSN_UID.
- The same rtl insn can be expanded into different asm instructions
- depending on the cc0_status. To properly determine type of jump
- instructions we scan instruction stream and map jumps types to this
- array. */
-static enum attr_type *sched_branch_type;
-
-/* Return the type of the jump insn. */
-enum attr_type
-m68k_sched_branch_type (rtx insn)
-{
- enum attr_type type;
-
- type = sched_branch_type[INSN_UID (insn)];
-
- gcc_assert (type != 0);
-
- return type;
-}
-
/* Data for ColdFire V4 index bypass.
Producer modifies register that is used as index in consumer with
specified scale. */
@@ -6123,20 +6105,6 @@ m68k_sched_md_init_global (FILE *sched_dump ATTRIBUTE_UNUSED,
int sched_verbose ATTRIBUTE_UNUSED,
int n_insns ATTRIBUTE_UNUSED)
{
- /* Init branch types. */
- {
- rtx insn;
-
- sched_branch_type = XCNEWVEC (enum attr_type, get_max_uid () + 1);
-
- for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn))
- {
- if (JUMP_P (insn))
- /* !!! FIXME: Implement real scan here. */
- sched_branch_type[INSN_UID (insn)] = TYPE_BCC;
- }
- }
-
#ifdef ENABLE_CHECKING
/* Check that all instructions have DFA reservations and
that all instructions can be issued from a clean state. */
@@ -6218,9 +6186,6 @@ m68k_sched_md_finish_global (FILE *dump ATTRIBUTE_UNUSED,
sched_ib.records.adjust = NULL;
sched_ib.records.n_insns = 0;
max_insn_size = 0;
-
- free (sched_branch_type);
- sched_branch_type = NULL;
}
/* Implementation of targetm.sched.init () hook.
diff --git a/gcc/config/m68k/m68k.h b/gcc/config/m68k/m68k.h
index 42d377942e7..0a390d074d4 100644
--- a/gcc/config/m68k/m68k.h
+++ b/gcc/config/m68k/m68k.h
@@ -782,8 +782,9 @@ do { if (cc_prev_status.flags & CC_IN_68881) \
/* After the prologue, RA is at 4(AP) in the current frame. */
#define RETURN_ADDR_RTX(COUNT, FRAME) \
((COUNT) == 0 \
- ? gen_rtx_MEM (Pmode, plus_constant (arg_pointer_rtx, UNITS_PER_WORD)) \
- : gen_rtx_MEM (Pmode, plus_constant (FRAME, UNITS_PER_WORD)))
+ ? gen_rtx_MEM (Pmode, plus_constant (Pmode, arg_pointer_rtx, \
+ UNITS_PER_WORD)) \
+ : gen_rtx_MEM (Pmode, plus_constant (Pmode, FRAME, UNITS_PER_WORD)))
/* We must not use the DBX register numbers for the DWARF 2 CFA column
numbers because that maps to numbers beyond FIRST_PSEUDO_REGISTER.
@@ -814,7 +815,7 @@ do { if (cc_prev_status.flags & CC_IN_68881) \
#define EH_RETURN_HANDLER_RTX \
gen_rtx_MEM (Pmode, \
gen_rtx_PLUS (Pmode, arg_pointer_rtx, \
- plus_constant (EH_RETURN_STACKADJ_RTX, \
+ plus_constant (Pmode, EH_RETURN_STACKADJ_RTX, \
UNITS_PER_WORD)))
/* Select a format to encode pointers in exception handling data. CODE
diff --git a/gcc/config/m68k/m68k.md b/gcc/config/m68k/m68k.md
index 8104e75492b..8fc81b53e09 100644
--- a/gcc/config/m68k/m68k.md
+++ b/gcc/config/m68k/m68k.md
@@ -2409,7 +2409,8 @@
if (GET_CODE (XEXP (operands[0], 0)) == POST_INC)
{
operands[1] = gen_rtx_MEM (SImode,
- plus_constant (XEXP(operands[0], 0), -8));
+ plus_constant (Pmode,
+ XEXP(operands[0], 0), -8));
return "move%.l %0,%3\;add%.l %R2,%0\;addx%.l %2,%3\;move%.l %3,%1";
}
else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
@@ -2907,7 +2908,8 @@
if (GET_CODE (XEXP (operands[0], 0)) == POST_INC)
{
operands[1]
- = gen_rtx_MEM (SImode, plus_constant (XEXP (operands[0], 0), -8));
+ = gen_rtx_MEM (SImode, plus_constant (Pmode,
+ XEXP (operands[0], 0), -8));
return "move%.l %0,%3\;sub%.l %R2,%0\;subx%.l %2,%3\;move%.l %3,%1";
}
else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
@@ -6344,7 +6346,7 @@
{
OUTPUT_JUMP ("jeq %l0", "fjeq %l0", "jeq %l0");
}
- [(set (attr "type") (symbol_ref "m68k_sched_branch_type (insn)"))])
+ [(set_attr "type" "bcc")])
(define_insn "bne"
[(set (pc)
@@ -6356,7 +6358,7 @@
{
OUTPUT_JUMP ("jne %l0", "fjne %l0", "jne %l0");
}
- [(set (attr "type") (symbol_ref "m68k_sched_branch_type (insn)"))])
+ [(set_attr "type" "bcc")])
(define_insn "bgt"
[(set (pc)
@@ -6374,7 +6376,7 @@
OUTPUT_JUMP ("jgt %l0", "fjgt %l0", 0);
}
- [(set (attr "type") (symbol_ref "m68k_sched_branch_type (insn)"))])
+ [(set_attr "type" "bcc")])
(define_insn "bgtu"
[(set (pc)
@@ -6410,7 +6412,7 @@
OUTPUT_JUMP ("jlt %l0", "fjlt %l0", "jmi %l0");
}
- [(set (attr "type") (symbol_ref "m68k_sched_branch_type (insn)"))])
+ [(set_attr "type" "bcc")])
(define_insn "bltu"
[(set (pc)
@@ -7264,7 +7266,8 @@
(match_operand:SI 1 "const_int_operand")))])]
"TARGET_68020 || INTVAL (operands[1]) >= -0x8004"
{
- operands[2] = gen_frame_mem (SImode, plus_constant (stack_pointer_rtx, -4));
+ operands[2] = gen_frame_mem (SImode,
+ plus_constant (Pmode, stack_pointer_rtx, -4));
})
(define_insn "*link"
diff --git a/gcc/config/mcore/mcore.c b/gcc/config/mcore/mcore.c
index 82ae0cfe68b..9b8cf020ef3 100644
--- a/gcc/config/mcore/mcore.c
+++ b/gcc/config/mcore/mcore.c
@@ -2003,7 +2003,8 @@ mcore_expand_prolog (void)
{
emit_insn (gen_movsi
(gen_rtx_MEM (SImode,
- plus_constant (stack_pointer_rtx, offset)),
+ plus_constant (Pmode, stack_pointer_rtx,
+ offset)),
gen_rtx_REG (SImode, rn)));
}
}
@@ -2038,7 +2039,8 @@ mcore_expand_prolog (void)
{
emit_insn (gen_movsi
(gen_rtx_MEM (SImode,
- plus_constant (stack_pointer_rtx, offs)),
+ plus_constant (Pmode, stack_pointer_rtx,
+ offs)),
gen_rtx_REG (SImode, i)));
offs += 4;
}
@@ -2133,7 +2135,8 @@ mcore_expand_epilog (void)
emit_insn (gen_movsi
(gen_rtx_REG (SImode, i),
gen_rtx_MEM (SImode,
- plus_constant (stack_pointer_rtx, offs))));
+ plus_constant (Pmode, stack_pointer_rtx,
+ offs))));
offs += 4;
}
}
diff --git a/gcc/config/mcore/mcore.md b/gcc/config/mcore/mcore.md
index c56a0c6aea1..805de2b2326 100644
--- a/gcc/config/mcore/mcore.md
+++ b/gcc/config/mcore/mcore.md
@@ -1408,8 +1408,8 @@
XVECEXP (operands[3], 0, i)
= gen_rtx_SET (VOIDmode,
gen_rtx_REG (SImode, regno + i),
- gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx,
- i * 4)));
+ gen_rtx_MEM (SImode, plus_constant (Pmode, stack_pointer_rtx,
+ i * 4)));
}")
(define_insn ""
@@ -1446,8 +1446,8 @@
for (i = 0; i < count; i++)
XVECEXP (operands[3], 0, i)
= gen_rtx_SET (VOIDmode,
- gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx,
- i * 4)),
+ gen_rtx_MEM (SImode, plus_constant (Pmode, stack_pointer_rtx,
+ i * 4)),
gen_rtx_REG (SImode, regno + i));
}")
diff --git a/gcc/config/mep/mep.c b/gcc/config/mep/mep.c
index 4351702fa4b..edfff549e2a 100644
--- a/gcc/config/mep/mep.c
+++ b/gcc/config/mep/mep.c
@@ -2366,7 +2366,7 @@ mep_allocate_initial_value (rtx reg)
}
rss = cfun->machine->reg_save_slot[REGNO(reg)];
- return gen_rtx_MEM (SImode, plus_constant (arg_pointer_rtx, -rss));
+ return gen_rtx_MEM (SImode, plus_constant (Pmode, arg_pointer_rtx, -rss));
}
rtx
@@ -2844,7 +2844,8 @@ mep_expand_prologue (void)
ALLOCATE_INITIAL_VALUE. The moves emitted here can then be safely
deleted as dead. */
mem = gen_rtx_MEM (rmode,
- plus_constant (stack_pointer_rtx, sp_offset - rss));
+ plus_constant (Pmode, stack_pointer_rtx,
+ sp_offset - rss));
maybe_dead_p = rtx_equal_p (mem, has_hard_reg_initial_val (rmode, i));
if (GR_REGNO_P (i) || LOADABLE_CR_REGNO_P (i))
@@ -2855,7 +2856,8 @@ mep_expand_prologue (void)
int be = TARGET_BIG_ENDIAN ? 4 : 0;
mem = gen_rtx_MEM (SImode,
- plus_constant (stack_pointer_rtx, sp_offset - rss + be));
+ plus_constant (Pmode, stack_pointer_rtx,
+ sp_offset - rss + be));
maybe_dead_move (gen_rtx_REG (SImode, REGSAVE_CONTROL_TEMP),
gen_rtx_REG (SImode, i),
@@ -2876,7 +2878,8 @@ mep_expand_prologue (void)
copy_rtx (mem),
gen_rtx_REG (rmode, i)));
mem = gen_rtx_MEM (SImode,
- plus_constant (stack_pointer_rtx, sp_offset - rss + (4-be)));
+ plus_constant (Pmode, stack_pointer_rtx,
+ sp_offset - rss + (4-be)));
insn = maybe_dead_move (mem,
gen_rtx_REG (SImode, REGSAVE_CONTROL_TEMP+1),
maybe_dead_p);
@@ -3083,8 +3086,8 @@ mep_expand_epilogue (void)
if (GR_REGNO_P (i) || LOADABLE_CR_REGNO_P (i))
emit_move_insn (gen_rtx_REG (rmode, i),
gen_rtx_MEM (rmode,
- plus_constant (stack_pointer_rtx,
- sp_offset-rss)));
+ plus_constant (Pmode, stack_pointer_rtx,
+ sp_offset - rss)));
else
{
if (i == LP_REGNO && !mep_sibcall_epilogue && !interrupt_handler)
@@ -3096,7 +3099,8 @@ mep_expand_epilogue (void)
{
emit_move_insn (gen_rtx_REG (rmode, REGSAVE_CONTROL_TEMP),
gen_rtx_MEM (rmode,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode,
+ stack_pointer_rtx,
sp_offset-rss)));
emit_move_insn (gen_rtx_REG (rmode, i),
gen_rtx_REG (rmode, REGSAVE_CONTROL_TEMP));
@@ -3109,7 +3113,7 @@ mep_expand_epilogue (void)
register when we return by jumping indirectly via the temp. */
emit_move_insn (gen_rtx_REG (SImode, REGSAVE_CONTROL_TEMP),
gen_rtx_MEM (SImode,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode, stack_pointer_rtx,
lp_slot)));
lp_temp = REGSAVE_CONTROL_TEMP;
}
diff --git a/gcc/config/microblaze/microblaze.c b/gcc/config/microblaze/microblaze.c
index 8412d0ef127..b170606bc75 100644
--- a/gcc/config/microblaze/microblaze.c
+++ b/gcc/config/microblaze/microblaze.c
@@ -341,7 +341,8 @@ double_memory_operand (rtx op, enum machine_mode mode)
return 1;
return memory_address_p ((GET_MODE_CLASS (mode) == MODE_INT
- ? SImode : SFmode), plus_constant (addr, 4));
+ ? SImode : SFmode),
+ plus_constant (Pmode, addr, 4));
}
/* Implement REG_OK_FOR_BASE_P -and- REG_OK_FOR_INDEX_P. */
@@ -808,8 +809,8 @@ microblaze_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length)
microblaze_block_move_straight (dest, src, MAX_MOVE_BYTES);
/* Move on to the next block. */
- emit_move_insn (src_reg, plus_constant (src_reg, MAX_MOVE_BYTES));
- emit_move_insn (dest_reg, plus_constant (dest_reg, MAX_MOVE_BYTES));
+ emit_move_insn (src_reg, plus_constant (Pmode, src_reg, MAX_MOVE_BYTES));
+ emit_move_insn (dest_reg, plus_constant (Pmode, dest_reg, MAX_MOVE_BYTES));
/* Emit the test & branch. */
emit_insn (gen_cbranchsi4 (gen_rtx_NE (SImode, src_reg, final_src),
diff --git a/gcc/config/mips/mips.c b/gcc/config/mips/mips.c
index d48a46582b5..2e6c3001178 100644
--- a/gcc/config/mips/mips.c
+++ b/gcc/config/mips/mips.c
@@ -2569,7 +2569,7 @@ mips_strip_unspec_address (rtx op)
split_const (op, &base, &offset);
if (UNSPEC_ADDRESS_P (base))
- op = plus_constant (UNSPEC_ADDRESS (base), INTVAL (offset));
+ op = plus_constant (Pmode, UNSPEC_ADDRESS (base), INTVAL (offset));
return op;
}
@@ -2622,7 +2622,8 @@ mips16_gp_pseudo_reg (void)
scan = NEXT_INSN (scan);
insn = gen_load_const_gp (cfun->machine->mips16_gp_pseudo_rtx);
- emit_insn_after (insn, scan);
+ insn = emit_insn_after (insn, scan);
+ INSN_LOCATOR (insn) = 0;
pop_topmost_sequence ();
}
@@ -2808,7 +2809,7 @@ mips_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
high = mips_force_temporary (temp, high);
reg = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
}
- return plus_constant (reg, offset);
+ return plus_constant (Pmode, reg, offset);
}
/* The __tls_get_attr symbol. */
@@ -3490,6 +3491,37 @@ mips_zero_extend_cost (enum machine_mode mode, rtx op)
return COSTS_N_INSNS (1);
}
+/* Return the cost of moving between two registers of mode MODE,
+ assuming that the move will be in pieces of at most UNITS bytes. */
+
+static int
+mips_set_reg_reg_piece_cost (enum machine_mode mode, unsigned int units)
+{
+ return COSTS_N_INSNS ((GET_MODE_SIZE (mode) + units - 1) / units);
+}
+
+/* Return the cost of moving between two registers of mode MODE. */
+
+static int
+mips_set_reg_reg_cost (enum machine_mode mode)
+{
+ switch (GET_MODE_CLASS (mode))
+ {
+ case MODE_CC:
+ return mips_set_reg_reg_piece_cost (mode, GET_MODE_SIZE (CCmode));
+
+ case MODE_FLOAT:
+ case MODE_COMPLEX_FLOAT:
+ case MODE_VECTOR_FLOAT:
+ if (TARGET_HARD_FLOAT)
+ return mips_set_reg_reg_piece_cost (mode, UNITS_PER_HWFPVALUE);
+ /* Fall through */
+
+ default:
+ return mips_set_reg_reg_piece_cost (mode, UNITS_PER_WORD);
+ }
+}
+
/* Implement TARGET_RTX_COSTS. */
static bool
@@ -3877,6 +3909,15 @@ mips_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
*total = mips_cost->fp_add;
return false;
+ case SET:
+ if (register_operand (SET_DEST (x), VOIDmode)
+ && reg_or_0_operand (SET_SRC (x), VOIDmode))
+ {
+ *total = mips_set_reg_reg_cost (GET_MODE (SET_DEST (x)));
+ return true;
+ }
+ return false;
+
default:
return false;
}
@@ -5392,7 +5433,7 @@ mips_setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
{
rtx ptr, mem;
- ptr = plus_constant (virtual_incoming_args_rtx,
+ ptr = plus_constant (Pmode, virtual_incoming_args_rtx,
REG_PARM_STACK_SPACE (cfun->decl)
- gp_saved * UNITS_PER_WORD);
mem = gen_frame_mem (BLKmode, ptr);
@@ -5421,7 +5462,7 @@ mips_setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
{
rtx ptr, mem;
- ptr = plus_constant (virtual_incoming_args_rtx, off);
+ ptr = plus_constant (Pmode, virtual_incoming_args_rtx, off);
mem = gen_frame_mem (mode, ptr);
set_mem_alias_set (mem, get_varargs_alias_set ());
mips_emit_move (mem, gen_rtx_REG (mode, FP_ARG_FIRST + i));
@@ -5584,7 +5625,7 @@ mips_va_start (tree valist, rtx nextarg)
}
else
{
- nextarg = plus_constant (nextarg, -cfun->machine->varargs_size);
+ nextarg = plus_constant (Pmode, nextarg, -cfun->machine->varargs_size);
std_expand_builtin_va_start (valist, nextarg);
}
}
@@ -6951,8 +6992,8 @@ mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length,
mips_block_move_straight (dest, src, bytes_per_iter);
/* Move on to the next block. */
- mips_emit_move (src_reg, plus_constant (src_reg, bytes_per_iter));
- mips_emit_move (dest_reg, plus_constant (dest_reg, bytes_per_iter));
+ mips_emit_move (src_reg, plus_constant (Pmode, src_reg, bytes_per_iter));
+ mips_emit_move (dest_reg, plus_constant (Pmode, dest_reg, bytes_per_iter));
/* Emit the loop condition. */
test = gen_rtx_NE (VOIDmode, src_reg, final_src);
@@ -7892,7 +7933,7 @@ mips_print_operand (FILE *file, rtx op, int letter)
case MEM:
if (letter == 'D')
- output_address (plus_constant (XEXP (op, 0), 4));
+ output_address (plus_constant (Pmode, XEXP (op, 0), 4));
else if (letter && letter != 'z')
output_operand_lossage ("invalid use of '%%%c'", letter);
else
@@ -8751,7 +8792,8 @@ mips16e_save_restore_reg (bool restore_p, bool reg_parm_p,
{
rtx reg, mem;
- mem = gen_frame_mem (SImode, plus_constant (stack_pointer_rtx, offset));
+ mem = gen_frame_mem (SImode, plus_constant (Pmode, stack_pointer_rtx,
+ offset));
reg = gen_rtx_REG (SImode, regno);
if (restore_p)
{
@@ -8810,7 +8852,7 @@ mips16e_build_save_restore (bool restore_p, unsigned int *mask_ptr,
/* Add the stack pointer adjustment. */
set = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode, stack_pointer_rtx,
restore_p ? size : -size));
RTX_FRAME_RELATED_P (set) = 1;
XVECEXP (pattern, 0, n++) = set;
@@ -9951,7 +9993,8 @@ mips_save_restore_reg (enum machine_mode mode, int regno,
{
rtx mem;
- mem = gen_frame_mem (mode, plus_constant (stack_pointer_rtx, offset));
+ mem = gen_frame_mem (mode, plus_constant (Pmode, stack_pointer_rtx,
+ offset));
fn (gen_rtx_REG (mode, regno), mem);
}
@@ -10441,7 +10484,7 @@ mips_expand_prologue (void)
{
/* Push EPC into its stack slot. */
mem = gen_frame_mem (word_mode,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode, stack_pointer_rtx,
offset));
mips_emit_move (mem, gen_rtx_REG (word_mode, K1_REG_NUM));
offset -= UNITS_PER_WORD;
@@ -10460,7 +10503,8 @@ mips_expand_prologue (void)
/* Push Status into its stack slot. */
mem = gen_frame_mem (word_mode,
- plus_constant (stack_pointer_rtx, offset));
+ plus_constant (Pmode, stack_pointer_rtx,
+ offset));
mips_emit_move (mem, gen_rtx_REG (word_mode, K1_REG_NUM));
offset -= UNITS_PER_WORD;
@@ -10532,7 +10576,7 @@ mips_expand_prologue (void)
/* Describe the combined effect of the previous instructions. */
mips_set_frame_expr
(gen_rtx_SET (VOIDmode, stack_pointer_rtx,
- plus_constant (stack_pointer_rtx, -size)));
+ plus_constant (Pmode, stack_pointer_rtx, -size)));
}
mips_frame_barrier ();
}
@@ -10563,7 +10607,7 @@ mips_expand_prologue (void)
MIPS_PROLOGUE_TEMP (Pmode)));
mips_set_frame_expr
(gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
- plus_constant (stack_pointer_rtx, offset)));
+ plus_constant (Pmode, stack_pointer_rtx, offset)));
}
}
@@ -10576,7 +10620,7 @@ mips_expand_prologue (void)
HOST_WIDE_INT offset;
mips_get_cprestore_base_and_offset (&base, &offset, false);
- mem = gen_frame_mem (Pmode, plus_constant (base, offset));
+ mem = gen_frame_mem (Pmode, plus_constant (Pmode, base, offset));
gp = TARGET_MIPS16 ? MIPS16_PIC_TEMP : pic_offset_table_rtx;
temp = (SMALL_OPERAND (offset)
? gen_rtx_SCRATCH (Pmode)
@@ -10585,7 +10629,7 @@ mips_expand_prologue (void)
(mem, GEN_INT (offset), gp, temp)));
mips_get_cprestore_base_and_offset (&base, &offset, true);
- mem = gen_frame_mem (Pmode, plus_constant (base, offset));
+ mem = gen_frame_mem (Pmode, plus_constant (Pmode, base, offset));
emit_insn (PMODE_INSN (gen_use_cprestore, (mem)));
}
@@ -10641,7 +10685,7 @@ mips_epilogue_set_cfa (rtx reg, HOST_WIDE_INT offset)
{
RTX_FRAME_RELATED_P (insn) = 1;
REG_NOTES (insn) = alloc_reg_note (REG_CFA_DEF_CFA,
- plus_constant (reg, offset),
+ plus_constant (Pmode, reg, offset),
REG_NOTES (insn));
mips_epilogue.cfa_reg = reg;
mips_epilogue.cfa_offset = offset;
@@ -10830,7 +10874,8 @@ mips_expand_epilogue (bool sibcall_p)
{
/* Restore the original EPC. */
mem = gen_frame_mem (word_mode,
- plus_constant (stack_pointer_rtx, offset));
+ plus_constant (Pmode, stack_pointer_rtx,
+ offset));
mips_emit_move (gen_rtx_REG (word_mode, K0_REG_NUM), mem);
offset -= UNITS_PER_WORD;
@@ -10841,7 +10886,8 @@ mips_expand_epilogue (bool sibcall_p)
/* Restore the original Status. */
mem = gen_frame_mem (word_mode,
- plus_constant (stack_pointer_rtx, offset));
+ plus_constant (Pmode, stack_pointer_rtx,
+ offset));
mips_emit_move (gen_rtx_REG (word_mode, K0_REG_NUM), mem);
offset -= UNITS_PER_WORD;
diff --git a/gcc/config/mips/mips.md b/gcc/config/mips/mips.md
index 5d8f74f5e79..912dccb5ee0 100644
--- a/gcc/config/mips/mips.md
+++ b/gcc/config/mips/mips.md
@@ -5756,7 +5756,7 @@
{
rtx addr;
- addr = plus_constant (operands[0], GET_MODE_SIZE (Pmode) * 3);
+ addr = plus_constant (Pmode, operands[0], GET_MODE_SIZE (Pmode) * 3);
mips_emit_move (gen_rtx_MEM (Pmode, addr), pic_offset_table_rtx);
DONE;
})
@@ -5772,9 +5772,9 @@
/* The elements of the buffer are, in order: */
int W = GET_MODE_SIZE (Pmode);
rtx fp = gen_rtx_MEM (Pmode, operands[0]);
- rtx lab = gen_rtx_MEM (Pmode, plus_constant (operands[0], 1*W));
- rtx stack = gen_rtx_MEM (Pmode, plus_constant (operands[0], 2*W));
- rtx gpv = gen_rtx_MEM (Pmode, plus_constant (operands[0], 3*W));
+ rtx lab = gen_rtx_MEM (Pmode, plus_constant (Pmode, operands[0], 1*W));
+ rtx stack = gen_rtx_MEM (Pmode, plus_constant (Pmode, operands[0], 2*W));
+ rtx gpv = gen_rtx_MEM (Pmode, plus_constant (Pmode, operands[0], 3*W));
rtx pv = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
/* Use gen_raw_REG to avoid being given pic_offset_table_rtx.
The target is bound to be using $28 as the global pointer
diff --git a/gcc/config/mips/t-vxworks b/gcc/config/mips/t-vxworks
index ac2fa9d9f38..488473595d3 100644
--- a/gcc/config/mips/t-vxworks
+++ b/gcc/config/mips/t-vxworks
@@ -32,4 +32,4 @@ MULTILIB_EXCEPTIONS = mips3* mabi=o64 fPIC \
$(addprefix mabi=o64/, EL* msoft-float* mrtp* fPIC*) \
$(addsuffix /fPIC, *mabi=o64 *mips3 *EL *msoft-float)
-MUTLILIB_EXTRA_OPTS = -G 0 -mno-branch-likely
+MULTILIB_EXTRA_OPTS = -G 0 -mno-branch-likely
diff --git a/gcc/config/mmix/mmix.c b/gcc/config/mmix/mmix.c
index c87d09e0e2f..8f801e6c7ca 100644
--- a/gcc/config/mmix/mmix.c
+++ b/gcc/config/mmix/mmix.c
@@ -550,7 +550,7 @@ mmix_dynamic_chain_address (rtx frame)
frame-pointer. Unfortunately, the caller assumes that a
frame-pointer is present for *all* previous frames. There should be
a way to say that that cannot be done, like for RETURN_ADDR_RTX. */
- return plus_constant (frame, -8);
+ return plus_constant (Pmode, frame, -8);
}
/* STARTING_FRAME_OFFSET. */
@@ -581,7 +581,9 @@ mmix_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
See mmix_initial_elimination_offset for the reason we can't use
get_hard_reg_initial_val for both. Always using a stack slot
and not a register would be suboptimal. */
- ? validize_mem (gen_rtx_MEM (Pmode, plus_constant (frame_pointer_rtx, -16)))
+ ? validize_mem (gen_rtx_MEM (Pmode,
+ plus_constant (Pmode,
+ frame_pointer_rtx, -16)))
: get_hard_reg_initial_val (Pmode, MMIX_INCOMING_RETURN_ADDRESS_REGNUM))
: NULL_RTX;
}
@@ -2063,7 +2065,7 @@ mmix_expand_prologue (void)
/* These registers aren't actually saved (as in "will be
restored"), so don't tell DWARF2 they're saved. */
emit_move_insn (gen_rtx_MEM (DImode,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode, stack_pointer_rtx,
offset)),
gen_rtx_REG (DImode, regno));
offset -= 8;
@@ -2090,7 +2092,8 @@ mmix_expand_prologue (void)
}
insn = emit_move_insn (gen_rtx_MEM (DImode,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode,
+ stack_pointer_rtx,
offset)),
hard_frame_pointer_rtx);
RTX_FRAME_RELATED_P (insn) = 1;
@@ -2132,14 +2135,16 @@ mmix_expand_prologue (void)
emit_move_insn (tmpreg, retreg);
insn = emit_move_insn (gen_rtx_MEM (DImode,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode,
+ stack_pointer_rtx,
offset)),
tmpreg);
RTX_FRAME_RELATED_P (insn) = 1;
add_reg_note (insn, REG_FRAME_RELATED_EXPR,
gen_rtx_SET (VOIDmode,
gen_rtx_MEM (DImode,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode,
+ stack_pointer_rtx,
offset)),
retreg));
@@ -2179,7 +2184,8 @@ mmix_expand_prologue (void)
gen_rtx_REG (DImode,
MMIX_rO_REGNUM));
emit_move_insn (gen_rtx_MEM (DImode,
- plus_constant (stack_pointer_rtx, offset)),
+ plus_constant (Pmode, stack_pointer_rtx,
+ offset)),
gen_rtx_REG (DImode, 255));
offset -= 8;
}
@@ -2215,7 +2221,8 @@ mmix_expand_prologue (void)
}
insn = emit_move_insn (gen_rtx_MEM (DImode,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode,
+ stack_pointer_rtx,
offset)),
gen_rtx_REG (DImode, regno));
RTX_FRAME_RELATED_P (insn) = 1;
@@ -2291,7 +2298,7 @@ mmix_expand_epilogue (void)
emit_move_insn (gen_rtx_REG (DImode, regno),
gen_rtx_MEM (DImode,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode, stack_pointer_rtx,
offset)));
offset += 8;
}
@@ -2323,7 +2330,7 @@ mmix_expand_epilogue (void)
emit_move_insn (hard_frame_pointer_rtx,
gen_rtx_MEM (DImode,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode, stack_pointer_rtx,
offset)));
offset += 8;
}
diff --git a/gcc/config/mn10300/mn10300.c b/gcc/config/mn10300/mn10300.c
index b5f3933c26e..1554f94644c 100644
--- a/gcc/config/mn10300/mn10300.c
+++ b/gcc/config/mn10300/mn10300.c
@@ -711,7 +711,7 @@ mn10300_gen_multiple_store (unsigned int mask)
continue;
++count;
- x = plus_constant (stack_pointer_rtx, count * -4);
+ x = plus_constant (Pmode, stack_pointer_rtx, count * -4);
x = gen_frame_mem (SImode, x);
x = gen_rtx_SET (VOIDmode, x, gen_rtx_REG (SImode, regno));
elts[count] = F(x);
@@ -725,7 +725,7 @@ mn10300_gen_multiple_store (unsigned int mask)
gcc_assert (mask == 0);
/* Create the instruction that updates the stack pointer. */
- x = plus_constant (stack_pointer_rtx, count * -4);
+ x = plus_constant (Pmode, stack_pointer_rtx, count * -4);
x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
elts[0] = F(x);
@@ -1464,7 +1464,7 @@ mn10300_builtin_saveregs (void)
alias_set_type set = get_varargs_alias_set ();
if (argadj)
- offset = plus_constant (crtl->args.arg_offset_rtx, argadj);
+ offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, argadj);
else
offset = crtl->args.arg_offset_rtx;
@@ -1473,7 +1473,8 @@ mn10300_builtin_saveregs (void)
emit_move_insn (mem, gen_rtx_REG (SImode, 0));
mem = gen_rtx_MEM (SImode,
- plus_constant (crtl->args.internal_arg_pointer, 4));
+ plus_constant (Pmode,
+ crtl->args.internal_arg_pointer, 4));
set_mem_alias_set (mem, set);
emit_move_insn (mem, gen_rtx_REG (SImode, 1));
@@ -2516,7 +2517,7 @@ mn10300_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
clobber the flags but do not affect the contents of D0 or D1. */
disp = expand_binop (SImode, sub_optab, fnaddr,
- plus_constant (XEXP (m_tramp, 0), 11),
+ plus_constant (Pmode, XEXP (m_tramp, 0), 11),
NULL_RTX, 1, OPTAB_DIRECT);
mem = adjust_address (m_tramp, SImode, 0);
diff --git a/gcc/config/moxie/moxie.c b/gcc/config/moxie/moxie.c
index d70eaac3015..8d40412d095 100644
--- a/gcc/config/moxie/moxie.c
+++ b/gcc/config/moxie/moxie.c
@@ -504,9 +504,9 @@ moxie_static_chain (const_tree fndecl, bool incoming_p)
return NULL;
if (incoming_p)
- addr = plus_constant (arg_pointer_rtx, 2 * UNITS_PER_WORD);
+ addr = plus_constant (Pmode, arg_pointer_rtx, 2 * UNITS_PER_WORD);
else
- addr = plus_constant (stack_pointer_rtx, -UNITS_PER_WORD);
+ addr = plus_constant (Pmode, stack_pointer_rtx, -UNITS_PER_WORD);
mem = gen_rtx_MEM (Pmode, addr);
MEM_NOTRAP_P (mem) = 1;
diff --git a/gcc/config/moxie/moxie.h b/gcc/config/moxie/moxie.h
index d2a455b289c..c80d26c5ae9 100644
--- a/gcc/config/moxie/moxie.h
+++ b/gcc/config/moxie/moxie.h
@@ -278,7 +278,7 @@ enum reg_class
the prologue. */
#define INCOMING_RETURN_ADDR_RTX \
gen_frame_mem (Pmode, \
- plus_constant (stack_pointer_rtx, UNITS_PER_WORD))
+ plus_constant (Pmode, stack_pointer_rtx, UNITS_PER_WORD))
/* Describe how we implement __builtin_eh_return. */
#define EH_RETURN_DATA_REGNO(N) ((N) < 4 ? (N+2) : INVALID_REGNUM)
@@ -286,7 +286,7 @@ enum reg_class
/* Store the return handler into the call frame. */
#define EH_RETURN_HANDLER_RTX \
gen_frame_mem (Pmode, \
- plus_constant (frame_pointer_rtx, UNITS_PER_WORD))
+ plus_constant (Pmode, frame_pointer_rtx, UNITS_PER_WORD))
/* Storage Layout */
diff --git a/gcc/config/pa/pa.c b/gcc/config/pa/pa.c
index 6b4ea25fa22..56c889db88c 100644
--- a/gcc/config/pa/pa.c
+++ b/gcc/config/pa/pa.c
@@ -863,7 +863,7 @@ legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
if (GET_CODE (orig) == CONST_INT)
{
if (INT_14_BITS (orig))
- return plus_constant (base, INTVAL (orig));
+ return plus_constant (Pmode, base, INTVAL (orig));
orig = force_reg (Pmode, orig);
}
pic_ref = gen_rtx_PLUS (Pmode, base, orig);
@@ -1073,7 +1073,7 @@ hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
if (! VAL_14_BITS_P (newoffset)
&& GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
{
- rtx const_part = plus_constant (XEXP (x, 0), newoffset);
+ rtx const_part = plus_constant (Pmode, XEXP (x, 0), newoffset);
rtx tmp_reg
= force_reg (Pmode,
gen_rtx_HIGH (Pmode, const_part));
@@ -1094,7 +1094,7 @@ hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
force_reg (Pmode, XEXP (x, 0)),
int_part));
}
- return plus_constant (ptr_reg, offset - newoffset);
+ return plus_constant (Pmode, ptr_reg, offset - newoffset);
}
/* Handle (plus (mult (a) (shadd_constant)) (b)). */
@@ -3484,7 +3484,7 @@ store_reg (int reg, HOST_WIDE_INT disp, int base)
basereg = gen_rtx_REG (Pmode, base);
if (VAL_14_BITS_P (disp))
{
- dest = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
+ dest = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
insn = emit_move_insn (dest, src);
}
else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
@@ -3568,7 +3568,8 @@ set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
if (VAL_14_BITS_P (disp))
{
insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
- plus_constant (gen_rtx_REG (Pmode, base), disp));
+ plus_constant (Pmode,
+ gen_rtx_REG (Pmode, base), disp));
}
else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
{
@@ -4007,16 +4008,19 @@ pa_expand_prologue (void)
if (TARGET_64BIT)
{
rtx mem = gen_rtx_MEM (DFmode,
- plus_constant (base, offset));
+ plus_constant (Pmode, base,
+ offset));
add_reg_note (insn, REG_FRAME_RELATED_EXPR,
gen_rtx_SET (VOIDmode, mem, reg));
}
else
{
rtx meml = gen_rtx_MEM (SFmode,
- plus_constant (base, offset));
+ plus_constant (Pmode, base,
+ offset));
rtx memr = gen_rtx_MEM (SFmode,
- plus_constant (base, offset + 4));
+ plus_constant (Pmode, base,
+ offset + 4));
rtx regl = gen_rtx_REG (SFmode, i);
rtx regr = gen_rtx_REG (SFmode, i + 1);
rtx setl = gen_rtx_SET (VOIDmode, meml, regl);
@@ -4048,7 +4052,7 @@ load_reg (int reg, HOST_WIDE_INT disp, int base)
rtx src;
if (VAL_14_BITS_P (disp))
- src = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
+ src = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
{
rtx delta = GEN_INT (disp);
@@ -4562,7 +4566,7 @@ pa_return_addr_rtx (int count, rtx frameaddr)
for (i = 0; i < len; i++)
{
- rtx op0 = gen_rtx_MEM (SImode, plus_constant (ins, i * 4));
+ rtx op0 = gen_rtx_MEM (SImode, plus_constant (Pmode, ins, i * 4));
rtx op1 = GEN_INT (insns[i]);
emit_cmp_and_jump_insns (op0, op1, NE, NULL, SImode, 0, label);
}
@@ -4575,7 +4579,7 @@ pa_return_addr_rtx (int count, rtx frameaddr)
emit_move_insn (saved_rp,
gen_rtx_MEM (Pmode,
memory_address (Pmode,
- plus_constant (frameaddr,
+ plus_constant (Pmode, frameaddr,
-24))));
emit_label (label);
@@ -6080,7 +6084,7 @@ hppa_builtin_saveregs (void)
? UNITS_PER_WORD : 0);
if (argadj)
- offset = plus_constant (crtl->args.arg_offset_rtx, argadj);
+ offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, argadj);
else
offset = crtl->args.arg_offset_rtx;
@@ -6090,7 +6094,7 @@ hppa_builtin_saveregs (void)
/* Adjust for varargs/stdarg differences. */
if (argadj)
- offset = plus_constant (crtl->args.arg_offset_rtx, -argadj);
+ offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, -argadj);
else
offset = crtl->args.arg_offset_rtx;
@@ -6098,7 +6102,8 @@ hppa_builtin_saveregs (void)
from the incoming arg pointer and growing to larger addresses. */
for (i = 26, off = -64; i >= 19; i--, off += 8)
emit_move_insn (gen_rtx_MEM (word_mode,
- plus_constant (arg_pointer_rtx, off)),
+ plus_constant (Pmode,
+ arg_pointer_rtx, off)),
gen_rtx_REG (word_mode, i));
/* The incoming args pointer points just beyond the flushback area;
@@ -6106,7 +6111,7 @@ hppa_builtin_saveregs (void)
varargs/stdargs we want to make the arg pointer point to the start
of the incoming argument area. */
emit_move_insn (virtual_incoming_args_rtx,
- plus_constant (arg_pointer_rtx, -64));
+ plus_constant (Pmode, arg_pointer_rtx, -64));
/* Now return a pointer to the first anonymous argument. */
return copy_to_reg (expand_binop (Pmode, add_optab,
@@ -6116,7 +6121,7 @@ hppa_builtin_saveregs (void)
/* Store general registers on the stack. */
dest = gen_rtx_MEM (BLKmode,
- plus_constant (crtl->args.internal_arg_pointer,
+ plus_constant (Pmode, crtl->args.internal_arg_pointer,
-16));
set_mem_alias_set (dest, get_varargs_alias_set ());
set_mem_align (dest, BITS_PER_WORD);
@@ -10126,7 +10131,8 @@ pa_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
cache lines to minimize the number of lines flushed. */
emit_insn (gen_andsi3 (start_addr, r_tramp,
GEN_INT (-MIN_CACHELINE_SIZE)));
- tmp = force_reg (Pmode, plus_constant (r_tramp, TRAMPOLINE_CODE_SIZE-1));
+ tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp,
+ TRAMPOLINE_CODE_SIZE-1));
emit_insn (gen_andsi3 (end_addr, tmp,
GEN_INT (-MIN_CACHELINE_SIZE)));
emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
@@ -10144,7 +10150,8 @@ pa_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
/* Create a fat pointer for the trampoline. */
tmp = adjust_address (m_tramp, Pmode, 16);
- emit_move_insn (tmp, force_reg (Pmode, plus_constant (r_tramp, 32)));
+ emit_move_insn (tmp, force_reg (Pmode, plus_constant (Pmode,
+ r_tramp, 32)));
tmp = adjust_address (m_tramp, Pmode, 24);
emit_move_insn (tmp, gen_rtx_REG (Pmode, 27));
@@ -10152,10 +10159,11 @@ pa_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
they do not accept integer displacements. We align the
start and end addresses to the beginning of their respective
cache lines to minimize the number of lines flushed. */
- tmp = force_reg (Pmode, plus_constant (r_tramp, 32));
+ tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp, 32));
emit_insn (gen_anddi3 (start_addr, tmp,
GEN_INT (-MIN_CACHELINE_SIZE)));
- tmp = force_reg (Pmode, plus_constant (tmp, TRAMPOLINE_CODE_SIZE - 1));
+ tmp = force_reg (Pmode, plus_constant (Pmode, tmp,
+ TRAMPOLINE_CODE_SIZE - 1));
emit_insn (gen_anddi3 (end_addr, tmp,
GEN_INT (-MIN_CACHELINE_SIZE)));
emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
@@ -10174,7 +10182,7 @@ static rtx
pa_trampoline_adjust_address (rtx addr)
{
if (!TARGET_64BIT)
- addr = memory_address (Pmode, plus_constant (addr, 46));
+ addr = memory_address (Pmode, plus_constant (Pmode, addr, 46));
return addr;
}
diff --git a/gcc/config/pa/pa.md b/gcc/config/pa/pa.md
index a69f2b8a365..d0110e79868 100644
--- a/gcc/config/pa/pa.md
+++ b/gcc/config/pa/pa.md
@@ -6870,7 +6870,7 @@
of the virtual stack variables and the hard frame pointer. */
if (GET_CODE (fp) != REG)
fp = force_reg (Pmode, fp);
- emit_move_insn (hard_frame_pointer_rtx, plus_constant (fp, -8));
+ emit_move_insn (hard_frame_pointer_rtx, plus_constant (Pmode, fp, -8));
emit_stack_restore (SAVE_NONLOCAL, stack);
@@ -8300,9 +8300,9 @@ add,l %2,%3,%3\;bv,n %%r0(%3)"
{
/* The elements of the buffer are, in order: */
rtx fp = gen_rtx_MEM (Pmode, operands[0]);
- rtx lab = gen_rtx_MEM (Pmode, plus_constant (operands[0],
+ rtx lab = gen_rtx_MEM (Pmode, plus_constant (Pmode, operands[0],
POINTER_SIZE / BITS_PER_UNIT));
- rtx stack = gen_rtx_MEM (Pmode, plus_constant (operands[0],
+ rtx stack = gen_rtx_MEM (Pmode, plus_constant (Pmode, operands[0],
(POINTER_SIZE * 2) / BITS_PER_UNIT));
rtx pv = gen_rtx_REG (Pmode, 1);
@@ -8314,7 +8314,7 @@ add,l %2,%3,%3\;bv,n %%r0(%3)"
to adjust for the offset between these two values. */
if (GET_CODE (fp) != REG)
fp = force_reg (Pmode, fp);
- emit_move_insn (hard_frame_pointer_rtx, plus_constant (fp, -8));
+ emit_move_insn (hard_frame_pointer_rtx, plus_constant (Pmode, fp, -8));
/* This bit is the same as expand_builtin_longjmp. */
emit_stack_restore (SAVE_NONLOCAL, stack);
diff --git a/gcc/config/pdp11/pdp11.c b/gcc/config/pdp11/pdp11.c
index 42e3af078db..85ea46abfa7 100644
--- a/gcc/config/pdp11/pdp11.c
+++ b/gcc/config/pdp11/pdp11.c
@@ -389,7 +389,7 @@ pdp11_expand_epilogue (void)
for (regno = AC5_REGNUM; regno >= AC0_REGNUM; regno--)
if (pdp11_saved_regno (regno))
{
- x = plus_constant (hard_frame_pointer_rtx, ofs);
+ x = plus_constant (Pmode, hard_frame_pointer_rtx, ofs);
x = gen_frame_mem (DFmode, x);
reg = gen_rtx_REG (DFmode, regno);
@@ -407,7 +407,7 @@ pdp11_expand_epilogue (void)
if (pdp11_saved_regno (regno)
&& (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed))
{
- x = plus_constant (hard_frame_pointer_rtx, ofs);
+ x = plus_constant (Pmode, hard_frame_pointer_rtx, ofs);
x = gen_frame_mem (Pmode, x);
emit_move_insn (gen_rtx_REG (Pmode, regno), x);
ofs += 2;
diff --git a/gcc/config/picochip/picochip.c b/gcc/config/picochip/picochip.c
index b878a1f1777..57cbd157f41 100644
--- a/gcc/config/picochip/picochip.c
+++ b/gcc/config/picochip/picochip.c
@@ -4696,6 +4696,6 @@ picochip_static_chain (const_tree ARG_UNUSED (fndecl), bool incoming_p)
if (incoming_p)
addr = arg_pointer_rtx;
else
- addr = plus_constant (stack_pointer_rtx, -2 * UNITS_PER_WORD);
+ addr = plus_constant (Pmode, stack_pointer_rtx, -2 * UNITS_PER_WORD);
return gen_frame_mem (Pmode, addr);
}
diff --git a/gcc/config/rs6000/rs6000.c b/gcc/config/rs6000/rs6000.c
index dca434b1945..c3331dce213 100644
--- a/gcc/config/rs6000/rs6000.c
+++ b/gcc/config/rs6000/rs6000.c
@@ -5594,7 +5594,7 @@ rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
high_int = INTVAL (XEXP (x, 1)) - low_int;
sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
GEN_INT (high_int)), 0);
- return plus_constant (sum, low_int);
+ return plus_constant (Pmode, sum, low_int);
}
else if (GET_CODE (x) == PLUS
&& GET_CODE (XEXP (x, 0)) == REG
@@ -8951,7 +8951,7 @@ setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
}
cfun->machine->varargs_save_offset = offset;
- save_area = plus_constant (virtual_stack_vars_rtx, offset);
+ save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
}
}
else
@@ -8983,7 +8983,7 @@ setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
}
mem = gen_rtx_MEM (BLKmode,
- plus_constant (save_area,
+ plus_constant (Pmode, save_area,
first_reg_offset * reg_size));
MEM_NOTRAP_P (mem) = 1;
set_mem_alias_set (mem, set);
@@ -9021,7 +9021,7 @@ setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
{
mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
? DFmode : SFmode,
- plus_constant (save_area, off));
+ plus_constant (Pmode, save_area, off));
MEM_NOTRAP_P (mem) = 1;
set_mem_alias_set (mem, set);
set_mem_align (mem, GET_MODE_ALIGNMENT (
@@ -14832,10 +14832,10 @@ print_operand (FILE *file, rtx x, int code)
we have already done it, we can just use an offset of word. */
if (GET_CODE (XEXP (x, 0)) == PRE_INC
|| GET_CODE (XEXP (x, 0)) == PRE_DEC)
- output_address (plus_constant (XEXP (XEXP (x, 0), 0),
+ output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
UNITS_PER_WORD));
else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
- output_address (plus_constant (XEXP (XEXP (x, 0), 0),
+ output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
UNITS_PER_WORD));
else
output_address (XEXP (adjust_address_nv (x, SImode,
@@ -15159,9 +15159,9 @@ print_operand (FILE *file, rtx x, int code)
{
if (GET_CODE (XEXP (x, 0)) == PRE_INC
|| GET_CODE (XEXP (x, 0)) == PRE_DEC)
- output_address (plus_constant (XEXP (XEXP (x, 0), 0), 8));
+ output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 8));
else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
- output_address (plus_constant (XEXP (XEXP (x, 0), 0), 8));
+ output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 8));
else
output_address (XEXP (adjust_address_nv (x, SImode, 8), 0));
if (small_data_operand (x, GET_MODE (x)))
@@ -15209,9 +15209,9 @@ print_operand (FILE *file, rtx x, int code)
{
if (GET_CODE (XEXP (x, 0)) == PRE_INC
|| GET_CODE (XEXP (x, 0)) == PRE_DEC)
- output_address (plus_constant (XEXP (XEXP (x, 0), 0), 12));
+ output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 12));
else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
- output_address (plus_constant (XEXP (XEXP (x, 0), 0), 12));
+ output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 12));
else
output_address (XEXP (adjust_address_nv (x, SImode, 12), 0));
if (small_data_operand (x, GET_MODE (x)))
@@ -18161,7 +18161,8 @@ rs6000_return_addr (int count, rtx frame)
(Pmode,
memory_address
(Pmode,
- plus_constant (copy_to_reg
+ plus_constant (Pmode,
+ copy_to_reg
(gen_rtx_MEM (Pmode,
memory_address (Pmode, frame))),
RETURN_ADDRESS_OFFSET)));
@@ -18431,7 +18432,8 @@ rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
else if (info->push_p)
sp_offset = info->total_size;
- tmp = plus_constant (frame_rtx, info->lr_save_offset + sp_offset);
+ tmp = plus_constant (Pmode, frame_rtx,
+ info->lr_save_offset + sp_offset);
tmp = gen_frame_mem (Pmode, tmp);
emit_move_insn (tmp, operands[0]);
}
@@ -18679,9 +18681,11 @@ rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
it exceeds SIZE. If only one probe is needed, this will not
generate any code. Then probe at FIRST + SIZE. */
for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
- emit_stack_probe (plus_constant (stack_pointer_rtx, -(first + i)));
+ emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
+ -(first + i)));
- emit_stack_probe (plus_constant (stack_pointer_rtx, -(first + size)));
+ emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
+ -(first + size)));
}
/* Otherwise, do the same as above, but in a loop. Note that we must be
@@ -18707,7 +18711,8 @@ rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
/* TEST_ADDR = SP + FIRST. */
emit_insn (gen_rtx_SET (VOIDmode, r12,
- plus_constant (stack_pointer_rtx, -first)));
+ plus_constant (Pmode, stack_pointer_rtx,
+ -first)));
/* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
if (rounded_size > 32768)
@@ -18718,7 +18723,7 @@ rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
}
else
emit_insn (gen_rtx_SET (VOIDmode, r0,
- plus_constant (r12, -rounded_size)));
+ plus_constant (Pmode, r12, -rounded_size)));
/* Step 3: the loop
@@ -18742,7 +18747,7 @@ rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
that SIZE is equal to ROUNDED_SIZE. */
if (size != rounded_size)
- emit_stack_probe (plus_constant (r12, rounded_size - size));
+ emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
}
}
@@ -18956,6 +18961,28 @@ generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
return insn;
}
+static rtx
+gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
+{
+ rtx addr, mem;
+
+ addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
+ mem = gen_frame_mem (GET_MODE (reg), addr);
+ return gen_rtx_SET (VOIDmode, store ? mem : reg, store ? reg : mem);
+}
+
+static rtx
+gen_frame_load (rtx reg, rtx frame_reg, int offset)
+{
+ return gen_frame_set (reg, frame_reg, offset, false);
+}
+
+static rtx
+gen_frame_store (rtx reg, rtx frame_reg, int offset)
+{
+ return gen_frame_set (reg, frame_reg, offset, true);
+}
+
/* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
@@ -19296,27 +19323,14 @@ rs6000_emit_savres_rtx (rs6000_stack_t *info,
= gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
for (i = 0; i < end_reg - start_reg; i++)
- {
- rtx addr, reg, mem;
- reg = gen_rtx_REG (reg_mode, start_reg + i);
- addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
- GEN_INT (save_area_offset + reg_size * i));
- mem = gen_frame_mem (reg_mode, addr);
-
- RTVEC_ELT (p, i + offset) = gen_rtx_SET (VOIDmode,
- (sel & SAVRES_SAVE) ? mem : reg,
- (sel & SAVRES_SAVE) ? reg : mem);
- }
+ RTVEC_ELT (p, i + offset)
+ = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
+ frame_reg_rtx, save_area_offset + reg_size * i,
+ (sel & SAVRES_SAVE) != 0);
if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
- {
- rtx addr, reg, mem;
- reg = gen_rtx_REG (Pmode, 0);
- addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
- GEN_INT (lr_offset));
- mem = gen_frame_mem (Pmode, addr);
- RTVEC_ELT (p, i + offset) = gen_rtx_SET (VOIDmode, mem, reg);
- }
+ RTVEC_ELT (p, i + offset)
+ = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
par = gen_rtx_PARALLEL (VOIDmode, p);
@@ -19474,59 +19488,33 @@ rs6000_emit_prologue (void)
/* We do floats first so that the instruction pattern matches
properly. */
for (i = 0; i < 64 - info->first_fp_reg_save; i++)
- {
- rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
- ? DFmode : SFmode),
- info->first_fp_reg_save + i);
- rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
- GEN_INT (info->fp_save_offset
- + frame_off + 8 * i));
- rtx mem = gen_frame_mem ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
- ? DFmode : SFmode), addr);
-
- RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg);
- }
+ RTVEC_ELT (p, j++)
+ = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
+ ? DFmode : SFmode,
+ info->first_fp_reg_save + i),
+ frame_reg_rtx,
+ info->fp_save_offset + frame_off + 8 * i);
for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
- {
- rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
- rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
- GEN_INT (info->altivec_save_offset
- + frame_off + 16 * i));
- rtx mem = gen_frame_mem (V4SImode, addr);
-
- RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg);
- }
+ RTVEC_ELT (p, j++)
+ = gen_frame_store (gen_rtx_REG (V4SImode,
+ info->first_altivec_reg_save + i),
+ frame_reg_rtx,
+ info->altivec_save_offset + frame_off + 16 * i);
for (i = 0; i < 32 - info->first_gp_reg_save; i++)
- {
- rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
- rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
- GEN_INT (info->gp_save_offset
- + frame_off + reg_size * i));
- rtx mem = gen_frame_mem (reg_mode, addr);
-
- RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg);
- }
-
- {
- /* CR register traditionally saved as CR2. */
- rtx reg = gen_rtx_REG (reg_mode, CR2_REGNO);
- rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
- GEN_INT (info->cr_save_offset
- + frame_off));
- rtx mem = gen_frame_mem (reg_mode, addr);
+ RTVEC_ELT (p, j++)
+ = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
+ frame_reg_rtx,
+ info->gp_save_offset + frame_off + reg_size * i);
- RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg);
- }
+ /* CR register traditionally saved as CR2. */
+ RTVEC_ELT (p, j++)
+ = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
+ frame_reg_rtx, info->cr_save_offset + frame_off);
/* Explain about use of R0. */
if (info->lr_save_p)
- {
- rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
- GEN_INT (info->lr_save_offset
- + frame_off));
- rtx mem = gen_frame_mem (reg_mode, addr);
-
- RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg0);
- }
+ RTVEC_ELT (p, j++)
+ = gen_frame_store (reg0,
+ frame_reg_rtx, info->lr_save_offset + frame_off);
/* Explain what happens to the stack pointer. */
{
rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
@@ -19829,17 +19817,10 @@ rs6000_emit_prologue (void)
int i;
p = rtvec_alloc (32 - info->first_gp_reg_save);
for (i = 0; i < 32 - info->first_gp_reg_save; i++)
- {
- rtx addr, reg, mem;
- reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
- addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
- GEN_INT (info->gp_save_offset
- + frame_off
- + reg_size * i));
- mem = gen_frame_mem (reg_mode, addr);
-
- RTVEC_ELT (p, i) = gen_rtx_SET (VOIDmode, mem, reg);
- }
+ RTVEC_ELT (p, i)
+ = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
+ frame_reg_rtx,
+ info->gp_save_offset + frame_off + reg_size * i);
insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
NULL_RTX, NULL_RTX);
@@ -19855,22 +19836,37 @@ rs6000_emit_prologue (void)
sp_off - frame_off);
}
- /* ??? There's no need to emit actual instructions here, but it's the
- easiest way to get the frame unwind information emitted. */
if (crtl->calls_eh_return)
{
- unsigned int i, regno;
+ unsigned int i;
+ rtvec p;
for (i = 0; ; ++i)
{
- regno = EH_RETURN_DATA_REGNO (i);
+ unsigned int regno = EH_RETURN_DATA_REGNO (i);
if (regno == INVALID_REGNUM)
break;
+ }
- emit_frame_save (frame_reg_rtx, reg_mode, regno,
- info->ehrd_offset + frame_off + reg_size * (int) i,
- sp_off - frame_off);
+ p = rtvec_alloc (i);
+
+ for (i = 0; ; ++i)
+ {
+ unsigned int regno = EH_RETURN_DATA_REGNO (i);
+ if (regno == INVALID_REGNUM)
+ break;
+
+ insn
+ = gen_frame_store (gen_rtx_REG (reg_mode, regno),
+ sp_reg_rtx,
+ info->ehrd_offset + sp_off + reg_size * (int) i);
+ RTVEC_ELT (p, i) = insn;
+ RTX_FRAME_RELATED_P (insn) = 1;
}
+
+ insn = emit_insn (gen_blockage ());
+ RTX_FRAME_RELATED_P (insn) = 1;
+ add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
}
/* In AIX ABI we need to make sure r2 is really saved. */
@@ -20517,13 +20513,9 @@ rs6000_emit_epilogue (int sibcall)
{
/* CR register traditionally saved as CR2. */
- rtx reg = gen_rtx_REG (reg_mode, CR2_REGNO);
- rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
- GEN_INT (info->cr_save_offset));
- rtx mem = gen_frame_mem (reg_mode, addr);
-
- RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, reg, mem);
-
+ rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
+ RTVEC_ELT (p, j++)
+ = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
if (flag_shrink_wrap)
{
cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
@@ -20536,24 +20528,18 @@ rs6000_emit_epilogue (int sibcall)
for (i = 0; i < 32 - info->first_gp_reg_save; i++)
{
rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
- rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
- GEN_INT (info->gp_save_offset
- + reg_size * i));
- rtx mem = gen_frame_mem (reg_mode, addr);
-
- RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, reg, mem);
+ RTVEC_ELT (p, j++)
+ = gen_frame_load (reg,
+ frame_reg_rtx, info->gp_save_offset + reg_size * i);
if (flag_shrink_wrap)
cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
}
for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
{
rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
- rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
- GEN_INT (info->altivec_save_offset
- + 16 * i));
- rtx mem = gen_frame_mem (V4SImode, addr);
-
- RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, reg, mem);
+ RTVEC_ELT (p, j++)
+ = gen_frame_load (reg,
+ frame_reg_rtx, info->altivec_save_offset + 16 * i);
if (flag_shrink_wrap)
cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
}
@@ -20562,13 +20548,8 @@ rs6000_emit_epilogue (int sibcall)
rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
? DFmode : SFmode),
info->first_fp_reg_save + i);
- rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
- GEN_INT (info->fp_save_offset
- + 8 * i));
- rtx mem = gen_frame_mem ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
- ? DFmode : SFmode), addr);
-
- RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, reg, mem);
+ RTVEC_ELT (p, j++)
+ = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
if (flag_shrink_wrap)
cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
}
@@ -21071,16 +21052,10 @@ rs6000_emit_epilogue (int sibcall)
rtvec p;
p = rtvec_alloc (32 - info->first_gp_reg_save);
for (i = 0; i < 32 - info->first_gp_reg_save; i++)
- {
- rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
- GEN_INT (info->gp_save_offset
- + frame_off
- + reg_size * i));
- rtx mem = gen_frame_mem (reg_mode, addr);
- rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
-
- RTVEC_ELT (p, i) = gen_rtx_SET (VOIDmode, reg, mem);
- }
+ RTVEC_ELT (p, i)
+ = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
+ frame_reg_rtx,
+ info->gp_save_offset + frame_off + reg_size * i);
emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
}
else
@@ -21111,7 +21086,7 @@ rs6000_emit_epilogue (int sibcall)
{
insn = get_last_insn ();
add_reg_note (insn, REG_CFA_DEF_CFA,
- plus_constant (frame_reg_rtx, frame_off));
+ plus_constant (Pmode, frame_reg_rtx, frame_off));
RTX_FRAME_RELATED_P (insn) = 1;
}
@@ -21272,14 +21247,10 @@ rs6000_emit_epilogue (int sibcall)
? 1 : 11));
for (i = 0; i < 64 - info->first_fp_reg_save; i++)
{
- rtx addr, mem, reg;
-
- addr = gen_rtx_PLUS (Pmode, sp_reg_rtx,
- GEN_INT (info->fp_save_offset + 8 * i));
- mem = gen_frame_mem (DFmode, addr);
- reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
+ rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
- RTVEC_ELT (p, i + 4) = gen_rtx_SET (VOIDmode, reg, mem);
+ RTVEC_ELT (p, i + 4)
+ = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
if (flag_shrink_wrap)
cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
cfa_restores);
@@ -25176,7 +25147,7 @@ rs6000_machopic_legitimize_pic_address (rtx orig, enum machine_mode mode,
if (GET_CODE (offset) == CONST_INT)
{
if (SMALL_INT (offset))
- return plus_constant (base, INTVAL (offset));
+ return plus_constant (Pmode, base, INTVAL (offset));
else if (! reload_in_progress && ! reload_completed)
offset = force_reg (Pmode, offset);
else
diff --git a/gcc/config/rs6000/rs6000.h b/gcc/config/rs6000/rs6000.h
index 561c623d44f..c69a209fc8a 100644
--- a/gcc/config/rs6000/rs6000.h
+++ b/gcc/config/rs6000/rs6000.h
@@ -1394,7 +1394,7 @@ extern enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];
/* MEM representing address to save the TOC register */
#define RS6000_SAVE_TOC gen_rtx_MEM (Pmode, \
- plus_constant (stack_pointer_rtx, \
+ plus_constant (Pmode, stack_pointer_rtx, \
(TARGET_32BIT ? 20 : 40)))
/* Align an address */
diff --git a/gcc/config/rs6000/rs6000.md b/gcc/config/rs6000/rs6000.md
index 3d271695b94..78e56030b04 100644
--- a/gcc/config/rs6000/rs6000.md
+++ b/gcc/config/rs6000/rs6000.md
@@ -2550,7 +2550,18 @@
if (GET_CODE (addr1) == PLUS)
{
emit_insn (gen_add3_insn (op2, XEXP (addr1, 0), GEN_INT (4)));
- addr2 = gen_rtx_PLUS (Pmode, op2, XEXP (addr1, 1));
+ if (TARGET_AVOID_XFORM)
+ {
+ emit_insn (gen_add3_insn (op2, XEXP (addr1, 1), op2));
+ addr2 = op2;
+ }
+ else
+ addr2 = gen_rtx_PLUS (Pmode, op2, XEXP (addr1, 1));
+ }
+ else if (TARGET_AVOID_XFORM)
+ {
+ emit_insn (gen_add3_insn (op2, addr1, GEN_INT (4)));
+ addr2 = op2;
}
else
{
@@ -2600,7 +2611,18 @@
if (GET_CODE (addr1) == PLUS)
{
emit_insn (gen_add3_insn (op2, XEXP (addr1, 0), GEN_INT (4)));
- addr2 = gen_rtx_PLUS (Pmode, op2, XEXP (addr1, 1));
+ if (TARGET_AVOID_XFORM)
+ {
+ emit_insn (gen_add3_insn (op2, XEXP (addr1, 1), op2));
+ addr2 = op2;
+ }
+ else
+ addr2 = gen_rtx_PLUS (Pmode, op2, XEXP (addr1, 1));
+ }
+ else if (TARGET_AVOID_XFORM)
+ {
+ emit_insn (gen_add3_insn (op2, addr1, GEN_INT (4)));
+ addr2 = op2;
}
else
{
@@ -2681,7 +2703,18 @@
if (GET_CODE (addr1) == PLUS)
{
emit_insn (gen_add3_insn (op2, XEXP (addr1, 0), GEN_INT (4)));
- addr2 = gen_rtx_PLUS (SImode, op2, XEXP (addr1, 1));
+ if (TARGET_AVOID_XFORM)
+ {
+ emit_insn (gen_add3_insn (op2, XEXP (addr1, 1), op2));
+ addr2 = op2;
+ }
+ else
+ addr2 = gen_rtx_PLUS (SImode, op2, XEXP (addr1, 1));
+ }
+ else if (TARGET_AVOID_XFORM)
+ {
+ emit_insn (gen_add3_insn (op2, addr1, GEN_INT (4)));
+ addr2 = op2;
}
else
{
@@ -2726,7 +2759,18 @@
if (GET_CODE (addr1) == PLUS)
{
emit_insn (gen_add3_insn (op2, XEXP (addr1, 0), GEN_INT (4)));
- addr2 = gen_rtx_PLUS (SImode, op2, XEXP (addr1, 1));
+ if (TARGET_AVOID_XFORM)
+ {
+ emit_insn (gen_add3_insn (op2, XEXP (addr1, 1), op2));
+ addr2 = op2;
+ }
+ else
+ addr2 = gen_rtx_PLUS (SImode, op2, XEXP (addr1, 1));
+ }
+ else if (TARGET_AVOID_XFORM)
+ {
+ emit_insn (gen_add3_insn (op2, addr1, GEN_INT (4)));
+ addr2 = op2;
}
else
{
diff --git a/gcc/config/rx/rx.c b/gcc/config/rx/rx.c
index eb360726005..00b541166bd 100644
--- a/gcc/config/rx/rx.c
+++ b/gcc/config/rx/rx.c
@@ -1779,7 +1779,7 @@ gen_rx_rtsd_vector (unsigned int adjust, unsigned int low, unsigned int high)
XVECEXP (vector, 0, 0) =
gen_rtx_SET (VOIDmode, stack_pointer_rtx,
- plus_constant (stack_pointer_rtx, adjust));
+ plus_constant (Pmode, stack_pointer_rtx, adjust));
for (i = 0; i < count - 2; i++)
XVECEXP (vector, 0, i + 1) =
@@ -1787,7 +1787,7 @@ gen_rx_rtsd_vector (unsigned int adjust, unsigned int low, unsigned int high)
gen_rtx_REG (SImode, low + i),
gen_rtx_MEM (SImode,
i == 0 ? stack_pointer_rtx
- : plus_constant (stack_pointer_rtx,
+ : plus_constant (Pmode, stack_pointer_rtx,
i * UNITS_PER_WORD)));
XVECEXP (vector, 0, count - 1) = ret_rtx;
@@ -1808,7 +1808,7 @@ gen_rx_popm_vector (unsigned int low, unsigned int high)
XVECEXP (vector, 0, 0) =
gen_rtx_SET (VOIDmode, stack_pointer_rtx,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode, stack_pointer_rtx,
(count - 1) * UNITS_PER_WORD));
for (i = 0; i < count - 1; i++)
@@ -1817,7 +1817,7 @@ gen_rx_popm_vector (unsigned int low, unsigned int high)
gen_rtx_REG (SImode, low + i),
gen_rtx_MEM (SImode,
i == 0 ? stack_pointer_rtx
- : plus_constant (stack_pointer_rtx,
+ : plus_constant (Pmode, stack_pointer_rtx,
i * UNITS_PER_WORD)));
return vector;
diff --git a/gcc/config/s390/s390.c b/gcc/config/s390/s390.c
index 5592674e857..b338cd96136 100644
--- a/gcc/config/s390/s390.c
+++ b/gcc/config/s390/s390.c
@@ -1994,7 +1994,7 @@ s390_decompose_address (rtx addr, struct s390_address *out)
if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
return false;
- orig_disp = plus_constant (orig_disp, offset);
+ orig_disp = plus_constant (Pmode, orig_disp, offset);
}
}
@@ -3564,7 +3564,7 @@ legitimize_pic_address (rtx orig, rtx reg)
new_rtx = legitimize_pic_address (XEXP (addr, 1),
base == reg ? NULL_RTX : reg);
if (GET_CODE (new_rtx) == CONST_INT)
- new_rtx = plus_constant (base, INTVAL (new_rtx));
+ new_rtx = plus_constant (Pmode, base, INTVAL (new_rtx));
else
{
if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
@@ -3807,7 +3807,8 @@ legitimize_tls_address (rtx addr, rtx reg)
new_rtx = gen_rtx_CONST (Pmode, new_rtx);
new_rtx = legitimize_tls_address (new_rtx, reg);
- new_rtx = plus_constant (new_rtx, INTVAL (XEXP (XEXP (addr, 0), 1)));
+ new_rtx = plus_constant (Pmode, new_rtx,
+ INTVAL (XEXP (XEXP (addr, 0), 1)));
new_rtx = force_operand (new_rtx, 0);
}
@@ -4941,7 +4942,7 @@ s390_delegitimize_address (rtx orig_x)
if (GET_CODE (y) == UNSPEC
&& (XINT (y, 1) == UNSPEC_GOTOFF
|| XINT (y, 1) == UNSPEC_PLTOFF))
- return plus_constant (XVECEXP (y, 0, 0), offset);
+ return plus_constant (Pmode, XVECEXP (y, 0, 0), offset);
}
if (GET_CODE (x) != MEM)
@@ -5328,7 +5329,8 @@ print_operand (FILE *file, rtx x, int code)
if (GET_CODE (x) == REG)
x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
else if (GET_CODE (x) == MEM)
- x = change_address (x, VOIDmode, plus_constant (XEXP (x, 0), 4));
+ x = change_address (x, VOIDmode,
+ plus_constant (Pmode, XEXP (x, 0), 4));
else
output_operand_lossage ("register or memory expression expected "
"for 'N' output modifier");
@@ -5338,7 +5340,8 @@ print_operand (FILE *file, rtx x, int code)
if (GET_CODE (x) == REG)
x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
else if (GET_CODE (x) == MEM)
- x = change_address (x, VOIDmode, plus_constant (XEXP (x, 0), 8));
+ x = change_address (x, VOIDmode,
+ plus_constant (Pmode, XEXP (x, 0), 8));
else
output_operand_lossage ("register or memory expression expected "
"for 'M' output modifier");
@@ -5644,7 +5647,7 @@ annotate_constant_pool_refs (rtx *x)
rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
UNSPEC_LTREF);
- *x = replace_equiv_address (*x, plus_constant (addr, off));
+ *x = replace_equiv_address (*x, plus_constant (Pmode, addr, off));
return;
}
}
@@ -5677,7 +5680,7 @@ annotate_constant_pool_refs (rtx *x)
rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
UNSPEC_LTREF);
- SET_SRC (*x) = plus_constant (addr, off);
+ SET_SRC (*x) = plus_constant (Pmode, addr, off);
return;
}
}
@@ -5874,7 +5877,7 @@ replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
&& XVECEXP (XEXP (*x, 0), 0, 0) == ref)
{
rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
- *x = plus_constant (addr, INTVAL (XEXP (*x, 1)));
+ *x = plus_constant (Pmode, addr, INTVAL (XEXP (*x, 1)));
return;
}
@@ -7033,7 +7036,7 @@ s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
else
offset = RETURN_REGNUM * UNITS_PER_LONG;
- addr = plus_constant (frame, offset);
+ addr = plus_constant (Pmode, frame, offset);
addr = memory_address (Pmode, addr);
return gen_rtx_MEM (Pmode, addr);
}
@@ -7049,7 +7052,7 @@ s390_back_chain_rtx (void)
gcc_assert (TARGET_BACKCHAIN);
if (TARGET_PACKED_STACK)
- chain = plus_constant (stack_pointer_rtx,
+ chain = plus_constant (Pmode, stack_pointer_rtx,
STACK_POINTER_OFFSET - UNITS_PER_LONG);
else
chain = stack_pointer_rtx;
@@ -7711,7 +7714,7 @@ static rtx
save_fpr (rtx base, int offset, int regnum)
{
rtx addr;
- addr = gen_rtx_MEM (DFmode, plus_constant (base, offset));
+ addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
set_mem_alias_set (addr, get_varargs_alias_set ());
@@ -7728,7 +7731,7 @@ static rtx
restore_fpr (rtx base, int offset, int regnum)
{
rtx addr;
- addr = gen_rtx_MEM (DFmode, plus_constant (base, offset));
+ addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
set_mem_alias_set (addr, get_frame_alias_set ());
return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
@@ -7759,7 +7762,7 @@ save_gprs (rtx base, int offset, int first, int last)
rtx addr, insn, note;
int i;
- addr = plus_constant (base, offset);
+ addr = plus_constant (Pmode, base, offset);
addr = gen_rtx_MEM (Pmode, addr);
set_mem_alias_set (addr, get_frame_alias_set ());
@@ -7826,7 +7829,8 @@ save_gprs (rtx base, int offset, int first, int last)
if (start > last)
return insn;
- addr = plus_constant (base, offset + (start - first) * UNITS_PER_LONG);
+ addr = plus_constant (Pmode, base,
+ offset + (start - first) * UNITS_PER_LONG);
note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
gen_rtx_REG (Pmode, start),
GEN_INT (last - start + 1));
@@ -7855,7 +7859,7 @@ restore_gprs (rtx base, int offset, int first, int last)
{
rtx addr, insn;
- addr = plus_constant (base, offset);
+ addr = plus_constant (Pmode, base, offset);
addr = gen_rtx_MEM (Pmode, addr);
set_mem_alias_set (addr, get_frame_alias_set ());
@@ -8158,7 +8162,7 @@ s390_emit_prologue (void)
{
if (cfun_frame_layout.backchain_offset)
addr = gen_rtx_MEM (Pmode,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode, stack_pointer_rtx,
cfun_frame_layout.backchain_offset));
else
addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
@@ -8193,7 +8197,7 @@ s390_emit_prologue (void)
for (i = 24; i <= next_fpr; i++)
if (cfun_fpr_bit_p (i - 16))
{
- rtx addr = plus_constant (stack_pointer_rtx,
+ rtx addr = plus_constant (Pmode, stack_pointer_rtx,
cfun_frame_layout.frame_size
+ cfun_frame_layout.f8_offset
+ offset);
@@ -8375,7 +8379,7 @@ s390_emit_epilogue (bool sibcall)
{
if (global_not_special_regno_p (i))
{
- addr = plus_constant (frame_pointer,
+ addr = plus_constant (Pmode, frame_pointer,
offset + cfun_frame_layout.gprs_offset
+ (i - cfun_frame_layout.first_save_gpr_slot)
* UNITS_PER_LONG);
@@ -8403,7 +8407,7 @@ s390_emit_epilogue (bool sibcall)
return_regnum = 4;
return_reg = gen_rtx_REG (Pmode, return_regnum);
- addr = plus_constant (frame_pointer,
+ addr = plus_constant (Pmode, frame_pointer,
offset + cfun_frame_layout.gprs_offset
+ (RETURN_REGNUM
- cfun_frame_layout.first_save_gpr_slot)
@@ -8424,7 +8428,8 @@ s390_emit_epilogue (bool sibcall)
insn = emit_insn (insn);
REG_NOTES (insn) = cfa_restores;
add_reg_note (insn, REG_CFA_DEF_CFA,
- plus_constant (stack_pointer_rtx, STACK_POINTER_OFFSET));
+ plus_constant (Pmode, stack_pointer_rtx,
+ STACK_POINTER_OFFSET));
RTX_FRAME_RELATED_P (insn) = 1;
}
@@ -9285,7 +9290,7 @@ s390_function_profiler (FILE *file, int labelno)
op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
- op[1] = gen_rtx_MEM (Pmode, plus_constant (op[1], UNITS_PER_LONG));
+ op[1] = gen_rtx_MEM (Pmode, plus_constant (Pmode, op[1], UNITS_PER_LONG));
op[2] = gen_rtx_REG (Pmode, 1);
op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
diff --git a/gcc/config/s390/s390.h b/gcc/config/s390/s390.h
index edc6399789e..99c09e8860e 100644
--- a/gcc/config/s390/s390.h
+++ b/gcc/config/s390/s390.h
@@ -539,7 +539,7 @@ extern const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER];
/* Defining this macro makes __builtin_frame_address(0) and
__builtin_return_address(0) work with -fomit-frame-pointer. */
#define INITIAL_FRAME_ADDRESS_RTX \
- (plus_constant (arg_pointer_rtx, -STACK_POINTER_OFFSET))
+ (plus_constant (Pmode, arg_pointer_rtx, -STACK_POINTER_OFFSET))
/* The return address of the current frame is retrieved
from the initial value of register RETURN_REGNUM.
@@ -547,7 +547,8 @@ extern const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER];
the corresponding RETURN_REGNUM register was saved. */
#define DYNAMIC_CHAIN_ADDRESS(FRAME) \
(TARGET_PACKED_STACK ? \
- plus_constant ((FRAME), STACK_POINTER_OFFSET - UNITS_PER_LONG) : (FRAME))
+ plus_constant (Pmode, (FRAME), \
+ STACK_POINTER_OFFSET - UNITS_PER_LONG) : (FRAME))
/* For -mpacked-stack this adds 160 - 8 (96 - 4) to the output of
builtin_frame_address. Otherwise arg pointer -
diff --git a/gcc/config/s390/s390.md b/gcc/config/s390/s390.md
index a875eec7b9f..a467d4a10ec 100644
--- a/gcc/config/s390/s390.md
+++ b/gcc/config/s390/s390.md
@@ -2353,7 +2353,8 @@
XVECEXP (operands[3], 0, i)
= gen_rtx_SET (VOIDmode, gen_rtx_REG (mode, regno + i),
change_address (operands[1], mode,
- plus_constant (from, off + i * GET_MODE_SIZE (mode))));
+ plus_constant (Pmode, from,
+ off + i * GET_MODE_SIZE (mode))));
})
(define_insn "*load_multiple_di"
@@ -2443,7 +2444,8 @@
XVECEXP (operands[3], 0, i)
= gen_rtx_SET (VOIDmode,
change_address (operands[0], mode,
- plus_constant (to, off + i * GET_MODE_SIZE (mode))),
+ plus_constant (Pmode, to,
+ off + i * GET_MODE_SIZE (mode))),
gen_rtx_REG (mode, regno + i));
})
@@ -2658,9 +2660,8 @@
(match_operand:BLK 1 "memory_operand" "Q,Q,Q,Q"))
(use (match_operand 2 "nonmemory_operand" "n,a,a,a"))
(use (match_operand 3 "immediate_operand" "X,R,X,X"))
- (clobber (match_scratch 4 "=X,X,X,&a"))]
- "(GET_MODE (operands[2]) == Pmode || GET_MODE (operands[2]) == VOIDmode)
- && GET_MODE (operands[4]) == Pmode"
+ (clobber (match_scratch:P 4 "=X,X,X,&a"))]
+ "(GET_MODE (operands[2]) == Pmode || GET_MODE (operands[2]) == VOIDmode)"
"#"
[(set_attr "type" "cs")
(set_attr "cpu_facility" "*,*,z10,*")])
@@ -2867,10 +2868,9 @@
(const_int 0))
(use (match_operand 1 "nonmemory_operand" "n,a,a,a"))
(use (match_operand 2 "immediate_operand" "X,R,X,X"))
- (clobber (match_scratch 3 "=X,X,X,&a"))
+ (clobber (match_scratch:P 3 "=X,X,X,&a"))
(clobber (reg:CC CC_REGNUM))]
- "(GET_MODE (operands[1]) == Pmode || GET_MODE (operands[1]) == VOIDmode)
- && GET_MODE (operands[3]) == Pmode"
+ "(GET_MODE (operands[1]) == Pmode || GET_MODE (operands[1]) == VOIDmode)"
"#"
[(set_attr "type" "cs")
(set_attr "cpu_facility" "*,*,z10,*")])
@@ -3040,9 +3040,8 @@
(match_operand:BLK 1 "memory_operand" "Q,Q,Q,Q")))
(use (match_operand 2 "nonmemory_operand" "n,a,a,a"))
(use (match_operand 3 "immediate_operand" "X,R,X,X"))
- (clobber (match_scratch 4 "=X,X,X,&a"))]
- "(GET_MODE (operands[2]) == Pmode || GET_MODE (operands[2]) == VOIDmode)
- && GET_MODE (operands[4]) == Pmode"
+ (clobber (match_scratch:P 4 "=X,X,X,&a"))]
+ "(GET_MODE (operands[2]) == Pmode || GET_MODE (operands[2]) == VOIDmode)"
"#"
[(set_attr "type" "cs")
(set_attr "cpu_facility" "*,*,z10,*")])
diff --git a/gcc/config/score/score.c b/gcc/config/score/score.c
index 28a0265978b..0af0fd50b82 100644
--- a/gcc/config/score/score.c
+++ b/gcc/config/score/score.c
@@ -444,7 +444,7 @@ score_add_offset (rtx reg, HOST_WIDE_INT offset)
offset &= 0x3fff;
}
- return plus_constant (reg, offset);
+ return plus_constant (GET_MODE (reg), reg, offset);
}
/* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
@@ -1546,8 +1546,8 @@ score_prologue (void)
REG_NOTES (insn) =
alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
gen_rtx_SET (VOIDmode, stack_pointer_rtx,
- plus_constant (stack_pointer_rtx,
- -size)),
+ plus_constant (Pmode, stack_pointer_rtx,
+ -size)),
REG_NOTES (insn));
}
diff --git a/gcc/config/sh/sh.c b/gcc/config/sh/sh.c
index 7dbbe1c5728..08ee5b436f5 100644
--- a/gcc/config/sh/sh.c
+++ b/gcc/config/sh/sh.c
@@ -1499,7 +1499,7 @@ expand_block_move (rtx *operands)
set_mem_size (from, 4);
emit_insn (gen_movua (temp, from));
- emit_move_insn (src_addr, plus_constant (src_addr, 4));
+ emit_move_insn (src_addr, plus_constant (Pmode, src_addr, 4));
emit_move_insn (to, temp);
copied += 4;
}
@@ -2999,6 +2999,27 @@ sh_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
{
switch (code)
{
+ /* The lower-subreg pass decides whether to split multi-word regs
+ into individual regs by looking at the cost for a SET of certain
+ modes with the following patterns:
+ (set (reg) (reg))
+ (set (reg) (const_int 0))
+ On machines that support vector-move operations a multi-word move
+ is the same cost as individual reg move. On SH there is no
+ vector-move, so we have to provide the correct cost in the number
+ of move insns to load/store the reg of the mode in question. */
+ case SET:
+ if (register_operand (SET_DEST (x), VOIDmode)
+ && (register_operand (SET_SRC (x), VOIDmode)
+ || satisfies_constraint_Z (SET_SRC (x))))
+ {
+ const enum machine_mode mode = GET_MODE (SET_DEST (x));
+ *total = COSTS_N_INSNS (GET_MODE_SIZE (mode)
+ / mov_insn_size (mode, TARGET_SH2A));
+ return true;
+ }
+ return false;
+
case CONST_INT:
if (TARGET_SHMEDIA)
{
@@ -6584,12 +6605,13 @@ push_regs (HARD_REG_SET *mask, int interrupt_handler)
x = frame_insn (x);
for (i = FIRST_BANKED_REG; i <= LAST_BANKED_REG; i++)
{
- mem = gen_rtx_MEM (SImode, plus_constant (sp_reg, i * 4));
+ mem = gen_rtx_MEM (SImode, plus_constant (Pmode, sp_reg, i * 4));
reg = gen_rtx_REG (SImode, i);
add_reg_note (x, REG_CFA_OFFSET, gen_rtx_SET (SImode, mem, reg));
}
- set = gen_rtx_SET (SImode, sp_reg, plus_constant (sp_reg, - 32));
+ set = gen_rtx_SET (SImode, sp_reg,
+ plus_constant (Pmode, sp_reg, - 32));
add_reg_note (x, REG_CFA_ADJUST_CFA, set);
emit_insn (gen_blockage ());
}
@@ -7817,7 +7839,8 @@ sh_builtin_saveregs (void)
rtx addr, mask;
regbuf = assign_stack_local (BLKmode, bufsize + UNITS_PER_WORD, 0);
- addr = copy_to_mode_reg (Pmode, plus_constant (XEXP (regbuf, 0), 4));
+ addr = copy_to_mode_reg (Pmode, plus_constant (Pmode,
+ XEXP (regbuf, 0), 4));
mask = copy_to_mode_reg (Pmode, GEN_INT (-8));
emit_insn (gen_andsi3 (addr, addr, mask));
regbuf = change_address (regbuf, BLKmode, addr);
@@ -7849,8 +7872,8 @@ sh_builtin_saveregs (void)
We emit the moves in reverse order so that we can use predecrement. */
fpregs = copy_to_mode_reg (Pmode,
- plus_constant (XEXP (regbuf, 0),
- n_floatregs * UNITS_PER_WORD));
+ plus_constant (Pmode, XEXP (regbuf, 0),
+ n_floatregs * UNITS_PER_WORD));
if (TARGET_SH4 || TARGET_SH2A_DOUBLE)
{
rtx mem;
@@ -11777,7 +11800,7 @@ sh_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
error ("need a call-clobbered target register");
}
- this_value = plus_constant (this_rtx, delta);
+ this_value = plus_constant (Pmode, this_rtx, delta);
if (vcall_offset
&& (simple_add || scratch0 != scratch1)
&& strict_memory_address_p (ptr_mode, this_value))
@@ -11803,7 +11826,7 @@ sh_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
if (!did_load)
emit_load_ptr (scratch0, this_rtx);
- offset_addr = plus_constant (scratch0, vcall_offset);
+ offset_addr = plus_constant (Pmode, scratch0, vcall_offset);
if (strict_memory_address_p (ptr_mode, offset_addr))
; /* Do nothing. */
else if (! TARGET_SH5 && scratch0 != scratch1)
diff --git a/gcc/config/sh/sh.md b/gcc/config/sh/sh.md
index 45a5edf6ad7..7167b920641 100644
--- a/gcc/config/sh/sh.md
+++ b/gcc/config/sh/sh.md
@@ -6187,7 +6187,7 @@ label:
if (TARGET_SH5 && true_regnum (operands[1]) < 16)
{
emit_move_insn (stack_pointer_rtx,
- plus_constant (stack_pointer_rtx, -8));
+ plus_constant (Pmode, stack_pointer_rtx, -8));
tos = gen_tmp_stack_mem (DFmode, stack_pointer_rtx);
}
else
@@ -6203,7 +6203,8 @@ label:
gen_rtx_POST_INC (Pmode, stack_pointer_rtx));
insn = emit_insn (gen_movdf_i4 (operands[0], tos, operands[2]));
if (TARGET_SH5 && true_regnum (operands[0]) < 16)
- emit_move_insn (stack_pointer_rtx, plus_constant (stack_pointer_rtx, 8));
+ emit_move_insn (stack_pointer_rtx,
+ plus_constant (Pmode, stack_pointer_rtx, 8));
else
add_reg_note (insn, REG_INC, stack_pointer_rtx);
DONE;
@@ -6413,7 +6414,7 @@ label:
case PLUS:
emit_insn (gen_movsf_ie (reg0, operands[1], operands[2]));
operands[1] = copy_rtx (operands[1]);
- XEXP (operands[1], 0) = plus_constant (addr, 4);
+ XEXP (operands[1], 0) = plus_constant (Pmode, addr, 4);
emit_insn (gen_movsf_ie (reg1, operands[1], operands[2]));
break;
@@ -6480,7 +6481,7 @@ label:
emit_insn (gen_movsf_ie (operands[0], reg0, operands[2]));
operands[0] = copy_rtx (operands[0]);
- XEXP (operands[0], 0) = plus_constant (addr, 4);
+ XEXP (operands[0], 0) = plus_constant (Pmode, addr, 4);
emit_insn (gen_movsf_ie (operands[0], reg1, operands[2]));
break;
@@ -7096,6 +7097,29 @@ label:
}
[(set_attr "type" "cbranch")])
+;; The *branch_true patterns help combine when trying to invert conditions.
+(define_insn "*branch_true"
+ [(set (pc) (if_then_else (ne (zero_extend:SI (subreg:QI (reg:SI T_REG) 0))
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ "TARGET_SH1 && TARGET_LITTLE_ENDIAN"
+{
+ return output_branch (1, insn, operands);
+}
+ [(set_attr "type" "cbranch")])
+
+(define_insn "*branch_true"
+ [(set (pc) (if_then_else (ne (zero_extend:SI (subreg:QI (reg:SI T_REG) 3))
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ "TARGET_SH1 && ! TARGET_LITTLE_ENDIAN"
+{
+ return output_branch (1, insn, operands);
+}
+ [(set_attr "type" "cbranch")])
+
(define_insn "branch_false"
[(set (pc) (if_then_else (eq (reg:SI T_REG) (const_int 0))
(label_ref (match_operand 0 "" ""))
@@ -7106,6 +7130,29 @@ label:
}
[(set_attr "type" "cbranch")])
+;; The *branch_false patterns help combine when trying to invert conditions.
+(define_insn "*branch_false"
+ [(set (pc) (if_then_else (eq (zero_extend:SI (subreg:QI (reg:SI T_REG) 0))
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ "TARGET_SH1 && TARGET_LITTLE_ENDIAN"
+{
+ return output_branch (0, insn, operands);
+}
+ [(set_attr "type" "cbranch")])
+
+(define_insn "*branch_false"
+ [(set (pc) (if_then_else (eq (zero_extend:SI (subreg:QI (reg:SI T_REG) 3))
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ "TARGET_SH1 && ! TARGET_LITTLE_ENDIAN"
+{
+ return output_branch (0, insn, operands);
+}
+ [(set_attr "type" "cbranch")])
+
;; Patterns to prevent reorg from re-combining a condbranch with a branch
;; which destination is too far away.
;; The const_int_operand is distinct for each branch target; it avoids
@@ -9720,7 +9767,7 @@ label:
""
[(const_int 0)])
-;; The *movtt patterns improve code at -O1.
+;; The *movtt patterns eliminate redundant T bit to T bit moves / tests.
(define_insn_and_split "*movtt"
[(set (reg:SI T_REG)
(eq:SI (zero_extend:SI (subreg:QI (reg:SI T_REG) 3))
@@ -11244,7 +11291,8 @@ label:
emit_insn (gen_movsi (shift_reg, operands[3]));
qi_val = gen_rtx_SUBREG (QImode, shift_reg, 3);
}
- addr_target = copy_addr_to_reg (plus_constant (orig_address, size - 1));
+ addr_target = copy_addr_to_reg (plus_constant (Pmode,
+ orig_address, size - 1));
operands[0] = replace_equiv_address (operands[0], addr_target);
emit_insn (gen_movqi (operands[0], qi_val));
diff --git a/gcc/config/sol2-protos.h b/gcc/config/sol2-protos.h
index 9f31ce34aff..3d24e9271d8 100644
--- a/gcc/config/sol2-protos.h
+++ b/gcc/config/sol2-protos.h
@@ -1,6 +1,6 @@
/* Operating system specific prototypes to be used when targeting GCC for any
Solaris 2 system.
- Copyright 2004, 2007, 2010, 2011 Free Software Foundation, Inc.
+ Copyright 2004, 2007, 2010, 2011, 2012 Free Software Foundation, Inc.
This file is part of GCC.
@@ -24,6 +24,7 @@ extern void solaris_elf_asm_comdat_section (const char *, unsigned int, tree);
extern void solaris_file_end (void);
extern void solaris_insert_attributes (tree, tree *);
extern void solaris_output_init_fini (FILE *, tree);
+extern void solaris_override_options (void);
/* In sol2-c.c. */
extern void solaris_register_pragmas (void);
diff --git a/gcc/config/sol2.c b/gcc/config/sol2.c
index b1f0196e7b5..b15b895c70f 100644
--- a/gcc/config/sol2.c
+++ b/gcc/config/sol2.c
@@ -1,5 +1,6 @@
/* General Solaris system support.
- Copyright (C) 2004, 2005 , 2007, 2010, 2011 Free Software Foundation, Inc.
+ Copyright (C) 2004, 2005 , 2007, 2010, 2011, 2012
+ Free Software Foundation, Inc.
Contributed by CodeSourcery, LLC.
This file is part of GCC.
@@ -282,3 +283,12 @@ solaris_file_end (void)
htab_traverse (solaris_comdat_htab, solaris_define_comdat_signature, NULL);
}
+
+void
+solaris_override_options (void)
+{
+ /* Don't emit DWARF3/4 unless specifically selected. Solaris ld cannot
+ handle CIE version 3 in .eh_frame. */
+ if (!global_options_set.x_dwarf_version)
+ dwarf_version = 2;
+}
diff --git a/gcc/config/sol2.h b/gcc/config/sol2.h
index 670dbaa782f..4cbb3081b3a 100644
--- a/gcc/config/sol2.h
+++ b/gcc/config/sol2.h
@@ -99,6 +99,12 @@ along with GCC; see the file COPYING3. If not see
TARGET_SUB_OS_CPP_BUILTINS(); \
} while (0)
+#define SUBTARGET_OVERRIDE_OPTIONS \
+ do { \
+ solaris_override_options (); \
+ } while (0)
+
+
/* It's safe to pass -s always, even if -g is not used. Those options are
handled by both Sun as and GNU as. */
#define ASM_SPEC_BASE \
diff --git a/gcc/config/sparc/linux.h b/gcc/config/sparc/linux.h
index 60dc86976e3..ac6c537ed41 100644
--- a/gcc/config/sparc/linux.h
+++ b/gcc/config/sparc/linux.h
@@ -87,7 +87,7 @@ extern const char *host_detect_local_cpu (int argc, const char **argv);
#define GLIBC_DYNAMIC_LINKER "/lib/ld-linux.so.2"
#undef LINK_SPEC
-#define LINK_SPEC "-m elf32_sparc -Y P,/usr/lib %{shared:-shared} \
+#define LINK_SPEC "-m elf32_sparc %{shared:-shared} \
%{!mno-relax:%{!r:-relax}} \
%{!shared: \
%{!static: \
diff --git a/gcc/config/sparc/linux64.h b/gcc/config/sparc/linux64.h
index 14966b97fc3..f932e98a342 100644
--- a/gcc/config/sparc/linux64.h
+++ b/gcc/config/sparc/linux64.h
@@ -105,7 +105,7 @@ along with GCC; see the file COPYING3. If not see
{ "link_arch_default", LINK_ARCH_DEFAULT_SPEC }, \
{ "link_arch", LINK_ARCH_SPEC },
-#define LINK_ARCH32_SPEC "-m elf32_sparc -Y P,%R/usr/lib %{shared:-shared} \
+#define LINK_ARCH32_SPEC "-m elf32_sparc %{shared:-shared} \
%{!shared: \
%{!static: \
%{rdynamic:-export-dynamic} \
@@ -113,7 +113,7 @@ along with GCC; see the file COPYING3. If not see
%{static:-static}} \
"
-#define LINK_ARCH64_SPEC "-m elf64_sparc -Y P,%R/usr/lib64 %{shared:-shared} \
+#define LINK_ARCH64_SPEC "-m elf64_sparc %{shared:-shared} \
%{!shared: \
%{!static: \
%{rdynamic:-export-dynamic} \
diff --git a/gcc/config/sparc/sparc.c b/gcc/config/sparc/sparc.c
index 11bd1fe90ff..4cb381e60e9 100644
--- a/gcc/config/sparc/sparc.c
+++ b/gcc/config/sparc/sparc.c
@@ -1,7 +1,7 @@
/* Subroutines for insn-output.c for SPARC.
Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
- 2011
+ 2011, 2012
Free Software Foundation, Inc.
Contributed by Michael Tiemann (tiemann@cygnus.com)
64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
@@ -2724,7 +2724,12 @@ emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
if (GET_CODE (this_arg) == MEM
&& ! force_stack_temp)
- this_arg = XEXP (this_arg, 0);
+ {
+ tree expr = MEM_EXPR (this_arg);
+ if (expr)
+ mark_addressable (expr);
+ this_arg = XEXP (this_arg, 0);
+ }
else if (CONSTANT_P (this_arg)
&& ! force_stack_temp)
{
@@ -3861,7 +3866,7 @@ sparc_legitimize_pic_address (rtx orig, rtx reg)
if (GET_CODE (offset) == CONST_INT)
{
if (SMALL_INT (offset))
- return plus_constant (base, INTVAL (offset));
+ return plus_constant (Pmode, base, INTVAL (offset));
else if (can_create_pseudo_p ())
offset = force_reg (Pmode, offset);
else
@@ -4600,7 +4605,7 @@ sparc_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
emit_move_insn (g1, GEN_INT (first));
emit_insn (gen_rtx_SET (VOIDmode, g1,
gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
- emit_stack_probe (plus_constant (g1, -size));
+ emit_stack_probe (plus_constant (Pmode, g1, -size));
}
/* The run-time loop is made up of 10 insns in the generic case while the
@@ -4620,11 +4625,12 @@ sparc_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
for (i = 2 * PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
{
emit_insn (gen_rtx_SET (VOIDmode, g1,
- plus_constant (g1, -PROBE_INTERVAL)));
+ plus_constant (Pmode, g1, -PROBE_INTERVAL)));
emit_stack_probe (g1);
}
- emit_stack_probe (plus_constant (g1, (i - PROBE_INTERVAL) - size));
+ emit_stack_probe (plus_constant (Pmode, g1,
+ (i - PROBE_INTERVAL) - size));
}
/* Otherwise, do the same as above, but in a loop. Note that we must be
@@ -4677,7 +4683,7 @@ sparc_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
that SIZE is equal to ROUNDED_SIZE. */
if (size != rounded_size)
- emit_stack_probe (plus_constant (g4, rounded_size - size));
+ emit_stack_probe (plus_constant (Pmode, g4, rounded_size - size));
}
/* Make sure nothing is scheduled before we are done. */
@@ -4754,7 +4760,8 @@ emit_save_or_restore_regs (unsigned int low, unsigned int high, rtx base,
{
if (save_p (i, leaf_function))
{
- mem = gen_frame_mem (DImode, plus_constant (base, offset));
+ mem = gen_frame_mem (DImode, plus_constant (Pmode,
+ base, offset));
if (action_true == SORR_SAVE)
{
insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
@@ -4779,7 +4786,7 @@ emit_save_or_restore_regs (unsigned int low, unsigned int high, rtx base,
if (fp_offset >= 0)
{
- mem = gen_frame_mem (DImode, plus_constant (base, fp_offset));
+ mem = gen_frame_mem (DImode, plus_constant (Pmode, base, fp_offset));
emit_move_insn (hard_frame_pointer_rtx, mem);
}
}
@@ -4815,7 +4822,7 @@ emit_save_or_restore_regs (unsigned int low, unsigned int high, rtx base,
continue;
}
- mem = gen_frame_mem (mode, plus_constant (base, offset));
+ mem = gen_frame_mem (mode, plus_constant (Pmode, base, offset));
if (action_true == SORR_SAVE)
{
insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
@@ -4823,12 +4830,14 @@ emit_save_or_restore_regs (unsigned int low, unsigned int high, rtx base,
if (mode == DImode)
{
rtx set1, set2;
- mem = gen_frame_mem (SImode, plus_constant (base, offset));
+ mem = gen_frame_mem (SImode, plus_constant (Pmode, base,
+ offset));
set1 = gen_rtx_SET (VOIDmode, mem,
gen_rtx_REG (SImode, regno));
RTX_FRAME_RELATED_P (set1) = 1;
mem
- = gen_frame_mem (SImode, plus_constant (base, offset + 4));
+ = gen_frame_mem (SImode, plus_constant (Pmode, base,
+ offset + 4));
set2 = gen_rtx_SET (VOIDmode, mem,
gen_rtx_REG (SImode, regno + 1));
RTX_FRAME_RELATED_P (set2) = 1;
@@ -4918,7 +4927,7 @@ emit_window_save (rtx increment)
/* The CFA is %fp, the hard frame pointer. */
add_reg_note (insn, REG_CFA_DEF_CFA,
- plus_constant (hard_frame_pointer_rtx,
+ plus_constant (Pmode, hard_frame_pointer_rtx,
INCOMING_FRAME_SP_OFFSET));
return insn;
@@ -5140,7 +5149,7 @@ sparc_flat_expand_prologue (void)
add_reg_note (insn, REG_CFA_ADJUST_CFA,
gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode, stack_pointer_rtx,
size)));
}
@@ -6731,10 +6740,10 @@ sparc_struct_value_rtx (tree fndecl, int incoming)
rtx mem;
if (incoming)
- mem = gen_frame_mem (Pmode, plus_constant (frame_pointer_rtx,
+ mem = gen_frame_mem (Pmode, plus_constant (Pmode, frame_pointer_rtx,
STRUCT_VALUE_OFFSET));
else
- mem = gen_frame_mem (Pmode, plus_constant (stack_pointer_rtx,
+ mem = gen_frame_mem (Pmode, plus_constant (Pmode, stack_pointer_rtx,
STRUCT_VALUE_OFFSET));
/* Only follow the SPARC ABI for fixed-size structure returns.
@@ -6765,7 +6774,8 @@ sparc_struct_value_rtx (tree fndecl, int incoming)
it's an unimp instruction (the most significant 10 bits
will be zero). */
emit_move_insn (scratch, gen_rtx_MEM (SImode,
- plus_constant (ret_reg, 8)));
+ plus_constant (Pmode,
+ ret_reg, 8)));
/* Assume the size is valid and pre-adjust */
emit_insn (gen_add3_insn (ret_reg, ret_reg, GEN_INT (4)));
emit_cmp_and_jump_insns (scratch, size_rtx, EQ, const0_rtx, SImode,
@@ -7413,7 +7423,12 @@ sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
if (TARGET_ARCH64)
{
if (MEM_P (x))
- slot0 = x;
+ {
+ tree expr = MEM_EXPR (x);
+ if (expr)
+ mark_addressable (expr);
+ slot0 = x;
+ }
else
{
slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
@@ -7421,7 +7436,12 @@ sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
}
if (MEM_P (y))
- slot1 = y;
+ {
+ tree expr = MEM_EXPR (y);
+ if (expr)
+ mark_addressable (expr);
+ slot1 = y;
+ }
else
{
slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
@@ -10405,7 +10425,7 @@ emit_and_preserve (rtx seq, rtx reg, rtx reg2)
HOST_WIDE_INT size = SPARC_STACK_ALIGN (offset + 2*UNITS_PER_WORD);
rtx slot
- = gen_rtx_MEM (word_mode, plus_constant (stack_pointer_rtx,
+ = gen_rtx_MEM (word_mode, plus_constant (Pmode, stack_pointer_rtx,
SPARC_STACK_BIAS + offset));
emit_insn (gen_stack_pointer_dec (GEN_INT (size)));
diff --git a/gcc/config/sparc/sparc.h b/gcc/config/sparc/sparc.h
index a1919b4e7e7..a2bf09f1c81 100644
--- a/gcc/config/sparc/sparc.h
+++ b/gcc/config/sparc/sparc.h
@@ -1273,11 +1273,11 @@ do { \
return an rtx for the address of the word in the frame
that holds the dynamic chain--the previous frame's address. */
#define DYNAMIC_CHAIN_ADDRESS(frame) \
- plus_constant (frame, 14 * UNITS_PER_WORD + SPARC_STACK_BIAS)
+ plus_constant (Pmode, frame, 14 * UNITS_PER_WORD + SPARC_STACK_BIAS)
/* Given an rtx for the frame pointer,
return an rtx for the address of the frame. */
-#define FRAME_ADDR_RTX(frame) plus_constant (frame, SPARC_STACK_BIAS)
+#define FRAME_ADDR_RTX(frame) plus_constant (Pmode, frame, SPARC_STACK_BIAS)
/* The return address isn't on the stack, it is in a register, so we can't
access it from the current frame pointer. We can access it from the
@@ -1299,7 +1299,7 @@ do { \
((count == -1) \
? gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM) \
: gen_rtx_MEM (Pmode, \
- memory_address (Pmode, plus_constant (frame, \
+ memory_address (Pmode, plus_constant (Pmode, frame, \
15 * UNITS_PER_WORD \
+ SPARC_STACK_BIAS))))
@@ -1309,7 +1309,8 @@ do { \
is something you can return to. */
#define INCOMING_RETURN_ADDR_REGNUM 15
#define INCOMING_RETURN_ADDR_RTX \
- plus_constant (gen_rtx_REG (word_mode, INCOMING_RETURN_ADDR_REGNUM), 8)
+ plus_constant (word_mode, \
+ gen_rtx_REG (word_mode, INCOMING_RETURN_ADDR_REGNUM), 8)
#define DWARF_FRAME_RETURN_COLUMN \
DWARF_FRAME_REGNUM (INCOMING_RETURN_ADDR_REGNUM)
diff --git a/gcc/config/sparc/sparc.md b/gcc/config/sparc/sparc.md
index 4c7a2b0b232..aafff5598d1 100644
--- a/gcc/config/sparc/sparc.md
+++ b/gcc/config/sparc/sparc.md
@@ -6436,7 +6436,7 @@
instruction (the most significant 10 bits will be zero). If so,
update the return address to skip the unimp instruction. */
emit_move_insn (value,
- gen_rtx_MEM (SImode, plus_constant (rtnreg, 8)));
+ gen_rtx_MEM (SImode, plus_constant (SImode, rtnreg, 8)));
emit_insn (gen_lshrsi3 (value, value, GEN_INT (22)));
emit_insn (gen_update_return (rtnreg, value));
}
diff --git a/gcc/config/spu/spu.c b/gcc/config/spu/spu.c
index c65d003c40e..dc5ca45dd57 100644
--- a/gcc/config/spu/spu.c
+++ b/gcc/config/spu/spu.c
@@ -858,7 +858,7 @@ spu_expand_insv (rtx ops[])
rtx mask1 = gen_reg_rtx (TImode);
rtx dst1 = gen_reg_rtx (TImode);
rtx mem1;
- addr1 = plus_constant (addr, 16);
+ addr1 = plus_constant (Pmode, addr, 16);
addr1 = gen_rtx_AND (Pmode, addr1, GEN_INT (-16));
emit_insn (gen_subsi3 (shl, GEN_INT (16), low));
emit_insn (gen_shlqby_ti (mask1, mask, shl));
@@ -2949,7 +2949,7 @@ spu_machine_dependent_reorg (void)
if (NONJUMP_INSN_P (branch))
offset += get_attr_length (branch);
if (offset > 0)
- XVECEXP (unspec, 0, 0) = plus_constant (label_ref, offset);
+ XVECEXP (unspec, 0, 0) = plus_constant (Pmode, label_ref, offset);
}
spu_var_tracking ();
@@ -4379,7 +4379,7 @@ spu_setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
for (regno = ncum; regno < MAX_REGISTER_ARGS; regno++)
{
tmp = gen_frame_mem (V4SImode,
- plus_constant (virtual_incoming_args_rtx,
+ plus_constant (Pmode, virtual_incoming_args_rtx,
offset));
emit_move_insn (tmp,
gen_rtx_REG (V4SImode, FIRST_ARG_REGNUM + regno));
@@ -4556,7 +4556,7 @@ ea_load_store_inline (rtx mem, bool is_store, rtx ea_addr, rtx data_addr)
tag_equal_hi = gen_reg_rtx (V4SImode);
}
- emit_move_insn (index_mask, plus_constant (tag_size_sym, -128));
+ emit_move_insn (index_mask, plus_constant (Pmode, tag_size_sym, -128));
emit_move_insn (tag_arr, tag_arr_sym);
v = 0x0001020300010203LL;
emit_move_insn (splat_mask, immed_double_const (v, v, TImode));
@@ -4583,14 +4583,16 @@ ea_load_store_inline (rtx mem, bool is_store, rtx ea_addr, rtx data_addr)
emit_move_insn (cache_tag, gen_rtx_MEM (V4SImode, tag_addr));
if (spu_ea_model != 32)
emit_move_insn (cache_tag_hi, gen_rtx_MEM (V4SImode,
- plus_constant (tag_addr, 16)));
+ plus_constant (Pmode,
+ tag_addr, 16)));
/* tag = ea_addr & -128 */
emit_insn (gen_andv4si3 (tag, splat, spu_const (V4SImode, -128)));
/* Read all four cache data pointers. */
emit_move_insn (cache_ptrs, gen_rtx_MEM (TImode,
- plus_constant (tag_addr, 32)));
+ plus_constant (Pmode,
+ tag_addr, 32)));
/* Compare tags. */
emit_insn (gen_ceq_v4si (tag_equal, tag, cache_tag));
@@ -4941,7 +4943,7 @@ spu_expand_load (rtx dst0, rtx dst1, rtx src, int extra_rotby)
if (dst1)
{
- addr1 = plus_constant (copy_rtx (addr), 16);
+ addr1 = plus_constant (SImode, copy_rtx (addr), 16);
addr1 = gen_rtx_AND (SImode, addr1, GEN_INT (-16));
emit_insn (gen__movti (dst1, change_address (src, TImode, addr1)));
}
diff --git a/gcc/config/spu/spu.h b/gcc/config/spu/spu.h
index d89bf49f2d2..ce0bc8edb5f 100644
--- a/gcc/config/spu/spu.h
+++ b/gcc/config/spu/spu.h
@@ -250,7 +250,7 @@ targetm.resolve_overloaded_builtin = spu_resolve_overloaded_builtin; \
#define FIRST_PARM_OFFSET(FNDECL) (0)
-#define DYNAMIC_CHAIN_ADDRESS(FP) plus_constant ((FP), -16)
+#define DYNAMIC_CHAIN_ADDRESS(FP) plus_constant (Pmode, (FP), -16)
#define RETURN_ADDR_RTX(COUNT,FP) (spu_return_addr (COUNT, FP))
diff --git a/gcc/config/stormy16/stormy16.c b/gcc/config/stormy16/stormy16.c
index 99acee6f69b..a2e6481d07b 100644
--- a/gcc/config/stormy16/stormy16.c
+++ b/gcc/config/stormy16/stormy16.c
@@ -1053,7 +1053,8 @@ xstormy16_expand_prologue (void)
gen_rtx_MEM (Pmode, stack_pointer_rtx),
reg);
XVECEXP (dwarf, 0, 1) = gen_rtx_SET (Pmode, stack_pointer_rtx,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode,
+ stack_pointer_rtx,
GET_MODE_SIZE (Pmode)));
add_reg_note (insn, REG_FRAME_RELATED_EXPR, dwarf);
RTX_FRAME_RELATED_P (XVECEXP (dwarf, 0, 0)) = 1;
@@ -1076,7 +1077,8 @@ xstormy16_expand_prologue (void)
gen_rtx_MEM (Pmode, stack_pointer_rtx),
reg);
XVECEXP (dwarf, 0, 1) = gen_rtx_SET (Pmode, stack_pointer_rtx,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode, \
+ stack_pointer_rtx,
GET_MODE_SIZE (Pmode)));
add_reg_note (insn, REG_FRAME_RELATED_EXPR, dwarf);
RTX_FRAME_RELATED_P (XVECEXP (dwarf, 0, 0)) = 1;
@@ -2564,7 +2566,7 @@ combine_bnp (rtx insn)
if (! (mask & 0xff))
{
- addr = plus_constant (addr, 1);
+ addr = plus_constant (Pmode, addr, 1);
mask >>= 8;
}
mem = gen_rtx_MEM (QImode, addr);
diff --git a/gcc/config/tilegx/tilegx.c b/gcc/config/tilegx/tilegx.c
index 217682eaa66..a23e193ee3a 100644
--- a/gcc/config/tilegx/tilegx.c
+++ b/gcc/config/tilegx/tilegx.c
@@ -352,7 +352,8 @@ tilegx_setup_incoming_varargs (cumulative_args_t cum,
{
alias_set_type set = get_varargs_alias_set ();
rtx tmp =
- gen_rtx_MEM (BLKmode, plus_constant (virtual_incoming_args_rtx,
+ gen_rtx_MEM (BLKmode, plus_constant (Pmode,
+ virtual_incoming_args_rtx,
-STACK_POINTER_OFFSET -
UNITS_PER_WORD *
(TILEGX_NUM_ARG_REGS -
@@ -1755,7 +1756,7 @@ tilegx_expand_unaligned_load (rtx dest_reg, rtx mem, HOST_WIDE_INT bitsize,
implicitly alias surrounding code. Ideally we'd have some alias
set that covered all types except those with alignment 8 or
higher. */
- addr_lo = force_reg (Pmode, plus_constant (mema, byte_offset));
+ addr_lo = force_reg (Pmode, plus_constant (Pmode, mema, byte_offset));
mem_lo = change_address (mem, mode,
gen_rtx_AND (GET_MODE (mema), addr_lo,
GEN_INT (-8)));
@@ -1764,7 +1765,7 @@ tilegx_expand_unaligned_load (rtx dest_reg, rtx mem, HOST_WIDE_INT bitsize,
/* Load the high word at an address that will not fault if the low
address is aligned and at the very end of a page. */
last_byte_offset = (bit_offset + bitsize - 1) / BITS_PER_UNIT;
- addr_hi = force_reg (Pmode, plus_constant (mema, last_byte_offset));
+ addr_hi = force_reg (Pmode, plus_constant (Pmode, mema, last_byte_offset));
mem_hi = change_address (mem, mode,
gen_rtx_AND (GET_MODE (mema), addr_hi,
GEN_INT (-8)));
@@ -4890,7 +4891,7 @@ tilegx_trampoline_init (rtx m_tramp, tree fndecl, rtx static_chain)
/* Get pointers to the beginning and end of the code block. */
begin_addr = force_reg (Pmode, XEXP (m_tramp, 0));
- end_addr = force_reg (Pmode, plus_constant (XEXP (m_tramp, 0),
+ end_addr = force_reg (Pmode, plus_constant (Pmode, XEXP (m_tramp, 0),
TRAMPOLINE_SIZE));
emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__clear_cache"),
diff --git a/gcc/config/tilegx/tilegx.h b/gcc/config/tilegx/tilegx.h
index 081ecc1756a..ac69061d85c 100644
--- a/gcc/config/tilegx/tilegx.h
+++ b/gcc/config/tilegx/tilegx.h
@@ -243,7 +243,8 @@ enum reg_class
#define FRAME_GROWS_DOWNWARD 1
#define STARTING_FRAME_OFFSET 0
-#define DYNAMIC_CHAIN_ADDRESS(FRAME) plus_constant ((FRAME), UNITS_PER_WORD)
+#define DYNAMIC_CHAIN_ADDRESS(FRAME) \
+ plus_constant (Pmode, (FRAME), UNITS_PER_WORD)
#define FIRST_PARM_OFFSET(FNDECL) 0
diff --git a/gcc/config/tilepro/tilepro.c b/gcc/config/tilepro/tilepro.c
index 011ac083add..2b18b4f6622 100644
--- a/gcc/config/tilepro/tilepro.c
+++ b/gcc/config/tilepro/tilepro.c
@@ -356,7 +356,8 @@ tilepro_setup_incoming_varargs (cumulative_args_t cum,
{
alias_set_type set = get_varargs_alias_set ();
rtx tmp =
- gen_rtx_MEM (BLKmode, plus_constant (virtual_incoming_args_rtx,
+ gen_rtx_MEM (BLKmode, plus_constant (Pmode, \
+ virtual_incoming_args_rtx,
-STACK_POINTER_OFFSET -
UNITS_PER_WORD *
(TILEPRO_NUM_ARG_REGS -
@@ -1640,7 +1641,7 @@ tilepro_expand_unaligned_load (rtx dest_reg, rtx mem, HOST_WIDE_INT bitsize,
implicitly alias surrounding code. Ideally we'd have some alias
set that covered all types except those with alignment 8 or
higher. */
- addr_lo = force_reg (Pmode, plus_constant (mema, byte_offset));
+ addr_lo = force_reg (Pmode, plus_constant (Pmode, mema, byte_offset));
mem_lo = change_address (mem, mode,
gen_rtx_AND (Pmode, addr_lo, GEN_INT (-4)));
set_mem_alias_set (mem_lo, 0);
@@ -1648,7 +1649,7 @@ tilepro_expand_unaligned_load (rtx dest_reg, rtx mem, HOST_WIDE_INT bitsize,
/* Load the high word at an address that will not fault if the low
address is aligned and at the very end of a page. */
last_byte_offset = (bit_offset + bitsize - 1) / BITS_PER_UNIT;
- addr_hi = force_reg (Pmode, plus_constant (mema, last_byte_offset));
+ addr_hi = force_reg (Pmode, plus_constant (Pmode, mema, last_byte_offset));
mem_hi = change_address (mem, mode,
gen_rtx_AND (Pmode, addr_hi, GEN_INT (-4)));
set_mem_alias_set (mem_hi, 0);
@@ -4455,7 +4456,7 @@ tilepro_trampoline_init (rtx m_tramp, tree fndecl, rtx static_chain)
/* Get pointers to the beginning and end of the code block. */
begin_addr = force_reg (Pmode, XEXP (m_tramp, 0));
- end_addr = force_reg (Pmode, plus_constant (XEXP (m_tramp, 0),
+ end_addr = force_reg (Pmode, plus_constant (Pmode, XEXP (m_tramp, 0),
TRAMPOLINE_SIZE));
emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__clear_cache"),
diff --git a/gcc/config/tilepro/tilepro.h b/gcc/config/tilepro/tilepro.h
index 930612d1fb4..593bf68d60a 100644
--- a/gcc/config/tilepro/tilepro.h
+++ b/gcc/config/tilepro/tilepro.h
@@ -226,7 +226,8 @@ enum reg_class
#define FRAME_GROWS_DOWNWARD 1
#define STARTING_FRAME_OFFSET 0
-#define DYNAMIC_CHAIN_ADDRESS(FRAME) plus_constant ((FRAME), UNITS_PER_WORD)
+#define DYNAMIC_CHAIN_ADDRESS(FRAME) \
+ plus_constant (Pmode, (FRAME), UNITS_PER_WORD)
#define FIRST_PARM_OFFSET(FNDECL) 0
diff --git a/gcc/config/v850/v850.c b/gcc/config/v850/v850.c
index 5c43607347e..e2a72b0b3d5 100644
--- a/gcc/config/v850/v850.c
+++ b/gcc/config/v850/v850.c
@@ -1719,7 +1719,8 @@ expand_prologue (void)
if (num_save > 0 && REGNO (save_regs[num_save-1]) == LINK_POINTER_REGNUM)
{
emit_move_insn (gen_rtx_MEM (SImode,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode,
+ stack_pointer_rtx,
offset)),
save_regs[--num_save]);
offset -= 4;
@@ -1728,7 +1729,8 @@ expand_prologue (void)
for (i = 0; i < num_save; i++)
{
emit_move_insn (gen_rtx_MEM (SImode,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode,
+ stack_pointer_rtx,
offset)),
save_regs[i]);
offset -= 4;
@@ -1916,7 +1918,8 @@ expand_epilogue (void)
{
emit_move_insn (restore_regs[--num_restore],
gen_rtx_MEM (SImode,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode,
+ stack_pointer_rtx,
offset)));
offset -= 4;
}
@@ -1925,7 +1928,8 @@ expand_epilogue (void)
{
emit_move_insn (restore_regs[i],
gen_rtx_MEM (SImode,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode,
+ stack_pointer_rtx,
offset)));
emit_use (restore_regs[i]);
diff --git a/gcc/config/v850/v850.md b/gcc/config/v850/v850.md
index bf3492e7975..4ac565383cd 100644
--- a/gcc/config/v850/v850.md
+++ b/gcc/config/v850/v850.md
@@ -648,7 +648,8 @@
rtx xoperands[2];
xoperands[0] = gen_rtx_MEM (QImode,
- plus_constant (XEXP (operands[0], 0), log2 / 8));
+ plus_constant (Pmode, XEXP (operands[0], 0),
+ log2 / 8));
xoperands[1] = GEN_INT (log2 % 8);
output_asm_insn ("clr1 %1,%0", xoperands);
return "";
@@ -668,7 +669,8 @@
rtx xoperands[2];
xoperands[0] = gen_rtx_MEM (QImode,
- plus_constant (XEXP (operands[0], 0), log2 / 8));
+ plus_constant (Pmode, XEXP (operands[0], 0),
+ log2 / 8));
xoperands[1] = GEN_INT (log2 % 8);
output_asm_insn ("clr1 %1,%0", xoperands);
return "";
@@ -719,7 +721,7 @@
{
rtx xoperands[2];
xoperands[0] = gen_rtx_MEM (QImode,
- plus_constant (XEXP (operands[0], 0),
+ plus_constant (Pmode, XEXP (operands[0], 0),
log2 / 8));
xoperands[1] = GEN_INT (log2 % 8);
output_asm_insn ("set1 %1,%0", xoperands);
@@ -745,7 +747,7 @@
{
rtx xoperands[2];
xoperands[0] = gen_rtx_MEM (QImode,
- plus_constant (XEXP (operands[0], 0),
+ plus_constant (Pmode, XEXP (operands[0], 0),
log2 / 8));
xoperands[1] = GEN_INT (log2 % 8);
output_asm_insn ("set1 %1,%0", xoperands);
@@ -798,7 +800,7 @@
{
rtx xoperands[2];
xoperands[0] = gen_rtx_MEM (QImode,
- plus_constant (XEXP (operands[0], 0),
+ plus_constant (Pmode, XEXP (operands[0], 0),
log2 / 8));
xoperands[1] = GEN_INT (log2 % 8);
output_asm_insn ("not1 %1,%0", xoperands);
@@ -824,7 +826,7 @@
{
rtx xoperands[2];
xoperands[0] = gen_rtx_MEM (QImode,
- plus_constant (XEXP (operands[0], 0),
+ plus_constant (Pmode, XEXP (operands[0], 0),
log2 / 8));
xoperands[1] = GEN_INT (log2 % 8);
output_asm_insn ("not1 %1,%0", xoperands);
diff --git a/gcc/config/vax/elf.h b/gcc/config/vax/elf.h
index 8e5f4afe650..7796f348d59 100644
--- a/gcc/config/vax/elf.h
+++ b/gcc/config/vax/elf.h
@@ -55,13 +55,15 @@ along with GCC; see the file COPYING3. If not see
/* Place the top of the stack for the DWARF2 EH stackadj value. */
#define EH_RETURN_STACKADJ_RTX \
gen_rtx_MEM (SImode, \
- plus_constant (gen_rtx_REG (Pmode, FRAME_POINTER_REGNUM),\
+ plus_constant (Pmode, \
+ gen_rtx_REG (Pmode, FRAME_POINTER_REGNUM),\
-4))
/* Simple store the return handler into the call frame. */
#define EH_RETURN_HANDLER_RTX \
gen_rtx_MEM (Pmode, \
- plus_constant (gen_rtx_REG (Pmode, FRAME_POINTER_REGNUM),\
+ plus_constant (Pmode, \
+ gen_rtx_REG (Pmode, FRAME_POINTER_REGNUM),\
16))
diff --git a/gcc/config/vax/vax.c b/gcc/config/vax/vax.c
index 4e704b6f714..aa929ca1e50 100644
--- a/gcc/config/vax/vax.c
+++ b/gcc/config/vax/vax.c
@@ -138,7 +138,7 @@ vax_add_reg_cfa_offset (rtx insn, int offset, rtx src)
{
rtx x;
- x = plus_constant (frame_pointer_rtx, offset);
+ x = plus_constant (Pmode, frame_pointer_rtx, offset);
x = gen_rtx_MEM (SImode, x);
x = gen_rtx_SET (VOIDmode, x, src);
add_reg_note (insn, REG_CFA_OFFSET, x);
@@ -201,7 +201,7 @@ vax_expand_prologue (void)
it will be processed first. This is required to allow the other
notes be interpreted properly. */
add_reg_note (insn, REG_CFA_DEF_CFA,
- plus_constant (frame_pointer_rtx, offset));
+ plus_constant (Pmode, frame_pointer_rtx, offset));
/* Allocate the local stack frame. */
size = get_frame_size ();
@@ -373,11 +373,13 @@ print_operand_address (FILE * file, rtx addr)
if (offset)
{
if (CONST_INT_P (offset))
- offset = plus_constant (XEXP (addr, 0), INTVAL (offset));
+ offset = plus_constant (Pmode, XEXP (addr, 0),
+ INTVAL (offset));
else
{
gcc_assert (CONST_INT_P (XEXP (addr, 0)));
- offset = plus_constant (offset, INTVAL (XEXP (addr, 0)));
+ offset = plus_constant (Pmode, offset,
+ INTVAL (XEXP (addr, 0)));
}
}
offset = XEXP (addr, 0);
@@ -402,11 +404,13 @@ print_operand_address (FILE * file, rtx addr)
if (offset)
{
if (CONST_INT_P (offset))
- offset = plus_constant (XEXP (addr, 1), INTVAL (offset));
+ offset = plus_constant (Pmode, XEXP (addr, 1),
+ INTVAL (offset));
else
{
gcc_assert (CONST_INT_P (XEXP (addr, 1)));
- offset = plus_constant (offset, INTVAL (XEXP (addr, 1)));
+ offset = plus_constant (Pmode, offset,
+ INTVAL (XEXP (addr, 1)));
}
}
offset = XEXP (addr, 1);
@@ -2108,7 +2112,7 @@ vax_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
mem = adjust_address (m_tramp, SImode, 4);
emit_move_insn (mem, cxt);
mem = adjust_address (m_tramp, SImode, 11);
- emit_move_insn (mem, plus_constant (fnaddr, 2));
+ emit_move_insn (mem, plus_constant (Pmode, fnaddr, 2));
emit_insn (gen_sync_istream ());
}
diff --git a/gcc/config/vax/vax.h b/gcc/config/vax/vax.h
index 0c835637ae1..a9de79c62fb 100644
--- a/gcc/config/vax/vax.h
+++ b/gcc/config/vax/vax.h
@@ -259,7 +259,7 @@ enum reg_class { NO_REGS, ALL_REGS, LIM_REG_CLASSES };
/* Given an rtx for the address of a frame,
return an rtx for the address of the word in the frame
that holds the dynamic chain--the previous frame's address. */
-#define DYNAMIC_CHAIN_ADDRESS(FRAME) plus_constant ((FRAME), 12)
+#define DYNAMIC_CHAIN_ADDRESS(FRAME) plus_constant (Pmode, (FRAME), 12)
/* If we generate an insn to push BYTES bytes,
this says how many the stack pointer really advances by.
@@ -370,7 +370,8 @@ enum reg_class { NO_REGS, ALL_REGS, LIM_REG_CLASSES };
#define RETURN_ADDR_RTX(COUNT, FRAME) \
((COUNT == 0) \
- ? gen_rtx_MEM (Pmode, plus_constant (FRAME, RETURN_ADDRESS_OFFSET)) \
+ ? gen_rtx_MEM (Pmode, plus_constant (Pmode, FRAME, \
+ RETURN_ADDRESS_OFFSET)) \
: (rtx) 0)
diff --git a/gcc/config/xtensa/xtensa.c b/gcc/config/xtensa/xtensa.c
index 7f42e48dd53..fa38008bb0b 100644
--- a/gcc/config/xtensa/xtensa.c
+++ b/gcc/config/xtensa/xtensa.c
@@ -2682,7 +2682,8 @@ xtensa_expand_prologue (void)
note_rtx = gen_rtx_SET (VOIDmode, (frame_pointer_needed
? hard_frame_pointer_rtx
: stack_pointer_rtx),
- plus_constant (stack_pointer_rtx, -total_size));
+ plus_constant (Pmode, stack_pointer_rtx,
+ -total_size));
RTX_FRAME_RELATED_P (insn) = 1;
add_reg_note (insn, REG_FRAME_RELATED_EXPR, note_rtx);
}
@@ -2707,7 +2708,7 @@ xtensa_return_addr (int count, rtx frame)
retaddr = gen_rtx_REG (Pmode, A0_REG);
else
{
- rtx addr = plus_constant (frame, -4 * UNITS_PER_WORD);
+ rtx addr = plus_constant (Pmode, frame, -4 * UNITS_PER_WORD);
addr = memory_address (Pmode, addr);
retaddr = gen_reg_rtx (Pmode);
emit_move_insn (retaddr, gen_rtx_MEM (Pmode, addr));
@@ -3608,7 +3609,8 @@ static rtx
xtensa_static_chain (const_tree ARG_UNUSED (fndecl), bool incoming_p)
{
rtx base = incoming_p ? arg_pointer_rtx : stack_pointer_rtx;
- return gen_frame_mem (Pmode, plus_constant (base, -5 * UNITS_PER_WORD));
+ return gen_frame_mem (Pmode, plus_constant (Pmode, base,
+ -5 * UNITS_PER_WORD));
}
diff --git a/gcc/configure b/gcc/configure
index c1b0e465c35..557a4cc794d 100755
--- a/gcc/configure
+++ b/gcc/configure
@@ -21511,20 +21511,20 @@ fi
fi
ORIGINAL_PLUGIN_LD_FOR_TARGET=$gcc_cv_ld
-PLUGIN_LD=`basename $gcc_cv_ld`
+PLUGIN_LD_SUFFIX=`basename $gcc_cv_ld | sed -e "s,$target_alias-,,"`
# Check whether --with-plugin-ld was given.
if test "${with_plugin_ld+set}" = set; then :
withval=$with_plugin_ld; if test x"$withval" != x; then
ORIGINAL_PLUGIN_LD_FOR_TARGET="$withval"
- PLUGIN_LD="$withval"
+ PLUGIN_LD_SUFFIX=`echo $withval | sed -e "s,$target_alias-,,"`
fi
fi
cat >>confdefs.h <<_ACEOF
-#define PLUGIN_LD "$PLUGIN_LD"
+#define PLUGIN_LD_SUFFIX "$PLUGIN_LD_SUFFIX"
_ACEOF
@@ -24628,6 +24628,39 @@ $as_echo "#define HAVE_AS_IX86_SAHF 1" >>confdefs.h
fi
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking assembler for hle prefixes" >&5
+$as_echo_n "checking assembler for hle prefixes... " >&6; }
+if test "${gcc_cv_as_ix86_hle+set}" = set; then :
+ $as_echo_n "(cached) " >&6
+else
+ gcc_cv_as_ix86_hle=no
+ if test x$gcc_cv_as != x; then
+ $as_echo '.code64
+ lock xacquire cmpxchg %esi, (%rcx)
+ ' > conftest.s
+ if { ac_try='$gcc_cv_as $gcc_cv_as_flags -o conftest.o conftest.s >&5'
+ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_try\""; } >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; }
+ then
+ gcc_cv_as_ix86_hle=yes
+ else
+ echo "configure: failed program was" >&5
+ cat conftest.s >&5
+ fi
+ rm -f conftest.o conftest.s
+ fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gcc_cv_as_ix86_hle" >&5
+$as_echo "$gcc_cv_as_ix86_hle" >&6; }
+if test $gcc_cv_as_ix86_hle = yes; then
+
+$as_echo "#define HAVE_AS_IX86_HLE 1" >>confdefs.h
+
+fi
+
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking assembler for swap suffix" >&5
$as_echo_n "checking assembler for swap suffix... " >&6; }
if test "${gcc_cv_as_ix86_swap+set}" = set; then :
diff --git a/gcc/configure.ac b/gcc/configure.ac
index 8869121f768..2c17736edf2 100644
--- a/gcc/configure.ac
+++ b/gcc/configure.ac
@@ -2001,15 +2001,15 @@ else
fi])
ORIGINAL_PLUGIN_LD_FOR_TARGET=$gcc_cv_ld
-PLUGIN_LD=`basename $gcc_cv_ld`
+PLUGIN_LD_SUFFIX=`basename $gcc_cv_ld | sed -e "s,$target_alias-,,"`
AC_ARG_WITH(plugin-ld,
[AS_HELP_STRING([[--with-plugin-ld=[ARG]]], [specify the plugin linker])],
[if test x"$withval" != x; then
ORIGINAL_PLUGIN_LD_FOR_TARGET="$withval"
- PLUGIN_LD="$withval"
+ PLUGIN_LD_SUFFIX=`echo $withval | sed -e "s,$target_alias-,,"`
fi])
AC_SUBST(ORIGINAL_PLUGIN_LD_FOR_TARGET)
-AC_DEFINE_UNQUOTED(PLUGIN_LD, "$PLUGIN_LD", [Specify plugin linker])
+AC_DEFINE_UNQUOTED(PLUGIN_LD_SUFFIX, "$PLUGIN_LD_SUFFIX", [Specify plugin linker])
# Check to see if we are using gold instead of ld
AC_MSG_CHECKING(whether we are using gold)
@@ -3597,6 +3597,14 @@ foo: nop
[AC_DEFINE(HAVE_AS_IX86_SAHF, 1,
[Define if your assembler supports the sahf mnemonic in 64bit mode.])])
+ gcc_GAS_CHECK_FEATURE([hle prefixes],
+ gcc_cv_as_ix86_hle,,,
+ [.code64
+ lock xacquire cmpxchg %esi, (%rcx)
+ ],,
+ [AC_DEFINE(HAVE_AS_IX86_HLE, 1,
+ [Define if your assembler supports HLE prefixes.])])
+
gcc_GAS_CHECK_FEATURE([swap suffix],
gcc_cv_as_ix86_swap,,,
[movl.s %esp, %ebp],,
diff --git a/gcc/coretypes.h b/gcc/coretypes.h
index 1374a98a810..7e5c0480f2b 100644
--- a/gcc/coretypes.h
+++ b/gcc/coretypes.h
@@ -72,9 +72,7 @@ struct cl_decoded_option;
struct cl_option_handlers;
struct diagnostic_context;
typedef struct diagnostic_context diagnostic_context;
-struct gimple_seq_d;
-typedef struct gimple_seq_d *gimple_seq;
-typedef const struct gimple_seq_d *const_gimple_seq;
+typedef gimple gimple_seq;
/* Address space number for named address space support. */
typedef unsigned char addr_space_t;
@@ -194,5 +192,8 @@ enum memmodel
MEMMODEL_LAST = 6
};
+/* Suppose that higher bits are target dependant. */
+#define MEMMODEL_MASK ((1<<16)-1)
+
#endif /* coretypes.h */
diff --git a/gcc/cp/ChangeLog b/gcc/cp/ChangeLog
index e52149f023d..dfb3204c18c 100644
--- a/gcc/cp/ChangeLog
+++ b/gcc/cp/ChangeLog
@@ -1,3 +1,80 @@
+2012-05-11 Paolo Carlini <paolo.carlini@oracle.com>
+
+ PR c++/53305
+ * pt.c (tsubst_copy: case PARM_DECL): Return error_mark_node if
+ tsubst_decl returns NULL_TREE.
+ * cxx-pretty-print.c (pp_cxx_simple_type_specifier): Handle
+ BOUND_TEMPLATE_TEMPLATE_PARM.
+
+2012-05-10 Paolo Carlini <paolo.carlini@oracle.com>
+
+ PR c++/53158
+ * cvt.c (ocp_convert): Error out early for void -> bool conversions.
+ * typeck.c (decay_conversion): Use error_at.
+ * call.c (build_integral_nontype_arg_conv, convert_like_real,
+ convert_arg_to_ellipsis, perform_implicit_conversion_flags,
+ initialize_reference): Likewise.
+ * cvt.c (warn_ref_binding): Add location_t parameter.
+ (cp_convert_to_pointer, convert_to_reference, ocp_convert,
+ convert_to_void, ): Use error_at and warning_at.
+
+2012-05-10 Paolo Carlini <paolo.carlini@oracle.com>
+
+ PR c++/53301
+ * decl.c (check_default_argument): Fix typo (POINTER_TYPE_P
+ instead of TYPE_PTR_P) in zero-as-null-pointer-constant warning.
+
+2012-05-06 Paolo Carlini <paolo.carlini@oracle.com>
+
+ PR c++/53152
+ * call.c (op_error, build_new_op_1, build_new_op): Add location_t
+ parameter.
+ (build_conditional_expr_1): Adjust.
+ * typeck.c (build_x_indirect_ref, build_x_binary_op,
+ build_x_unary_op): Add location_t parameter.
+ (rationalize_conditional_expr, build_x_array_ref,
+ build_x_compound_expr, cp_build_modify_expr, build_x_modify_expr):
+ Adjust.
+ * typeck2.c (build_x_arrow): Add location_t parameter.
+ * semantics.c (finish_unary_op_expr): Likewise.
+ (finish_increment_expr, handle_omp_for_class_iterator): Adjust.
+ * decl2.c (grok_array_decl): Add location_t parameter.
+ * parser.c (cp_parser_postfix_open_square_expression,
+ cp_parser_postfix_dot_deref_expression, cp_parser_unary_expression,
+ cp_parser_binary_expression, cp_parser_builtin_offsetof,
+ do_range_for_auto_deduction, cp_convert_range_for,
+ cp_parser_template_argument, cp_parser_omp_for_cond): Pass the
+ location, adjust.
+ * pt.c (tsubst_copy_and_build): Adjust.
+ * tree.c (maybe_dummy_object): Likewise.
+ * cp-tree.h: Update declarations.
+
+2012-05-04 Paolo Carlini <paolo.carlini@oracle.com>
+
+ * semantics.c (cxx_eval_constant_expression, case CONVERT_EXPR): Tidy.
+
+2012-05-04 Paolo Carlini <paolo.carlini@oracle.com>
+
+ PR c++/53166
+ * pt.c (instantiate_class_template_1): Increase / decrease
+ c_inhibit_evaluation_warnings around the tsubst_expr call
+ for STATIC_ASSERT_CONDITION.
+ (tsubst_expr, case STATIC_ASSERT): Likewise.
+ * typeck.c (cp_build_binary_op, case EQ_EXPR/NE_EXPR): Check
+ c_inhibit_evaluation_warnings in the OPT_Waddress warnings.
+
+2012-05-03 Paolo Carlini <paolo.carlini@oracle.com>
+
+ PR c++/53186
+ * call.c (build_over_call): Handle final member functions
+ and class types.
+ (build_new_method_call_1): Do not handle here.
+
+2012-05-02 Richard Guenther <rguenther@suse.de>
+
+ * decl.c (grokdeclarator): Properly check for sizes that
+ cover more than half of the address-space.
+
2012-04-30 Marc Glisse <marc.glisse@inria.fr>
PR c++/51033
diff --git a/gcc/cp/call.c b/gcc/cp/call.c
index e072891f927..53ff78bf9ef 100644
--- a/gcc/cp/call.c
+++ b/gcc/cp/call.c
@@ -159,8 +159,8 @@ static tree build_java_interface_fn_ref (tree, tree);
/*c_cast_p=*/false, (COMPLAIN))
static tree convert_like_real (conversion *, tree, tree, int, int, bool,
bool, tsubst_flags_t);
-static void op_error (enum tree_code, enum tree_code, tree, tree,
- tree, bool);
+static void op_error (location_t, enum tree_code, enum tree_code, tree,
+ tree, tree, bool);
static struct z_candidate *build_user_type_conversion_1 (tree, tree, int,
tsubst_flags_t);
static void print_z_candidate (const char *, struct z_candidate *);
@@ -3182,7 +3182,7 @@ print_z_candidate (const char *msgstr, struct z_candidate *candidate)
candidate->convs[0]->type);
}
else if (TYPE_P (candidate->fn))
- inform (input_location, "%s%T <conversion>", msg, candidate->fn);
+ inform (loc, "%s%T <conversion>", msg, candidate->fn);
else if (candidate->viable == -1)
inform (loc, "%s%#D <near match>", msg, candidate->fn);
else if (DECL_DELETED_FN (STRIP_TEMPLATE (candidate->fn)))
@@ -3692,6 +3692,7 @@ build_integral_nontype_arg_conv (tree type, tree expr, tsubst_flags_t complain)
conversion *conv;
void *p;
tree t;
+ location_t loc = EXPR_LOC_OR_HERE (expr);
if (error_operand_p (expr))
return error_mark_node;
@@ -3727,8 +3728,8 @@ build_integral_nontype_arg_conv (tree type, tree expr, tsubst_flags_t complain)
break;
if (complain & tf_error)
- error ("conversion from %qT to %qT not considered for "
- "non-type template argument", t, type);
+ error_at (loc, "conversion from %qT to %qT not considered for "
+ "non-type template argument", t, type);
/* and fall through. */
default:
@@ -4200,7 +4201,7 @@ op_error_string (const char *errmsg, int ntypes, bool match)
}
static void
-op_error (enum tree_code code, enum tree_code code2,
+op_error (location_t loc, enum tree_code code, enum tree_code code2,
tree arg1, tree arg2, tree arg3, bool match)
{
const char *opname;
@@ -4214,62 +4215,65 @@ op_error (enum tree_code code, enum tree_code code2,
{
case COND_EXPR:
if (flag_diagnostics_show_caret)
- error (op_error_string (G_("ternary %<operator?:%>"), 3, match),
- TREE_TYPE (arg1), TREE_TYPE (arg2), TREE_TYPE (arg3));
+ error_at (loc, op_error_string (G_("ternary %<operator?:%>"),
+ 3, match),
+ TREE_TYPE (arg1), TREE_TYPE (arg2), TREE_TYPE (arg3));
else
- error (op_error_string (G_("ternary %<operator?:%> "
- "in %<%E ? %E : %E%>"), 3, match),
- arg1, arg2, arg3,
- TREE_TYPE (arg1), TREE_TYPE (arg2), TREE_TYPE (arg3));
+ error_at (loc, op_error_string (G_("ternary %<operator?:%> "
+ "in %<%E ? %E : %E%>"), 3, match),
+ arg1, arg2, arg3,
+ TREE_TYPE (arg1), TREE_TYPE (arg2), TREE_TYPE (arg3));
break;
case POSTINCREMENT_EXPR:
case POSTDECREMENT_EXPR:
if (flag_diagnostics_show_caret)
- error (op_error_string (G_("%<operator%s%>"), 1, match),
- opname, TREE_TYPE (arg1));
+ error_at (loc, op_error_string (G_("%<operator%s%>"), 1, match),
+ opname, TREE_TYPE (arg1));
else
- error (op_error_string (G_("%<operator%s%> in %<%E%s%>"), 1, match),
- opname, arg1, opname, TREE_TYPE (arg1));
+ error_at (loc, op_error_string (G_("%<operator%s%> in %<%E%s%>"),
+ 1, match),
+ opname, arg1, opname, TREE_TYPE (arg1));
break;
case ARRAY_REF:
if (flag_diagnostics_show_caret)
- error (op_error_string (G_("%<operator[]%>"), 2, match),
- TREE_TYPE (arg1), TREE_TYPE (arg2));
+ error_at (loc, op_error_string (G_("%<operator[]%>"), 2, match),
+ TREE_TYPE (arg1), TREE_TYPE (arg2));
else
- error (op_error_string (G_("%<operator[]%> in %<%E[%E]%>"), 2, match),
- arg1, arg2, TREE_TYPE (arg1), TREE_TYPE (arg2));
+ error_at (loc, op_error_string (G_("%<operator[]%> in %<%E[%E]%>"),
+ 2, match),
+ arg1, arg2, TREE_TYPE (arg1), TREE_TYPE (arg2));
break;
case REALPART_EXPR:
case IMAGPART_EXPR:
if (flag_diagnostics_show_caret)
- error (op_error_string (G_("%qs"), 1, match),
- opname, TREE_TYPE (arg1));
+ error_at (loc, op_error_string (G_("%qs"), 1, match),
+ opname, TREE_TYPE (arg1));
else
- error (op_error_string (G_("%qs in %<%s %E%>"), 1, match),
- opname, opname, arg1, TREE_TYPE (arg1));
+ error_at (loc, op_error_string (G_("%qs in %<%s %E%>"), 1, match),
+ opname, opname, arg1, TREE_TYPE (arg1));
break;
default:
if (arg2)
if (flag_diagnostics_show_caret)
- error (op_error_string (G_("%<operator%s%>"), 2, match),
- opname, TREE_TYPE (arg1), TREE_TYPE (arg2));
+ error_at (loc, op_error_string (G_("%<operator%s%>"), 2, match),
+ opname, TREE_TYPE (arg1), TREE_TYPE (arg2));
else
- error (op_error_string (G_("%<operator%s%> in %<%E %s %E%>"),
- 2, match),
- opname, arg1, opname, arg2,
- TREE_TYPE (arg1), TREE_TYPE (arg2));
+ error_at (loc, op_error_string (G_("%<operator%s%> in %<%E %s %E%>"),
+ 2, match),
+ opname, arg1, opname, arg2,
+ TREE_TYPE (arg1), TREE_TYPE (arg2));
else
if (flag_diagnostics_show_caret)
- error (op_error_string (G_("%<operator%s%>"), 1, match),
- opname, TREE_TYPE (arg1));
+ error_at (loc, op_error_string (G_("%<operator%s%>"), 1, match),
+ opname, TREE_TYPE (arg1));
else
- error (op_error_string (G_("%<operator%s%> in %<%s%E%>"),
- 1, match),
- opname, opname, arg1, TREE_TYPE (arg1));
+ error_at (loc, op_error_string (G_("%<operator%s%> in %<%s%E%>"),
+ 1, match),
+ opname, opname, arg1, TREE_TYPE (arg1));
break;
}
}
@@ -4607,7 +4611,8 @@ build_conditional_expr_1 (tree arg1, tree arg2, tree arg3,
{
if (complain & tf_error)
{
- op_error (COND_EXPR, NOP_EXPR, arg1, arg2, arg3, FALSE);
+ op_error (input_location, COND_EXPR, NOP_EXPR,
+ arg1, arg2, arg3, FALSE);
print_z_candidates (location_of (arg1), candidates);
}
return error_mark_node;
@@ -4617,7 +4622,8 @@ build_conditional_expr_1 (tree arg1, tree arg2, tree arg3,
{
if (complain & tf_error)
{
- op_error (COND_EXPR, NOP_EXPR, arg1, arg2, arg3, FALSE);
+ op_error (input_location, COND_EXPR, NOP_EXPR,
+ arg1, arg2, arg3, FALSE);
print_z_candidates (location_of (arg1), candidates);
}
return error_mark_node;
@@ -4944,8 +4950,8 @@ add_candidates (tree fns, tree first_arg, const VEC(tree,gc) *args,
}
static tree
-build_new_op_1 (enum tree_code code, int flags, tree arg1, tree arg2, tree arg3,
- tree *overload, tsubst_flags_t complain)
+build_new_op_1 (location_t loc, enum tree_code code, int flags, tree arg1,
+ tree arg2, tree arg3, tree *overload, tsubst_flags_t complain)
{
struct z_candidate *candidates = 0, *cand;
VEC(tree,gc) *arglist;
@@ -5098,8 +5104,7 @@ build_new_op_1 (enum tree_code code, int flags, tree arg1, tree arg2, tree arg3,
? G_("no %<%D(int)%> declared for postfix %qs,"
" trying prefix operator instead")
: G_("no %<%D(int)%> declared for postfix %qs");
- permerror (input_location, msg, fnname,
- operator_name_info[code].name);
+ permerror (loc, msg, fnname, operator_name_info[code].name);
}
if (!flag_permissive)
@@ -5109,8 +5114,8 @@ build_new_op_1 (enum tree_code code, int flags, tree arg1, tree arg2, tree arg3,
code = PREINCREMENT_EXPR;
else
code = PREDECREMENT_EXPR;
- result = build_new_op_1 (code, flags, arg1, NULL_TREE, NULL_TREE,
- overload, complain);
+ result = build_new_op_1 (loc, code, flags, arg1, NULL_TREE,
+ NULL_TREE, overload, complain);
break;
/* The caller will deal with these. */
@@ -5135,8 +5140,8 @@ build_new_op_1 (enum tree_code code, int flags, tree arg1, tree arg2, tree arg3,
{
/* ... Otherwise, report the more generic
"no matching operator found" error */
- op_error (code, code2, arg1, arg2, arg3, FALSE);
- print_z_candidates (input_location, candidates);
+ op_error (loc, code, code2, arg1, arg2, arg3, FALSE);
+ print_z_candidates (loc, candidates);
}
}
result = error_mark_node;
@@ -5150,8 +5155,8 @@ build_new_op_1 (enum tree_code code, int flags, tree arg1, tree arg2, tree arg3,
{
if ((flags & LOOKUP_COMPLAIN) && (complain & tf_error))
{
- op_error (code, code2, arg1, arg2, arg3, TRUE);
- print_z_candidates (input_location, candidates);
+ op_error (loc, code, code2, arg1, arg2, arg3, TRUE);
+ print_z_candidates (loc, candidates);
}
result = error_mark_node;
}
@@ -5213,7 +5218,7 @@ build_new_op_1 (enum tree_code code, int flags, tree arg1, tree arg2, tree arg3,
/* We need to call warn_logical_operator before
converting arg2 to a boolean_type. */
if (complain & tf_warning)
- warn_logical_operator (input_location, code, boolean_type_node,
+ warn_logical_operator (loc, code, boolean_type_node,
code_orig_arg1, arg1,
code_orig_arg2, arg2);
@@ -5254,7 +5259,7 @@ build_new_op_1 (enum tree_code code, int flags, tree arg1, tree arg2, tree arg3,
case TRUTH_ORIF_EXPR:
case TRUTH_AND_EXPR:
case TRUTH_OR_EXPR:
- warn_logical_operator (input_location, code, boolean_type_node,
+ warn_logical_operator (loc, code, boolean_type_node,
code_orig_arg1, arg1, code_orig_arg2, arg2);
/* Fall through. */
case PLUS_EXPR:
@@ -5313,12 +5318,14 @@ build_new_op_1 (enum tree_code code, int flags, tree arg1, tree arg2, tree arg3,
/* Wrapper for above. */
tree
-build_new_op (enum tree_code code, int flags, tree arg1, tree arg2, tree arg3,
+build_new_op (location_t loc, enum tree_code code, int flags,
+ tree arg1, tree arg2, tree arg3,
tree *overload, tsubst_flags_t complain)
{
tree ret;
bool subtime = timevar_cond_start (TV_OVERLOAD);
- ret = build_new_op_1 (code, flags, arg1, arg2, arg3, overload, complain);
+ ret = build_new_op_1 (loc, code, flags, arg1, arg2, arg3,
+ overload, complain);
timevar_cond_stop (TV_OVERLOAD, subtime);
return ret;
}
@@ -5642,6 +5649,7 @@ convert_like_real (conversion *convs, tree expr, tree fn, int argnum,
tree totype = convs->type;
diagnostic_t diag_kind;
int flags;
+ location_t loc = EXPR_LOC_OR_HERE (expr);
if (convs->bad_p && !(complain & tf_error))
return error_mark_node;
@@ -5662,13 +5670,13 @@ convert_like_real (conversion *convs, tree expr, tree fn, int argnum,
&& SCALAR_TYPE_P (totype)
&& CONSTRUCTOR_NELTS (expr) > 0
&& BRACE_ENCLOSED_INITIALIZER_P (CONSTRUCTOR_ELT (expr, 0)->value))
- permerror (input_location, "too many braces around initializer for %qT", totype);
+ permerror (loc, "too many braces around initializer for %qT", totype);
for (; t ; t = next_conversion (t))
{
if (t->kind == ck_user && t->cand->reason)
{
- permerror (input_location, "invalid user-defined conversion "
+ permerror (loc, "invalid user-defined conversion "
"from %qT to %qT", TREE_TYPE (expr), totype);
print_z_candidate ("candidate is:", t->cand);
expr = convert_like_real (t, expr, fn, argnum, 1,
@@ -5698,7 +5706,7 @@ convert_like_real (conversion *convs, tree expr, tree fn, int argnum,
break;
}
- permerror (input_location, "invalid conversion from %qT to %qT",
+ permerror (loc, "invalid conversion from %qT to %qT",
TREE_TYPE (expr), totype);
if (fn)
permerror (DECL_SOURCE_LOCATION (fn),
@@ -5931,8 +5939,8 @@ convert_like_real (conversion *convs, tree expr, tree fn, int argnum,
gcc_assert (TYPE_REF_IS_RVALUE (ref_type)
&& real_lvalue_p (expr));
- error ("cannot bind %qT lvalue to %qT",
- TREE_TYPE (expr), totype);
+ error_at (loc, "cannot bind %qT lvalue to %qT",
+ TREE_TYPE (expr), totype);
if (fn)
error (" initializing argument %P of %q+D", argnum, fn);
return error_mark_node;
@@ -5963,13 +5971,14 @@ convert_like_real (conversion *convs, tree expr, tree fn, int argnum,
/* If the reference is volatile or non-const, we
cannot create a temporary. */
if (lvalue & clk_bitfield)
- error ("cannot bind bitfield %qE to %qT",
- expr, ref_type);
+ error_at (loc, "cannot bind bitfield %qE to %qT",
+ expr, ref_type);
else if (lvalue & clk_packed)
- error ("cannot bind packed field %qE to %qT",
- expr, ref_type);
+ error_at (loc, "cannot bind packed field %qE to %qT",
+ expr, ref_type);
else
- error ("cannot bind rvalue %qE to %qT", expr, ref_type);
+ error_at (loc, "cannot bind rvalue %qE to %qT",
+ expr, ref_type);
return error_mark_node;
}
/* If the source is a packed field, and we must use a copy
@@ -5982,8 +5991,8 @@ convert_like_real (conversion *convs, tree expr, tree fn, int argnum,
&& CLASS_TYPE_P (type)
&& type_has_nontrivial_copy_init (type))
{
- error ("cannot bind packed field %qE to %qT",
- expr, ref_type);
+ error_at (loc, "cannot bind packed field %qE to %qT",
+ expr, ref_type);
return error_mark_node;
}
if (lvalue & clk_bitfield)
@@ -6049,6 +6058,7 @@ tree
convert_arg_to_ellipsis (tree arg, tsubst_flags_t complain)
{
tree arg_type;
+ location_t loc = EXPR_LOC_OR_HERE (arg);
/* [expr.call]
@@ -6070,10 +6080,10 @@ convert_arg_to_ellipsis (tree arg, tsubst_flags_t complain)
{
if ((complain & tf_warning)
&& warn_double_promotion && !c_inhibit_evaluation_warnings)
- warning (OPT_Wdouble_promotion,
- "implicit conversion from %qT to %qT when passing "
- "argument to function",
- arg_type, double_type_node);
+ warning_at (loc, OPT_Wdouble_promotion,
+ "implicit conversion from %qT to %qT when passing "
+ "argument to function",
+ arg_type, double_type_node);
arg = convert_to_real (double_type_node, arg);
}
else if (NULLPTR_TYPE_P (arg_type))
@@ -6083,8 +6093,8 @@ convert_arg_to_ellipsis (tree arg, tsubst_flags_t complain)
if (SCOPED_ENUM_P (arg_type) && !abi_version_at_least (6))
{
if (complain & tf_warning)
- warning (OPT_Wabi, "scoped enum %qT will not promote to an "
- "integral type in a future version of GCC", arg_type);
+ warning_at (loc, OPT_Wabi, "scoped enum %qT will not promote to an "
+ "integral type in a future version of GCC", arg_type);
arg = cp_convert (ENUM_UNDERLYING_TYPE (arg_type), arg);
}
arg = perform_integral_promotions (arg);
@@ -6120,8 +6130,8 @@ convert_arg_to_ellipsis (tree arg, tsubst_flags_t complain)
|| TYPE_HAS_NONTRIVIAL_DESTRUCTOR (arg_type)))
{
if (complain & tf_error)
- error ("cannot pass objects of non-trivially-copyable "
- "type %q#T through %<...%>", arg_type);
+ error_at (loc, "cannot pass objects of non-trivially-copyable "
+ "type %q#T through %<...%>", arg_type);
else
return error_mark_node;
}
@@ -6550,6 +6560,12 @@ build_over_call (struct z_candidate *cand, int flags, tsubst_flags_t complain)
return error_mark_node;
}
+ /* See if the function member or the whole class type is declared
+ final and the call can be devirtualized. */
+ if (DECL_FINAL_P (fn)
+ || CLASSTYPE_FINAL (TYPE_METHOD_BASETYPE (TREE_TYPE (fn))))
+ flags |= LOOKUP_NONVIRTUAL;
+
/* [class.mfct.nonstatic]: If a nonstatic member function of a class
X is called for an object that is not of type X, or of a type
derived from X, the behavior is undefined.
@@ -7418,8 +7434,7 @@ build_new_method_call_1 (tree instance, tree fns, VEC(tree,gc) **args,
/* Optimize away vtable lookup if we know that this function
can't be overridden. */
if (DECL_VINDEX (fn) && ! (flags & LOOKUP_NONVIRTUAL)
- && (resolves_to_fixed_type_p (instance, 0)
- || DECL_FINAL_P (fn) || CLASSTYPE_FINAL (basetype)))
+ && resolves_to_fixed_type_p (instance, 0))
flags |= LOOKUP_NONVIRTUAL;
if (explicit_targs)
flags |= LOOKUP_EXPLICIT_TMPL_ARGS;
@@ -8521,6 +8536,7 @@ perform_implicit_conversion_flags (tree type, tree expr,
{
conversion *conv;
void *p;
+ location_t loc = EXPR_LOC_OR_HERE (expr);
if (error_operand_p (expr))
return error_mark_node;
@@ -8543,8 +8559,8 @@ perform_implicit_conversion_flags (tree type, tree expr,
else if (invalid_nonstatic_memfn_p (expr, complain))
/* We gave an error. */;
else
- error ("could not convert %qE from %qT to %qT", expr,
- TREE_TYPE (expr), type);
+ error_at (loc, "could not convert %qE from %qT to %qT", expr,
+ TREE_TYPE (expr), type);
}
expr = error_mark_node;
}
@@ -8822,6 +8838,7 @@ initialize_reference (tree type, tree expr,
{
conversion *conv;
void *p;
+ location_t loc = EXPR_LOC_OR_HERE (expr);
if (type == error_mark_node || error_operand_p (expr))
return error_mark_node;
@@ -8840,13 +8857,13 @@ initialize_reference (tree type, tree expr,
else if (!CP_TYPE_CONST_P (TREE_TYPE (type))
&& !TYPE_REF_IS_RVALUE (type)
&& !real_lvalue_p (expr))
- error ("invalid initialization of non-const reference of "
- "type %qT from an rvalue of type %qT",
- type, TREE_TYPE (expr));
+ error_at (loc, "invalid initialization of non-const reference of "
+ "type %qT from an rvalue of type %qT",
+ type, TREE_TYPE (expr));
else
- error ("invalid initialization of reference of type "
- "%qT from expression of type %qT", type,
- TREE_TYPE (expr));
+ error_at (loc, "invalid initialization of reference of type "
+ "%qT from expression of type %qT", type,
+ TREE_TYPE (expr));
}
return error_mark_node;
}
diff --git a/gcc/cp/cp-tree.h b/gcc/cp/cp-tree.h
index 5a7ebaed993..141b559eb74 100644
--- a/gcc/cp/cp-tree.h
+++ b/gcc/cp/cp-tree.h
@@ -4874,8 +4874,8 @@ extern tree build_new_method_call (tree, tree, VEC(tree,gc) **,
tsubst_flags_t);
extern tree build_special_member_call (tree, tree, VEC(tree,gc) **,
tree, int, tsubst_flags_t);
-extern tree build_new_op (enum tree_code, int, tree,
- tree, tree, tree *,
+extern tree build_new_op (location_t, enum tree_code,
+ int, tree, tree, tree, tree *,
tsubst_flags_t);
extern tree build_op_call (tree, VEC(tree,gc) **,
tsubst_flags_t);
@@ -5112,7 +5112,7 @@ extern void maybe_make_one_only (tree);
extern bool vague_linkage_p (tree);
extern void grokclassfn (tree, tree,
enum overload_flags);
-extern tree grok_array_decl (tree, tree);
+extern tree grok_array_decl (location_t, tree, tree);
extern tree delete_sanity (tree, tree, bool, int, tsubst_flags_t);
extern tree check_classfn (tree, tree, tree);
extern void check_member_template (tree);
@@ -5564,7 +5564,7 @@ extern tree finish_call_expr (tree, VEC(tree,gc) **, bool,
extern tree finish_increment_expr (tree, enum tree_code);
extern tree finish_this_expr (void);
extern tree finish_pseudo_destructor_expr (tree, tree, tree);
-extern tree finish_unary_op_expr (enum tree_code, tree);
+extern tree finish_unary_op_expr (location_t, enum tree_code, tree);
extern tree finish_compound_literal (tree, tree, tsubst_flags_t);
extern tree finish_fname (tree);
extern void finish_translation_unit (void);
@@ -5791,8 +5791,8 @@ extern tree build_class_member_access_expr (tree, tree, tree, bool,
tsubst_flags_t);
extern tree finish_class_member_access_expr (tree, tree, bool,
tsubst_flags_t);
-extern tree build_x_indirect_ref (tree, ref_operator,
- tsubst_flags_t);
+extern tree build_x_indirect_ref (location_t, tree,
+ ref_operator, tsubst_flags_t);
extern tree cp_build_indirect_ref (tree, ref_operator,
tsubst_flags_t);
extern tree build_array_ref (location_t, tree, tree);
@@ -5804,12 +5804,14 @@ extern tree cp_build_function_call_nary (tree, tsubst_flags_t, ...)
ATTRIBUTE_SENTINEL;
extern tree cp_build_function_call_vec (tree, VEC(tree,gc) **,
tsubst_flags_t);
-extern tree build_x_binary_op (enum tree_code, tree,
+extern tree build_x_binary_op (location_t,
+ enum tree_code, tree,
enum tree_code, tree,
enum tree_code, tree *,
tsubst_flags_t);
extern tree build_x_array_ref (tree, tree, tsubst_flags_t);
-extern tree build_x_unary_op (enum tree_code, tree,
+extern tree build_x_unary_op (location_t,
+ enum tree_code, tree,
tsubst_flags_t);
extern tree cp_build_addr_expr (tree, tsubst_flags_t);
extern tree cp_build_addr_expr_strict (tree, tsubst_flags_t);
@@ -5898,7 +5900,8 @@ extern void check_narrowing (tree, tree);
extern tree digest_init (tree, tree, tsubst_flags_t);
extern tree digest_init_flags (tree, tree, int);
extern tree build_scoped_ref (tree, tree, tree *);
-extern tree build_x_arrow (tree, tsubst_flags_t);
+extern tree build_x_arrow (location_t, tree,
+ tsubst_flags_t);
extern tree build_m_component_ref (tree, tree, tsubst_flags_t);
extern tree build_functional_cast (tree, tree, tsubst_flags_t);
extern tree add_exception_specifier (tree, tree, int);
diff --git a/gcc/cp/cvt.c b/gcc/cp/cvt.c
index 49ba38a3f39..3d8f7021d1f 100644
--- a/gcc/cp/cvt.c
+++ b/gcc/cp/cvt.c
@@ -42,7 +42,7 @@ static tree cp_convert_to_pointer (tree, tree);
static tree convert_to_pointer_force (tree, tree);
static tree build_type_conversion (tree, tree);
static tree build_up_reference (tree, tree, int, tree);
-static void warn_ref_binding (tree, tree, tree);
+static void warn_ref_binding (location_t, tree, tree, tree);
/* Change of width--truncation and extension of integers or reals--
is represented with NOP_EXPR. Proper functioning of many things
@@ -79,6 +79,8 @@ cp_convert_to_pointer (tree type, tree expr)
tree intype = TREE_TYPE (expr);
enum tree_code form;
tree rval;
+ location_t loc = EXPR_LOC_OR_HERE (expr);
+
if (intype == error_mark_node)
return error_mark_node;
@@ -87,8 +89,8 @@ cp_convert_to_pointer (tree type, tree expr)
intype = complete_type (intype);
if (!COMPLETE_TYPE_P (intype))
{
- error ("can%'t convert from incomplete type %qT to %qT",
- intype, type);
+ error_at (loc, "can%'t convert from incomplete type %qT to %qT",
+ intype, type);
return error_mark_node;
}
@@ -96,8 +98,8 @@ cp_convert_to_pointer (tree type, tree expr)
if (rval)
{
if (rval == error_mark_node)
- error ("conversion of %qE from %qT to %qT is ambiguous",
- expr, intype, type);
+ error_at (loc, "conversion of %qE from %qT to %qT is ambiguous",
+ expr, intype, type);
return rval;
}
}
@@ -166,8 +168,8 @@ cp_convert_to_pointer (tree type, tree expr)
if (TYPE_PTRMEMFUNC_P (type))
{
- error ("cannot convert %qE from type %qT to type %qT",
- expr, intype, type);
+ error_at (loc, "cannot convert %qE from type %qT to type %qT",
+ expr, intype, type);
return error_mark_node;
}
@@ -192,8 +194,8 @@ cp_convert_to_pointer (tree type, tree expr)
tf_warning_or_error);
}
}
- error ("cannot convert %qE from type %qT to type %qT",
- expr, intype, type);
+ error_at (loc, "cannot convert %qE from type %qT to type %qT",
+ expr, intype, type);
return error_mark_node;
}
@@ -201,8 +203,8 @@ cp_convert_to_pointer (tree type, tree expr)
{
if (c_inhibit_evaluation_warnings == 0
&& !NULLPTR_TYPE_P (TREE_TYPE (expr)))
- warning (OPT_Wzero_as_null_pointer_constant,
- "zero as null pointer constant");
+ warning_at (loc, OPT_Wzero_as_null_pointer_constant,
+ "zero as null pointer constant");
if (TYPE_PTRMEMFUNC_P (type))
return build_ptrmemfunc (TYPE_PTRMEMFUNC_FN_TYPE (type), expr, 0,
@@ -221,7 +223,7 @@ cp_convert_to_pointer (tree type, tree expr)
}
else if (TYPE_PTR_TO_MEMBER_P (type) && INTEGRAL_CODE_P (form))
{
- error ("invalid conversion from %qT to %qT", intype, type);
+ error_at (loc, "invalid conversion from %qT to %qT", intype, type);
return error_mark_node;
}
@@ -242,8 +244,8 @@ cp_convert_to_pointer (tree type, tree expr)
if (type_unknown_p (expr))
return instantiate_type (type, expr, tf_warning_or_error);
- error ("cannot convert %qE from type %qT to type %qT",
- expr, intype, type);
+ error_at (loc, "cannot convert %qE from type %qT to type %qT",
+ expr, intype, type);
return error_mark_node;
}
@@ -367,7 +369,7 @@ build_up_reference (tree type, tree arg, int flags, tree decl)
non-volatile const type. */
static void
-warn_ref_binding (tree reftype, tree intype, tree decl)
+warn_ref_binding (location_t loc, tree reftype, tree intype, tree decl)
{
tree ttl = TREE_TYPE (reftype);
@@ -388,7 +390,7 @@ warn_ref_binding (tree reftype, tree intype, tree decl)
msg = G_("conversion to non-const reference type %q#T from "
"rvalue of type %qT");
- permerror (input_location, msg, reftype, intype);
+ permerror (loc, msg, reftype, intype);
}
}
@@ -410,6 +412,7 @@ convert_to_reference (tree reftype, tree expr, int convtype,
bool can_convert_intype_to_type;
tsubst_flags_t complain = ((flags & LOOKUP_COMPLAIN)
? tf_warning_or_error : tf_none);
+ location_t loc = EXPR_LOC_OR_HERE (expr);
if (TREE_CODE (type) == FUNCTION_TYPE
&& TREE_TYPE (expr) == unknown_type_node)
@@ -455,11 +458,11 @@ convert_to_reference (tree reftype, tree expr, int convtype,
tree ttr = lvalue_type (expr);
if (! real_lvalue_p (expr))
- warn_ref_binding (reftype, intype, decl);
+ warn_ref_binding (loc, reftype, intype, decl);
if (! (convtype & CONV_CONST)
&& !at_least_as_qualified_p (ttl, ttr))
- permerror (input_location, "conversion from %qT to %qT discards qualifiers",
+ permerror (loc, "conversion from %qT to %qT discards qualifiers",
ttr, reftype);
}
@@ -477,8 +480,8 @@ convert_to_reference (tree reftype, tree expr, int convtype,
if (TREE_CODE (intype) == POINTER_TYPE
&& (comptypes (TREE_TYPE (intype), type,
COMPARE_BASE | COMPARE_DERIVED)))
- warning (0, "casting %qT to %qT does not dereference pointer",
- intype, reftype);
+ warning_at (loc, 0, "casting %qT to %qT does not dereference pointer",
+ intype, reftype);
rval = cp_build_addr_expr (expr, tf_warning_or_error);
if (rval != error_mark_node)
@@ -494,7 +497,7 @@ convert_to_reference (tree reftype, tree expr, int convtype,
tf_warning_or_error);
if (rval == NULL_TREE || rval == error_mark_node)
return rval;
- warn_ref_binding (reftype, intype, decl);
+ warn_ref_binding (loc, reftype, intype, decl);
rval = build_up_reference (reftype, rval, flags, decl);
}
@@ -505,7 +508,7 @@ convert_to_reference (tree reftype, tree expr, int convtype,
}
if (flags & LOOKUP_COMPLAIN)
- error ("cannot convert type %qT to type %qT", intype, reftype);
+ error_at (loc, "cannot convert type %qT to type %qT", intype, reftype);
return error_mark_node;
}
@@ -633,6 +636,7 @@ ocp_convert (tree type, tree expr, int convtype, int flags)
enum tree_code code = TREE_CODE (type);
const char *invalid_conv_diag;
tree e1;
+ location_t loc = EXPR_LOC_OR_HERE (expr);
if (error_operand_p (e) || type == error_mark_node)
return error_mark_node;
@@ -711,8 +715,7 @@ ocp_convert (tree type, tree expr, int convtype, int flags)
|| TREE_CODE (intype) == POINTER_TYPE)
{
if (flags & LOOKUP_COMPLAIN)
- permerror (input_location, "conversion from %q#T to %q#T", intype, type);
-
+ permerror (loc, "conversion from %q#T to %q#T", intype, type);
if (!flag_permissive)
return error_mark_node;
}
@@ -726,10 +729,10 @@ ocp_convert (tree type, tree expr, int convtype, int flags)
unspecified. */
if (TREE_CODE (expr) == INTEGER_CST
&& !int_fits_type_p (expr, ENUM_UNDERLYING_TYPE (type)))
- warning (OPT_Wconversion,
- "the result of the conversion is unspecified because "
- "%qE is outside the range of type %qT",
- expr, type);
+ warning_at (loc, OPT_Wconversion,
+ "the result of the conversion is unspecified because "
+ "%qE is outside the range of type %qT",
+ expr, type);
}
if (MAYBE_CLASS_TYPE_P (intype))
{
@@ -738,11 +741,18 @@ ocp_convert (tree type, tree expr, int convtype, int flags)
if (rval)
return rval;
if (flags & LOOKUP_COMPLAIN)
- error ("%q#T used where a %qT was expected", intype, type);
+ error_at (loc, "%q#T used where a %qT was expected", intype, type);
return error_mark_node;
}
if (code == BOOLEAN_TYPE)
{
+ if (TREE_CODE (intype) == VOID_TYPE)
+ {
+ error_at (loc, "could not convert %qE from %<void%> to %<bool%>",
+ expr);
+ return error_mark_node;
+ }
+
/* We can't implicitly convert a scoped enum to bool, so convert
to the underlying type first. */
if (SCOPED_ENUM_P (intype) && (convtype & CONV_STATIC))
@@ -769,7 +779,7 @@ ocp_convert (tree type, tree expr, int convtype, int flags)
if (ret_val)
return ret_val;
if (flags & LOOKUP_COMPLAIN)
- error ("%q#T used where a %qT was expected", in_vtype, type);
+ error_at (loc, "%q#T used where a %qT was expected", in_vtype, type);
return error_mark_node;
}
return fold_if_not_in_template (convert_to_vector (type, e));
@@ -784,7 +794,7 @@ ocp_convert (tree type, tree expr, int convtype, int flags)
return rval;
else
if (flags & LOOKUP_COMPLAIN)
- error ("%q#T used where a floating point value was expected",
+ error_at (loc, "%q#T used where a floating point value was expected",
TREE_TYPE (e));
}
if (code == REAL_TYPE)
@@ -845,8 +855,8 @@ ocp_convert (tree type, tree expr, int convtype, int flags)
if (invalid_nonstatic_memfn_p (expr, tf_warning_or_error))
/* We displayed the error message. */;
else
- error ("conversion from %qT to non-scalar type %qT requested",
- TREE_TYPE (expr), type);
+ error_at (loc, "conversion from %qT to non-scalar type %qT requested",
+ TREE_TYPE (expr), type);
}
return error_mark_node;
}
@@ -873,6 +883,8 @@ ocp_convert (tree type, tree expr, int convtype, int flags)
tree
convert_to_void (tree expr, impl_conv_void implicit, tsubst_flags_t complain)
{
+ location_t loc = EXPR_LOC_OR_HERE (expr);
+
if (expr == error_mark_node
|| TREE_TYPE (expr) == error_mark_node)
return error_mark_node;
@@ -903,7 +915,7 @@ convert_to_void (tree expr, impl_conv_void implicit, tsubst_flags_t complain)
if (TREE_CODE (expr) == PSEUDO_DTOR_EXPR)
{
if (complain & tf_error)
- error ("pseudo-destructor is not called");
+ error_at (loc, "pseudo-destructor is not called");
return error_mark_node;
}
if (VOID_TYPE_P (TREE_TYPE (expr)))
@@ -980,35 +992,35 @@ convert_to_void (tree expr, impl_conv_void implicit, tsubst_flags_t complain)
switch (implicit)
{
case ICV_CAST:
- warning (0, "conversion to void will not access "
+ warning_at (loc, 0, "conversion to void will not access "
"object of incomplete type %qT", type);
break;
case ICV_SECOND_OF_COND:
- warning (0, "indirection will not access object of "
+ warning_at (loc, 0, "indirection will not access object of "
"incomplete type %qT in second operand "
"of conditional expression", type);
break;
case ICV_THIRD_OF_COND:
- warning (0, "indirection will not access object of "
+ warning_at (loc, 0, "indirection will not access object of "
"incomplete type %qT in third operand "
"of conditional expression", type);
break;
case ICV_RIGHT_OF_COMMA:
- warning (0, "indirection will not access object of "
+ warning_at (loc, 0, "indirection will not access object of "
"incomplete type %qT in right operand of "
"comma operator", type);
break;
case ICV_LEFT_OF_COMMA:
- warning (0, "indirection will not access object of "
+ warning_at (loc, 0, "indirection will not access object of "
"incomplete type %qT in left operand of "
"comma operator", type);
break;
case ICV_STATEMENT:
- warning (0, "indirection will not access object of "
+ warning_at (loc, 0, "indirection will not access object of "
"incomplete type %qT in statement", type);
break;
case ICV_THIRD_IN_FOR:
- warning (0, "indirection will not access object of "
+ warning_at (loc, 0, "indirection will not access object of "
"incomplete type %qT in for increment "
"expression", type);
break;
@@ -1024,37 +1036,37 @@ convert_to_void (tree expr, impl_conv_void implicit, tsubst_flags_t complain)
switch (implicit)
{
case ICV_CAST:
- warning (0, "conversion to void will not access "
+ warning_at (loc, 0, "conversion to void will not access "
"object of type %qT", type);
break;
case ICV_SECOND_OF_COND:
- warning (0, "implicit dereference will not access object "
- "of type %qT in second operand of "
+ warning_at (loc, 0, "implicit dereference will not access "
+ "object of type %qT in second operand of "
"conditional expression", type);
break;
case ICV_THIRD_OF_COND:
- warning (0, "implicit dereference will not access object "
- "of type %qT in third operand of "
+ warning_at (loc, 0, "implicit dereference will not access "
+ "object of type %qT in third operand of "
"conditional expression", type);
break;
case ICV_RIGHT_OF_COMMA:
- warning (0, "implicit dereference will not access object "
- "of type %qT in right operand of "
+ warning_at (loc, 0, "implicit dereference will not access "
+ "object of type %qT in right operand of "
"comma operator", type);
break;
case ICV_LEFT_OF_COMMA:
- warning (0, "implicit dereference will not access object "
- "of type %qT in left operand of comma operator",
- type);
+ warning_at (loc, 0, "implicit dereference will not access "
+ "object of type %qT in left operand of comma "
+ "operator", type);
break;
case ICV_STATEMENT:
- warning (0, "implicit dereference will not access object "
- "of type %qT in statement", type);
+ warning_at (loc, 0, "implicit dereference will not access "
+ "object of type %qT in statement", type);
break;
case ICV_THIRD_IN_FOR:
- warning (0, "implicit dereference will not access object "
- "of type %qT in for increment expression",
- type);
+ warning_at (loc, 0, "implicit dereference will not access "
+ "object of type %qT in for increment expression",
+ type);
break;
default:
gcc_unreachable ();
@@ -1066,37 +1078,37 @@ convert_to_void (tree expr, impl_conv_void implicit, tsubst_flags_t complain)
switch (implicit)
{
case ICV_CAST:
- warning (0, "conversion to void will not access "
+ warning_at (loc, 0, "conversion to void will not access "
"object of non-trivially-copyable type %qT",
- type);
+ type);
break;
case ICV_SECOND_OF_COND:
- warning (0, "indirection will not access object of "
+ warning_at (loc, 0, "indirection will not access object of "
"non-trivially-copyable type %qT in second "
"operand of conditional expression", type);
break;
case ICV_THIRD_OF_COND:
- warning (0, "indirection will not access object of "
+ warning_at (loc, 0, "indirection will not access object of "
"non-trivially-copyable type %qT in third "
"operand of conditional expression", type);
break;
case ICV_RIGHT_OF_COMMA:
- warning (0, "indirection will not access object of "
+ warning_at (loc, 0, "indirection will not access object of "
"non-trivially-copyable type %qT in right "
"operand of comma operator", type);
break;
case ICV_LEFT_OF_COMMA:
- warning (0, "indirection will not access object of "
+ warning_at (loc, 0, "indirection will not access object of "
"non-trivially-copyable type %qT in left "
"operand of comma operator", type);
break;
case ICV_STATEMENT:
- warning (0, "indirection will not access object of "
+ warning_at (loc, 0, "indirection will not access object of "
"non-trivially-copyable type %qT in statement",
- type);
+ type);
break;
case ICV_THIRD_IN_FOR:
- warning (0, "indirection will not access object of "
+ warning_at (loc, 0, "indirection will not access object of "
"non-trivially-copyable type %qT in for "
"increment expression", type);
break;
@@ -1117,7 +1129,7 @@ convert_to_void (tree expr, impl_conv_void implicit, tsubst_flags_t complain)
&& (complain & tf_warning)
&& !TREE_NO_WARNING (expr)
&& !is_reference)
- warning (OPT_Wunused_value, "value computed is not used");
+ warning_at (loc, OPT_Wunused_value, "value computed is not used");
expr = TREE_OPERAND (expr, 0);
}
@@ -1134,37 +1146,37 @@ convert_to_void (tree expr, impl_conv_void implicit, tsubst_flags_t complain)
switch (implicit)
{
case ICV_CAST:
- warning (0, "conversion to void will not access "
+ warning_at (loc, 0, "conversion to void will not access "
"object %qE of incomplete type %qT", expr, type);
break;
case ICV_SECOND_OF_COND:
- warning (0, "variable %qE of incomplete type %qT will not "
- "be accessed in second operand of "
+ warning_at (loc, 0, "variable %qE of incomplete type %qT will "
+ "not be accessed in second operand of "
"conditional expression", expr, type);
break;
case ICV_THIRD_OF_COND:
- warning (0, "variable %qE of incomplete type %qT will not "
- "be accessed in third operand of "
+ warning_at (loc, 0, "variable %qE of incomplete type %qT will "
+ "not be accessed in third operand of "
"conditional expression", expr, type);
break;
case ICV_RIGHT_OF_COMMA:
- warning (0, "variable %qE of incomplete type %qT will not "
- "be accessed in right operand of comma operator",
- expr, type);
+ warning_at (loc, 0, "variable %qE of incomplete type %qT will "
+ "not be accessed in right operand of comma operator",
+ expr, type);
break;
case ICV_LEFT_OF_COMMA:
- warning (0, "variable %qE of incomplete type %qT will not "
- "be accessed in left operand of comma operator",
- expr, type);
+ warning_at (loc, 0, "variable %qE of incomplete type %qT will "
+ "not be accessed in left operand of comma operator",
+ expr, type);
break;
case ICV_STATEMENT:
- warning (0, "variable %qE of incomplete type %qT will not "
- "be accessed in statement", expr, type);
+ warning_at (loc, 0, "variable %qE of incomplete type %qT will "
+ "not be accessed in statement", expr, type);
break;
case ICV_THIRD_IN_FOR:
- warning (0, "variable %qE of incomplete type %qT will not "
- "be accessed in for increment expression",
- expr, type);
+ warning_at (loc, 0, "variable %qE of incomplete type %qT will "
+ "not be accessed in for increment expression",
+ expr, type);
break;
default:
gcc_unreachable ();
@@ -1211,32 +1223,32 @@ convert_to_void (tree expr, impl_conv_void implicit, tsubst_flags_t complain)
switch (implicit)
{
case ICV_CAST:
- error ("conversion to void "
- "cannot resolve address of overloaded function");
+ error_at (loc, "conversion to void "
+ "cannot resolve address of overloaded function");
break;
case ICV_SECOND_OF_COND:
- error ("second operand of conditional expression "
- "cannot resolve address of overloaded function");
+ error_at (loc, "second operand of conditional expression "
+ "cannot resolve address of overloaded function");
break;
case ICV_THIRD_OF_COND:
- error ("third operand of conditional expression "
- "cannot resolve address of overloaded function");
+ error_at (loc, "third operand of conditional expression "
+ "cannot resolve address of overloaded function");
break;
case ICV_RIGHT_OF_COMMA:
- error ("right operand of comma operator "
- "cannot resolve address of overloaded function");
+ error_at (loc, "right operand of comma operator "
+ "cannot resolve address of overloaded function");
break;
case ICV_LEFT_OF_COMMA:
- error ("left operand of comma operator "
- "cannot resolve address of overloaded function");
+ error_at (loc, "left operand of comma operator "
+ "cannot resolve address of overloaded function");
break;
case ICV_STATEMENT:
- error ("statement "
- "cannot resolve address of overloaded function");
+ error_at (loc, "statement "
+ "cannot resolve address of overloaded function");
break;
case ICV_THIRD_IN_FOR:
- error ("for increment expression "
- "cannot resolve address of overloaded function");
+ error_at (loc, "for increment expression "
+ "cannot resolve address of overloaded function");
break;
}
else
@@ -1250,34 +1262,34 @@ convert_to_void (tree expr, impl_conv_void implicit, tsubst_flags_t complain)
switch (implicit)
{
case ICV_SECOND_OF_COND:
- warning (OPT_Waddress,
- "second operand of conditional expression "
- "is a reference, not call, to function %qE", expr);
+ warning_at (loc, OPT_Waddress,
+ "second operand of conditional expression "
+ "is a reference, not call, to function %qE", expr);
break;
case ICV_THIRD_OF_COND:
- warning (OPT_Waddress,
- "third operand of conditional expression "
- "is a reference, not call, to function %qE", expr);
+ warning_at (loc, OPT_Waddress,
+ "third operand of conditional expression "
+ "is a reference, not call, to function %qE", expr);
break;
case ICV_RIGHT_OF_COMMA:
- warning (OPT_Waddress,
- "right operand of comma operator "
- "is a reference, not call, to function %qE", expr);
+ warning_at (loc, OPT_Waddress,
+ "right operand of comma operator "
+ "is a reference, not call, to function %qE", expr);
break;
case ICV_LEFT_OF_COMMA:
- warning (OPT_Waddress,
- "left operand of comma operator "
- "is a reference, not call, to function %qE", expr);
+ warning_at (loc, OPT_Waddress,
+ "left operand of comma operator "
+ "is a reference, not call, to function %qE", expr);
break;
case ICV_STATEMENT:
- warning (OPT_Waddress,
- "statement is a reference, not call, to function %qE",
- expr);
+ warning_at (loc, OPT_Waddress,
+ "statement is a reference, not call, to function %qE",
+ expr);
break;
case ICV_THIRD_IN_FOR:
- warning (OPT_Waddress,
- "for increment expression "
- "is a reference, not call, to function %qE", expr);
+ warning_at (loc, OPT_Waddress,
+ "for increment expression "
+ "is a reference, not call, to function %qE", expr);
break;
default:
gcc_unreachable ();
@@ -1302,28 +1314,30 @@ convert_to_void (tree expr, impl_conv_void implicit, tsubst_flags_t complain)
switch (implicit)
{
case ICV_SECOND_OF_COND:
- warning (OPT_Wunused_value,
- "second operand of conditional expression has no effect");
+ warning_at (loc, OPT_Wunused_value,
+ "second operand of conditional expression "
+ "has no effect");
break;
case ICV_THIRD_OF_COND:
- warning (OPT_Wunused_value,
- "third operand of conditional expression has no effect");
+ warning_at (loc, OPT_Wunused_value,
+ "third operand of conditional expression "
+ "has no effect");
break;
case ICV_RIGHT_OF_COMMA:
- warning (OPT_Wunused_value,
- "right operand of comma operator has no effect");
+ warning_at (loc, OPT_Wunused_value,
+ "right operand of comma operator has no effect");
break;
case ICV_LEFT_OF_COMMA:
- warning (OPT_Wunused_value,
- "left operand of comma operator has no effect");
+ warning_at (loc, OPT_Wunused_value,
+ "left operand of comma operator has no effect");
break;
case ICV_STATEMENT:
- warning (OPT_Wunused_value,
- "statement has no effect");
+ warning_at (loc, OPT_Wunused_value,
+ "statement has no effect");
break;
case ICV_THIRD_IN_FOR:
- warning (OPT_Wunused_value,
- "for increment expression has no effect");
+ warning_at (loc, OPT_Wunused_value,
+ "for increment expression has no effect");
break;
default:
gcc_unreachable ();
@@ -1361,7 +1375,7 @@ convert_to_void (tree expr, impl_conv_void implicit, tsubst_flags_t complain)
|| code == POSTDECREMENT_EXPR
|| code == POSTINCREMENT_EXPR)))
&& (complain & tf_warning))
- warning (OPT_Wunused_value, "value computed is not used");
+ warning_at (loc, OPT_Wunused_value, "value computed is not used");
}
}
expr = build1 (CONVERT_EXPR, void_type_node, expr);
diff --git a/gcc/cp/cxx-pretty-print.c b/gcc/cp/cxx-pretty-print.c
index 3d10afb837c..cb7922f04c6 100644
--- a/gcc/cp/cxx-pretty-print.c
+++ b/gcc/cp/cxx-pretty-print.c
@@ -1261,6 +1261,7 @@ pp_cxx_simple_type_specifier (cxx_pretty_printer *pp, tree t)
case TEMPLATE_TYPE_PARM:
case TEMPLATE_TEMPLATE_PARM:
case TEMPLATE_PARM_INDEX:
+ case BOUND_TEMPLATE_TEMPLATE_PARM:
pp_cxx_unqualified_id (pp, t);
break;
diff --git a/gcc/cp/decl.c b/gcc/cp/decl.c
index 40818a3f3ff..0e833b1b0e3 100644
--- a/gcc/cp/decl.c
+++ b/gcc/cp/decl.c
@@ -9672,12 +9672,12 @@ grokdeclarator (const cp_declarator *declarator,
error ("non-parameter %qs cannot be a parameter pack", name);
}
- /* Did array size calculations overflow? */
-
+ /* Did array size calculations overflow or does the array cover more
+ than half of the address-space? */
if (TREE_CODE (type) == ARRAY_TYPE
&& COMPLETE_TYPE_P (type)
&& TREE_CODE (TYPE_SIZE_UNIT (type)) == INTEGER_CST
- && TREE_OVERFLOW (TYPE_SIZE_UNIT (type)))
+ && ! valid_constant_size_p (TYPE_SIZE_UNIT (type)))
{
error ("size of array %qs is too large", name);
/* If we proceed with the array type as it is, we'll eventually
@@ -10619,7 +10619,7 @@ check_default_argument (tree decl, tree arg)
if (warn_zero_as_null_pointer_constant
&& c_inhibit_evaluation_warnings == 0
- && (POINTER_TYPE_P (decl_type) || TYPE_PTR_TO_MEMBER_P (decl_type))
+ && (TYPE_PTR_P (decl_type) || TYPE_PTR_TO_MEMBER_P (decl_type))
&& null_ptr_cst_p (arg)
&& !NULLPTR_TYPE_P (TREE_TYPE (arg)))
{
diff --git a/gcc/cp/decl2.c b/gcc/cp/decl2.c
index 5d1f8de4ed9..b0544bbb91d 100644
--- a/gcc/cp/decl2.c
+++ b/gcc/cp/decl2.c
@@ -336,7 +336,7 @@ grokclassfn (tree ctype, tree function, enum overload_flags flags)
along the way. */
tree
-grok_array_decl (tree array_expr, tree index_exp)
+grok_array_decl (location_t loc, tree array_expr, tree index_exp)
{
tree type;
tree expr;
@@ -362,7 +362,7 @@ grok_array_decl (tree array_expr, tree index_exp)
/* If they have an `operator[]', use that. */
if (MAYBE_CLASS_TYPE_P (type) || MAYBE_CLASS_TYPE_P (TREE_TYPE (index_exp)))
- expr = build_new_op (ARRAY_REF, LOOKUP_NORMAL,
+ expr = build_new_op (loc, ARRAY_REF, LOOKUP_NORMAL,
array_expr, index_exp, NULL_TREE,
/*overload=*/NULL, tf_warning_or_error);
else
diff --git a/gcc/cp/parser.c b/gcc/cp/parser.c
index 0c423536dd7..20597fddc8c 100644
--- a/gcc/cp/parser.c
+++ b/gcc/cp/parser.c
@@ -5850,6 +5850,7 @@ cp_parser_postfix_open_square_expression (cp_parser *parser,
bool for_offsetof)
{
tree index;
+ location_t loc = cp_lexer_peek_token (parser->lexer)->location;
/* Consume the `[' token. */
cp_lexer_consume_token (parser->lexer);
@@ -5880,7 +5881,7 @@ cp_parser_postfix_open_square_expression (cp_parser *parser,
cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);
/* Build the ARRAY_REF. */
- postfix_expression = grok_array_decl (postfix_expression, index);
+ postfix_expression = grok_array_decl (loc, postfix_expression, index);
/* When not doing offsetof, array references are not permitted in
constant-expressions. */
@@ -5918,7 +5919,7 @@ cp_parser_postfix_dot_deref_expression (cp_parser *parser,
/* If this is a `->' operator, dereference the pointer. */
if (token_type == CPP_DEREF)
- postfix_expression = build_x_arrow (postfix_expression,
+ postfix_expression = build_x_arrow (location, postfix_expression,
tf_warning_or_error);
/* Check to see whether or not the expression is type-dependent. */
dependent_p = type_dependent_expression_p (postfix_expression);
@@ -6435,7 +6436,8 @@ cp_parser_unary_expression (cp_parser *parser, bool address_p, bool cast_p,
/* Parse the cast-expression. */
expression = cp_parser_simple_cast_expression (parser);
/* Create the complete representation. */
- return build_x_unary_op ((keyword == RID_REALPART
+ return build_x_unary_op (token->location,
+ (keyword == RID_REALPART
? REALPART_EXPR : IMAGPART_EXPR),
expression,
tf_warning_or_error);
@@ -6531,7 +6533,7 @@ cp_parser_unary_expression (cp_parser *parser, bool address_p, bool cast_p,
{
tree identifier;
tree expression;
- location_t loc = cp_lexer_peek_token (parser->lexer)->location;
+ location_t loc = token->location;
/* Consume the '&&' token. */
cp_lexer_consume_token (parser->lexer);
@@ -6550,6 +6552,7 @@ cp_parser_unary_expression (cp_parser *parser, bool address_p, bool cast_p,
tree cast_expression;
tree expression = error_mark_node;
non_integral_constant non_constant_p = NIC_NONE;
+ location_t loc = token->location;
/* Consume the operator token. */
token = cp_lexer_consume_token (parser->lexer);
@@ -6563,7 +6566,8 @@ cp_parser_unary_expression (cp_parser *parser, bool address_p, bool cast_p,
{
case INDIRECT_REF:
non_constant_p = NIC_STAR;
- expression = build_x_indirect_ref (cast_expression, RO_UNARY_STAR,
+ expression = build_x_indirect_ref (loc, cast_expression,
+ RO_UNARY_STAR,
tf_warning_or_error);
break;
@@ -6571,7 +6575,8 @@ cp_parser_unary_expression (cp_parser *parser, bool address_p, bool cast_p,
non_constant_p = NIC_ADDR;
/* Fall through. */
case BIT_NOT_EXPR:
- expression = build_x_unary_op (unary_operator, cast_expression,
+ expression = build_x_unary_op (loc, unary_operator,
+ cast_expression,
tf_warning_or_error);
break;
@@ -6583,7 +6588,8 @@ cp_parser_unary_expression (cp_parser *parser, bool address_p, bool cast_p,
case UNARY_PLUS_EXPR:
case NEGATE_EXPR:
case TRUTH_NOT_EXPR:
- expression = finish_unary_op_expr (unary_operator, cast_expression);
+ expression = finish_unary_op_expr (loc, unary_operator,
+ cast_expression);
break;
default:
@@ -7271,6 +7277,7 @@ cp_parser_binary_expression (cp_parser* parser, bool cast_p,
cp_parser_expression_stack_entry *sp = &stack[0];
tree lhs, rhs;
cp_token *token;
+ location_t loc;
enum tree_code tree_type, lhs_type, rhs_type;
enum cp_parser_prec new_prec, lookahead_prec;
tree overload;
@@ -7283,16 +7290,15 @@ cp_parser_binary_expression (cp_parser* parser, bool cast_p,
{
/* Get an operator token. */
token = cp_lexer_peek_token (parser->lexer);
+ loc = token->location;
if (warn_cxx0x_compat
&& token->type == CPP_RSHIFT
&& !parser->greater_than_is_operator_p)
{
- if (warning_at (token->location, OPT_Wc__0x_compat,
- "%<>>%> operator is treated as"
- " two right angle brackets in C++11"))
- inform (token->location,
- "suggest parentheses around %<>>%> expression");
+ if (warning_at (loc, OPT_Wc__0x_compat, "%<>>%> operator is treated"
+ " as two right angle brackets in C++11"))
+ inform (loc, "suggest parentheses around %<>>%> expression");
}
new_prec = TOKEN_PRECEDENCE (token);
@@ -7390,7 +7396,7 @@ cp_parser_binary_expression (cp_parser* parser, bool cast_p,
&& TREE_CODE_CLASS (tree_type) == tcc_comparison)
lhs = build2 (tree_type, boolean_type_node, lhs, rhs);
else
- lhs = build_x_binary_op (tree_type, lhs, lhs_type, rhs, rhs_type,
+ lhs = build_x_binary_op (loc, tree_type, lhs, lhs_type, rhs, rhs_type,
&overload, tf_warning_or_error);
lhs_type = tree_type;
@@ -7798,7 +7804,7 @@ cp_parser_builtin_offsetof (cp_parser *parser)
case CPP_DEREF:
/* offsetof-member-designator "->" identifier */
- expr = grok_array_decl (expr, integer_zero_node);
+ expr = grok_array_decl (token->location, expr, integer_zero_node);
/* FALLTHRU */
case CPP_DOT:
@@ -9407,7 +9413,7 @@ do_range_for_auto_deduction (tree decl, tree range_expr)
iter_type = (cp_parser_perform_range_for_lookup
(range_temp, &begin_dummy, &end_dummy));
iter_decl = build_decl (input_location, VAR_DECL, NULL_TREE, iter_type);
- iter_decl = build_x_indirect_ref (iter_decl, RO_NULL,
+ iter_decl = build_x_indirect_ref (input_location, iter_decl, RO_NULL,
tf_warning_or_error);
TREE_TYPE (decl) = do_auto_deduction (TREE_TYPE (decl),
iter_decl, auto_node);
@@ -9495,19 +9501,21 @@ cp_convert_range_for (tree statement, tree range_decl, tree range_expr)
finish_for_init_stmt (statement);
/* The new for condition. */
- condition = build_x_binary_op (NE_EXPR,
+ condition = build_x_binary_op (input_location, NE_EXPR,
begin, ERROR_MARK,
end, ERROR_MARK,
NULL, tf_warning_or_error);
finish_for_cond (condition, statement);
/* The new increment expression. */
- expression = finish_unary_op_expr (PREINCREMENT_EXPR, begin);
+ expression = finish_unary_op_expr (input_location,
+ PREINCREMENT_EXPR, begin);
finish_for_expr (expression, statement);
/* The declaration is initialized with *__begin inside the loop body. */
cp_finish_decl (range_decl,
- build_x_indirect_ref (begin, RO_NULL, tf_warning_or_error),
+ build_x_indirect_ref (input_location, begin, RO_NULL,
+ tf_warning_or_error),
/*is_constant_init*/false, NULL_TREE,
LOOKUP_ONLYCONVERTING);
@@ -12858,6 +12866,7 @@ cp_parser_template_argument (cp_parser* parser)
bool address_p;
bool maybe_type_id = false;
cp_token *token = NULL, *argument_start_token = NULL;
+ location_t loc = 0;
cp_id_kind idk;
/* There's really no way to know what we're looking at, so we just
@@ -12973,7 +12982,10 @@ cp_parser_template_argument (cp_parser* parser)
object or function with external linkage. */
address_p = cp_lexer_next_token_is (parser->lexer, CPP_AND);
if (address_p)
- cp_lexer_consume_token (parser->lexer);
+ {
+ loc = cp_lexer_peek_token (parser->lexer)->location;
+ cp_lexer_consume_token (parser->lexer);
+ }
/* See if we might have an id-expression. */
token = cp_lexer_peek_token (parser->lexer);
if (token->type == CPP_NAME
@@ -13033,8 +13045,8 @@ cp_parser_template_argument (cp_parser* parser)
if (cp_parser_parse_definitely (parser))
{
if (address_p)
- argument = build_x_unary_op (ADDR_EXPR, argument,
- tf_warning_or_error);
+ argument = build_x_unary_op (loc, ADDR_EXPR, argument,
+ tf_warning_or_error);
return argument;
}
}
@@ -26062,7 +26074,7 @@ cp_parser_omp_for_cond (cp_parser *parser, tree decl)
|| CLASS_TYPE_P (TREE_TYPE (decl))))
return cond;
- return build_x_binary_op (TREE_CODE (cond),
+ return build_x_binary_op (input_location, TREE_CODE (cond),
TREE_OPERAND (cond, 0), ERROR_MARK,
TREE_OPERAND (cond, 1), ERROR_MARK,
/*overload=*/NULL, tf_warning_or_error);
@@ -26138,11 +26150,12 @@ cp_parser_omp_for_incr (cp_parser *parser, tree decl)
if (op == PLUS_EXPR)
lhs = rhs;
else
- lhs = build_x_unary_op (NEGATE_EXPR, rhs, tf_warning_or_error);
+ lhs = build_x_unary_op (input_location, NEGATE_EXPR, rhs,
+ tf_warning_or_error);
}
else
- lhs = build_x_binary_op (op, lhs, ERROR_MARK, rhs, ERROR_MARK,
- NULL, tf_warning_or_error);
+ lhs = build_x_binary_op (input_location, op, lhs, ERROR_MARK, rhs,
+ ERROR_MARK, NULL, tf_warning_or_error);
}
}
while (token->type == CPP_PLUS || token->type == CPP_MINUS);
diff --git a/gcc/cp/pt.c b/gcc/cp/pt.c
index b720d4a3161..a506a8481d0 100644
--- a/gcc/cp/pt.c
+++ b/gcc/cp/pt.c
@@ -8950,10 +8950,15 @@ instantiate_class_template_1 (tree type)
/* Build new TYPE_FIELDS. */
if (TREE_CODE (t) == STATIC_ASSERT)
{
- tree condition =
- tsubst_expr (STATIC_ASSERT_CONDITION (t), args,
- tf_warning_or_error, NULL_TREE,
- /*integral_constant_expression_p=*/true);
+ tree condition;
+
+ ++c_inhibit_evaluation_warnings;
+ condition =
+ tsubst_expr (STATIC_ASSERT_CONDITION (t), args,
+ tf_warning_or_error, NULL_TREE,
+ /*integral_constant_expression_p=*/true);
+ --c_inhibit_evaluation_warnings;
+
finish_static_assert (condition,
STATIC_ASSERT_MESSAGE (t),
STATIC_ASSERT_SOURCE_LOCATION (t),
@@ -12061,7 +12066,7 @@ tsubst_copy (tree t, tree args, tsubst_flags_t complain, tree in_decl)
case PARM_DECL:
r = retrieve_local_specialization (t);
- if (r == NULL)
+ if (r == NULL_TREE)
{
tree c;
@@ -12079,6 +12084,8 @@ tsubst_copy (tree t, tree args, tsubst_flags_t complain, tree in_decl)
not the following PARM_DECLs that are chained to T. */
c = copy_node (t);
r = tsubst_decl (c, args, complain);
+ if (r == NULL_TREE)
+ return error_mark_node;
/* Give it the template pattern as its context; its true context
hasn't been instantiated yet and this is good enough for
mangling. */
@@ -13110,11 +13117,16 @@ tsubst_expr (tree t, tree args, tsubst_flags_t complain, tree in_decl,
case STATIC_ASSERT:
{
- tree condition =
+ tree condition;
+
+ ++c_inhibit_evaluation_warnings;
+ condition =
tsubst_expr (STATIC_ASSERT_CONDITION (t),
args,
complain, in_decl,
/*integral_constant_expression_p=*/true);
+ --c_inhibit_evaluation_warnings;
+
finish_static_assert (condition,
STATIC_ASSERT_MESSAGE (t),
STATIC_ASSERT_SOURCE_LOCATION (t),
@@ -13456,7 +13468,7 @@ tsubst_copy_and_build (tree t,
r = convert_from_reference (r);
}
else
- r = build_x_indirect_ref (r, RO_UNARY_STAR, complain);
+ r = build_x_indirect_ref (input_location, r, RO_UNARY_STAR, complain);
return r;
}
@@ -13533,7 +13545,7 @@ tsubst_copy_and_build (tree t,
case POSTINCREMENT_EXPR:
op1 = tsubst_non_call_postfix_expression (TREE_OPERAND (t, 0),
args, complain, in_decl);
- return build_x_unary_op (TREE_CODE (t), op1, complain);
+ return build_x_unary_op (input_location, TREE_CODE (t), op1, complain);
case PREDECREMENT_EXPR:
case PREINCREMENT_EXPR:
@@ -13544,8 +13556,8 @@ tsubst_copy_and_build (tree t,
case UNARY_PLUS_EXPR: /* Unary + */
case REALPART_EXPR:
case IMAGPART_EXPR:
- return build_x_unary_op (TREE_CODE (t), RECUR (TREE_OPERAND (t, 0)),
- complain);
+ return build_x_unary_op (input_location, TREE_CODE (t),
+ RECUR (TREE_OPERAND (t, 0)), complain);
case FIX_TRUNC_EXPR:
return cp_build_unary_op (FIX_TRUNC_EXPR, RECUR (TREE_OPERAND (t, 0)),
@@ -13562,7 +13574,7 @@ tsubst_copy_and_build (tree t,
else
op1 = tsubst_non_call_postfix_expression (op1, args, complain,
in_decl);
- return build_x_unary_op (ADDR_EXPR, op1, complain);
+ return build_x_unary_op (input_location, ADDR_EXPR, op1, complain);
case PLUS_EXPR:
case MINUS_EXPR:
@@ -13597,7 +13609,7 @@ tsubst_copy_and_build (tree t,
case DOTSTAR_EXPR:
{
tree r = build_x_binary_op
- (TREE_CODE (t),
+ (input_location, TREE_CODE (t),
RECUR (TREE_OPERAND (t, 0)),
(TREE_NO_WARNING (TREE_OPERAND (t, 0))
? ERROR_MARK
@@ -13701,7 +13713,7 @@ tsubst_copy_and_build (tree t,
/* Remember that there was a reference to this entity. */
if (DECL_P (op1))
mark_used (op1);
- return build_x_arrow (op1, complain);
+ return build_x_arrow (input_location, op1, complain);
case NEW_EXPR:
{
diff --git a/gcc/cp/semantics.c b/gcc/cp/semantics.c
index 90378dc4122..9447787f002 100644
--- a/gcc/cp/semantics.c
+++ b/gcc/cp/semantics.c
@@ -2224,7 +2224,7 @@ finish_call_expr (tree fn, VEC(tree,gc) **args, bool disallow_virtual,
tree
finish_increment_expr (tree expr, enum tree_code code)
{
- return build_x_unary_op (code, expr, tf_warning_or_error);
+ return build_x_unary_op (input_location, code, expr, tf_warning_or_error);
}
/* Finish a use of `this'. Returns an expression for `this'. */
@@ -2318,9 +2318,9 @@ finish_pseudo_destructor_expr (tree object, tree scope, tree destructor)
/* Finish an expression of the form CODE EXPR. */
tree
-finish_unary_op_expr (enum tree_code code, tree expr)
+finish_unary_op_expr (location_t loc, enum tree_code code, tree expr)
{
- tree result = build_x_unary_op (code, expr, tf_warning_or_error);
+ tree result = build_x_unary_op (loc, code, expr, tf_warning_or_error);
if (TREE_OVERFLOW_P (result) && !TREE_OVERFLOW_P (expr))
overflow_warning (input_location, result);
@@ -4456,7 +4456,8 @@ handle_omp_for_class_iterator (int i, location_t locus, tree declv, tree initv,
cond = error_mark_node;
else
{
- tree tem = build_x_binary_op (TREE_CODE (cond), iter, ERROR_MARK,
+ tree tem = build_x_binary_op (input_location, TREE_CODE (cond),
+ iter, ERROR_MARK,
TREE_OPERAND (cond, 1), ERROR_MARK,
NULL, tf_warning_or_error);
if (error_operand_p (tem))
@@ -4472,7 +4473,7 @@ handle_omp_for_class_iterator (int i, location_t locus, tree declv, tree initv,
error_at (elocus, "invalid controlling predicate");
return true;
}
- diff = build_x_binary_op (MINUS_EXPR, TREE_OPERAND (cond, 1),
+ diff = build_x_binary_op (input_location, MINUS_EXPR, TREE_OPERAND (cond, 1),
ERROR_MARK, iter, ERROR_MARK, NULL,
tf_warning_or_error);
if (error_operand_p (diff))
@@ -4495,7 +4496,7 @@ handle_omp_for_class_iterator (int i, location_t locus, tree declv, tree initv,
incr = error_mark_node;
break;
}
- iter_incr = build_x_unary_op (TREE_CODE (incr), iter,
+ iter_incr = build_x_unary_op (input_location, TREE_CODE (incr), iter,
tf_warning_or_error);
if (error_operand_p (iter_incr))
return true;
@@ -4545,7 +4546,7 @@ handle_omp_for_class_iterator (int i, location_t locus, tree declv, tree initv,
incr = error_mark_node;
else
{
- iter_incr = build_x_binary_op (PLUS_EXPR,
+ iter_incr = build_x_binary_op (input_location, PLUS_EXPR,
TREE_OPERAND (rhs, 0),
ERROR_MARK, iter,
ERROR_MARK, NULL,
@@ -7757,18 +7758,16 @@ cxx_eval_constant_expression (const constexpr_call *call, tree t,
case NOP_EXPR:
{
tree oldop = TREE_OPERAND (t, 0);
- tree op = oldop;
- tree to = TREE_TYPE (t);
- op = cxx_eval_constant_expression (call, TREE_OPERAND (t, 0),
- allow_non_constant, addr,
- non_constant_p);
+ tree op = cxx_eval_constant_expression (call, oldop,
+ allow_non_constant, addr,
+ non_constant_p);
if (*non_constant_p)
return t;
if (op == oldop)
/* We didn't fold at the top so we could check for ptr-int
conversion. */
return fold (t);
- r = fold_build1 (TREE_CODE (t), to, op);
+ r = fold_build1 (TREE_CODE (t), TREE_TYPE (t), op);
/* Conversion of an out-of-range value has implementation-defined
behavior; the language considers it different from arithmetic
overflow, which is undefined. */
diff --git a/gcc/cp/tree.c b/gcc/cp/tree.c
index 96a403fce6b..918b9ccd55a 100644
--- a/gcc/cp/tree.c
+++ b/gcc/cp/tree.c
@@ -2598,8 +2598,8 @@ maybe_dummy_object (tree type, tree* binfop)
&& context == nonlambda_method_basetype ())
/* In a lambda, need to go through 'this' capture. */
decl = (build_x_indirect_ref
- ((lambda_expr_this_capture
- (CLASSTYPE_LAMBDA_EXPR (current_class_type))),
+ (input_location, (lambda_expr_this_capture
+ (CLASSTYPE_LAMBDA_EXPR (current_class_type))),
RO_NULL, tf_warning_or_error));
else
decl = build_dummy_object (context);
diff --git a/gcc/cp/typeck.c b/gcc/cp/typeck.c
index b59741c6471..7eed7548562 100644
--- a/gcc/cp/typeck.c
+++ b/gcc/cp/typeck.c
@@ -1822,6 +1822,7 @@ decay_conversion (tree exp, tsubst_flags_t complain)
{
tree type;
enum tree_code code;
+ location_t loc = EXPR_LOC_OR_HERE (exp);
type = TREE_TYPE (exp);
if (type == error_mark_node)
@@ -1853,7 +1854,7 @@ decay_conversion (tree exp, tsubst_flags_t complain)
if (code == VOID_TYPE)
{
if (complain & tf_error)
- error ("void value not ignored as it ought to be");
+ error_at (loc, "void value not ignored as it ought to be");
return error_mark_node;
}
if (invalid_nonstatic_memfn_p (exp, complain))
@@ -1882,7 +1883,7 @@ decay_conversion (tree exp, tsubst_flags_t complain)
&& ! (TREE_CODE (exp) == CONSTRUCTOR && TREE_STATIC (exp)))
{
if (complain & tf_error)
- error ("invalid use of non-lvalue array");
+ error_at (loc, "invalid use of non-lvalue array");
return error_mark_node;
}
@@ -2060,7 +2061,8 @@ rationalize_conditional_expr (enum tree_code code, tree t,
gcc_assert (!TREE_SIDE_EFFECTS (op0)
&& !TREE_SIDE_EFFECTS (op1));
return
- build_conditional_expr (build_x_binary_op ((TREE_CODE (t) == MIN_EXPR
+ build_conditional_expr (build_x_binary_op (input_location,
+ (TREE_CODE (t) == MIN_EXPR
? LE_EXPR : GE_EXPR),
op0, TREE_CODE (op0),
op1, TREE_CODE (op1),
@@ -2730,7 +2732,7 @@ build_ptrmemfunc_access_expr (tree ptrmem, tree member_name)
Must also handle REFERENCE_TYPEs for C++. */
tree
-build_x_indirect_ref (tree expr, ref_operator errorstring,
+build_x_indirect_ref (location_t loc, tree expr, ref_operator errorstring,
tsubst_flags_t complain)
{
tree orig_expr = expr;
@@ -2746,8 +2748,8 @@ build_x_indirect_ref (tree expr, ref_operator errorstring,
expr = build_non_dependent_expr (expr);
}
- rval = build_new_op (INDIRECT_REF, LOOKUP_NORMAL, expr, NULL_TREE,
- NULL_TREE, /*overload=*/NULL, complain);
+ rval = build_new_op (loc, INDIRECT_REF, LOOKUP_NORMAL, expr,
+ NULL_TREE, NULL_TREE, /*overload=*/NULL, complain);
if (!rval)
rval = cp_build_indirect_ref (expr, errorstring, complain);
@@ -3580,8 +3582,9 @@ convert_arguments (tree typelist, VEC(tree,gc) **values, tree fndecl,
ARG2_CODE as ERROR_MARK. */
tree
-build_x_binary_op (enum tree_code code, tree arg1, enum tree_code arg1_code,
- tree arg2, enum tree_code arg2_code, tree *overload,
+build_x_binary_op (location_t loc, enum tree_code code, tree arg1,
+ enum tree_code arg1_code, tree arg2,
+ enum tree_code arg2_code, tree *overload,
tsubst_flags_t complain)
{
tree orig_arg1;
@@ -3603,7 +3606,7 @@ build_x_binary_op (enum tree_code code, tree arg1, enum tree_code arg1_code,
if (code == DOTSTAR_EXPR)
expr = build_m_component_ref (arg1, arg2, complain);
else
- expr = build_new_op (code, LOOKUP_NORMAL, arg1, arg2, NULL_TREE,
+ expr = build_new_op (loc, code, LOOKUP_NORMAL, arg1, arg2, NULL_TREE,
overload, complain);
/* Check for cases such as x+y<<z which users are likely to
@@ -3643,8 +3646,8 @@ build_x_array_ref (tree arg1, tree arg2, tsubst_flags_t complain)
arg2 = build_non_dependent_expr (arg2);
}
- expr = build_new_op (ARRAY_REF, LOOKUP_NORMAL, arg1, arg2, NULL_TREE,
- /*overload=*/NULL, complain);
+ expr = build_new_op (input_location, ARRAY_REF, LOOKUP_NORMAL, arg1,
+ arg2, NULL_TREE, /*overload=*/NULL, complain);
if (processing_template_decl && expr != error_mark_node)
return build_min_non_dep (ARRAY_REF, expr, orig_arg1, orig_arg2,
@@ -4081,7 +4084,8 @@ cp_build_binary_op (location_t location,
if (TREE_CODE (op0) == ADDR_EXPR
&& decl_with_nonnull_addr_p (TREE_OPERAND (op0, 0)))
{
- if (complain & tf_warning)
+ if ((complain & tf_warning)
+ && c_inhibit_evaluation_warnings == 0)
warning (OPT_Waddress, "the address of %qD will never be NULL",
TREE_OPERAND (op0, 0));
}
@@ -4093,7 +4097,8 @@ cp_build_binary_op (location_t location,
if (TREE_CODE (op1) == ADDR_EXPR
&& decl_with_nonnull_addr_p (TREE_OPERAND (op1, 0)))
{
- if (complain & tf_warning)
+ if ((complain & tf_warning)
+ && c_inhibit_evaluation_warnings == 0)
warning (OPT_Waddress, "the address of %qD will never be NULL",
TREE_OPERAND (op1, 0));
}
@@ -4657,7 +4662,8 @@ pointer_diff (tree op0, tree op1, tree ptrtype)
and XARG is the operand. */
tree
-build_x_unary_op (enum tree_code code, tree xarg, tsubst_flags_t complain)
+build_x_unary_op (location_t loc, enum tree_code code, tree xarg,
+ tsubst_flags_t complain)
{
tree orig_expr = xarg;
tree exp;
@@ -4688,8 +4694,8 @@ build_x_unary_op (enum tree_code code, tree xarg, tsubst_flags_t complain)
|| (TREE_CODE (xarg) == OFFSET_REF)))
/* Don't look for a function. */;
else
- exp = build_new_op (code, LOOKUP_NORMAL, xarg, NULL_TREE, NULL_TREE,
- /*overload=*/NULL, complain);
+ exp = build_new_op (loc, code, LOOKUP_NORMAL, xarg, NULL_TREE,
+ NULL_TREE, /*overload=*/NULL, complain);
if (!exp && code == ADDR_EXPR)
{
if (is_overloaded_fn (xarg))
@@ -5718,8 +5724,8 @@ build_x_compound_expr (tree op1, tree op2, tsubst_flags_t complain)
op2 = build_non_dependent_expr (op2);
}
- result = build_new_op (COMPOUND_EXPR, LOOKUP_NORMAL, op1, op2, NULL_TREE,
- /*overload=*/NULL, complain);
+ result = build_new_op (input_location, COMPOUND_EXPR, LOOKUP_NORMAL,
+ op1, op2, NULL_TREE, /*overload=*/NULL, complain);
if (!result)
result = cp_build_compound_expr (op1, op2, complain);
@@ -6907,9 +6913,9 @@ cp_build_modify_expr (tree lhs, enum tree_code modifycode, tree rhs,
/* Do the default thing. */;
else
{
- result = build_new_op (MODIFY_EXPR, LOOKUP_NORMAL,
- lhs, rhs, make_node (NOP_EXPR),
- /*overload=*/NULL,
+ result = build_new_op (input_location, MODIFY_EXPR,
+ LOOKUP_NORMAL, lhs, rhs,
+ make_node (NOP_EXPR), /*overload=*/NULL,
complain);
if (result == NULL_TREE)
return error_mark_node;
@@ -7109,10 +7115,9 @@ build_x_modify_expr (tree lhs, enum tree_code modifycode, tree rhs,
if (modifycode != NOP_EXPR)
{
- tree rval = build_new_op (MODIFY_EXPR, LOOKUP_NORMAL, lhs, rhs,
- make_node (modifycode),
- /*overload=*/NULL,
- complain);
+ tree rval = build_new_op (input_location, MODIFY_EXPR, LOOKUP_NORMAL,
+ lhs, rhs, make_node (modifycode),
+ /*overload=*/NULL, complain);
if (rval)
{
TREE_NO_WARNING (rval) = 1;
diff --git a/gcc/cp/typeck2.c b/gcc/cp/typeck2.c
index af72851a2ef..b26723f5ea3 100644
--- a/gcc/cp/typeck2.c
+++ b/gcc/cp/typeck2.c
@@ -1471,7 +1471,7 @@ build_scoped_ref (tree datum, tree basetype, tree* binfo_p)
delegation is detected. */
tree
-build_x_arrow (tree expr, tsubst_flags_t complain)
+build_x_arrow (location_t loc, tree expr, tsubst_flags_t complain)
{
tree orig_expr = expr;
tree type = TREE_TYPE (expr);
@@ -1493,8 +1493,8 @@ build_x_arrow (tree expr, tsubst_flags_t complain)
struct tinst_level *actual_inst = current_instantiation ();
tree fn = NULL;
- while ((expr = build_new_op (COMPONENT_REF, LOOKUP_NORMAL, expr,
- NULL_TREE, NULL_TREE,
+ while ((expr = build_new_op (loc, COMPONENT_REF,
+ LOOKUP_NORMAL, expr, NULL_TREE, NULL_TREE,
&fn, complain)))
{
if (expr == error_mark_node)
diff --git a/gcc/cse.c b/gcc/cse.c
index b7db827cf30..a8df74b0be0 100644
--- a/gcc/cse.c
+++ b/gcc/cse.c
@@ -1258,7 +1258,7 @@ insert_const_anchor (HOST_WIDE_INT anchor, rtx reg, HOST_WIDE_INT offs,
if (!elt)
elt = insert (anchor_exp, NULL, hash, mode);
- exp = plus_constant (reg, offs);
+ exp = plus_constant (mode, reg, offs);
/* REG has just been inserted and the hash codes recomputed. */
mention_regs (exp);
hash = HASH (exp, mode);
@@ -1333,7 +1333,7 @@ find_reg_offset_for_const (struct table_elt *anchor_elt, HOST_WIDE_INT offs,
if (!REG_P (elt->exp) && !exp_equiv_p (elt->exp, elt->exp, 1, false))
continue;
- x = plus_constant (elt->exp, offs);
+ x = plus_constant (GET_MODE (elt->exp), elt->exp, offs);
if (REG_P (x)
|| (GET_CODE (x) == PLUS
&& IN_RANGE (INTVAL (XEXP (x, 1)),
@@ -2218,7 +2218,7 @@ use_related_value (rtx x, struct table_elt *elt)
offset = (get_integer_term (x) - get_integer_term (p->exp));
/* Note: OFFSET may be 0 if P->xexp and X are related by commutativity. */
- return plus_constant (q->exp, offset);
+ return plus_constant (q->mode, q->exp, offset);
}
@@ -3567,7 +3567,7 @@ fold_rtx (rtx x, rtx insn)
{
rtx y = lookup_as_function (XEXP (x, 0), PLUS);
if (y && CONST_INT_P (XEXP (y, 1)))
- return fold_rtx (plus_constant (copy_rtx (y),
+ return fold_rtx (plus_constant (mode, copy_rtx (y),
-INTVAL (const_arg1)),
NULL_RTX);
}
diff --git a/gcc/cselib.c b/gcc/cselib.c
index 56f2b7f9ffe..a8c66b8dee9 100644
--- a/gcc/cselib.c
+++ b/gcc/cselib.c
@@ -1866,7 +1866,8 @@ cselib_subst_to_values (rtx x, enum machine_mode memmode)
i = GET_MODE_SIZE (memmode);
if (code == PRE_DEC)
i = -i;
- return cselib_subst_to_values (plus_constant (XEXP (x, 0), i),
+ return cselib_subst_to_values (plus_constant (GET_MODE (x),
+ XEXP (x, 0), i),
memmode);
case PRE_MODIFY:
@@ -2523,8 +2524,7 @@ cselib_record_sets (rtx insn)
sets[i].src_elt = cselib_lookup (src, GET_MODE (dest), 1, VOIDmode);
if (MEM_P (dest))
{
- enum machine_mode address_mode
- = targetm.addr_space.address_mode (MEM_ADDR_SPACE (dest));
+ enum machine_mode address_mode = get_address_mode (dest);
sets[i].dest_addr_elt = cselib_lookup (XEXP (dest, 0),
address_mode, 1,
diff --git a/gcc/diagnostic.c b/gcc/diagnostic.c
index 729e8654298..4913eed9c67 100644
--- a/gcc/diagnostic.c
+++ b/gcc/diagnostic.c
@@ -147,6 +147,7 @@ diagnostic_initialize (diagnostic_context *context, int n_opts)
context->option_enabled = NULL;
context->option_state = NULL;
context->option_name = NULL;
+ context->last_location = UNKNOWN_LOCATION;
context->last_module = 0;
context->x_data = NULL;
context->lock = 0;
@@ -263,9 +264,11 @@ diagnostic_show_locus (diagnostic_context * context,
if (!context->show_caret
- || diagnostic->location <= BUILTINS_LOCATION)
+ || diagnostic->location <= BUILTINS_LOCATION
+ || diagnostic->location == context->last_location)
return;
+ context->last_location = diagnostic->location;
s = expand_location_to_spelling_point (diagnostic->location);
line = location_get_source_line (s);
if (line == NULL)
@@ -542,7 +545,8 @@ diagnostic_report_diagnostic (diagnostic_context *context,
diagnostic->kind = DK_ERROR;
}
- if (diagnostic->option_index)
+ if (diagnostic->option_index
+ && diagnostic->option_index != permissive_error_option (context))
{
diagnostic_t diag_class = DK_UNSPECIFIED;
diff --git a/gcc/diagnostic.h b/gcc/diagnostic.h
index 63eb3852958..976754ef3f2 100644
--- a/gcc/diagnostic.h
+++ b/gcc/diagnostic.h
@@ -172,6 +172,9 @@ struct diagnostic_context
/* Auxiliary data for client. */
void *x_data;
+ /* Used to detect that the last caret was printed at the same location. */
+ location_t last_location;
+
/* Used to detect when the input file stack has changed since last
described. */
const struct line_map *last_module;
diff --git a/gcc/doc/avr-mmcu.texi b/gcc/doc/avr-mmcu.texi
new file mode 100644
index 00000000000..ee5f2fa34a2
--- /dev/null
+++ b/gcc/doc/avr-mmcu.texi
@@ -0,0 +1,74 @@
+@c Copyright (C) 2012 Free Software Foundation, Inc.
+@c This is part of the GCC manual.
+@c For copying conditions, see the file gcc/doc/include/fdl.texi.
+
+@c This file is generated automatically using
+@c gcc/config/avr/gen-avr-mmcu-texi.c from:
+@c gcc/config/avr/avr-devices.c
+@c gcc/config/avr/avr-mcus.def
+
+@c Please do not edit manually.
+
+@table @code
+
+@item avr2
+``Classic'' devices with up to 8@tie{}KiB of program memory.
+@*@var{mcu}@tie{}= @code{at90s2313}, @code{at90s2323}, @code{at90s2333}, @code{at90s2343}, @code{attiny22}, @code{attiny26}, @code{at90s4414}, @code{at90s4433}, @code{at90s4434}, @code{at90s8515}, @code{at90c8534}, @code{at90s8535}.
+
+@item avr25
+``Classic'' devices with up to 8@tie{}KiB of program memory and with the @code{MOVW} instruction.
+@*@var{mcu}@tie{}= @code{ata6289}, @code{attiny13}, @code{attiny13a}, @code{attiny2313}, @code{attiny2313a}, @code{attiny24}, @code{attiny24a}, @code{attiny4313}, @code{attiny44}, @code{attiny44a}, @code{attiny84}, @code{attiny84a}, @code{attiny25}, @code{attiny45}, @code{attiny85}, @code{attiny261}, @code{attiny261a}, @code{attiny461}, @code{attiny461a}, @code{attiny861}, @code{attiny861a}, @code{attiny43u}, @code{attiny87}, @code{attiny48}, @code{attiny88}, @code{at86rf401}.
+
+@item avr3
+``Classic'' devices with 16@tie{}KiB up to 64@tie{}KiB of program memory.
+@*@var{mcu}@tie{}= @code{at43usb355}, @code{at76c711}.
+
+@item avr31
+``Classic'' devices with 128@tie{}KiB of program memory.
+@*@var{mcu}@tie{}= @code{atmega103}, @code{at43usb320}.
+
+@item avr35
+``Classic'' devices with 16@tie{}KiB up to 64@tie{}KiB of program memory and with the @code{MOVW} instruction.
+@*@var{mcu}@tie{}= @code{at90usb82}, @code{at90usb162}, @code{atmega8u2}, @code{atmega16u2}, @code{atmega32u2}, @code{attiny167}.
+
+@item avr4
+``Enhanced'' devices with up to 8@tie{}KiB of program memory.
+@*@var{mcu}@tie{}= @code{atmega8}, @code{atmega48}, @code{atmega48a}, @code{atmega48p}, @code{atmega88}, @code{atmega88a}, @code{atmega88p}, @code{atmega88pa}, @code{atmega8515}, @code{atmega8535}, @code{atmega8hva}, @code{at90pwm1}, @code{at90pwm2}, @code{at90pwm2b}, @code{at90pwm3}, @code{at90pwm3b}, @code{at90pwm81}.
+
+@item avr5
+``Enhanced'' devices with 16@tie{}KiB up to 64@tie{}KiB of program memory.
+@*@var{mcu}@tie{}= @code{atmega16}, @code{atmega16a}, @code{atmega161}, @code{atmega162}, @code{atmega163}, @code{atmega164a}, @code{atmega164p}, @code{atmega165}, @code{atmega165a}, @code{atmega165p}, @code{atmega168}, @code{atmega168a}, @code{atmega168p}, @code{atmega169}, @code{atmega169a}, @code{atmega169p}, @code{atmega169pa}, @code{atmega32}, @code{atmega323}, @code{atmega324a}, @code{atmega324p}, @code{atmega324pa}, @code{atmega325}, @code{atmega325a}, @code{atmega325p}, @code{atmega3250}, @code{atmega3250a}, @code{atmega3250p}, @code{atmega328}, @code{atmega328p}, @code{atmega329}, @code{atmega329a}, @code{atmega329p}, @code{atmega329pa}, @code{atmega3290}, @code{atmega3290a}, @code{atmega3290p}, @code{atmega406}, @code{atmega64}, @code{atmega640}, @code{atmega644}, @code{atmega644a}, @code{atmega644p}, @code{atmega644pa}, @code{atmega645}, @code{atmega645a}, @code{atmega645p}, @code{atmega6450}, @code{atmega6450a}, @code{atmega6450p}, @code{atmega649}, @code{atmega649a}, @code{atmega649p}, @code{atmega6490}, @code{atmega16hva}, @code{atmega16hva2}, @code{atmega16hvb}, @code{atmega32hvb}, @code{atmega64hve}, @code{at90can32}, @code{at90can64}, @code{at90pwm216}, @code{at90pwm316}, @code{atmega32c1}, @code{atmega64c1}, @code{atmega16m1}, @code{atmega32m1}, @code{atmega64m1}, @code{atmega16u4}, @code{atmega32u4}, @code{atmega32u6}, @code{at90scr100}, @code{at90usb646}, @code{at90usb647}, @code{at94k}, @code{m3000}.
+
+@item avr51
+``Enhanced'' devices with 128@tie{}KiB of program memory.
+@*@var{mcu}@tie{}= @code{atmega128}, @code{atmega1280}, @code{atmega1281}, @code{atmega1284p}, @code{atmega128rfa1}, @code{at90can128}, @code{at90usb1286}, @code{at90usb1287}.
+
+@item avr6
+``Enhanced'' devices with 3-byte PC, i.e.@: with more than 128@tie{}KiB of program memory.
+@*@var{mcu}@tie{}= @code{atmega2560}, @code{atmega2561}.
+
+@item avrxmega2
+``XMEGA'' devices with more than 8@tie{}KiB and up to 64@tie{}KiB of program memory.
+@*@var{mcu}@tie{}= @code{atxmega16a4}, @code{atxmega16d4}, @code{atxmega16x1}, @code{atxmega32a4}, @code{atxmega32d4}, @code{atxmega32x1}.
+
+@item avrxmega4
+``XMEGA'' devices with more than 64@tie{}KiB and up to 128@tie{}KiB of program memory.
+@*@var{mcu}@tie{}= @code{atxmega64a3}, @code{atxmega64d3}.
+
+@item avrxmega5
+``XMEGA'' devices with more than 64@tie{}KiB and up to 128@tie{}KiB of program memory and more than 64@tie{}KiB of RAM.
+@*@var{mcu}@tie{}= @code{atxmega64a1}, @code{atxmega64a1u}.
+
+@item avrxmega6
+``XMEGA'' devices with more than 128@tie{}KiB of program memory.
+@*@var{mcu}@tie{}= @code{atxmega128a3}, @code{atxmega128d3}, @code{atxmega192a3}, @code{atxmega192d3}, @code{atxmega256a3}, @code{atxmega256a3b}, @code{atxmega256a3bu}, @code{atxmega256d3}.
+
+@item avrxmega7
+``XMEGA'' devices with more than 128@tie{}KiB of program memory and more than 64@tie{}KiB of RAM.
+@*@var{mcu}@tie{}= @code{atxmega128a1}, @code{atxmega128a1u}.
+
+@item avr1
+This ISA is implemented by the minimal AVR core and supported for assembler only.
+@*@var{mcu}@tie{}= @code{at90s1200}, @code{attiny11}, @code{attiny12}, @code{attiny15}, @code{attiny28}.
+
+@end table
diff --git a/gcc/doc/extend.texi b/gcc/doc/extend.texi
index 95cea834407..6aaf4535647 100644
--- a/gcc/doc/extend.texi
+++ b/gcc/doc/extend.texi
@@ -2714,6 +2714,51 @@ then be sure to write this declaration in both files.
This attribute is ignored for R8C target.
+@item ifunc ("@var{resolver}")
+@cindex @code{ifunc} attribute
+The @code{ifunc} attribute is used to mark a function as an indirect
+function using the STT_GNU_IFUNC symbol type extension to the ELF
+standard. This allows the resolution of the symbol value to be
+determined dynamically at load time, and an optimized version of the
+routine can be selected for the particular processor or other system
+characteristics determined then. To use this attribute, first define
+the implementation functions available, and a resolver function that
+returns a pointer to the selected implementation function. The
+implementation functions' declarations must match the API of the
+function being implemented, the resolver's declaration should be a
+function returning a pointer to a function returning void:
+
+@smallexample
+void *my_memcpy (void *dst, const void *src, size_t len)
+@{
+ @dots{}
+@}
+
+static void (*resolve_memcpy (void)) (void)
+@{
+ return my_memcpy; // we'll just always select this routine
+@}
+@end smallexample
+
+The exported header file declaring the function the user calls would
+contain:
+
+@smallexample
+extern void *memcpy (void *, const void *, size_t);
+@end smallexample
+
+allowing the user to call this as a regular function, unaware of the
+implementation. Finally, the indirect function needs to be defined in
+the same translation unit as the resolver function:
+
+@smallexample
+void *memcpy (void *, const void *, size_t)
+ __attribute__ ((ifunc ("resolve_memcpy")));
+@end smallexample
+
+Indirect functions cannot be weak, and require a recent binutils (at
+least version 2.20.1), and GNU C library (at least version 2.11.1).
+
@item interrupt
@cindex interrupt handler functions
Use this attribute on the ARM, AVR, CR16, Epiphany, M32C, M32R/D, m68k, MeP, MIPS,
@@ -2726,7 +2771,13 @@ code to initialize the interrupt vector table.
Note, interrupt handlers for the Blackfin, H8/300, H8/300H, H8S, MicroBlaze,
and SH processors can be specified via the @code{interrupt_handler} attribute.
-Note, on the AVR, interrupts will be enabled inside the function.
+Note, on the AVR, the hardware globally disables interrupts when an
+interrupt is executed. The first instruction of an interrupt handler
+declared with this attribute will be a @code{SEI} instruction to
+re-enable interrupts. See also the @code{signal} function attribute
+that does not insert a @code{SEI} instruction. If both @code{signal} and
+@code{interrupt} are specified for the same function, @code{signal}
+will be silently ignored.
Note, for the ARM, you can specify the kind of interrupt to be handled by
adding an optional parameter to the interrupt attribute like this:
@@ -2822,51 +2873,6 @@ On RL78, use @code{brk_interrupt} instead of @code{interrupt} for
handlers intended to be used with the @code{BRK} opcode (i.e. those
that must end with @code{RETB} instead of @code{RETI}).
-@item ifunc ("@var{resolver}")
-@cindex @code{ifunc} attribute
-The @code{ifunc} attribute is used to mark a function as an indirect
-function using the STT_GNU_IFUNC symbol type extension to the ELF
-standard. This allows the resolution of the symbol value to be
-determined dynamically at load time, and an optimized version of the
-routine can be selected for the particular processor or other system
-characteristics determined then. To use this attribute, first define
-the implementation functions available, and a resolver function that
-returns a pointer to the selected implementation function. The
-implementation functions' declarations must match the API of the
-function being implemented, the resolver's declaration is be a
-function returning pointer to void function returning void:
-
-@smallexample
-void *my_memcpy (void *dst, const void *src, size_t len)
-@{
- @dots{}
-@}
-
-static void (*resolve_memcpy (void)) (void)
-@{
- return my_memcpy; // we'll just always select this routine
-@}
-@end smallexample
-
-The exported header file declaring the function the user calls would
-contain:
-
-@smallexample
-extern void *memcpy (void *, const void *, size_t);
-@end smallexample
-
-allowing the user to call this as a regular function, unaware of the
-implementation. Finally, the indirect function needs to be defined in
-the same translation unit as the resolver function:
-
-@smallexample
-void *memcpy (void *, const void *, size_t)
- __attribute__ ((ifunc ("resolve_memcpy")));
-@end smallexample
-
-Indirect functions cannot be weak, and require a recent binutils (at
-least version 2.20.1), and GNU C library (at least version 2.11.1).
-
@item interrupt_handler
@cindex interrupt handler functions on the Blackfin, m68k, H8/300 and SH processors
Use this attribute on the Blackfin, m68k, H8/300, H8/300H, H8S, and SH to
@@ -3471,11 +3477,23 @@ See long_call/short_call.
See longcall/shortcall.
@item signal
-@cindex signal handler functions on the AVR processors
+@cindex interrupt handler functions on the AVR processors
Use this attribute on the AVR to indicate that the specified
-function is a signal handler. The compiler will generate function
-entry and exit sequences suitable for use in a signal handler when this
-attribute is present. Interrupts will be disabled inside the function.
+function is an interrupt handler. The compiler will generate function
+entry and exit sequences suitable for use in an interrupt handler when this
+attribute is present.
+
+See also the @code{interrupt} function attribute.
+
+The AVR hardware globally disables interrupts when an interrupt is executed.
+Interrupt handler functions defined with the @code{signal} attribute
+do not re-enable interrupts. It is safe to enable interrupts in a
+@code{signal} handler. This ``safe'' only applies to the code
+generated by the compiler and not to the IRQ-layout of the
+application which is the responsibility of the application.
+
+If both @code{signal} and @code{interrupt} are specified for the same
+function, @code{signal} will be silently ignored.
@item sp_switch
Use this attribute on the SH to indicate an @code{interrupt_handler}
@@ -4102,8 +4120,7 @@ namespace is now in use for GCC-specific pragmas. However, it has been
found convenient to use @code{__attribute__} to achieve a natural
attachment of attributes to their corresponding declarations, whereas
@code{#pragma GCC} is of use for constructs that do not naturally form
-part of the grammar. @xref{Other Directives,,Miscellaneous
-Preprocessing Directives, cpp, The GNU C Preprocessor}.
+part of the grammar. @xref{Pragmas,,Pragmas Accepted by GCC}.
@node Attribute Syntax
@section Attribute Syntax
@@ -7093,7 +7110,8 @@ to the same names in the C++11 standard. Refer there or to the
atomic synchronization} for more detailed definitions. These memory
models integrate both barriers to code motion as well as synchronization
requirements with other threads. These are listed in approximately
-ascending order of strength.
+ascending order of strength. It is also possible to use target specific
+flags for the memory model, such as Hardware Lock Elision.
@table @code
@item __ATOMIC_RELAXED
@@ -9431,8 +9449,9 @@ executed before any constructors are called. The CPU detection code is
automatically executed in a very high priority constructor.
For example, this function has to be used in @code{ifunc} resolvers which
-check for CPU type using the builtins, @code{__builtin_cpu_is}
-and @code{__builtin_cpu_supports}.
+check for CPU type using the builtins @code{__builtin_cpu_is}
+and @code{__builtin_cpu_supports}, or in constructors on targets which
+don't support constructor priority.
@smallexample
static void (*resolve_memcpy (void)) (void)
diff --git a/gcc/doc/fragments.texi b/gcc/doc/fragments.texi
index 31674f9a5d0..f53df29603e 100644
--- a/gcc/doc/fragments.texi
+++ b/gcc/doc/fragments.texi
@@ -1,5 +1,5 @@
@c Copyright (C) 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
-@c 1999, 2000, 2001, 2003, 2004, 2005, 2008, 2011
+@c 1999, 2000, 2001, 2003, 2004, 2005, 2008, 2011, 2012
@c Free Software Foundation, Inc.
@c This is part of the GCC manual.
@c For copying conditions, see the file gcc.texi.
@@ -121,6 +121,29 @@ options enabled. Therefore @code{MULTILIB_EXCEPTIONS} is set to:
*mthumb/*mhard-float*
@end smallexample
+@findex MULTILIB_REQUIRED
+@item MULTILIB_REQUIRED
+Sometimes, when only a few combinations are required, it would
+be a big effort to come up with a @code{MULTILIB_EXCEPTIONS} list to
+cover all undesired ones. In such a case, just listing all the required
+combinations in @code{MULTILIB_REQUIRED} would be more straightforward.
+
+The way to specify the entries in @code{MULTILIB_REQUIRED} is the same as
+the way used for @code{MULTILIB_EXCEPTIONS}, only this time what are
+required will be specified. Suppose there are multiple sets of
+@code{MULTILIB_OPTIONS} and only two combinations are required, one
+for ARMv7-M and one for ARMv7-R with hard floating-point ABI and FPU, the
+@code{MULTILIB_REQUIRED} can be set to:
+@smallexample
+@code{MULTILIB_REQUIRED} = mthumb/march=armv7-m
+@code{MULTILIB_REQUIRED} += march=armv7-r/mfloat-abi=hard/mfpu=vfpv3-d16
+@end smallexample
+
+The @code{MULTILIB_REQUIRED} can be used together with
+@code{MULTILIB_EXCEPTIONS}. The option combinations generated from
+@code{MULTILIB_OPTIONS} will be filtered by @code{MULTILIB_EXCEPTIONS}
+and then by @code{MULTILIB_REQUIRED}.
+
@findex MULTILIB_EXTRA_OPTS
@item MULTILIB_EXTRA_OPTS
Sometimes it is desirable that when building multiple versions of
diff --git a/gcc/doc/gcov.texi b/gcc/doc/gcov.texi
index e771f72e402..7256664c0ee 100644
--- a/gcc/doc/gcov.texi
+++ b/gcc/doc/gcov.texi
@@ -538,6 +538,12 @@ now be calculable at compile time in some instances. Because the
coverage of all the uses of the inline function will be shown for the
same source lines, the line counts themselves might seem inconsistent.
+Long-running applications can use the @code{_gcov_reset} and @code{_gcov_dump}
+facilities to restrict profile collection to the program region of
+interest. Calling @code{_gcov_reset(void)} will clear all profile counters
+to zero, and calling @code{_gcov_dump(void)} will cause the profile information
+collected at that point to be dumped to @file{.gcda} output files.
+
@c man end
@node Gcov Data Files
diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
index e394ca9f48a..66551da4112 100644
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -3627,6 +3627,7 @@ To suppress this warning use the @samp{unused} attribute
@item -Wunused-local-typedefs @r{(C, Objective-C, C++ and Objective-C++ only)}
@opindex Wunused-local-typedefs
Warn when a typedef locally defined in a function is not used.
+This warning is enabled by @option{-Wall}.
@item -Wunused-parameter
@opindex Wunused-parameter
@@ -11044,88 +11045,7 @@ The default for this option is@tie{}@code{avr2}.
GCC supports the following AVR devices and ISAs:
-@table @code
-
-@item avr1
-This ISA is implemented by the minimal AVR core and supported
-for assembler only.
-@*@var{mcu}@tie{}= @code{at90s1200},
-@code{attiny10}, @code{attiny11}, @code{attiny12}, @code{attiny15},
-@code{attiny28}.
-
-@item avr2
-``Classic'' devices with up to 8@tie{}KiB of program memory.
-@*@var{mcu}@tie{}= @code{at90s2313}, @code{attiny26}, @code{at90c8534},
-@dots{}
-
-@item avr25
-``Classic'' devices with up to 8@tie{}KiB of program memory and with
-the @code{MOVW} instruction.
-@*@var{mcu}@tie{}= @code{attiny2313}, @code{attiny261}, @code{attiny24},
-@dots{}
-
-@item avr3
-``Classic'' devices with 16@tie{}KiB up to 64@tie{}KiB of program memory.
-@*@var{mcu}@tie{}= @code{at43usb355}, @code{at76c711}.
-
-@item avr31
-``Classic'' devices with 128@tie{}KiB of program memory.
-@*@var{mcu}@tie{}= @code{atmega103}, @code{at43usb320}.
-
-@item avr35
-``Classic'' devices with 16@tie{}KiB up to 64@tie{}KiB of program
-memory and with the @code{MOVW} instruction.
-@*@var{mcu}@tie{}= @code{at90usb162}, @code{atmega8u2},
-@code{attiny167}, @dots{}
-
-@item avr4
-``Enhanced'' devices with up to 8@tie{}KiB of program memory.
-@*@var{mcu}@tie{}= @code{atmega8}, @code{atmega88}, @code{at90pwm81},
-@dots{}
-
-@item avr5
-``Enhanced'' devices with 16@tie{}KiB up to 64@tie{}KiB of program memory.
-@*@var{mcu}@tie{}= @code{atmega16}, @code{atmega6490}, @code{at90can64},
-@dots{}
-
-@item avr51
-``Enhanced'' devices with 128@tie{}KiB of program memory.
-@*@var{mcu}@tie{}= @code{atmega128}, @code{at90can128}, @code{at90usb1287},
-@dots{}
-
-@item avr6
-``Enhanced'' devices with 3-byte PC, i.e.@: with at least 256@tie{}KiB
-of program memory.
-@*@var{mcu}@tie{}= @code{atmega2560}, @code{atmega2561}.
-
-@item avrxmega2
-``XMEGA'' devices with more than 8@tie{}KiB and up to 64@tie{}KiB
-of program memory.
-@*@var{mcu}@tie{}= @code{atxmega16a4}, @code{atxmega16d4},
-@dots{}
-
-@item avrxmega4
-``XMEGA'' devices with more than 64@tie{}KiB and up to 128@tie{}KiB
-of program memory.
-@*@var{mcu}@tie{}= @code{atxmega64a3}, @code{atxmega64d3}.
-
-@item avrxmega5
-``XMEGA'' devices with more than 64@tie{}KiB and up to 128@tie{}KiB
-of program memory and more than 64@tie{}KiB of RAM.
-@*@var{mcu}@tie{}= @code{atxmega64a1}, @code{atxmega64a1u}.
-
-@item avrxmega6
-``XMEGA'' devices with more than 128@tie{}KiB of program memory.
-@*@var{mcu}@tie{}= @code{atxmega128a3}, @code{atxmega192d3},
-@dots{}
-
-@item avrxmega7
-``XMEGA'' devices with more than 128@tie{}KiB of program memory and
-more than 64@tie{}KiB of RAM.
-@*@var{mcu}@tie{}= @code{atxmega128a1}, @code{atxmega128a1u}.
-
-@end table
-
+@include avr-mmcu.texi
@item -maccumulate-args
@opindex maccumulate-args
diff --git a/gcc/doc/md.texi b/gcc/doc/md.texi
index bca84a72ce5..73c800bbd04 100644
--- a/gcc/doc/md.texi
+++ b/gcc/doc/md.texi
@@ -5614,6 +5614,13 @@ the stack farthest from the current stack pointer that you need to validate.
Normally, on platforms where this pattern is needed, you would obtain the
stack limit from a global or thread-specific variable or register.
+@cindex @code{probe_stack_address} instruction pattern
+@item @samp{probe_stack_address}
+If stack checking (@pxref{Stack Checking}) can be done on your system by
+probing the stack but without the need to actually access it, define this
+pattern and signal an error if the stack has overflowed. The single operand
+is the memory address in the stack that needs to be probed.
+
@cindex @code{probe_stack} instruction pattern
@item @samp{probe_stack}
If stack checking (@pxref{Stack Checking}) can be done on your system by
@@ -7707,12 +7714,7 @@ scheduled.
@var{name} is a string specifying one of a fixed set of flags to test.
Test the flags @code{forward} and @code{backward} to determine the
-direction of a conditional branch. Test the flags @code{very_likely},
-@code{likely}, @code{very_unlikely}, and @code{unlikely} to determine
-if a conditional branch is expected to be taken.
-
-If the @code{very_likely} flag is true, then the @code{likely} flag is also
-true. Likewise for the @code{very_unlikely} and @code{unlikely} flags.
+direction of a conditional branch.
This example describes a conditional branch delay slot which
can be nullified for forward branches that are taken (annul-true) or
@@ -7730,11 +7732,6 @@ for backward branches which are not taken (annul-false).
The @code{forward} and @code{backward} flags are false if the current
@code{insn} being scheduled is not a conditional branch.
-The @code{very_likely} and @code{likely} flags are true if the
-@code{insn} being scheduled is not a conditional branch.
-The @code{very_unlikely} and @code{unlikely} flags are false if the
-@code{insn} being scheduled is not a conditional branch.
-
@code{attr_flag} is only used during delay slot scheduling and has no
meaning to other passes of the compiler.
diff --git a/gcc/doc/tm.texi b/gcc/doc/tm.texi
index 2891bb66231..e3245d0f993 100644
--- a/gcc/doc/tm.texi
+++ b/gcc/doc/tm.texi
@@ -11362,6 +11362,11 @@ MIPS, where add-immediate takes a 16-bit signed value,
@code{TARGET_CONST_ANCHOR} is set to @samp{0x8000}. The default value
is zero, which disables this optimization. @end deftypevr
+@deftypefn {Target Hook} {unsigned HOST_WIDE_INT} TARGET_MEMMODEL_CHECK (unsigned HOST_WIDE_INT @var{val})
+Validate target specific memory model mask bits. When NULL no target specific
+memory model bits are allowed.
+@end deftypefn
+
@deftypevr {Target Hook} {unsigned char} TARGET_ATOMIC_TEST_AND_SET_TRUEVAL
This value should be set if the result written by @code{atomic_test_and_set} is not exactly 1, i.e. the @code{bool} @code{true}.
@end deftypevr
diff --git a/gcc/doc/tm.texi.in b/gcc/doc/tm.texi.in
index a222654ac1f..51687ce8546 100644
--- a/gcc/doc/tm.texi.in
+++ b/gcc/doc/tm.texi.in
@@ -11242,4 +11242,9 @@ MIPS, where add-immediate takes a 16-bit signed value,
@code{TARGET_CONST_ANCHOR} is set to @samp{0x8000}. The default value
is zero, which disables this optimization. @end deftypevr
+@hook TARGET_MEMMODEL_CHECK
+Validate target specific memory model mask bits. When NULL no target specific
+memory model bits are allowed.
+@end deftypefn
+
@hook TARGET_ATOMIC_TEST_AND_SET_TRUEVAL
diff --git a/gcc/dse.c b/gcc/dse.c
index 19d938301bb..6bc48c8efe1 100644
--- a/gcc/dse.c
+++ b/gcc/dse.c
@@ -1146,8 +1146,7 @@ canon_address (rtx mem,
HOST_WIDE_INT *offset,
cselib_val **base)
{
- enum machine_mode address_mode
- = targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
+ enum machine_mode address_mode = get_address_mode (mem);
rtx mem_address = XEXP (mem, 0);
rtx expanded_address, address;
int expanded;
@@ -1561,7 +1560,7 @@ record_store (rtx body, bb_info_t bb_info)
mem_addr = group->canon_base_addr;
}
if (offset)
- mem_addr = plus_constant (mem_addr, offset);
+ mem_addr = plus_constant (get_address_mode (mem), mem_addr, offset);
}
while (ptr)
@@ -2178,7 +2177,7 @@ check_mem_read_rtx (rtx *loc, void *data)
mem_addr = group->canon_base_addr;
}
if (offset)
- mem_addr = plus_constant (mem_addr, offset);
+ mem_addr = plus_constant (get_address_mode (mem), mem_addr, offset);
}
/* We ignore the clobbers in store_info. The is mildly aggressive,
diff --git a/gcc/dwarf2out.c b/gcc/dwarf2out.c
index 8bbf95477a7..1e5e335cdb3 100644
--- a/gcc/dwarf2out.c
+++ b/gcc/dwarf2out.c
@@ -2473,7 +2473,7 @@ typedef struct GTY((chain_circular ("%h.die_sib"))) die_struct {
const char * GTY ((tag ("0"))) die_symbol;
comdat_type_node_ref GTY ((tag ("1"))) die_type_node;
}
- GTY ((desc ("use_debug_types"))) die_id;
+ GTY ((desc ("%0.comdat_type_p"))) die_id;
VEC(dw_attr_node,gc) * die_attr;
dw_die_ref die_parent;
dw_die_ref die_child;
@@ -2482,10 +2482,12 @@ typedef struct GTY((chain_circular ("%h.die_sib"))) die_struct {
dw_offset die_offset;
unsigned long die_abbrev;
int die_mark;
- /* Die is used and must not be pruned as unused. */
- int die_perennial_p;
unsigned int decl_id;
enum dwarf_tag die_tag;
+ /* Die is used and must not be pruned as unused. */
+ BOOL_BITFIELD die_perennial_p : 1;
+ BOOL_BITFIELD comdat_type_p : 1; /* DIE has a type signature */
+ /* Lots of spare bits. */
}
die_node;
@@ -2984,7 +2986,7 @@ static void htab_cu_del (void *);
static int check_duplicate_cu (dw_die_ref, htab_t, unsigned *);
static void record_comdat_symbol_number (dw_die_ref, htab_t, unsigned);
static void add_sibling_attributes (dw_die_ref);
-static void build_abbrev_table (dw_die_ref);
+static void build_abbrev_table (dw_die_ref, htab_t);
static void output_location_lists (dw_die_ref);
static int constant_size (unsigned HOST_WIDE_INT);
static unsigned long size_of_die (dw_die_ref);
@@ -3716,6 +3718,16 @@ add_AT_die_ref (dw_die_ref die, enum dwarf_attribute attr_kind, dw_die_ref targ_
add_dwarf_attr (die, &attr);
}
+/* Change DIE reference REF to point to NEW_DIE instead. */
+
+static inline void
+change_AT_die_ref (dw_attr_ref ref, dw_die_ref new_die)
+{
+ gcc_assert (ref->dw_attr_val.val_class == dw_val_class_die_ref);
+ ref->dw_attr_val.v.val_die_ref.die = new_die;
+ ref->dw_attr_val.v.val_die_ref.external = 0;
+}
+
/* Add an AT_specification attribute to a DIE, and also make the back
pointer from the specification to the definition. */
@@ -4767,7 +4779,7 @@ print_die (dw_die_ref die, FILE *outfile)
fprintf (outfile, " offset: %ld", die->die_offset);
fprintf (outfile, " mark: %d\n", die->die_mark);
- if (use_debug_types && die->die_id.die_type_node)
+ if (die->comdat_type_p)
{
print_spaces (outfile);
fprintf (outfile, " signature: ");
@@ -4819,13 +4831,13 @@ print_die (dw_die_ref die, FILE *outfile)
case dw_val_class_die_ref:
if (AT_ref (a) != NULL)
{
- if (use_debug_types && AT_ref (a)->die_id.die_type_node)
+ if (AT_ref (a)->comdat_type_p)
{
fprintf (outfile, "die -> signature: ");
print_signature (outfile,
AT_ref (a)->die_id.die_type_node->signature);
}
- else if (! use_debug_types && AT_ref (a)->die_id.die_symbol)
+ else if (AT_ref (a)->die_id.die_symbol)
fprintf (outfile, "die -> label: %s",
AT_ref (a)->die_id.die_symbol);
else
@@ -5653,13 +5665,17 @@ generate_type_signature (dw_die_ref die, comdat_type_node *type_node)
type node together. */
memcpy (type_node->signature, &checksum[16 - DWARF_TYPE_SIGNATURE_SIZE],
DWARF_TYPE_SIGNATURE_SIZE);
+ die->comdat_type_p = true;
die->die_id.die_type_node = type_node;
type_node->type_die = die;
/* If the DIE is a specification, link its declaration to the type node
as well. */
if (decl != NULL)
- decl->die_id.die_type_node = type_node;
+ {
+ decl->comdat_type_p = true;
+ decl->die_id.die_type_node = type_node;
+ }
}
/* Do the location expressions look same? */
@@ -5966,7 +5982,7 @@ assign_symbol_names (dw_die_ref die)
{
dw_die_ref c;
- if (is_symbol_die (die))
+ if (is_symbol_die (die) && !die->comdat_type_p)
{
if (comdat_symbol_id)
{
@@ -6271,7 +6287,12 @@ clone_as_declaration (dw_die_ref die)
/* If the DIE is a specification, just clone its declaration DIE. */
decl = get_AT_ref (die, DW_AT_specification);
if (decl != NULL)
- return clone_die (decl);
+ {
+ clone = clone_die (decl);
+ if (die->comdat_type_p)
+ add_AT_die_ref (clone, DW_AT_signature, die);
+ return clone;
+ }
clone = ggc_alloc_cleared_die_node ();
clone->die_tag = die->die_tag;
@@ -6300,7 +6321,7 @@ clone_as_declaration (dw_die_ref die)
}
}
- if (die->die_id.die_type_node)
+ if (die->comdat_type_p)
add_AT_die_ref (clone, DW_AT_signature, die);
add_AT_flag (clone, DW_AT_declaration, 1);
@@ -6335,6 +6356,7 @@ copy_declaration_context (dw_die_ref unit, dw_die_ref die)
/* Copy the type node pointer from the new DIE to the original
declaration DIE so we can forward references later. */
+ decl->comdat_type_p = true;
decl->die_id.die_type_node = die->die_id.die_type_node;
remove_AT (die, DW_AT_specification);
@@ -6469,6 +6491,7 @@ remove_child_or_replace_with_skeleton (dw_die_ref unit, dw_die_ref child,
remove_child_with_prev (child, prev);
else
{
+ skeleton->comdat_type_p = true;
skeleton->die_id.die_type_node = child->die_id.die_type_node;
/* If the original DIE was a specification, we need to put
@@ -6684,11 +6707,10 @@ copy_decls_walk (dw_die_ref unit, dw_die_ref die, htab_t decl_table)
if (AT_class (a) == dw_val_class_die_ref)
{
dw_die_ref targ = AT_ref (a);
- comdat_type_node_ref type_node = targ->die_id.die_type_node;
void **slot;
struct decl_table_entry *entry;
- if (targ->die_mark != 0 || type_node != NULL)
+ if (targ->die_mark != 0 || targ->comdat_type_p)
continue;
slot = htab_find_slot_with_hash (decl_table, targ,
@@ -6809,13 +6831,159 @@ output_location_lists (dw_die_ref die)
FOR_EACH_CHILD (die, c, output_location_lists (c));
}
+/* We want to limit the number of external references, because they are
+ larger than local references: a relocation takes multiple words, and
+ even a sig8 reference is always eight bytes, whereas a local reference
+ can be as small as one byte (though DW_FORM_ref is usually 4 in GCC).
+ So if we encounter multiple external references to the same type DIE, we
+ make a local typedef stub for it and redirect all references there.
+
+ This is the element of the hash table for keeping track of these
+ references. */
+
+struct external_ref
+{
+ dw_die_ref type;
+ dw_die_ref stub;
+ unsigned n_refs;
+};
+
+/* Hash an external_ref. */
+
+static hashval_t
+hash_external_ref (const void *p)
+{
+ const struct external_ref *r = (const struct external_ref *)p;
+ return htab_hash_pointer (r->type);
+}
+
+/* Compare external_refs. */
+
+static int
+external_ref_eq (const void *p1, const void *p2)
+{
+ const struct external_ref *r1 = (const struct external_ref *)p1;
+ const struct external_ref *r2 = (const struct external_ref *)p2;
+ return r1->type == r2->type;
+}
+
+/* Return a pointer to the external_ref for references to DIE. */
+
+static struct external_ref *
+lookup_external_ref (htab_t map, dw_die_ref die)
+{
+ struct external_ref ref, *ref_p;
+ void ** slot;
+
+ ref.type = die;
+ slot = htab_find_slot (map, &ref, INSERT);
+ if (*slot != HTAB_EMPTY_ENTRY)
+ return (struct external_ref *) *slot;
+
+ ref_p = XCNEW (struct external_ref);
+ ref_p->type = die;
+ *slot = ref_p;
+ return ref_p;
+}
+
+/* Subroutine of optimize_external_refs, below.
+
+ If we see a type skeleton, record it as our stub. If we see external
+ references, remember how many we've seen. */
+
+static void
+optimize_external_refs_1 (dw_die_ref die, htab_t map)
+{
+ dw_die_ref c;
+ dw_attr_ref a;
+ unsigned ix;
+ struct external_ref *ref_p;
+
+ if (is_type_die (die)
+ && (c = get_AT_ref (die, DW_AT_signature)))
+ {
+ /* This is a local skeleton; use it for local references. */
+ ref_p = lookup_external_ref (map, c);
+ ref_p->stub = die;
+ }
+
+ /* Scan the DIE references, and remember any that refer to DIEs from
+ other CUs (i.e. those which are not marked). */
+ FOR_EACH_VEC_ELT (dw_attr_node, die->die_attr, ix, a)
+ if (AT_class (a) == dw_val_class_die_ref
+ && (c = AT_ref (a))->die_mark == 0
+ && is_type_die (c))
+ {
+ ref_p = lookup_external_ref (map, c);
+ ref_p->n_refs++;
+ }
+
+ FOR_EACH_CHILD (die, c, optimize_external_refs_1 (c, map));
+}
+
+/* htab_traverse callback function for optimize_external_refs, below. SLOT
+ points to an external_ref, DATA is the CU we're processing. If we don't
+ already have a local stub, and we have multiple refs, build a stub. */
+
+static int
+build_local_stub (void **slot, void *data)
+{
+ struct external_ref *ref_p = (struct external_ref *)*slot;
+ dw_die_ref cu = (dw_die_ref) data;
+ dw_die_ref type = ref_p->type;
+ dw_die_ref stub = NULL;
+
+ if (ref_p->stub == NULL && ref_p->n_refs > 1)
+ {
+ if (!dwarf_strict)
+ {
+ /* If we aren't being strict, use a typedef with no name
+ to just forward to the real type. In strict DWARF, a
+ typedef must have a name. */
+ stub = new_die (DW_TAG_typedef, cu, NULL_TREE);
+ add_AT_die_ref (stub, DW_AT_type, type);
+ }
+ else if (type->comdat_type_p)
+ {
+ /* If we refer to this type via sig8, we can use a simple
+ declaration; this is larger than the typedef, but strictly
+ correct. */
+ stub = new_die (type->die_tag, cu, NULL_TREE);
+ add_AT_string (stub, DW_AT_name, get_AT_string (type, DW_AT_name));
+ add_AT_flag (stub, DW_AT_declaration, 1);
+ add_AT_die_ref (stub, DW_AT_signature, type);
+ }
+
+ if (stub)
+ {
+ stub->die_mark++;
+ ref_p->stub = stub;
+ }
+ }
+ return 1;
+}
+
+/* DIE is a unit; look through all the DIE references to see if there are
+ any external references to types, and if so, create local stubs for
+ them which will be applied in build_abbrev_table. This is useful because
+ references to local DIEs are smaller. */
+
+static htab_t
+optimize_external_refs (dw_die_ref die)
+{
+ htab_t map = htab_create (10, hash_external_ref, external_ref_eq, free);
+ optimize_external_refs_1 (die, map);
+ htab_traverse (map, build_local_stub, die);
+ return map;
+}
+
/* The format of each DIE (and its attribute value pairs) is encoded in an
abbreviation table. This routine builds the abbreviation table and assigns
a unique abbreviation id for each abbreviation entry. The children of each
die are visited recursively. */
static void
-build_abbrev_table (dw_die_ref die)
+build_abbrev_table (dw_die_ref die, htab_t extern_map)
{
unsigned long abbrev_id;
unsigned int n_alloc;
@@ -6823,14 +6991,22 @@ build_abbrev_table (dw_die_ref die)
dw_attr_ref a;
unsigned ix;
- /* Scan the DIE references, and mark as external any that refer to
- DIEs from other CUs (i.e. those which are not marked). */
+ /* Scan the DIE references, and replace any that refer to
+ DIEs from other CUs (i.e. those which are not marked) with
+ the local stubs we built in optimize_external_refs. */
FOR_EACH_VEC_ELT (dw_attr_node, die->die_attr, ix, a)
if (AT_class (a) == dw_val_class_die_ref
- && AT_ref (a)->die_mark == 0)
+ && (c = AT_ref (a))->die_mark == 0)
{
- gcc_assert (use_debug_types || AT_ref (a)->die_id.die_symbol);
- set_AT_ref_external (a, 1);
+ struct external_ref *ref_p;
+ gcc_assert (AT_ref (a)->comdat_type_p || AT_ref (a)->die_id.die_symbol);
+
+ ref_p = lookup_external_ref (extern_map, c);
+ if (ref_p->stub && ref_p->stub != die)
+ change_AT_die_ref (a, ref_p->stub);
+ else
+ /* We aren't changing this reference, so mark it external. */
+ set_AT_ref_external (a, 1);
}
for (abbrev_id = 1; abbrev_id < abbrev_die_table_in_use; ++abbrev_id)
@@ -6881,7 +7057,7 @@ build_abbrev_table (dw_die_ref die)
}
die->die_abbrev = abbrev_id;
- FOR_EACH_CHILD (die, c, build_abbrev_table (c));
+ FOR_EACH_CHILD (die, c, build_abbrev_table (c, extern_map));
}
/* Return the power-of-two number of bytes necessary to represent VALUE. */
@@ -7398,6 +7574,8 @@ output_die_symbol (dw_die_ref die)
{
const char *sym = die->die_id.die_symbol;
+ gcc_assert (!die->comdat_type_p);
+
if (sym == 0)
return;
@@ -7522,7 +7700,7 @@ output_die (dw_die_ref die)
/* If someone in another CU might refer to us, set up a symbol for
them to point to. */
- if (! use_debug_types && die->die_id.die_symbol)
+ if (! die->comdat_type_p && die->die_id.die_symbol)
output_die_symbol (die);
dw2_asm_output_data_uleb128 (die->die_abbrev, "(DIE (%#lx) %s)",
@@ -7668,7 +7846,7 @@ output_die (dw_die_ref die)
case dw_val_class_die_ref:
if (AT_ref_external (a))
{
- if (use_debug_types)
+ if (AT_ref (a)->comdat_type_p)
{
comdat_type_node_ref type_node =
AT_ref (a)->die_id.die_type_node;
@@ -7802,6 +7980,7 @@ output_comp_unit (dw_die_ref die, int output_if_empty)
{
const char *secname, *oldsym;
char *tmp;
+ htab_t extern_map;
/* Unless we are outputting main CU, we may throw away empty ones. */
if (!output_if_empty && die->die_child == NULL)
@@ -7814,7 +7993,11 @@ output_comp_unit (dw_die_ref die, int output_if_empty)
this CU so we know which get local refs. */
mark_dies (die);
- build_abbrev_table (die);
+ extern_map = optimize_external_refs (die);
+
+ build_abbrev_table (die, extern_map);
+
+ htab_delete (extern_map);
/* Initialize the beginning DIE offset - and calculate sizes/offsets. */
next_die_offset = DWARF_COMPILE_UNIT_HEADER_SIZE;
@@ -7861,11 +8044,16 @@ output_comdat_type_unit (comdat_type_node *node)
#if defined (OBJECT_FORMAT_ELF)
tree comdat_key;
#endif
+ htab_t extern_map;
/* First mark all the DIEs in this CU so we know which get local refs. */
mark_dies (node->root_die);
- build_abbrev_table (node->root_die);
+ extern_map = optimize_external_refs (node->root_die);
+
+ build_abbrev_table (node->root_die, extern_map);
+
+ htab_delete (extern_map);
/* Initialize the beginning DIE offset - and calculate sizes/offsets. */
next_die_offset = DWARF_COMDAT_TYPE_UNIT_HEADER_SIZE;
@@ -8085,6 +8273,8 @@ output_aranges (unsigned long aranges_length)
FOR_EACH_VEC_ELT (dw_fde_ref, fde_vec, fde_idx, fde)
{
+ if (DECL_IGNORED_P (fde->decl))
+ continue;
if (!fde->in_std_section)
{
dw2_asm_output_addr (DWARF2_ADDR_SIZE, fde->dw_fde_begin,
@@ -9019,6 +9209,7 @@ modified_type_die (tree type, int is_const_type, int is_volatile_type,
tree item_type = NULL;
tree qualified_type;
tree name, low, high;
+ dw_die_ref mod_scope;
if (code == ERROR_MARK)
return NULL;
@@ -9079,6 +9270,8 @@ modified_type_die (tree type, int is_const_type, int is_volatile_type,
/* Else cv-qualified version of named type; fall through. */
}
+ mod_scope = scope_die_for (type, context_die);
+
if (is_const_type
/* If both is_const_type and is_volatile_type, prefer the path
which leads to a qualified type. */
@@ -9086,17 +9279,17 @@ modified_type_die (tree type, int is_const_type, int is_volatile_type,
|| get_qualified_type (type, TYPE_QUAL_CONST) == NULL_TREE
|| get_qualified_type (type, TYPE_QUAL_VOLATILE) != NULL_TREE))
{
- mod_type_die = new_die (DW_TAG_const_type, comp_unit_die (), type);
+ mod_type_die = new_die (DW_TAG_const_type, mod_scope, type);
sub_die = modified_type_die (type, 0, is_volatile_type, context_die);
}
else if (is_volatile_type)
{
- mod_type_die = new_die (DW_TAG_volatile_type, comp_unit_die (), type);
+ mod_type_die = new_die (DW_TAG_volatile_type, mod_scope, type);
sub_die = modified_type_die (type, is_const_type, 0, context_die);
}
else if (code == POINTER_TYPE)
{
- mod_type_die = new_die (DW_TAG_pointer_type, comp_unit_die (), type);
+ mod_type_die = new_die (DW_TAG_pointer_type, mod_scope, type);
add_AT_unsigned (mod_type_die, DW_AT_byte_size,
simple_type_size_in_bits (type) / BITS_PER_UNIT);
item_type = TREE_TYPE (type);
@@ -9107,10 +9300,10 @@ modified_type_die (tree type, int is_const_type, int is_volatile_type,
else if (code == REFERENCE_TYPE)
{
if (TYPE_REF_IS_RVALUE (type) && dwarf_version >= 4)
- mod_type_die = new_die (DW_TAG_rvalue_reference_type, comp_unit_die (),
+ mod_type_die = new_die (DW_TAG_rvalue_reference_type, mod_scope,
type);
else
- mod_type_die = new_die (DW_TAG_reference_type, comp_unit_die (), type);
+ mod_type_die = new_die (DW_TAG_reference_type, mod_scope, type);
add_AT_unsigned (mod_type_die, DW_AT_byte_size,
simple_type_size_in_bits (type) / BITS_PER_UNIT);
item_type = TREE_TYPE (type);
@@ -10778,17 +10971,6 @@ parameter_ref_descriptor (rtx rtl)
return ret;
}
-/* Helper function to get mode of MEM's address. */
-
-enum machine_mode
-get_address_mode (rtx mem)
-{
- enum machine_mode mode = GET_MODE (XEXP (mem, 0));
- if (mode != VOIDmode)
- return mode;
- return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
-}
-
/* The following routine converts the RTL for a variable or parameter
(resident in memory) into an equivalent Dwarf representation of a
mechanism for getting the address of that same variable onto the top of a
@@ -14093,11 +14275,12 @@ rtl_for_decl_location (tree decl)
&& (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (decl)))
< UNITS_PER_WORD))
{
+ enum machine_mode addr_mode = get_address_mode (rtl);
int offset = (UNITS_PER_WORD
- GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (decl))));
rtl = gen_rtx_MEM (TYPE_MODE (TREE_TYPE (decl)),
- plus_constant (XEXP (rtl, 0), offset));
+ plus_constant (addr_mode, XEXP (rtl, 0), offset));
}
}
else if (TREE_CODE (decl) == VAR_DECL
@@ -14106,6 +14289,7 @@ rtl_for_decl_location (tree decl)
&& GET_MODE (rtl) != TYPE_MODE (TREE_TYPE (decl))
&& BYTES_BIG_ENDIAN)
{
+ enum machine_mode addr_mode = get_address_mode (rtl);
int rsize = GET_MODE_SIZE (GET_MODE (rtl));
int dsize = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (decl)));
@@ -14117,7 +14301,8 @@ rtl_for_decl_location (tree decl)
else gdb will not be able to display it. */
if (rsize > dsize)
rtl = gen_rtx_MEM (TYPE_MODE (TREE_TYPE (decl)),
- plus_constant (XEXP (rtl, 0), rsize-dsize));
+ plus_constant (addr_mode, XEXP (rtl, 0),
+ rsize - dsize));
}
/* A variable with no DECL_RTL but a DECL_INITIAL is a compile-time constant,
@@ -14971,6 +15156,7 @@ add_subscript_info (dw_die_ref type_die, tree type, bool collapse_p)
static void
add_byte_size_attribute (dw_die_ref die, tree tree_node)
{
+ dw_die_ref decl_die;
unsigned size;
switch (TREE_CODE (tree_node))
@@ -14982,6 +15168,12 @@ add_byte_size_attribute (dw_die_ref die, tree tree_node)
case RECORD_TYPE:
case UNION_TYPE:
case QUAL_UNION_TYPE:
+ if (TREE_CODE (TYPE_SIZE_UNIT (tree_node)) == VAR_DECL
+ && (decl_die = lookup_decl_die (TYPE_SIZE_UNIT (tree_node))))
+ {
+ add_AT_die_ref (die, DW_AT_byte_size, decl_die);
+ return;
+ }
size = int_size_in_bytes (tree_node);
break;
case FIELD_DECL:
@@ -15299,10 +15491,36 @@ pop_decl_scope (void)
VEC_pop (tree, decl_scope_table);
}
+/* walk_tree helper function for uses_local_type, below. */
+
+static tree
+uses_local_type_r (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
+{
+ if (!TYPE_P (*tp))
+ *walk_subtrees = 0;
+ else
+ {
+ tree name = TYPE_NAME (*tp);
+ if (name && DECL_P (name) && decl_function_context (name))
+ return *tp;
+ }
+ return NULL_TREE;
+}
+
+/* If TYPE involves a function-local type (including a local typedef to a
+ non-local type), returns that type; otherwise returns NULL_TREE. */
+
+static tree
+uses_local_type (tree type)
+{
+ tree used = walk_tree_without_duplicates (&type, uses_local_type_r, NULL);
+ return used;
+}
+
/* Return the DIE for the scope that immediately contains this type.
- Non-named types get global scope. Named types nested in other
- types get their containing scope if it's open, or global scope
- otherwise. All other types (i.e. function-local named types) get
+ Non-named types that do not involve a function-local type get global
+ scope. Named types nested in namespaces or other types get their
+ containing scope. All other types (i.e. function-local named types) get
the current active scope. */
static dw_die_ref
@@ -15310,18 +15528,24 @@ scope_die_for (tree t, dw_die_ref context_die)
{
dw_die_ref scope_die = NULL;
tree containing_scope;
- int i;
/* Non-types always go in the current scope. */
gcc_assert (TYPE_P (t));
- containing_scope = TYPE_CONTEXT (t);
+ /* Use the scope of the typedef, rather than the scope of the type
+ it refers to. */
+ if (TYPE_NAME (t) && DECL_P (TYPE_NAME (t)))
+ containing_scope = DECL_CONTEXT (TYPE_NAME (t));
+ else
+ containing_scope = TYPE_CONTEXT (t);
- /* Use the containing namespace if it was passed in (for a declaration). */
+ /* Use the containing namespace if there is one. */
if (containing_scope && TREE_CODE (containing_scope) == NAMESPACE_DECL)
{
if (context_die == lookup_decl_die (containing_scope))
/* OK */;
+ else if (debug_info_level > DINFO_LEVEL_TERSE)
+ context_die = get_context_die (containing_scope);
else
containing_scope = NULL_TREE;
}
@@ -15333,30 +15557,25 @@ scope_die_for (tree t, dw_die_ref context_die)
containing_scope = NULL_TREE;
if (SCOPE_FILE_SCOPE_P (containing_scope))
- scope_die = comp_unit_die ();
+ {
+ /* If T uses a local type keep it local as well, to avoid references
+ to function-local DIEs from outside the function. */
+ if (current_function_decl && uses_local_type (t))
+ scope_die = context_die;
+ else
+ scope_die = comp_unit_die ();
+ }
else if (TYPE_P (containing_scope))
{
- /* For types, we can just look up the appropriate DIE. But
- first we check to see if we're in the middle of emitting it
- so we know where the new DIE should go. */
- for (i = VEC_length (tree, decl_scope_table) - 1; i >= 0; --i)
- if (VEC_index (tree, decl_scope_table, i) == containing_scope)
- break;
-
- if (i < 0)
+ /* For types, we can just look up the appropriate DIE. */
+ if (debug_info_level > DINFO_LEVEL_TERSE)
+ scope_die = get_context_die (containing_scope);
+ else
{
- gcc_assert (debug_info_level <= DINFO_LEVEL_TERSE
- || TREE_ASM_WRITTEN (containing_scope));
- /*We are not in the middle of emitting the type
- CONTAINING_SCOPE. Let's see if it's emitted already. */
- scope_die = lookup_type_die (containing_scope);
-
- /* If none of the current dies are suitable, we get file scope. */
+ scope_die = lookup_type_die_strip_naming_typedef (containing_scope);
if (scope_die == NULL)
scope_die = comp_unit_die ();
}
- else
- scope_die = lookup_type_die_strip_naming_typedef (containing_scope);
}
else
scope_die = context_die;
@@ -17047,10 +17266,13 @@ gen_variable_die (tree decl, tree origin, dw_die_ref context_die)
&& loc->expr->dw_loc_next == NULL
&& GET_CODE (loc->expr->dw_loc_oprnd1.v.val_addr)
== SYMBOL_REF)
- loc->expr->dw_loc_oprnd1.v.val_addr
- = plus_constant (loc->expr->dw_loc_oprnd1.v.val_addr, off);
- else
- loc_list_plus_const (loc, off);
+ {
+ rtx x = loc->expr->dw_loc_oprnd1.v.val_addr;
+ loc->expr->dw_loc_oprnd1.v.val_addr
+ = plus_constant (GET_MODE (x), x , off);
+ }
+ else
+ loc_list_plus_const (loc, off);
}
add_AT_location_description (var_die, DW_AT_location, loc);
remove_AT (var_die, DW_AT_declaration);
@@ -17110,8 +17332,11 @@ gen_variable_die (tree decl, tree origin, dw_die_ref context_die)
&& loc->expr->dw_loc_opc == DW_OP_addr
&& loc->expr->dw_loc_next == NULL
&& GET_CODE (loc->expr->dw_loc_oprnd1.v.val_addr) == SYMBOL_REF)
- loc->expr->dw_loc_oprnd1.v.val_addr
- = plus_constant (loc->expr->dw_loc_oprnd1.v.val_addr, off);
+ {
+ rtx x = loc->expr->dw_loc_oprnd1.v.val_addr;
+ loc->expr->dw_loc_oprnd1.v.val_addr
+ = plus_constant (GET_MODE (x), x, off);
+ }
else
loc_list_plus_const (loc, off);
}
@@ -18152,12 +18377,8 @@ gen_type_die_with_usage (tree type, dw_die_ref context_die,
/* Prevent broken recursion; we can't hand off to the same type. */
gcc_assert (DECL_ORIGINAL_TYPE (TYPE_NAME (type)) != type);
- /* Use the DIE of the containing namespace as the parent DIE of
- the type description DIE we want to generate. */
- if (DECL_FILE_SCOPE_P (TYPE_NAME (type))
- || (DECL_CONTEXT (TYPE_NAME (type))
- && TREE_CODE (DECL_CONTEXT (TYPE_NAME (type))) == NAMESPACE_DECL))
- context_die = get_context_die (DECL_CONTEXT (TYPE_NAME (type)));
+ /* Give typedefs the right scope. */
+ context_die = scope_die_for (type, context_die);
TREE_ASM_WRITTEN (type) = 1;
@@ -19122,7 +19343,9 @@ dwarf2out_decl (tree decl)
return;
/* For local statics lookup proper context die. */
- if (TREE_STATIC (decl) && decl_function_context (decl))
+ if (TREE_STATIC (decl)
+ && DECL_CONTEXT (decl)
+ && TREE_CODE (DECL_CONTEXT (decl)) == FUNCTION_DECL)
context_die = lookup_decl_die (DECL_CONTEXT (decl));
/* If we are in terse mode, don't generate any DIEs to represent any
@@ -19835,7 +20058,7 @@ dwarf2out_source_line (unsigned int line, const char *filename,
static void
dwarf2out_start_source_file (unsigned int lineno, const char *filename)
{
- if (flag_eliminate_dwarf2_dups && ! use_debug_types)
+ if (flag_eliminate_dwarf2_dups)
{
/* Record the beginning of the file for break_out_includes. */
dw_die_ref bincl_die;
@@ -19859,7 +20082,7 @@ dwarf2out_start_source_file (unsigned int lineno, const char *filename)
static void
dwarf2out_end_source_file (unsigned int lineno ATTRIBUTE_UNUSED)
{
- if (flag_eliminate_dwarf2_dups && ! use_debug_types)
+ if (flag_eliminate_dwarf2_dups)
/* Record the end of the file for break_out_includes. */
new_die (DW_TAG_GNU_EINCL, comp_unit_die (), NULL);
@@ -20451,9 +20674,8 @@ prune_unused_types_walk_attribs (dw_die_ref die)
/* A reference to another DIE.
Make sure that it will get emitted.
If it was broken out into a comdat group, don't follow it. */
- if (! use_debug_types
- || a->dw_attr == DW_AT_specification
- || a->dw_attr_val.v.val_die_ref.die->die_id.die_type_node == NULL)
+ if (! AT_ref (a)->comdat_type_p
+ || a->dw_attr == DW_AT_specification)
prune_unused_types_mark (a->dw_attr_val.v.val_die_ref.die, 1);
}
/* Set the string's refcount to 0 so that prune_unused_types_mark
@@ -21778,16 +22000,15 @@ dwarf2out_finish (const char *filename)
inlined and optimized out. In that case we are lost and
assign the empty child. This should not be big issue as
the function is likely unreachable too. */
- tree context = NULL_TREE;
-
gcc_assert (node->created_for);
if (DECL_P (node->created_for))
- context = DECL_CONTEXT (node->created_for);
+ origin = get_context_die (DECL_CONTEXT (node->created_for));
else if (TYPE_P (node->created_for))
- context = TYPE_CONTEXT (node->created_for);
+ origin = scope_die_for (node->created_for, comp_unit_die ());
+ else
+ origin = comp_unit_die ();
- origin = get_context_die (context);
add_child_die (origin, die);
}
}
@@ -21823,11 +22044,6 @@ dwarf2out_finish (const char *filename)
if (flag_eliminate_unused_debug_types)
prune_unused_types ();
- /* Generate separate CUs for each of the include files we've seen.
- They will go into limbo_die_list. */
- if (flag_eliminate_dwarf2_dups && ! use_debug_types)
- break_out_includes (comp_unit_die ());
-
/* Generate separate COMDAT sections for type DIEs. */
if (use_debug_types)
{
@@ -21851,6 +22067,11 @@ dwarf2out_finish (const char *filename)
prune_unused_types ();
}
+ /* Generate separate CUs for each of the include files we've seen.
+ They will go into limbo_die_list. */
+ if (flag_eliminate_dwarf2_dups)
+ break_out_includes (comp_unit_die ());
+
/* Traverse the DIE's and add add sibling attributes to those DIE's
that have children. */
add_sibling_attributes (comp_unit_die ());
@@ -21895,6 +22116,8 @@ dwarf2out_finish (const char *filename)
FOR_EACH_VEC_ELT (dw_fde_ref, fde_vec, fde_idx, fde)
{
+ if (DECL_IGNORED_P (fde->decl))
+ continue;
if (!fde->in_std_section)
add_ranges_by_labels (comp_unit_die (), fde->dw_fde_begin,
fde->dw_fde_end, &range_list_added);
diff --git a/gcc/dwarf2out.h b/gcc/dwarf2out.h
index 711e8ab0d5e..1bc83aa9ade 100644
--- a/gcc/dwarf2out.h
+++ b/gcc/dwarf2out.h
@@ -228,7 +228,6 @@ extern struct dw_loc_descr_struct *mem_loc_descriptor
(rtx, enum machine_mode mode, enum machine_mode mem_mode,
enum var_init_status);
extern bool loc_descr_equal_p (dw_loc_descr_ref, dw_loc_descr_ref);
-extern enum machine_mode get_address_mode (rtx mem);
extern dw_fde_ref dwarf2out_alloc_current_fde (void);
extern unsigned long size_of_locs (dw_loc_descr_ref);
diff --git a/gcc/emit-rtl.c b/gcc/emit-rtl.c
index 9da585c35a7..8a9b8b26882 100644
--- a/gcc/emit-rtl.c
+++ b/gcc/emit-rtl.c
@@ -2092,7 +2092,7 @@ adjust_address_1 (rtx memref, enum machine_mode mode, HOST_WIDE_INT offset,
/* Convert a possibly large offset to a signed value within the
range of the target address space. */
- address_mode = targetm.addr_space.address_mode (attrs.addrspace);
+ address_mode = get_address_mode (memref);
pbits = GET_MODE_BITSIZE (address_mode);
if (HOST_BITS_PER_WIDE_INT > pbits)
{
@@ -2110,9 +2110,10 @@ adjust_address_1 (rtx memref, enum machine_mode mode, HOST_WIDE_INT offset,
&& (unsigned HOST_WIDE_INT) offset
< GET_MODE_ALIGNMENT (GET_MODE (memref)) / BITS_PER_UNIT)
addr = gen_rtx_LO_SUM (address_mode, XEXP (addr, 0),
- plus_constant (XEXP (addr, 1), offset));
+ plus_constant (address_mode,
+ XEXP (addr, 1), offset));
else
- addr = plus_constant (addr, offset);
+ addr = plus_constant (address_mode, addr, offset);
}
new_rtx = change_address_1 (memref, mode, addr, validate);
@@ -2178,7 +2179,7 @@ offset_address (rtx memref, rtx offset, unsigned HOST_WIDE_INT pow2)
struct mem_attrs attrs, *defattrs;
attrs = *get_mem_attrs (memref);
- address_mode = targetm.addr_space.address_mode (attrs.addrspace);
+ address_mode = get_address_mode (memref);
new_rtx = simplify_gen_binary (PLUS, address_mode, addr, offset);
/* At this point we don't know _why_ the address is invalid. It
diff --git a/gcc/except.c b/gcc/except.c
index 254dd8c32ae..158ca20745a 100644
--- a/gcc/except.c
+++ b/gcc/except.c
@@ -1155,7 +1155,7 @@ sjlj_emit_function_enter (rtx dispatch_label)
rtx x, last;
x = emit_library_call_value (setjmp_libfunc, NULL_RTX, LCT_RETURNS_TWICE,
TYPE_MODE (integer_type_node), 1,
- plus_constant (XEXP (fc, 0),
+ plus_constant (Pmode, XEXP (fc, 0),
sjlj_fc_jbuf_ofs), Pmode);
emit_cmp_and_jump_insns (x, const0_rtx, NE, 0,
@@ -1168,7 +1168,7 @@ sjlj_emit_function_enter (rtx dispatch_label)
add_reg_note (last, REG_BR_PROB, GEN_INT (REG_BR_PROB_BASE / 100));
}
#else
- expand_builtin_setjmp_setup (plus_constant (XEXP (fc, 0),
+ expand_builtin_setjmp_setup (plus_constant (Pmode, XEXP (fc, 0),
sjlj_fc_jbuf_ofs),
dispatch_label);
#endif
@@ -2094,7 +2094,7 @@ expand_builtin_extract_return_addr (tree addr_tree)
/* Then adjust to find the real return address. */
#if defined (RETURN_ADDR_OFFSET)
- addr = plus_constant (addr, RETURN_ADDR_OFFSET);
+ addr = plus_constant (Pmode, addr, RETURN_ADDR_OFFSET);
#endif
return addr;
@@ -2113,7 +2113,7 @@ expand_builtin_frob_return_addr (tree addr_tree)
#ifdef RETURN_ADDR_OFFSET
addr = force_reg (Pmode, addr);
- addr = plus_constant (addr, -RETURN_ADDR_OFFSET);
+ addr = plus_constant (Pmode, addr, -RETURN_ADDR_OFFSET);
#endif
return addr;
diff --git a/gcc/explow.c b/gcc/explow.c
index ff26dbf605c..5513a123e3d 100644
--- a/gcc/explow.c
+++ b/gcc/explow.c
@@ -75,22 +75,18 @@ trunc_int_for_mode (HOST_WIDE_INT c, enum machine_mode mode)
}
/* Return an rtx for the sum of X and the integer C, given that X has
- mode MODE. This routine should be used instead of plus_constant
- when they want to ensure that addition happens in a particular
- mode, which is necessary when X can be a VOIDmode CONST_INT or
- CONST_DOUBLE and the width of the constant is different from the
- width of the expression. */
-/* TODO: All callers of plus_constant should migrate to this routine,
- and once they do, we can assert that mode is not VOIDmode. */
+ mode MODE. */
rtx
-plus_constant_mode (enum machine_mode mode, rtx x, HOST_WIDE_INT c)
+plus_constant (enum machine_mode mode, rtx x, HOST_WIDE_INT c)
{
RTX_CODE code;
rtx y;
rtx tem;
int all_constant = 0;
+ gcc_assert (GET_MODE (x) == VOIDmode || GET_MODE (x) == mode);
+
if (c == 0)
return x;
@@ -143,7 +139,7 @@ plus_constant_mode (enum machine_mode mode, rtx x, HOST_WIDE_INT c)
if (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
&& CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)))
{
- tem = plus_constant_mode (mode, get_pool_constant (XEXP (x, 0)), c);
+ tem = plus_constant (mode, get_pool_constant (XEXP (x, 0)), c);
tem = force_const_mem (GET_MODE (x), tem);
if (memory_address_p (GET_MODE (tem), XEXP (tem, 0)))
return tem;
@@ -173,7 +169,8 @@ plus_constant_mode (enum machine_mode mode, rtx x, HOST_WIDE_INT c)
if (CONSTANT_P (XEXP (x, 1)))
{
- x = gen_rtx_PLUS (mode, XEXP (x, 0), plus_constant_mode (mode, XEXP (x, 1), c));
+ x = gen_rtx_PLUS (mode, XEXP (x, 0),
+ plus_constant (mode, XEXP (x, 1), c));
c = 0;
}
else if (find_constant_term_loc (&y))
@@ -183,7 +180,7 @@ plus_constant_mode (enum machine_mode mode, rtx x, HOST_WIDE_INT c)
rtx copy = copy_rtx (x);
rtx *const_loc = find_constant_term_loc (&copy);
- *const_loc = plus_constant_mode (mode, *const_loc, c);
+ *const_loc = plus_constant (mode, *const_loc, c);
x = copy;
c = 0;
}
@@ -203,14 +200,6 @@ plus_constant_mode (enum machine_mode mode, rtx x, HOST_WIDE_INT c)
else
return x;
}
-
-/* Return an rtx for the sum of X and the integer C. */
-
-rtx
-plus_constant (rtx x, HOST_WIDE_INT c)
-{
- return plus_constant_mode (GET_MODE (x), x, c);
-}
/* If X is a sum, return a new sum like X but lacking any constant terms.
Add all the removed constant terms into *CONSTPTR.
@@ -567,6 +556,7 @@ use_anchored_address (rtx x)
{
rtx base;
HOST_WIDE_INT offset;
+ enum machine_mode mode;
if (!flag_section_anchors)
return x;
@@ -607,10 +597,11 @@ use_anchored_address (rtx x)
/* If we're going to run a CSE pass, force the anchor into a register.
We will then be able to reuse registers for several accesses, if the
target costs say that that's worthwhile. */
+ mode = GET_MODE (base);
if (!cse_not_expected)
- base = force_reg (GET_MODE (base), base);
+ base = force_reg (mode, base);
- return replace_equiv_address (x, plus_constant (base, offset));
+ return replace_equiv_address (x, plus_constant (mode, base, offset));
}
/* Copy the value or contents of X to a new temp reg and return that reg. */
@@ -995,7 +986,8 @@ round_push (rtx size)
substituted by the right value in vregs pass and optimized
during combine. */
align_rtx = virtual_preferred_stack_boundary_rtx;
- alignm1_rtx = force_operand (plus_constant (align_rtx, -1), NULL_RTX);
+ alignm1_rtx = force_operand (plus_constant (Pmode, align_rtx, -1),
+ NULL_RTX);
}
/* CEIL_DIV_EXPR needs to worry about the addition overflowing,
@@ -1285,7 +1277,7 @@ allocate_dynamic_stack_space (rtx size, unsigned size_align,
{
unsigned extra = (required_align - extra_align) / BITS_PER_UNIT;
- size = plus_constant (size, extra);
+ size = plus_constant (Pmode, size, extra);
size = force_operand (size, NULL_RTX);
if (flag_stack_usage_info)
@@ -1533,17 +1525,24 @@ set_stack_check_libfunc (const char *libfunc_name)
void
emit_stack_probe (rtx address)
{
- rtx memref = gen_rtx_MEM (word_mode, address);
+#ifdef HAVE_probe_stack_address
+ if (HAVE_probe_stack_address)
+ emit_insn (gen_probe_stack_address (address));
+ else
+#endif
+ {
+ rtx memref = gen_rtx_MEM (word_mode, address);
- MEM_VOLATILE_P (memref) = 1;
+ MEM_VOLATILE_P (memref) = 1;
- /* See if we have an insn to probe the stack. */
+ /* See if we have an insn to probe the stack. */
#ifdef HAVE_probe_stack
- if (HAVE_probe_stack)
- emit_insn (gen_probe_stack (memref));
- else
+ if (HAVE_probe_stack)
+ emit_insn (gen_probe_stack (memref));
+ else
#endif
- emit_move_insn (memref, const0_rtx);
+ emit_move_insn (memref, const0_rtx);
+ }
}
/* Probe a range of stack addresses from FIRST to FIRST+SIZE, inclusive.
@@ -1576,7 +1575,8 @@ probe_stack_range (HOST_WIDE_INT first, rtx size)
rtx addr = memory_address (Pmode,
gen_rtx_fmt_ee (STACK_GROW_OP, Pmode,
stack_pointer_rtx,
- plus_constant (size, first)));
+ plus_constant (Pmode,
+ size, first)));
emit_library_call (stack_check_libfunc, LCT_NORMAL, VOIDmode, 1, addr,
Pmode);
return;
@@ -1590,7 +1590,8 @@ probe_stack_range (HOST_WIDE_INT first, rtx size)
rtx addr = memory_address (Pmode,
gen_rtx_fmt_ee (STACK_GROW_OP, Pmode,
stack_pointer_rtx,
- plus_constant (size, first)));
+ plus_constant (Pmode,
+ size, first)));
create_input_operand (&ops[0], addr, Pmode);
if (maybe_expand_insn (CODE_FOR_check_stack, 1, ops))
@@ -1611,13 +1612,13 @@ probe_stack_range (HOST_WIDE_INT first, rtx size)
for (i = PROBE_INTERVAL; i < isize; i += PROBE_INTERVAL)
{
addr = memory_address (Pmode,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode, stack_pointer_rtx,
STACK_GROW_OFF (first + i)));
emit_stack_probe (addr);
}
addr = memory_address (Pmode,
- plus_constant (stack_pointer_rtx,
+ plus_constant (Pmode, stack_pointer_rtx,
STACK_GROW_OFF (first + isize)));
emit_stack_probe (addr);
}
@@ -1701,7 +1702,7 @@ probe_stack_range (HOST_WIDE_INT first, rtx size)
/* Use [base + disp} addressing mode if supported. */
HOST_WIDE_INT offset = INTVAL (temp);
addr = memory_address (Pmode,
- plus_constant (last_addr,
+ plus_constant (Pmode, last_addr,
STACK_GROW_OFF (offset)));
}
else
@@ -1759,9 +1760,9 @@ anti_adjust_stack_and_probe (rtx size, bool adjust_back)
}
if (first_probe)
- anti_adjust_stack (plus_constant (size, PROBE_INTERVAL + dope));
+ anti_adjust_stack (plus_constant (Pmode, size, PROBE_INTERVAL + dope));
else
- anti_adjust_stack (plus_constant (size, PROBE_INTERVAL - i));
+ anti_adjust_stack (plus_constant (Pmode, size, PROBE_INTERVAL - i));
emit_stack_probe (stack_pointer_rtx);
}
@@ -1839,7 +1840,7 @@ anti_adjust_stack_and_probe (rtx size, bool adjust_back)
/* Adjust back and account for the additional first interval. */
if (adjust_back)
- adjust_stack (plus_constant (size, PROBE_INTERVAL + dope));
+ adjust_stack (plus_constant (Pmode, size, PROBE_INTERVAL + dope));
else
adjust_stack (GEN_INT (PROBE_INTERVAL + dope));
}
diff --git a/gcc/expmed.c b/gcc/expmed.c
index a0a0960bfa2..45b150efc3e 100644
--- a/gcc/expmed.c
+++ b/gcc/expmed.c
@@ -4786,7 +4786,7 @@ expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
remainder = expand_binop (compute_mode, sub_optab, op0, tem,
remainder, 1, OPTAB_LIB_WIDEN);
}
- tem = plus_constant (op1, -1);
+ tem = plus_constant (compute_mode, op1, -1);
tem = expand_shift (RSHIFT_EXPR, compute_mode, tem, 1, NULL_RTX, 1);
do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
expand_inc (quotient, const1_rtx);
diff --git a/gcc/expr.c b/gcc/expr.c
index 3e8e004063b..3edb4a27d9e 100644
--- a/gcc/expr.c
+++ b/gcc/expr.c
@@ -867,8 +867,8 @@ move_by_pieces (rtx to, rtx from, unsigned HOST_WIDE_INT len,
unsigned int align, int endp)
{
struct move_by_pieces_d data;
- enum machine_mode to_addr_mode, from_addr_mode
- = targetm.addr_space.address_mode (MEM_ADDR_SPACE (from));
+ enum machine_mode to_addr_mode;
+ enum machine_mode from_addr_mode = get_address_mode (from);
rtx to_addr, from_addr = XEXP (from, 0);
unsigned int max_size = MOVE_MAX_PIECES + 1;
enum insn_code icode;
@@ -879,7 +879,7 @@ move_by_pieces (rtx to, rtx from, unsigned HOST_WIDE_INT len,
data.from_addr = from_addr;
if (to)
{
- to_addr_mode = targetm.addr_space.address_mode (MEM_ADDR_SPACE (to));
+ to_addr_mode = get_address_mode (to);
to_addr = XEXP (to, 0);
data.to = to;
data.autinc_to
@@ -927,7 +927,8 @@ move_by_pieces (rtx to, rtx from, unsigned HOST_WIDE_INT len,
if (USE_LOAD_PRE_DECREMENT (mode) && data.reverse && ! data.autinc_from)
{
data.from_addr = copy_to_mode_reg (from_addr_mode,
- plus_constant (from_addr, len));
+ plus_constant (from_addr_mode,
+ from_addr, len));
data.autinc_from = 1;
data.explicit_inc_from = -1;
}
@@ -942,7 +943,8 @@ move_by_pieces (rtx to, rtx from, unsigned HOST_WIDE_INT len,
if (USE_STORE_PRE_DECREMENT (mode) && data.reverse && ! data.autinc_to)
{
data.to_addr = copy_to_mode_reg (to_addr_mode,
- plus_constant (to_addr, len));
+ plus_constant (to_addr_mode,
+ to_addr, len));
data.autinc_to = 1;
data.explicit_inc_to = -1;
}
@@ -991,7 +993,8 @@ move_by_pieces (rtx to, rtx from, unsigned HOST_WIDE_INT len,
emit_insn (gen_add2_insn (data.to_addr, constm1_rtx));
else
data.to_addr = copy_to_mode_reg (to_addr_mode,
- plus_constant (data.to_addr,
+ plus_constant (to_addr_mode,
+ data.to_addr,
-1));
}
to1 = adjust_automodify_address (data.to, QImode, data.to_addr,
@@ -1431,10 +1434,8 @@ emit_block_move_via_loop (rtx x, rtx y, rtx size,
unsigned int align ATTRIBUTE_UNUSED)
{
rtx cmp_label, top_label, iter, x_addr, y_addr, tmp;
- enum machine_mode x_addr_mode
- = targetm.addr_space.address_mode (MEM_ADDR_SPACE (x));
- enum machine_mode y_addr_mode
- = targetm.addr_space.address_mode (MEM_ADDR_SPACE (y));
+ enum machine_mode x_addr_mode = get_address_mode (x);
+ enum machine_mode y_addr_mode = get_address_mode (y);
enum machine_mode iter_mode;
iter_mode = GET_MODE (size);
@@ -2346,6 +2347,26 @@ get_def_for_expr (tree name, enum tree_code code)
return def_stmt;
}
+
+/* Return the defining gimple statement for SSA_NAME NAME if it is an
+ assigment and the class of the expresion on the RHS is CLASS. Return
+ NULL otherwise. */
+
+static gimple
+get_def_for_expr_class (tree name, enum tree_code_class tclass)
+{
+ gimple def_stmt;
+
+ if (TREE_CODE (name) != SSA_NAME)
+ return NULL;
+
+ def_stmt = get_gimple_for_ssa_name (name);
+ if (!def_stmt
+ || TREE_CODE_CLASS (gimple_assign_rhs_code (def_stmt)) != tclass)
+ return NULL;
+
+ return def_stmt;
+}
/* Determine whether the LEN bytes generated by CONSTFUN can be
@@ -2441,8 +2462,7 @@ store_by_pieces (rtx to, unsigned HOST_WIDE_INT len,
rtx (*constfun) (void *, HOST_WIDE_INT, enum machine_mode),
void *constfundata, unsigned int align, bool memsetp, int endp)
{
- enum machine_mode to_addr_mode
- = targetm.addr_space.address_mode (MEM_ADDR_SPACE (to));
+ enum machine_mode to_addr_mode = get_address_mode (to);
struct store_by_pieces_d data;
if (len == 0)
@@ -2472,7 +2492,8 @@ store_by_pieces (rtx to, unsigned HOST_WIDE_INT len,
emit_insn (gen_add2_insn (data.to_addr, constm1_rtx));
else
data.to_addr = copy_to_mode_reg (to_addr_mode,
- plus_constant (data.to_addr,
+ plus_constant (to_addr_mode,
+ data.to_addr,
-1));
}
to1 = adjust_automodify_address (data.to, QImode, data.to_addr,
@@ -2527,8 +2548,7 @@ static void
store_by_pieces_1 (struct store_by_pieces_d *data ATTRIBUTE_UNUSED,
unsigned int align ATTRIBUTE_UNUSED)
{
- enum machine_mode to_addr_mode
- = targetm.addr_space.address_mode (MEM_ADDR_SPACE (data->to));
+ enum machine_mode to_addr_mode = get_address_mode (data->to);
rtx to_addr = XEXP (data->to, 0);
unsigned int max_size = STORE_MAX_PIECES + 1;
enum insn_code icode;
@@ -2560,7 +2580,9 @@ store_by_pieces_1 (struct store_by_pieces_d *data ATTRIBUTE_UNUSED,
if (USE_STORE_PRE_DECREMENT (mode) && data->reverse && ! data->autinc_to)
{
data->to_addr = copy_to_mode_reg (to_addr_mode,
- plus_constant (to_addr, data->len));
+ plus_constant (to_addr_mode,
+ to_addr,
+ data->len));
data->autinc_to = 1;
data->explicit_inc_to = -1;
}
@@ -3095,7 +3117,7 @@ emit_move_resolve_push (enum machine_mode mode, rtx x)
case POST_INC:
case POST_DEC:
case POST_MODIFY:
- temp = plus_constant (stack_pointer_rtx, -adjust);
+ temp = plus_constant (Pmode, stack_pointer_rtx, -adjust);
break;
default:
gcc_unreachable ();
@@ -3575,7 +3597,7 @@ push_block (rtx size, int extra, int below)
size = convert_modes (Pmode, ptr_mode, size, 1);
if (CONSTANT_P (size))
- anti_adjust_stack (plus_constant (size, extra));
+ anti_adjust_stack (plus_constant (Pmode, size, extra));
else if (REG_P (size) && extra == 0)
anti_adjust_stack (size);
else
@@ -3595,16 +3617,17 @@ push_block (rtx size, int extra, int below)
{
temp = virtual_outgoing_args_rtx;
if (extra != 0 && below)
- temp = plus_constant (temp, extra);
+ temp = plus_constant (Pmode, temp, extra);
}
else
{
if (CONST_INT_P (size))
- temp = plus_constant (virtual_outgoing_args_rtx,
+ temp = plus_constant (Pmode, virtual_outgoing_args_rtx,
-INTVAL (size) - (below ? 0 : extra));
else if (extra != 0 && !below)
temp = gen_rtx_PLUS (Pmode, virtual_outgoing_args_rtx,
- negate_rtx (Pmode, plus_constant (size, extra)));
+ negate_rtx (Pmode, plus_constant (Pmode, size,
+ extra)));
else
temp = gen_rtx_PLUS (Pmode, virtual_outgoing_args_rtx,
negate_rtx (Pmode, size));
@@ -4077,11 +4100,12 @@ emit_push_insn (rtx x, enum machine_mode mode, tree type, rtx size,
}
else if (CONST_INT_P (args_so_far))
temp = memory_address (BLKmode,
- plus_constant (args_addr,
+ plus_constant (Pmode, args_addr,
skip + INTVAL (args_so_far)));
else
temp = memory_address (BLKmode,
- plus_constant (gen_rtx_PLUS (Pmode,
+ plus_constant (Pmode,
+ gen_rtx_PLUS (Pmode,
args_addr,
args_so_far),
skip));
@@ -4194,7 +4218,7 @@ emit_push_insn (rtx x, enum machine_mode mode, tree type, rtx size,
if (CONST_INT_P (args_so_far))
addr
= memory_address (mode,
- plus_constant (args_addr,
+ plus_constant (Pmode, args_addr,
INTVAL (args_so_far)));
else
addr = memory_address (mode, gen_rtx_PLUS (Pmode, args_addr,
@@ -4679,8 +4703,7 @@ expand_assignment (tree to, tree from, bool nontemporal)
}
offset_rtx = expand_expr (offset, NULL_RTX, VOIDmode, EXPAND_SUM);
- address_mode
- = targetm.addr_space.address_mode (MEM_ADDR_SPACE (to_rtx));
+ address_mode = get_address_mode (to_rtx);
if (GET_MODE (offset_rtx) != address_mode)
offset_rtx = convert_to_mode (address_mode, offset_rtx, 0);
@@ -5219,8 +5242,7 @@ store_expr (tree exp, rtx target, int call_param_p, bool nontemporal)
{
enum machine_mode pointer_mode
= targetm.addr_space.pointer_mode (MEM_ADDR_SPACE (target));
- enum machine_mode address_mode
- = targetm.addr_space.address_mode (MEM_ADDR_SPACE (target));
+ enum machine_mode address_mode = get_address_mode (target);
/* Compute the size of the data to copy from the string. */
tree copy_size
@@ -5244,7 +5266,8 @@ store_expr (tree exp, rtx target, int call_param_p, bool nontemporal)
Do all calculations in pointer_mode. */
if (CONST_INT_P (copy_size_rtx))
{
- size = plus_constant (size, -INTVAL (copy_size_rtx));
+ size = plus_constant (address_mode, size,
+ -INTVAL (copy_size_rtx));
target = adjust_address (target, BLKmode,
INTVAL (copy_size_rtx));
}
@@ -5787,8 +5810,7 @@ store_constructor (tree exp, rtx target, int cleared, HOST_WIDE_INT size)
offset_rtx = expand_normal (offset);
gcc_assert (MEM_P (to_rtx));
- address_mode
- = targetm.addr_space.address_mode (MEM_ADDR_SPACE (to_rtx));
+ address_mode = get_address_mode (to_rtx);
if (GET_MODE (offset_rtx) != address_mode)
offset_rtx = convert_to_mode (address_mode, offset_rtx, 0);
@@ -7344,6 +7366,64 @@ highest_pow2_factor_for_target (const_tree target, const_tree exp)
return MAX (factor, talign);
}
+/* Convert the tree comparision code TCODE to the rtl one where the
+ signedness is UNSIGNEDP. */
+
+static enum rtx_code
+convert_tree_comp_to_rtx (enum tree_code tcode, int unsignedp)
+{
+ enum rtx_code code;
+ switch (tcode)
+ {
+ case EQ_EXPR:
+ code = EQ;
+ break;
+ case NE_EXPR:
+ code = NE;
+ break;
+ case LT_EXPR:
+ code = unsignedp ? LTU : LT;
+ break;
+ case LE_EXPR:
+ code = unsignedp ? LEU : LE;
+ break;
+ case GT_EXPR:
+ code = unsignedp ? GTU : GT;
+ break;
+ case GE_EXPR:
+ code = unsignedp ? GEU : GE;
+ break;
+ case UNORDERED_EXPR:
+ code = UNORDERED;
+ break;
+ case ORDERED_EXPR:
+ code = ORDERED;
+ break;
+ case UNLT_EXPR:
+ code = UNLT;
+ break;
+ case UNLE_EXPR:
+ code = UNLE;
+ break;
+ case UNGT_EXPR:
+ code = UNGT;
+ break;
+ case UNGE_EXPR:
+ code = UNGE;
+ break;
+ case UNEQ_EXPR:
+ code = UNEQ;
+ break;
+ case LTGT_EXPR:
+ code = LTGT;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ return code;
+}
+
/* Subroutine of expand_expr. Expand the two operands of a binary
expression EXP0 and EXP1 placing the results in OP0 and OP1.
The value may be stored in TARGET if TARGET is nonzero. The
@@ -7550,7 +7630,7 @@ expand_expr_addr_expr_1 (tree exp, rtx target, enum machine_mode tmode,
of such an object. */
gcc_assert ((bitpos % BITS_PER_UNIT) == 0);
- result = plus_constant (result, bitpos / BITS_PER_UNIT);
+ result = plus_constant (tmode, result, bitpos / BITS_PER_UNIT);
if (modifier < EXPAND_SUM)
result = force_operand (result, target);
}
@@ -7782,6 +7862,99 @@ expand_expr_real (tree exp, rtx target, enum machine_mode tmode,
return ret;
}
+/* Try to expand the conditional expression which is represented by
+   TREEOP0 ? TREEOP1 : TREEOP2 using conditional moves.  If it succeeds
+   return the rtl reg which represents the result.  Otherwise return
+   NULL_RTX.  */
+
+static rtx
+expand_cond_expr_using_cmove (tree treeop0 ATTRIBUTE_UNUSED,
+ tree treeop1 ATTRIBUTE_UNUSED,
+ tree treeop2 ATTRIBUTE_UNUSED)
+{
+#ifdef HAVE_conditional_move
+ rtx insn;
+ rtx op00, op01, op1, op2;
+ enum rtx_code comparison_code;
+ enum machine_mode comparison_mode;
+ gimple srcstmt;
+ rtx temp;
+ tree type = TREE_TYPE (treeop1);
+ int unsignedp = TYPE_UNSIGNED (type);
+ enum machine_mode mode = TYPE_MODE (type);
+
+ temp = assign_temp (type, 0, 0, 1);
+
+ /* If we cannot do a conditional move on the mode, try doing it
+ with the promoted mode. */
+ if (!can_conditionally_move_p (mode))
+ mode = promote_mode (type, mode, &unsignedp);
+
+ if (!can_conditionally_move_p (mode))
+ return NULL_RTX;
+
+ start_sequence ();
+ expand_operands (treeop1, treeop2,
+ temp, &op1, &op2, EXPAND_NORMAL);
+
+ if (TREE_CODE (treeop0) == SSA_NAME
+ && (srcstmt = get_def_for_expr_class (treeop0, tcc_comparison)))
+ {
+ tree type = TREE_TYPE (gimple_assign_rhs1 (srcstmt));
+ enum tree_code cmpcode = gimple_assign_rhs_code (srcstmt);
+ op00 = expand_normal (gimple_assign_rhs1 (srcstmt));
+ op01 = expand_normal (gimple_assign_rhs2 (srcstmt));
+ comparison_mode = TYPE_MODE (type);
+ unsignedp = TYPE_UNSIGNED (type);
+ comparison_code = convert_tree_comp_to_rtx (cmpcode, unsignedp);
+ }
+ else if (TREE_CODE_CLASS (TREE_CODE (treeop0)) == tcc_comparison)
+ {
+ tree type = TREE_TYPE (TREE_OPERAND (treeop0, 0));
+ enum tree_code cmpcode = TREE_CODE (treeop0);
+ op00 = expand_normal (TREE_OPERAND (treeop0, 0));
+ op01 = expand_normal (TREE_OPERAND (treeop0, 1));
+ unsignedp = TYPE_UNSIGNED (type);
+ comparison_mode = TYPE_MODE (type);
+ comparison_code = convert_tree_comp_to_rtx (cmpcode, unsignedp);
+ }
+ else
+ {
+ op00 = expand_normal (treeop0);
+ op01 = const0_rtx;
+ comparison_code = NE;
+ comparison_mode = TYPE_MODE (TREE_TYPE (treeop0));
+ }
+
+ if (GET_MODE (op1) != mode)
+ op1 = gen_lowpart (mode, op1);
+
+ if (GET_MODE (op2) != mode)
+ op2 = gen_lowpart (mode, op2);
+
+ /* Try to emit the conditional move. */
+ insn = emit_conditional_move (temp, comparison_code,
+ op00, op01, comparison_mode,
+ op1, op2, mode,
+ unsignedp);
+
+ /* If we could do the conditional move, emit the sequence,
+ and return. */
+ if (insn)
+ {
+ rtx seq = get_insns ();
+ end_sequence ();
+ emit_insn (seq);
+ return temp;
+ }
+
+ /* Otherwise discard the sequence and fall back to code with
+ branches. */
+ end_sequence ();
+#endif
+ return NULL_RTX;
+}
+
rtx
expand_expr_real_2 (sepops ops, rtx target, enum machine_mode tmode,
enum expand_modifier modifier)
@@ -8052,7 +8225,7 @@ expand_expr_real_2 (sepops ops, rtx target, enum machine_mode tmode,
= immed_double_const (TREE_INT_CST_LOW (treeop0),
(HOST_WIDE_INT) 0,
TYPE_MODE (TREE_TYPE (treeop1)));
- op1 = plus_constant (op1, INTVAL (constant_part));
+ op1 = plus_constant (mode, op1, INTVAL (constant_part));
if (modifier != EXPAND_SUM && modifier != EXPAND_INITIALIZER)
op1 = force_operand (op1, target);
return REDUCE_BIT_FIELD (op1);
@@ -8085,7 +8258,7 @@ expand_expr_real_2 (sepops ops, rtx target, enum machine_mode tmode,
= immed_double_const (TREE_INT_CST_LOW (treeop1),
(HOST_WIDE_INT) 0,
TYPE_MODE (TREE_TYPE (treeop0)));
- op0 = plus_constant (op0, INTVAL (constant_part));
+ op0 = plus_constant (mode, op0, INTVAL (constant_part));
if (modifier != EXPAND_SUM && modifier != EXPAND_INITIALIZER)
op0 = force_operand (op0, target);
return REDUCE_BIT_FIELD (op0);
@@ -8147,7 +8320,8 @@ expand_expr_real_2 (sepops ops, rtx target, enum machine_mode tmode,
/* If the last operand is a CONST_INT, use plus_constant of
the negated constant. Else make the MINUS. */
if (CONST_INT_P (op1))
- return REDUCE_BIT_FIELD (plus_constant (op0, - INTVAL (op1)));
+ return REDUCE_BIT_FIELD (plus_constant (mode, op0,
+ -INTVAL (op1)));
else
return REDUCE_BIT_FIELD (gen_rtx_MINUS (mode, op0, op1));
}
@@ -8841,6 +9015,10 @@ expand_expr_real_2 (sepops ops, rtx target, enum machine_mode tmode,
&& TREE_TYPE (treeop1) != void_type_node
&& TREE_TYPE (treeop2) != void_type_node);
+ temp = expand_cond_expr_using_cmove (treeop0, treeop1, treeop2);
+ if (temp)
+ return temp;
+
/* If we are not to produce a result, we have no target. Otherwise,
if a target was specified use it; it will not be used as an
intermediate target unless it is safe. If no target, use a
@@ -8850,10 +9028,6 @@ expand_expr_real_2 (sepops ops, rtx target, enum machine_mode tmode,
&& original_target
&& safe_from_p (original_target, treeop0, 1)
&& GET_MODE (original_target) == mode
-#ifdef HAVE_conditional_move
- && (! can_conditionally_move_p (mode)
- || REG_P (original_target))
-#endif
&& !MEM_P (original_target))
temp = original_target;
else
@@ -9756,8 +9930,7 @@ expand_expr_real_1 (tree exp, rtx target, enum machine_mode tmode,
gcc_assert (MEM_P (op0));
- address_mode
- = targetm.addr_space.address_mode (MEM_ADDR_SPACE (op0));
+ address_mode = get_address_mode (op0);
if (GET_MODE (offset_rtx) != address_mode)
offset_rtx = convert_to_mode (address_mode, offset_rtx, 0);
diff --git a/gcc/flags.h b/gcc/flags.h
index 9791e1b2344..48c20e51bd8 100644
--- a/gcc/flags.h
+++ b/gcc/flags.h
@@ -53,9 +53,6 @@ extern void set_Wstrict_aliasing (struct gcc_options *opts, int onoff);
extern bool final_insns_dump_p;
-/* Nonzero means make permerror produce warnings instead of errors. */
-
-extern int flag_permissive;
/* Other basic status info about current function. */
diff --git a/gcc/fold-const.c b/gcc/fold-const.c
index 41081ff919c..c68db739ae9 100644
--- a/gcc/fold-const.c
+++ b/gcc/fold-const.c
@@ -191,9 +191,6 @@ div_if_zero_remainder (enum tree_code code, const_tree arg1, const_tree arg2)
does the correct thing for POINTER_PLUS_EXPR where we want
a signed division. */
uns = TYPE_UNSIGNED (TREE_TYPE (arg2));
- if (TREE_CODE (TREE_TYPE (arg2)) == INTEGER_TYPE
- && TYPE_IS_SIZETYPE (TREE_TYPE (arg2)))
- uns = false;
quo = double_int_divmod (tree_to_double_int (arg1),
tree_to_double_int (arg2),
@@ -935,15 +932,14 @@ int_binop_types_match_p (enum tree_code code, const_tree type1, const_tree type2
to produce a new constant. Return NULL_TREE if we don't know how
to evaluate CODE at compile-time. */
-tree
-int_const_binop (enum tree_code code, const_tree arg1, const_tree arg2)
+static tree
+int_const_binop_1 (enum tree_code code, const_tree arg1, const_tree arg2,
+ int overflowable)
{
double_int op1, op2, res, tmp;
tree t;
tree type = TREE_TYPE (arg1);
bool uns = TYPE_UNSIGNED (type);
- bool is_sizetype
- = (TREE_CODE (type) == INTEGER_TYPE && TYPE_IS_SIZETYPE (type));
bool overflow = false;
op1 = tree_to_double_int (arg1);
@@ -1078,13 +1074,19 @@ int_const_binop (enum tree_code code, const_tree arg1, const_tree arg2)
return NULL_TREE;
}
- t = force_fit_type_double (TREE_TYPE (arg1), res, 1,
- ((!uns || is_sizetype) && overflow)
+ t = force_fit_type_double (TREE_TYPE (arg1), res, overflowable,
+ (!uns && overflow)
| TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2));
return t;
}
+tree
+int_const_binop (enum tree_code code, const_tree arg1, const_tree arg2)
+{
+ return int_const_binop_1 (code, arg1, arg2, 1);
+}
+
/* Combine two constants ARG1 and ARG2 under operation CODE to produce a new
constant. We assume ARG1 and ARG2 have the same data type, or at least
are the same kind of constant and the same machine mode. Return zero if
@@ -1423,8 +1425,10 @@ size_binop_loc (location_t loc, enum tree_code code, tree arg0, tree arg1)
return arg1;
}
- /* Handle general case of two integer constants. */
- return int_const_binop (code, arg0, arg1);
+ /* Handle general case of two integer constants. For sizetype
+ constant calculations we always want to know about overflow,
+ even in the unsigned case. */
+ return int_const_binop_1 (code, arg0, arg1, -1);
}
return fold_build2_loc (loc, code, type, arg0, arg1);
@@ -5633,8 +5637,6 @@ extract_muldiv_1 (tree t, tree c, enum tree_code code, tree wide_type,
/* ... and has wrapping overflow, and its type is smaller
than ctype, then we cannot pass through as widening. */
&& ((TYPE_OVERFLOW_WRAPS (TREE_TYPE (op0))
- && ! (TREE_CODE (TREE_TYPE (op0)) == INTEGER_TYPE
- && TYPE_IS_SIZETYPE (TREE_TYPE (op0)))
&& (TYPE_PRECISION (ctype)
> TYPE_PRECISION (TREE_TYPE (op0))))
/* ... or this is a truncation (t is narrower than op0),
@@ -5812,7 +5814,6 @@ extract_muldiv_1 (tree t, tree c, enum tree_code code, tree wide_type,
the operation since it will change the result if the original
computation overflowed. */
if (TYPE_UNSIGNED (ctype)
- && ! (TREE_CODE (ctype) == INTEGER_TYPE && TYPE_IS_SIZETYPE (ctype))
&& ctype != type)
break;
@@ -5838,14 +5839,8 @@ extract_muldiv_1 (tree t, tree c, enum tree_code code, tree wide_type,
(C * 8) % 4 since we know that's zero. */
if ((code == TRUNC_MOD_EXPR || code == CEIL_MOD_EXPR
|| code == FLOOR_MOD_EXPR || code == ROUND_MOD_EXPR)
- /* If the multiplication can overflow we cannot optimize this.
- ??? Until we can properly mark individual operations as
- not overflowing we need to treat sizetype special here as
- stor-layout relies on this opimization to make
- DECL_FIELD_BIT_OFFSET always a constant. */
- && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (t))
- || (TREE_CODE (TREE_TYPE (t)) == INTEGER_TYPE
- && TYPE_IS_SIZETYPE (TREE_TYPE (t))))
+ /* If the multiplication can overflow we cannot optimize this. */
+ && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (t))
&& TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST
&& integer_zerop (const_binop (TRUNC_MOD_EXPR, op1, c)))
{
@@ -5887,16 +5882,11 @@ extract_muldiv_1 (tree t, tree c, enum tree_code code, tree wide_type,
(tree_to_double_int (c),
TYPE_PRECISION (ctype), TYPE_UNSIGNED (ctype)),
false, &overflow_p);
- overflow_p = (((!TYPE_UNSIGNED (ctype)
- || (TREE_CODE (ctype) == INTEGER_TYPE
- && TYPE_IS_SIZETYPE (ctype)))
- && overflow_p)
+ overflow_p = ((!TYPE_UNSIGNED (ctype) && overflow_p)
| TREE_OVERFLOW (c) | TREE_OVERFLOW (op1));
if (!double_int_fits_to_tree_p (ctype, mul)
&& ((TYPE_UNSIGNED (ctype) && tcode != MULT_EXPR)
- || !TYPE_UNSIGNED (ctype)
- || (TREE_CODE (ctype) == INTEGER_TYPE
- && TYPE_IS_SIZETYPE (ctype))))
+ || !TYPE_UNSIGNED (ctype)))
overflow_p = 1;
if (!overflow_p)
return fold_build2 (tcode, ctype, fold_convert (ctype, op0),
@@ -5908,11 +5898,9 @@ extract_muldiv_1 (tree t, tree c, enum tree_code code, tree wide_type,
multiple of the other, in which case we replace this with either an
operation or CODE or TCODE.
- If we have an unsigned type that is not a sizetype, we cannot do
- this since it will change the result if the original computation
- overflowed. */
- if ((TYPE_OVERFLOW_UNDEFINED (ctype)
- || (TREE_CODE (ctype) == INTEGER_TYPE && TYPE_IS_SIZETYPE (ctype)))
+ If we have an unsigned type, we cannot do this since it will change
+ the result if the original computation overflowed. */
+ if (TYPE_OVERFLOW_UNDEFINED (ctype)
&& ((code == MULT_EXPR && tcode == EXACT_DIV_EXPR)
|| (tcode == MULT_EXPR
&& code != TRUNC_MOD_EXPR && code != CEIL_MOD_EXPR
@@ -9532,7 +9520,7 @@ get_pointer_modulus_and_residue (tree expr, unsigned HOST_WIDE_INT *residue,
if (code == ADDR_EXPR)
{
unsigned int bitalign;
- bitalign = get_object_alignment_1 (TREE_OPERAND (expr, 0), residue);
+ get_object_alignment_1 (TREE_OPERAND (expr, 0), &bitalign, residue);
*residue /= BITS_PER_UNIT;
return bitalign / BITS_PER_UNIT;
}
@@ -9971,7 +9959,8 @@ fold_binary_loc (location_t loc,
if (TREE_CODE (arg0) == ADDR_EXPR)
{
tem = try_move_mult_to_index (loc, arg0,
- fold_convert_loc (loc, sizetype, arg1));
+ fold_convert_loc (loc,
+ ssizetype, arg1));
if (tem)
return fold_convert_loc (loc, type, tem);
}
@@ -11444,6 +11433,30 @@ fold_binary_loc (location_t loc,
return fold_convert_loc (loc, type, arg0);
}
+ /* Fold (X * CST1) & CST2 to zero if we can, or drop known zero
+ bits from CST2. */
+ if (TREE_CODE (arg1) == INTEGER_CST
+ && TREE_CODE (arg0) == MULT_EXPR
+ && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST)
+ {
+ int arg1tz
+ = double_int_ctz (tree_to_double_int (TREE_OPERAND (arg0, 1)));
+ if (arg1tz > 0)
+ {
+ double_int arg1mask, masked;
+ arg1mask = double_int_not (double_int_mask (arg1tz));
+ arg1mask = double_int_ext (arg1mask, TYPE_PRECISION (type),
+ TYPE_UNSIGNED (type));
+ masked = double_int_and (arg1mask, tree_to_double_int (arg1));
+ if (double_int_zero_p (masked))
+ return omit_two_operands_loc (loc, type, build_zero_cst (type),
+ arg0, arg1);
+ else if (!double_int_equal_p (masked, tree_to_double_int (arg1)))
+ return fold_build2_loc (loc, code, type, op0,
+ double_int_to_tree (type, masked));
+ }
+ }
+
/* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
((A & N) + B) & M -> (A + B) & M
Similarly if (N & M) == 0,
diff --git a/gcc/fortran/ChangeLog b/gcc/fortran/ChangeLog
index ee13c2f94f5..b26b5c72735 100644
--- a/gcc/fortran/ChangeLog
+++ b/gcc/fortran/ChangeLog
@@ -1,3 +1,90 @@
+2012-05-08 Jan Hubicka <jh@suse.cz>
+
+ * trans-common.c (create_common): Do not fake TREE_ASM_WRITTEN.
+ * trans-decl.c (gfc_finish_cray_pointee): Likewise.
+
+2012-05-07 Tobias Burnus <burnus@net-b.de>
+
+ PR fortran/53255
+ * resolve.c (resolve_typebound_static): Fix handling
+ of overridden specific to generic operator.
+
+2012-05-06 Tobias Burnus <burnus@net-b.de>
+
+ PR fortran/41587
+ * decl.c (build_struct): Don't ignore FAILED status.
+
+2012-05-05 Paul Thomas <pault@gcc.gnu.org>
+
+ PR fortran/41600
+ * trans-array.c (build_array_ref): New static function.
+ (gfc_conv_array_ref, gfc_get_dataptr_offset): Call it.
+ * trans-expr.c (gfc_get_vptr_from_expr): New function.
+ (gfc_conv_derived_to_class): Add a new argument for a caller
+ supplied vptr and use it if it is not NULL.
+ (gfc_conv_procedure_call): Add NULL to call to above.
+ symbol.c (gfc_is_associate_pointer): Return true if symbol is
+ a class object.
+ * trans-stmt.c (trans_associate_var): Handle class associate-
+ names.
+ * expr.c (gfc_get_variable_expr): Supply the array-spec if
+ possible.
+ * trans-types.c (gfc_typenode_for_spec): Set GFC_CLASS_TYPE_P
+ for class types.
+ * trans.h : Add prototypes for gfc_get_vptr_from_expr and
+ gfc_conv_derived_to_class. Define GFC_CLASS_TYPE_P.
+ * resolve.c (resolve_variable): For class arrays, ensure that
+ the target expression has all the necessary _data references.
+ (resolve_assoc_var): Throw a "not yet implemented" error for
+ class array selectors that need a temporary.
+ * match.c (copy_ts_from_selector_to_associate,
+ select_derived_set_tmp, select_class_set_tmp): New functions.
+ (select_type_set_tmp): Call one of last two new functions.
+ (gfc_match_select_type): Copy_ts_from_selector_to_associate is
+ called if associate-name is typed.
+
+ PR fortran/53191
+ * resolve.c (resolve_ref): C614 applied to class expressions.
+
+2012-05-05 Janne Blomqvist <jb@gcc.gnu.org>
+
+ PR fortran/49010
+ PR fortran/24518
+ * intrinsic.texi (MOD, MODULO): Mention sign and magnitude of result.
+ * simplify.c (gfc_simplify_mod): Use mpfr_fmod.
+ (gfc_simplify_modulo): Likewise, use copysign to fix the result if
+ zero.
+ * trans-intrinsic.c (gfc_conv_intrinsic_mod): Remove fallback as
+ builtin_fmod is always available. For modulo, call copysign to fix
+ the result when signed zeros are enabled.
+
+2012-05-05 Janne Blomqvist <jb@gcc.gnu.org>
+
+ * gfortran.texi (GFORTRAN_TMPDIR): Rename to TMPDIR, explain
+ algorithm for choosing temp directory.
+
+2012-05-04 Tobias Burnus <burnus@net-b.de>
+
+ PR fortran/53175
+ * resolve.c (resolve_variable): Set public_used
+ if a private module variable is used in a (public)
+ specification expression.
+ * trans-decl.c (gfc_finish_var_decl): Mark those
+ TREE_PUBLIC.
+
+2012-05-04 Tobias Burnus <burnus@net-b.de>
+
+ PR fortran/53111
+ * resolve.c (resolve_fl_derived): Fix -std=f95
+ diagnostic for generic vs. DT names.
+
+2012-05-03 Tobias Burnus <burnus@net-b.de>
+
+ PR fortran/52864
+ * interface.c (compare_parameter_intent): Remove.
+ (check_intents): Remove call, handle CLASS pointer.
+ (compare_actual_formal): Handle CLASS pointer.
+
2012-04-30 Jan Hubicka <jh@suse.cz>
* f95-lang.c (gfc_finish): Update comments.
diff --git a/gcc/fortran/decl.c b/gcc/fortran/decl.c
index 4da21c316e3..e166bc916b1 100644
--- a/gcc/fortran/decl.c
+++ b/gcc/fortran/decl.c
@@ -1658,7 +1658,10 @@ scalar:
bool delayed = (gfc_state_stack->sym == c->ts.u.derived)
|| (!c->ts.u.derived->components
&& !c->ts.u.derived->attr.zero_comp);
- return gfc_build_class_symbol (&c->ts, &c->attr, &c->as, delayed);
+ gfc_try t2 = gfc_build_class_symbol (&c->ts, &c->attr, &c->as, delayed);
+
+ if (t != FAILURE)
+ t = t2;
}
return t;
diff --git a/gcc/fortran/expr.c b/gcc/fortran/expr.c
index d9614413e67..93d5df65455 100644
--- a/gcc/fortran/expr.c
+++ b/gcc/fortran/expr.c
@@ -3821,6 +3821,9 @@ gfc_get_variable_expr (gfc_symtree *var)
e->ref = gfc_get_ref ();
e->ref->type = REF_ARRAY;
e->ref->u.ar.type = AR_FULL;
+ e->ref->u.ar.as = gfc_copy_array_spec (var->n.sym->ts.type == BT_CLASS
+ ? CLASS_DATA (var->n.sym)->as
+ : var->n.sym->as);
}
return e;
diff --git a/gcc/fortran/gfortran.texi b/gcc/fortran/gfortran.texi
index b1790c6ad5f..96662c49423 100644
--- a/gcc/fortran/gfortran.texi
+++ b/gcc/fortran/gfortran.texi
@@ -576,10 +576,10 @@ environment variables.
Malformed environment variables are silently ignored.
@menu
+* TMPDIR:: Directory for scratch files
* GFORTRAN_STDIN_UNIT:: Unit number for standard input
* GFORTRAN_STDOUT_UNIT:: Unit number for standard output
* GFORTRAN_STDERR_UNIT:: Unit number for standard error
-* GFORTRAN_TMPDIR:: Directory for scratch files
* GFORTRAN_UNBUFFERED_ALL:: Do not buffer I/O for all units.
* GFORTRAN_UNBUFFERED_PRECONNECTED:: Do not buffer I/O for preconnected units.
* GFORTRAN_SHOW_LOCUS:: Show location for runtime errors
@@ -590,6 +590,27 @@ Malformed environment variables are silently ignored.
* GFORTRAN_ERROR_BACKTRACE:: Show backtrace on run-time errors
@end menu
+@node TMPDIR
+@section @env{TMPDIR}---Directory for scratch files
+
+When opening a file with @code{STATUS='SCRATCH'}, GNU Fortran tries to
+create the file in one of the potential directories by testing each
+directory in the order below.
+
+@enumerate
+@item
+The environment variable @env{TMPDIR}, if it exists.
+
+@item
+On the MinGW target, the directory returned by the @code{GetTempPath}
+function. Alternatively, on the Cygwin target, the @env{TMP} and
+@env{TEMP} environment variables, if they exist, in that order.
+
+@item
+The @code{P_tmpdir} macro if it is defined, otherwise the directory
+@file{/tmp}.
+@end enumerate
+
@node GFORTRAN_STDIN_UNIT
@section @env{GFORTRAN_STDIN_UNIT}---Unit number for standard input
@@ -611,14 +632,6 @@ This environment variable can be used to select the unit number
preconnected to standard error. This must be a positive integer.
The default value is 0.
-@node GFORTRAN_TMPDIR
-@section @env{GFORTRAN_TMPDIR}---Directory for scratch files
-
-This environment variable controls where scratch files are
-created. If this environment variable is missing,
-GNU Fortran searches for the environment variable @env{TMP}, then @env{TEMP}.
-If these are missing, the default is @file{/tmp}.
-
@node GFORTRAN_UNBUFFERED_ALL
@section @env{GFORTRAN_UNBUFFERED_ALL}---Do not buffer I/O on all units
diff --git a/gcc/fortran/interface.c b/gcc/fortran/interface.c
index 2f1d24e6e33..95439c118e4 100644
--- a/gcc/fortran/interface.c
+++ b/gcc/fortran/interface.c
@@ -2517,7 +2517,9 @@ compare_actual_formal (gfc_actual_arglist **ap, gfc_formal_arglist *formal,
? _("actual argument to INTENT = OUT/INOUT")
: NULL);
- if (f->sym->attr.pointer
+ if (((f->sym->ts.type == BT_CLASS && f->sym->attr.class_ok
+ && CLASS_DATA (f->sym)->attr.class_pointer)
+ || (f->sym->ts.type != BT_CLASS && f->sym->attr.pointer))
&& gfc_check_vardef_context (a->expr, true, false, context)
== FAILURE)
return 0;
@@ -2812,25 +2814,6 @@ check_some_aliasing (gfc_formal_arglist *f, gfc_actual_arglist *a)
}
-/* Given a symbol of a formal argument list and an expression,
- return nonzero if their intents are compatible, zero otherwise. */
-
-static int
-compare_parameter_intent (gfc_symbol *formal, gfc_expr *actual)
-{
- if (actual->symtree->n.sym->attr.pointer && !formal->attr.pointer)
- return 1;
-
- if (actual->symtree->n.sym->attr.intent != INTENT_IN)
- return 1;
-
- if (formal->attr.intent == INTENT_INOUT || formal->attr.intent == INTENT_OUT)
- return 0;
-
- return 1;
-}
-
-
/* Given formal and actual argument lists that correspond to one
another, check that they are compatible in the sense that intents
are not mismatched. */
@@ -2852,25 +2835,11 @@ check_intents (gfc_formal_arglist *f, gfc_actual_arglist *a)
f_intent = f->sym->attr.intent;
- if (!compare_parameter_intent(f->sym, a->expr))
- {
- gfc_error ("Procedure argument at %L is INTENT(IN) while interface "
- "specifies INTENT(%s)", &a->expr->where,
- gfc_intent_string (f_intent));
- return FAILURE;
- }
-
if (gfc_pure (NULL) && gfc_impure_variable (a->expr->symtree->n.sym))
{
- if (f_intent == INTENT_INOUT || f_intent == INTENT_OUT)
- {
- gfc_error ("Procedure argument at %L is local to a PURE "
- "procedure and is passed to an INTENT(%s) argument",
- &a->expr->where, gfc_intent_string (f_intent));
- return FAILURE;
- }
-
- if (f->sym->attr.pointer)
+ if ((f->sym->ts.type == BT_CLASS && f->sym->attr.class_ok
+ && CLASS_DATA (f->sym)->attr.class_pointer)
+ || (f->sym->ts.type != BT_CLASS && f->sym->attr.pointer))
{
gfc_error ("Procedure argument at %L is local to a PURE "
"procedure and has the POINTER attribute",
@@ -2890,7 +2859,9 @@ check_intents (gfc_formal_arglist *f, gfc_actual_arglist *a)
return FAILURE;
}
- if (f->sym->attr.pointer)
+ if ((f->sym->ts.type == BT_CLASS && f->sym->attr.class_ok
+ && CLASS_DATA (f->sym)->attr.class_pointer)
+ || (f->sym->ts.type != BT_CLASS && f->sym->attr.pointer))
{
gfc_error ("Coindexed actual argument at %L in PURE procedure "
"is passed to a POINTER dummy argument",
diff --git a/gcc/fortran/intrinsic.texi b/gcc/fortran/intrinsic.texi
index 294818e43d0..9bc36d7d415 100644
--- a/gcc/fortran/intrinsic.texi
+++ b/gcc/fortran/intrinsic.texi
@@ -8991,8 +8991,7 @@ cases, the result is of the same type and kind as @var{ARRAY}.
@table @asis
@item @emph{Description}:
-@code{MOD(A,P)} computes the remainder of the division of A by P@. It is
-calculated as @code{A - (INT(A/P) * P)}.
+@code{MOD(A,P)} computes the remainder of the division of A by P@.
@item @emph{Standard}:
Fortran 77 and later
@@ -9005,14 +9004,16 @@ Elemental function
@item @emph{Arguments}:
@multitable @columnfractions .15 .70
-@item @var{A} @tab Shall be a scalar of type @code{INTEGER} or @code{REAL}
-@item @var{P} @tab Shall be a scalar of the same type as @var{A} and not
-equal to zero
+@item @var{A} @tab Shall be a scalar of type @code{INTEGER} or @code{REAL}.
+@item @var{P} @tab Shall be a scalar of the same type and kind as @var{A}
+and not equal to zero.
@end multitable
@item @emph{Return value}:
-The kind of the return value is the result of cross-promoting
-the kinds of the arguments.
+The return value is the result of @code{A - (INT(A/P) * P)}. The type
+and kind of the return value is the same as that of the arguments. The
+returned value has the same sign as A and a magnitude less than the
+magnitude of P.
@item @emph{Example}:
@smallexample
@@ -9041,6 +9042,10 @@ end program test_mod
@item @code{AMOD(A,P)} @tab @code{REAL(4) A,P} @tab @code{REAL(4)} @tab Fortran 95 and later
@item @code{DMOD(A,P)} @tab @code{REAL(8) A,P} @tab @code{REAL(8)} @tab Fortran 95 and later
@end multitable
+
+@item @emph{See also}:
+@ref{MODULO}
+
@end table
@@ -9066,8 +9071,9 @@ Elemental function
@item @emph{Arguments}:
@multitable @columnfractions .15 .70
-@item @var{A} @tab Shall be a scalar of type @code{INTEGER} or @code{REAL}
-@item @var{P} @tab Shall be a scalar of the same type and kind as @var{A}
+@item @var{A} @tab Shall be a scalar of type @code{INTEGER} or @code{REAL}.
+@item @var{P} @tab Shall be a scalar of the same type and kind as @var{A}.
+It shall not be zero.
@end multitable
@item @emph{Return value}:
@@ -9080,7 +9086,8 @@ The type and kind of the result are those of the arguments.
@item If @var{A} and @var{P} are of type @code{REAL}:
@code{MODULO(A,P)} has the value of @code{A - FLOOR (A / P) * P}.
@end table
-In all cases, if @var{P} is zero the result is processor-dependent.
+The returned value has the same sign as P and a magnitude less than
+the magnitude of P.
@item @emph{Example}:
@smallexample
@@ -9096,6 +9103,9 @@ program test_modulo
end program
@end smallexample
+@item @emph{See also}:
+@ref{MOD}
+
@end table
diff --git a/gcc/fortran/match.c b/gcc/fortran/match.c
index 15edfc36db1..3d119180a73 100644
--- a/gcc/fortran/match.c
+++ b/gcc/fortran/match.c
@@ -5112,6 +5112,78 @@ gfc_match_select (void)
}
+/* Transfer the selector typespec to the associate name. */
+
+static void
+copy_ts_from_selector_to_associate (gfc_expr *associate, gfc_expr *selector)
+{
+ gfc_ref *ref;
+ gfc_symbol *assoc_sym;
+
+ assoc_sym = associate->symtree->n.sym;
+
+ /* Ensure that any array reference is resolved. */
+ gfc_resolve_expr (selector);
+
+ /* At this stage the expression rank and arrayspec dimensions have
+ not been completely sorted out. We must get the expr2->rank
+ right here, so that the correct class container is obtained. */
+ ref = selector->ref;
+ while (ref && ref->next)
+ ref = ref->next;
+
+ if (selector->ts.type == BT_CLASS
+ && CLASS_DATA (selector)->as
+ && ref && ref->type == REF_ARRAY)
+ {
+ if (ref->u.ar.type == AR_FULL)
+ selector->rank = CLASS_DATA (selector)->as->rank;
+ else if (ref->u.ar.type == AR_SECTION)
+ selector->rank = ref->u.ar.dimen;
+ else
+ selector->rank = 0;
+ }
+
+ if (selector->ts.type != BT_CLASS)
+ {
+ /* The correct class container has to be available. */
+ if (selector->rank)
+ {
+ assoc_sym->attr.dimension = 1;
+ assoc_sym->as = gfc_get_array_spec ();
+ assoc_sym->as->rank = selector->rank;
+ assoc_sym->as->type = AS_DEFERRED;
+ }
+ else
+ assoc_sym->as = NULL;
+
+ assoc_sym->ts.type = BT_CLASS;
+ assoc_sym->ts.u.derived = selector->ts.u.derived;
+ assoc_sym->attr.pointer = 1;
+ gfc_build_class_symbol (&assoc_sym->ts, &assoc_sym->attr,
+ &assoc_sym->as, false);
+ }
+ else
+ {
+ /* The correct class container has to be available. */
+ if (selector->rank)
+ {
+ assoc_sym->attr.dimension = 1;
+ assoc_sym->as = gfc_get_array_spec ();
+ assoc_sym->as->rank = selector->rank;
+ assoc_sym->as->type = AS_DEFERRED;
+ }
+ else
+ assoc_sym->as = NULL;
+ assoc_sym->ts.type = BT_CLASS;
+ assoc_sym->ts.u.derived = CLASS_DATA (selector)->ts.u.derived;
+ assoc_sym->attr.pointer = 1;
+ gfc_build_class_symbol (&assoc_sym->ts, &assoc_sym->attr,
+ &assoc_sym->as, false);
+ }
+}
+
+
/* Push the current selector onto the SELECT TYPE stack. */
static void
@@ -5126,64 +5198,103 @@ select_type_push (gfc_symbol *sel)
}
-/* Set the temporary for the current SELECT TYPE selector. */
+/* Set the temporary for the current derived type SELECT TYPE selector. */
-static void
-select_type_set_tmp (gfc_typespec *ts)
+static gfc_symtree *
+select_derived_set_tmp (gfc_typespec *ts)
{
char name[GFC_MAX_SYMBOL_LEN];
gfc_symtree *tmp;
- if (!ts)
+ sprintf (name, "__tmp_type_%s", ts->u.derived->name);
+ gfc_get_sym_tree (name, gfc_current_ns, &tmp, false);
+ gfc_add_type (tmp->n.sym, ts, NULL);
+
+ /* Copy across the array spec to the selector. */
+ if (select_type_stack->selector->ts.type == BT_CLASS
+ && select_type_stack->selector->attr.class_ok
+ && (CLASS_DATA (select_type_stack->selector)->attr.dimension
+ || CLASS_DATA (select_type_stack->selector)->attr.codimension))
{
- select_type_stack->tmp = NULL;
- return;
+ tmp->n.sym->attr.dimension
+ = CLASS_DATA (select_type_stack->selector)->attr.dimension;
+ tmp->n.sym->attr.codimension
+ = CLASS_DATA (select_type_stack->selector)->attr.codimension;
+ tmp->n.sym->as
+ = gfc_copy_array_spec (CLASS_DATA (select_type_stack->selector)->as);
}
+
+ gfc_set_sym_referenced (tmp->n.sym);
+ gfc_add_flavor (&tmp->n.sym->attr, FL_VARIABLE, name, NULL);
+ tmp->n.sym->attr.select_type_temporary = 1;
+
+ return tmp;
+}
+
+
+/* Set the temporary for the current class SELECT TYPE selector. */
+
+static gfc_symtree *
+select_class_set_tmp (gfc_typespec *ts)
+{
+ char name[GFC_MAX_SYMBOL_LEN];
+ gfc_symtree *tmp;
- if (!gfc_type_is_extensible (ts->u.derived))
- return;
+ if (select_type_stack->selector->ts.type == BT_CLASS
+ && !select_type_stack->selector->attr.class_ok)
+ return NULL;
- if (ts->type == BT_CLASS)
- sprintf (name, "__tmp_class_%s", ts->u.derived->name);
- else
- sprintf (name, "__tmp_type_%s", ts->u.derived->name);
+ sprintf (name, "__tmp_class_%s", ts->u.derived->name);
gfc_get_sym_tree (name, gfc_current_ns, &tmp, false);
gfc_add_type (tmp->n.sym, ts, NULL);
-/* Copy across the array spec to the selector, taking care as to
- whether or not it is a class object or not. */
+/* Copy across the array spec to the selector. */
if (select_type_stack->selector->ts.type == BT_CLASS
- && select_type_stack->selector->attr.class_ok
&& (CLASS_DATA (select_type_stack->selector)->attr.dimension
|| CLASS_DATA (select_type_stack->selector)->attr.codimension))
{
- if (ts->type == BT_CLASS)
- {
- CLASS_DATA (tmp->n.sym)->attr.dimension
+ tmp->n.sym->attr.pointer = 1;
+ tmp->n.sym->attr.dimension
= CLASS_DATA (select_type_stack->selector)->attr.dimension;
- CLASS_DATA (tmp->n.sym)->attr.codimension
+ tmp->n.sym->attr.codimension
= CLASS_DATA (select_type_stack->selector)->attr.codimension;
- CLASS_DATA (tmp->n.sym)->as = gfc_get_array_spec ();
- CLASS_DATA (tmp->n.sym)->as
- = CLASS_DATA (select_type_stack->selector)->as;
- }
- else
- {
- tmp->n.sym->attr.dimension
- = CLASS_DATA (select_type_stack->selector)->attr.dimension;
- tmp->n.sym->attr.codimension
- = CLASS_DATA (select_type_stack->selector)->attr.codimension;
- tmp->n.sym->as = gfc_get_array_spec ();
- tmp->n.sym->as = CLASS_DATA (select_type_stack->selector)->as;
- }
+ tmp->n.sym->as
+ = gfc_copy_array_spec (CLASS_DATA (select_type_stack->selector)->as);
}
gfc_set_sym_referenced (tmp->n.sym);
gfc_add_flavor (&tmp->n.sym->attr, FL_VARIABLE, name, NULL);
tmp->n.sym->attr.select_type_temporary = 1;
+ gfc_build_class_symbol (&tmp->n.sym->ts, &tmp->n.sym->attr,
+ &tmp->n.sym->as, false);
+
+ return tmp;
+}
+
+
+static void
+select_type_set_tmp (gfc_typespec *ts)
+{
+ gfc_symtree *tmp;
+
+ if (!ts)
+ {
+ select_type_stack->tmp = NULL;
+ return;
+ }
+
+ if (!gfc_type_is_extensible (ts->u.derived))
+ return;
+
+ /* Logic is a LOT clearer with separate functions for class and derived
+ type temporaries! There are not many more lines of code either. */
if (ts->type == BT_CLASS)
- gfc_build_class_symbol (&tmp->n.sym->ts, &tmp->n.sym->attr,
- &tmp->n.sym->as, false);
+ tmp = select_class_set_tmp (ts);
+ else
+ tmp = select_derived_set_tmp (ts);
+
+ if (tmp == NULL)
+ return;
/* Add an association for it, so the rest of the parser knows it is
an associate-name. The target will be set during resolution. */
@@ -5194,7 +5305,7 @@ select_type_set_tmp (gfc_typespec *ts)
select_type_stack->tmp = tmp;
}
-
+
/* Match a SELECT TYPE statement. */
match
@@ -5204,6 +5315,7 @@ gfc_match_select_type (void)
match m;
char name[GFC_MAX_SYMBOL_LEN];
bool class_array;
+ gfc_symbol *sym;
m = gfc_match_label ();
if (m == MATCH_ERROR)
@@ -5225,13 +5337,16 @@ gfc_match_select_type (void)
m = MATCH_ERROR;
goto cleanup;
}
+
+ sym = expr1->symtree->n.sym;
if (expr2->ts.type == BT_UNKNOWN)
- expr1->symtree->n.sym->attr.untyped = 1;
+ sym->attr.untyped = 1;
else
- expr1->symtree->n.sym->ts = expr2->ts;
- expr1->symtree->n.sym->attr.flavor = FL_VARIABLE;
- expr1->symtree->n.sym->attr.referenced = 1;
- expr1->symtree->n.sym->attr.class_ok = 1;
+ copy_ts_from_selector_to_associate (expr1, expr2);
+
+ sym->attr.flavor = FL_VARIABLE;
+ sym->attr.referenced = 1;
+ sym->attr.class_ok = 1;
}
else
{
diff --git a/gcc/fortran/resolve.c b/gcc/fortran/resolve.c
index 57da577dfaa..b3a23ed73c9 100644
--- a/gcc/fortran/resolve.c
+++ b/gcc/fortran/resolve.c
@@ -4904,14 +4904,19 @@ resolve_ref (gfc_expr *expr)
{
/* F03:C614. */
if (ref->u.c.component->attr.pointer
- || ref->u.c.component->attr.proc_pointer)
+ || ref->u.c.component->attr.proc_pointer
+ || (ref->u.c.component->ts.type == BT_CLASS
+ && CLASS_DATA (ref->u.c.component)->attr.pointer))
{
gfc_error ("Component to the right of a part reference "
"with nonzero rank must not have the POINTER "
"attribute at %L", &expr->where);
return FAILURE;
}
- else if (ref->u.c.component->attr.allocatable)
+ else if (ref->u.c.component->attr.allocatable
+ || (ref->u.c.component->ts.type == BT_CLASS
+ && CLASS_DATA (ref->u.c.component)->attr.allocatable))
+
{
gfc_error ("Component to the right of a part reference "
"with nonzero rank must not have the ALLOCATABLE "
@@ -5081,9 +5086,15 @@ resolve_variable (gfc_expr *e)
}
/* If this is an associate-name, it may be parsed with an array reference
- in error even though the target is scalar. Fail directly in this case. */
- if (sym->assoc && !sym->attr.dimension && e->ref && e->ref->type == REF_ARRAY)
- return FAILURE;
+ in error even though the target is scalar. Fail directly in this case.
+ TODO Understand why class scalar expressions must be excluded. */
+ if (sym->assoc && !(sym->ts.type == BT_CLASS && e->rank == 0))
+ {
+ if (sym->ts.type == BT_CLASS)
+ gfc_fix_class_refs (e);
+ if (!sym->attr.dimension && e->ref && e->ref->type == REF_ARRAY)
+ return FAILURE;
+ }
if (sym->ts.type == BT_DERIVED && sym->ts.u.derived->attr.generic)
sym->ts.u.derived = gfc_find_dt_in_generic (sym->ts.u.derived);
@@ -5124,6 +5135,19 @@ resolve_variable (gfc_expr *e)
if (check_assumed_size_reference (sym, e))
return FAILURE;
+ /* If a PRIVATE variable is used in the specification expression of the
+ result variable, it might be accessed from outside the module and can
+ thus not be TREE_PUBLIC() = 0.
+ TODO: sym->attr.public_used only has to be set for the result variable's
+ type-parameter expression and not for dummies or automatic variables.
+ Additionally, it only has to be set if the function is either PUBLIC or
+ used in a generic interface or TBP; unfortunately,
+ proc_name->attr.public_used can get set at a later stage. */
+ if (specification_expr && sym->attr.access == ACCESS_PRIVATE
+ && !sym->attr.function && !sym->attr.use_assoc
+ && gfc_current_ns->proc_name && gfc_current_ns->proc_name->attr.function)
+ sym->attr.public_used = 1;
+
/* Deal with forward references to entries during resolve_code, to
satisfy, at least partially, 12.5.2.5. */
if (gfc_current_ns->entries
@@ -5647,12 +5671,11 @@ resolve_typebound_static (gfc_expr* e, gfc_symtree** target,
e->value.compcall.actual = NULL;
/* If we find a deferred typebound procedure, check for derived types
- that an over-riding typebound procedure has not been missed. */
- if (e->value.compcall.tbp->deferred
- && e->value.compcall.name
- && !e->value.compcall.tbp->non_overridable
- && e->value.compcall.base_object
- && e->value.compcall.base_object->ts.type == BT_DERIVED)
+ that an overriding typebound procedure has not been missed. */
+ if (e->value.compcall.name
+ && !e->value.compcall.tbp->non_overridable
+ && e->value.compcall.base_object
+ && e->value.compcall.base_object->ts.type == BT_DERIVED)
{
gfc_symtree *st;
gfc_symbol *derived;
@@ -7928,7 +7951,7 @@ gfc_type_is_extensible (gfc_symbol *sym)
}
-/* Resolve an associate name: Resolve target and ensure the type-spec is
+/* Resolve an associate-name: Resolve target and ensure the type-spec is
correct as well as possibly the array-spec. */
static void
@@ -7984,8 +8007,25 @@ resolve_assoc_var (gfc_symbol* sym, bool resolve_target)
sym->attr.dimension = 0;
return;
}
- if (target->rank > 0)
+
+ /* We cannot deal with class selectors that need temporaries. */
+ if (target->ts.type == BT_CLASS
+ && gfc_ref_needs_temporary_p (target->ref))
+ {
+ gfc_error ("CLASS selector at %L needs a temporary which is not "
+ "yet implemented", &target->where);
+ return;
+ }
+
+ if (target->ts.type != BT_CLASS && target->rank > 0)
sym->attr.dimension = 1;
+ else if (target->ts.type == BT_CLASS)
+ gfc_fix_class_refs (target);
+
+ /* The associate-name will have a correct type by now. Make absolutely
+ sure that it has not picked up a dimension attribute. */
+ if (sym->ts.type == BT_CLASS)
+ sym->attr.dimension = 0;
if (sym->attr.dimension)
{
@@ -12015,6 +12055,8 @@ resolve_fl_derived (gfc_symbol *sym)
if (!sym->attr.is_class)
gfc_find_symbol (sym->name, sym->ns, 0, &gen_dt);
if (gen_dt && gen_dt->generic && gen_dt->generic->next
+ && (!gen_dt->generic->sym->attr.use_assoc
+ || gen_dt->generic->sym->module != gen_dt->generic->next->sym->module)
&& gfc_notify_std (GFC_STD_F2003, "Fortran 2003: Generic name '%s' of "
"function '%s' at %L being the same name as derived "
"type at %L", sym->name,
diff --git a/gcc/fortran/simplify.c b/gcc/fortran/simplify.c
index 706dab440ce..1578db19b94 100644
--- a/gcc/fortran/simplify.c
+++ b/gcc/fortran/simplify.c
@@ -4222,7 +4222,6 @@ gfc_expr *
gfc_simplify_mod (gfc_expr *a, gfc_expr *p)
{
gfc_expr *result;
- mpfr_t tmp;
int kind;
if (a->expr_type != EXPR_CONSTANT || p->expr_type != EXPR_CONSTANT)
@@ -4254,12 +4253,8 @@ gfc_simplify_mod (gfc_expr *a, gfc_expr *p)
}
gfc_set_model_kind (kind);
- mpfr_init (tmp);
- mpfr_div (tmp, a->value.real, p->value.real, GFC_RND_MODE);
- mpfr_trunc (tmp, tmp);
- mpfr_mul (tmp, tmp, p->value.real, GFC_RND_MODE);
- mpfr_sub (result->value.real, a->value.real, tmp, GFC_RND_MODE);
- mpfr_clear (tmp);
+ mpfr_fmod (result->value.real, a->value.real, p->value.real,
+ GFC_RND_MODE);
break;
default:
@@ -4274,7 +4269,6 @@ gfc_expr *
gfc_simplify_modulo (gfc_expr *a, gfc_expr *p)
{
gfc_expr *result;
- mpfr_t tmp;
int kind;
if (a->expr_type != EXPR_CONSTANT || p->expr_type != EXPR_CONSTANT)
@@ -4308,12 +4302,17 @@ gfc_simplify_modulo (gfc_expr *a, gfc_expr *p)
}
gfc_set_model_kind (kind);
- mpfr_init (tmp);
- mpfr_div (tmp, a->value.real, p->value.real, GFC_RND_MODE);
- mpfr_floor (tmp, tmp);
- mpfr_mul (tmp, tmp, p->value.real, GFC_RND_MODE);
- mpfr_sub (result->value.real, a->value.real, tmp, GFC_RND_MODE);
- mpfr_clear (tmp);
+ mpfr_fmod (result->value.real, a->value.real, p->value.real,
+ GFC_RND_MODE);
+ if (mpfr_cmp_ui (result->value.real, 0) != 0)
+ {
+ if (mpfr_signbit (a->value.real) != mpfr_signbit (p->value.real))
+ mpfr_add (result->value.real, result->value.real, p->value.real,
+ GFC_RND_MODE);
+ }
+ else
+ mpfr_copysign (result->value.real, result->value.real,
+ p->value.real, GFC_RND_MODE);
break;
default:
diff --git a/gcc/fortran/symbol.c b/gcc/fortran/symbol.c
index 46e5f56feee..6ca4ca33014 100644
--- a/gcc/fortran/symbol.c
+++ b/gcc/fortran/symbol.c
@@ -4882,6 +4882,9 @@ gfc_is_associate_pointer (gfc_symbol* sym)
if (!sym->assoc)
return false;
+ if (sym->ts.type == BT_CLASS)
+ return true;
+
if (!sym->assoc->variable)
return false;
diff --git a/gcc/fortran/trans-array.c b/gcc/fortran/trans-array.c
index b54c95b4087..b24d1c323ed 100644
--- a/gcc/fortran/trans-array.c
+++ b/gcc/fortran/trans-array.c
@@ -3068,6 +3068,36 @@ add_to_offset (tree *cst_offset, tree *offset, tree t)
}
}
+
+static tree
+build_array_ref (tree desc, tree offset, tree decl)
+{
+ tree tmp;
+
+ /* Class array references need special treatment because the assigned
+ type size needs to be used to point to the element. */
+ if (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (desc))
+ && TREE_CODE (desc) == COMPONENT_REF
+ && GFC_CLASS_TYPE_P (TREE_TYPE (TREE_OPERAND (desc, 0))))
+ {
+ tree type = gfc_get_element_type (TREE_TYPE (desc));
+ tmp = TREE_OPERAND (desc, 0);
+ tmp = gfc_get_class_array_ref (offset, tmp);
+ tmp = fold_convert (build_pointer_type (type), tmp);
+ tmp = build_fold_indirect_ref_loc (input_location, tmp);
+ }
+ else
+ {
+ tmp = gfc_conv_array_data (desc);
+ tmp = build_fold_indirect_ref_loc (input_location, tmp);
+ tmp = gfc_build_array_ref (tmp, offset, decl);
+ }
+
+ return tmp;
+}
+
+
+
/* Build an array reference. se->expr already holds the array descriptor.
This should be either a variable, indirect variable reference or component
reference. For arrays which do not have a descriptor, se->expr will be
@@ -3195,10 +3225,7 @@ gfc_conv_array_ref (gfc_se * se, gfc_array_ref * ar, gfc_symbol * sym,
offset = fold_build2_loc (input_location, PLUS_EXPR,
gfc_array_index_type, offset, cst_offset);
- /* Access the calculated element. */
- tmp = gfc_conv_array_data (se->expr);
- tmp = build_fold_indirect_ref (tmp);
- se->expr = gfc_build_array_ref (tmp, offset, sym->backend_decl);
+ se->expr = build_array_ref (se->expr, offset, sym->backend_decl);
}
@@ -6010,10 +6037,7 @@ gfc_get_dataptr_offset (stmtblock_t *block, tree parm, tree desc, tree offset,
return;
}
- tmp = gfc_conv_array_data (desc);
- tmp = build_fold_indirect_ref_loc (input_location,
- tmp);
- tmp = gfc_build_array_ref (tmp, offset, NULL);
+ tmp = build_array_ref (desc, offset, NULL);
/* Offset the data pointer for pointer assignments from arrays with
subreferences; e.g. my_integer => my_type(:)%integer_component. */
diff --git a/gcc/fortran/trans-common.c b/gcc/fortran/trans-common.c
index dcc2176a246..ce7114fb88d 100644
--- a/gcc/fortran/trans-common.c
+++ b/gcc/fortran/trans-common.c
@@ -697,8 +697,6 @@ create_common (gfc_common_head *com, segment_info *head, bool saw_equiv)
DECL_IGNORED_P (var_decl) = 1;
if (s->sym->attr.target)
TREE_ADDRESSABLE (var_decl) = 1;
- /* This is a fake variable just for debugging purposes. */
- TREE_ASM_WRITTEN (var_decl) = 1;
/* Fake variables are not visible from other translation units. */
TREE_PUBLIC (var_decl) = 0;
diff --git a/gcc/fortran/trans-decl.c b/gcc/fortran/trans-decl.c
index d6c090e8606..b03d393aa8e 100644
--- a/gcc/fortran/trans-decl.c
+++ b/gcc/fortran/trans-decl.c
@@ -457,8 +457,6 @@ gfc_finish_cray_pointee (tree decl, gfc_symbol *sym)
SET_DECL_VALUE_EXPR (decl, value);
DECL_HAS_VALUE_EXPR_P (decl) = 1;
GFC_DECL_CRAY_POINTEE (decl) = 1;
- /* This is a fake variable just for debugging purposes. */
- TREE_ASM_WRITTEN (decl) = 1;
}
@@ -565,7 +563,7 @@ gfc_finish_var_decl (tree decl, gfc_symbol * sym)
/* TODO: Don't set sym->module for result or dummy variables. */
gcc_assert (current_function_decl == NULL_TREE || sym->result == sym);
/* This is the declaration of a module variable. */
- if (sym->attr.access != ACCESS_PRIVATE)
+ if (sym->attr.access != ACCESS_PRIVATE || sym->attr.public_used)
TREE_PUBLIC (decl) = 1;
TREE_STATIC (decl) = 1;
}
diff --git a/gcc/fortran/trans-expr.c b/gcc/fortran/trans-expr.c
index 7092bc2f153..8045b1f029b 100644
--- a/gcc/fortran/trans-expr.c
+++ b/gcc/fortran/trans-expr.c
@@ -147,11 +147,25 @@ gfc_vtable_copy_get (tree decl)
#undef VTABLE_COPY_FIELD
+/* Obtain the vptr of the last class reference in an expression. */
+
+tree
+gfc_get_vptr_from_expr (tree expr)
+{
+ tree tmp = expr;
+ while (tmp && !GFC_CLASS_TYPE_P (TREE_TYPE (tmp)))
+ tmp = TREE_OPERAND (tmp, 0);
+ tmp = gfc_class_vptr_get (tmp);
+ return tmp;
+}
+
+
/* Takes a derived type expression and returns the address of a temporary
- class object of the 'declared' type. */
-static void
+ class object of the 'declared' type. If vptr is not NULL, this is
+ used for the temporary class object. */
+void
gfc_conv_derived_to_class (gfc_se *parmse, gfc_expr *e,
- gfc_typespec class_ts)
+ gfc_typespec class_ts, tree vptr)
{
gfc_symbol *vtab;
gfc_ss *ss;
@@ -167,11 +181,19 @@ gfc_conv_derived_to_class (gfc_se *parmse, gfc_expr *e,
/* Set the vptr. */
ctree = gfc_class_vptr_get (var);
- /* Remember the vtab corresponds to the derived type
- not to the class declared type. */
- vtab = gfc_find_derived_vtab (e->ts.u.derived);
- gcc_assert (vtab);
- tmp = gfc_build_addr_expr (NULL_TREE, gfc_get_symbol_decl (vtab));
+ if (vptr != NULL_TREE)
+ {
+ /* Use the dynamic vptr. */
+ tmp = vptr;
+ }
+ else
+ {
+ /* In this case the vtab corresponds to the derived type and the
+ vptr must point to it. */
+ vtab = gfc_find_derived_vtab (e->ts.u.derived);
+ gcc_assert (vtab);
+ tmp = gfc_build_addr_expr (NULL_TREE, gfc_get_symbol_decl (vtab));
+ }
gfc_add_modify (&parmse->pre, ctree,
fold_convert (TREE_TYPE (ctree), tmp));
@@ -3531,7 +3553,7 @@ gfc_conv_procedure_call (gfc_se * se, gfc_symbol * sym,
/* The derived type needs to be converted to a temporary
CLASS object. */
gfc_init_se (&parmse, se);
- gfc_conv_derived_to_class (&parmse, e, fsym->ts);
+ gfc_conv_derived_to_class (&parmse, e, fsym->ts, NULL);
}
else if (se->ss && se->ss->info->useflags)
{
diff --git a/gcc/fortran/trans-intrinsic.c b/gcc/fortran/trans-intrinsic.c
index ab4f47fc5d3..bfbebf3269b 100644
--- a/gcc/fortran/trans-intrinsic.c
+++ b/gcc/fortran/trans-intrinsic.c
@@ -1719,21 +1719,24 @@ gfc_conv_intrinsic_cmplx (gfc_se * se, gfc_expr * expr, int both)
se->expr = fold_build2_loc (input_location, COMPLEX_EXPR, type, real, imag);
}
+
/* Remainder function MOD(A, P) = A - INT(A / P) * P
- MODULO(A, P) = A - FLOOR (A / P) * P */
-/* TODO: MOD(x, 0) */
+ MODULO(A, P) = A - FLOOR (A / P) * P
+
+   The obvious algorithms above are numerically unstable for large
+ arguments, hence these intrinsics are instead implemented via calls
+ to the fmod family of functions. It is the responsibility of the
+ user to ensure that the second argument is non-zero. */
static void
gfc_conv_intrinsic_mod (gfc_se * se, gfc_expr * expr, int modulo)
{
tree type;
- tree itype;
tree tmp;
tree test;
tree test2;
tree fmod;
- mpfr_t huge;
- int n, ikind;
+ tree zero;
tree args[2];
gfc_conv_intrinsic_function_args (se, expr, args, 2);
@@ -1757,16 +1760,15 @@ gfc_conv_intrinsic_mod (gfc_se * se, gfc_expr * expr, int modulo)
/* Check if we have a builtin fmod. */
fmod = gfc_builtin_decl_for_float_kind (BUILT_IN_FMOD, expr->ts.kind);
- /* Use it if it exists. */
- if (fmod != NULL_TREE)
- {
- tmp = build_addr (fmod, current_function_decl);
- se->expr = build_call_array_loc (input_location,
+ /* The builtin should always be available. */
+ gcc_assert (fmod != NULL_TREE);
+
+ tmp = build_addr (fmod, current_function_decl);
+ se->expr = build_call_array_loc (input_location,
TREE_TYPE (TREE_TYPE (fmod)),
tmp, 2, args);
- if (modulo == 0)
- return;
- }
+ if (modulo == 0)
+ return;
type = TREE_TYPE (args[0]);
@@ -1774,16 +1776,31 @@ gfc_conv_intrinsic_mod (gfc_se * se, gfc_expr * expr, int modulo)
args[1] = gfc_evaluate_now (args[1], &se->pre);
/* Definition:
- modulo = arg - floor (arg/arg2) * arg2, so
- = test ? fmod (arg, arg2) : fmod (arg, arg2) + arg2,
- where
- test = (fmod (arg, arg2) != 0) && ((arg < 0) xor (arg2 < 0))
- thereby avoiding another division and retaining the accuracy
- of the builtin function. */
- if (fmod != NULL_TREE && modulo)
+ modulo = arg - floor (arg/arg2) * arg2
+
+ In order to calculate the result accurately, we use the fmod
+ function as follows.
+
+ res = fmod (arg, arg2);
+ if (res)
+ {
+ if ((arg < 0) xor (arg2 < 0))
+ res += arg2;
+ }
+ else
+ res = copysign (0., arg2);
+
+ => As two nested ternary exprs:
+
+ res = res ? (((arg < 0) xor (arg2 < 0)) ? res + arg2 : res)
+ : copysign (0., arg2);
+
+ */
+
+ zero = gfc_build_const (type, integer_zero_node);
+ tmp = gfc_evaluate_now (se->expr, &se->pre);
+ if (!flag_signed_zeros)
{
- tree zero = gfc_build_const (type, integer_zero_node);
- tmp = gfc_evaluate_now (se->expr, &se->pre);
test = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
args[0], zero);
test2 = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
@@ -1796,50 +1813,35 @@ gfc_conv_intrinsic_mod (gfc_se * se, gfc_expr * expr, int modulo)
boolean_type_node, test, test2);
test = gfc_evaluate_now (test, &se->pre);
se->expr = fold_build3_loc (input_location, COND_EXPR, type, test,
- fold_build2_loc (input_location, PLUS_EXPR,
- type, tmp, args[1]), tmp);
- return;
+ fold_build2_loc (input_location,
+ PLUS_EXPR,
+ type, tmp, args[1]),
+ tmp);
}
-
- /* If we do not have a built_in fmod, the calculation is going to
- have to be done longhand. */
- tmp = fold_build2_loc (input_location, RDIV_EXPR, type, args[0], args[1]);
-
- /* Test if the value is too large to handle sensibly. */
- gfc_set_model_kind (expr->ts.kind);
- mpfr_init (huge);
- n = gfc_validate_kind (BT_INTEGER, expr->ts.kind, true);
- ikind = expr->ts.kind;
- if (n < 0)
+ else
{
- n = gfc_validate_kind (BT_INTEGER, gfc_max_integer_kind, false);
- ikind = gfc_max_integer_kind;
+ tree expr1, copysign, cscall;
+ copysign = gfc_builtin_decl_for_float_kind (BUILT_IN_COPYSIGN,
+ expr->ts.kind);
+ test = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
+ args[0], zero);
+ test2 = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
+ args[1], zero);
+ test2 = fold_build2_loc (input_location, TRUTH_XOR_EXPR,
+ boolean_type_node, test, test2);
+ expr1 = fold_build3_loc (input_location, COND_EXPR, type, test2,
+ fold_build2_loc (input_location,
+ PLUS_EXPR,
+ type, tmp, args[1]),
+ tmp);
+ test = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ tmp, zero);
+ cscall = build_call_expr_loc (input_location, copysign, 2, zero,
+ args[1]);
+ se->expr = fold_build3_loc (input_location, COND_EXPR, type, test,
+ expr1, cscall);
}
- mpfr_set_z (huge, gfc_integer_kinds[n].huge, GFC_RND_MODE);
- test = gfc_conv_mpfr_to_tree (huge, expr->ts.kind, 0);
- test2 = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
- tmp, test);
-
- mpfr_neg (huge, huge, GFC_RND_MODE);
- test = gfc_conv_mpfr_to_tree (huge, expr->ts.kind, 0);
- test = fold_build2_loc (input_location, GT_EXPR, boolean_type_node, tmp,
- test);
- test2 = fold_build2_loc (input_location, TRUTH_AND_EXPR,
- boolean_type_node, test, test2);
-
- itype = gfc_get_int_type (ikind);
- if (modulo)
- tmp = build_fix_expr (&se->pre, tmp, itype, RND_FLOOR);
- else
- tmp = build_fix_expr (&se->pre, tmp, itype, RND_TRUNC);
- tmp = convert (type, tmp);
- tmp = fold_build3_loc (input_location, COND_EXPR, type, test2, tmp,
- args[0]);
- tmp = fold_build2_loc (input_location, MULT_EXPR, type, tmp, args[1]);
- se->expr = fold_build2_loc (input_location, MINUS_EXPR, type, args[0],
- tmp);
- mpfr_clear (huge);
- break;
+ return;
default:
gcc_unreachable ();
diff --git a/gcc/fortran/trans-stmt.c b/gcc/fortran/trans-stmt.c
index 12a1390e2aa..323fca382c3 100644
--- a/gcc/fortran/trans-stmt.c
+++ b/gcc/fortran/trans-stmt.c
@@ -1140,6 +1140,10 @@ trans_associate_var (gfc_symbol *sym, gfc_wrapped_block *block)
gfc_expr *e;
tree tmp;
bool class_target;
+ tree desc;
+ tree offset;
+ tree dim;
+ int n;
gcc_assert (sym->assoc);
e = sym->assoc->target;
@@ -1191,8 +1195,9 @@ trans_associate_var (gfc_symbol *sym, gfc_wrapped_block *block)
gfc_finish_block (&se.post));
}
- /* CLASS arrays just need the descriptor to be directly assigned. */
- else if (class_target && sym->attr.dimension)
+ /* Derived type temporaries, arising from TYPE IS, just need the
+ descriptor of class arrays to be assigned directly. */
+ else if (class_target && sym->ts.type == BT_DERIVED && sym->attr.dimension)
{
gfc_se se;
@@ -1217,7 +1222,47 @@ trans_associate_var (gfc_symbol *sym, gfc_wrapped_block *block)
gcc_assert (!sym->attr.dimension);
gfc_init_se (&se, NULL);
- gfc_conv_expr (&se, e);
+
+ /* Class associate-names come this way because they are
+ unconditionally associate pointers and the symbol is scalar. */
+ if (sym->ts.type == BT_CLASS && CLASS_DATA (sym)->attr.dimension)
+ {
+ /* For a class array we need a descriptor for the selector. */
+ gfc_conv_expr_descriptor (&se, e, gfc_walk_expr (e));
+
+ /* Obtain a temporary class container for the result. */
+ gfc_conv_class_to_class (&se, e, sym->ts, false);
+ se.expr = build_fold_indirect_ref_loc (input_location, se.expr);
+
+ /* Set the offset. */
+ desc = gfc_class_data_get (se.expr);
+ offset = gfc_index_zero_node;
+ for (n = 0; n < e->rank; n++)
+ {
+ dim = gfc_rank_cst[n];
+ tmp = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type,
+ gfc_conv_descriptor_stride_get (desc, dim),
+ gfc_conv_descriptor_lbound_get (desc, dim));
+ offset = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type,
+ offset, tmp);
+ }
+ gfc_conv_descriptor_offset_set (&se.pre, desc, offset);
+ }
+ else if (sym->ts.type == BT_CLASS && e->ts.type == BT_CLASS
+ && CLASS_DATA (e)->attr.dimension)
+ {
+ /* This is bound to be a class array element. */
+ gfc_conv_expr_reference (&se, e);
+ /* Get the _vptr component of the class object. */
+ tmp = gfc_get_vptr_from_expr (se.expr);
+ /* Obtain a temporary class container for the result. */
+ gfc_conv_derived_to_class (&se, e, sym->ts, tmp);
+ se.expr = build_fold_indirect_ref_loc (input_location, se.expr);
+ }
+ else
+ gfc_conv_expr (&se, e);
tmp = TREE_TYPE (sym->backend_decl);
tmp = gfc_build_addr_expr (tmp, se.expr);
diff --git a/gcc/fortran/trans-types.c b/gcc/fortran/trans-types.c
index 0f2912de1af..21a94fd6f06 100644
--- a/gcc/fortran/trans-types.c
+++ b/gcc/fortran/trans-types.c
@@ -1106,6 +1106,9 @@ gfc_typenode_for_spec (gfc_typespec * spec)
case BT_CLASS:
basetype = gfc_get_derived_type (spec->u.derived);
+ if (spec->type == BT_CLASS)
+ GFC_CLASS_TYPE_P (basetype) = 1;
+
/* If we're dealing with either C_PTR or C_FUNPTR, we modified the
type and kind to fit a (void *) and the basetype returned was a
ptr_type_node. We need to pass up this new information to the
diff --git a/gcc/fortran/trans.h b/gcc/fortran/trans.h
index 08a67325274..3b77281568a 100644
--- a/gcc/fortran/trans.h
+++ b/gcc/fortran/trans.h
@@ -348,8 +348,10 @@ tree gfc_vtable_size_get (tree);
tree gfc_vtable_extends_get (tree);
tree gfc_vtable_def_init_get (tree);
tree gfc_vtable_copy_get (tree);
+tree gfc_get_vptr_from_expr (tree);
tree gfc_get_class_array_ref (tree, tree);
tree gfc_copy_class_to_class (tree, tree, tree);
+void gfc_conv_derived_to_class (gfc_se *, gfc_expr *, gfc_typespec, tree);
void gfc_conv_class_to_class (gfc_se *, gfc_expr *, gfc_typespec, bool);
/* Initialize an init/cleanup block. */
@@ -827,6 +829,8 @@ struct GTY((variable_size)) lang_decl {
#define GFC_ARRAY_TYPE_P(node) TYPE_LANG_FLAG_2(node)
/* Fortran POINTER type. */
#define GFC_POINTER_TYPE_P(node) TYPE_LANG_FLAG_3(node)
+/* Fortran CLASS type. */
+#define GFC_CLASS_TYPE_P(node) TYPE_LANG_FLAG_4(node)
/* The GFC_TYPE_ARRAY_* members are present in both descriptor and
descriptorless array types. */
#define GFC_TYPE_ARRAY_LBOUND(node, dim) \
diff --git a/gcc/function.c b/gcc/function.c
index 3e903ef94da..b5e9011ce23 100644
--- a/gcc/function.c
+++ b/gcc/function.c
@@ -496,12 +496,12 @@ assign_stack_local_1 (enum machine_mode mode, HOST_WIDE_INT size,
/* If we have already instantiated virtual registers, return the actual
address relative to the frame pointer. */
if (virtuals_instantiated)
- addr = plus_constant (frame_pointer_rtx,
+ addr = plus_constant (Pmode, frame_pointer_rtx,
trunc_int_for_mode
(slot_offset + bigend_correction
+ STARTING_FRAME_OFFSET, Pmode));
else
- addr = plus_constant (virtual_stack_vars_rtx,
+ addr = plus_constant (Pmode, virtual_stack_vars_rtx,
trunc_int_for_mode
(slot_offset + bigend_correction,
Pmode));
@@ -1449,7 +1449,7 @@ instantiate_virtual_regs_in_rtx (rtx *loc, void *data)
new_rtx = instantiate_new_reg (x, &offset);
if (new_rtx)
{
- *loc = plus_constant (new_rtx, offset);
+ *loc = plus_constant (GET_MODE (x), new_rtx, offset);
if (changed)
*changed = true;
}
@@ -1459,7 +1459,7 @@ instantiate_virtual_regs_in_rtx (rtx *loc, void *data)
new_rtx = instantiate_new_reg (XEXP (x, 0), &offset);
if (new_rtx)
{
- new_rtx = plus_constant (new_rtx, offset);
+ new_rtx = plus_constant (GET_MODE (x), new_rtx, offset);
*loc = simplify_gen_binary (PLUS, GET_MODE (x), new_rtx, XEXP (x, 1));
if (changed)
*changed = true;
diff --git a/gcc/function.h b/gcc/function.h
index 5aaba74dc0b..34efb3157ab 100644
--- a/gcc/function.h
+++ b/gcc/function.h
@@ -507,7 +507,7 @@ struct GTY(()) function {
struct control_flow_graph *cfg;
/* GIMPLE body for this function. */
- struct gimple_seq_d *gimple_body;
+ gimple_seq gimple_body;
/* SSA and dataflow information. */
struct gimple_df *gimple_df;
diff --git a/gcc/gcc-ar.c b/gcc/gcc-ar.c
index 706b2f389e5..caae1670bf6 100644
--- a/gcc/gcc-ar.c
+++ b/gcc/gcc-ar.c
@@ -1,5 +1,5 @@
/* Wrapper for ar/ranlib/nm to pass the LTO plugin.
- Copyright (C) 2011 Free Software Foundation, Inc.
+ Copyright (C) 2011, 2012 Free Software Foundation, Inc.
Contributed by Andi Kleen.
This file is part of GCC.
@@ -52,11 +52,16 @@ main(int ac, char **av)
/* XXX implement more magic from gcc.c? */
nprefix = getenv ("GCC_EXEC_PREFIX");
if (!nprefix)
+ nprefix = av[0];
+ else
+ nprefix = concat (nprefix, "gcc-" PERSONALITY, NULL);
+
+ nprefix = make_relative_prefix (nprefix,
+ standard_bin_prefix,
+ standard_libexec_prefix);
+ if (nprefix == NULL)
nprefix = standard_libexec_prefix;
- nprefix = make_relative_prefix (av[0],
- standard_bin_prefix,
- nprefix);
plugin = concat (nprefix,
dir_separator,
DEFAULT_TARGET_MACHINE,
@@ -65,7 +70,7 @@ main(int ac, char **av)
dir_separator,
LTOPLUGINSONAME,
NULL);
- if (access (plugin, X_OK))
+ if (access (plugin, R_OK))
{
fprintf (stderr, "%s: Cannot find plugin %s\n", av[0], plugin);
exit (1);
diff --git a/gcc/gcc.c b/gcc/gcc.c
index 09f9b1548b8..e152b705393 100644
--- a/gcc/gcc.c
+++ b/gcc/gcc.c
@@ -5313,6 +5313,8 @@ eval_spec_function (const char *func, const char *args)
int save_this_is_linker_script;
const char *save_suffix_subst;
+ int save_growing_size;
+ void *save_growing_value;
sf = lookup_spec_function (func);
if (sf == NULL)
@@ -5329,6 +5331,18 @@ eval_spec_function (const char *func, const char *args)
save_input_from_pipe = input_from_pipe;
save_suffix_subst = suffix_subst;
+ /* If we have some object growing now, finalize it so the args and function
+ eval proceed from a cleared context. This is needed to prevent the first
+ constructed arg from mistakenly including the growing value. We'll push
+ this value back on the obstack once the function evaluation is done, to
+ restore a consistent processing context for our caller. This is fine as
+ the address of growing objects isn't guaranteed to remain stable until
+ they are finalized, and we expect this situation to be rare enough for
+ the extra copy not to be an issue. */
+ save_growing_size = obstack_object_size (&obstack);
+ if (save_growing_size > 0)
+ save_growing_value = obstack_finish (&obstack);
+
/* Create a new spec processing context, and build the function
arguments. */
@@ -5354,6 +5368,9 @@ eval_spec_function (const char *func, const char *args)
input_from_pipe = save_input_from_pipe;
suffix_subst = save_suffix_subst;
+ if (save_growing_size > 0)
+ obstack_grow (&obstack, save_growing_value, save_growing_size);
+
return funcval;
}
diff --git a/gcc/gcov-io.h b/gcc/gcov-io.h
index b80f7069620..c15e64b197b 100644
--- a/gcc/gcov-io.h
+++ b/gcc/gcov-io.h
@@ -458,6 +458,12 @@ extern void __gcov_init (struct gcov_info *) ATTRIBUTE_HIDDEN;
/* Called before fork, to avoid double counting. */
extern void __gcov_flush (void) ATTRIBUTE_HIDDEN;
+/* Function to reset all counters to 0. */
+extern void __gcov_reset (void);
+
+/* Function to enable early write of profile information so far. */
+extern void __gcov_dump (void);
+
/* The merge function that just sums the counters. */
extern void __gcov_merge_add (gcov_type *, unsigned) ATTRIBUTE_HIDDEN;
diff --git a/gcc/genattr.c b/gcc/genattr.c
index 34e710d429d..33030b1b2de 100644
--- a/gcc/genattr.c
+++ b/gcc/genattr.c
@@ -344,14 +344,9 @@ main (int argc, char **argv)
/* Output flag masks for use by reorg.
- Flags are used to hold branch direction and prediction information
- for use by eligible_for_... */
+ Flags are used to hold branch direction for use by eligible_for_... */
printf("\n#define ATTR_FLAG_forward\t0x1\n");
printf("#define ATTR_FLAG_backward\t0x2\n");
- printf("#define ATTR_FLAG_likely\t0x4\n");
- printf("#define ATTR_FLAG_very_likely\t0x8\n");
- printf("#define ATTR_FLAG_unlikely\t0x10\n");
- printf("#define ATTR_FLAG_very_unlikely\t0x20\n");
puts("\n#endif /* GCC_INSN_ATTR_H */");
diff --git a/gcc/genattrtab.c b/gcc/genattrtab.c
index bfbe3e80583..60aa59c4924 100644
--- a/gcc/genattrtab.c
+++ b/gcc/genattrtab.c
@@ -275,16 +275,17 @@ static rtx copy_rtx_unchanging (rtx);
static bool attr_alt_subset_p (rtx, rtx);
static bool attr_alt_subset_of_compl_p (rtx, rtx);
static void clear_struct_flag (rtx);
-static void write_attr_valueq (struct attr_desc *, const char *);
+static void write_attr_valueq (FILE *, struct attr_desc *, const char *);
static struct attr_value *find_most_used (struct attr_desc *);
-static void write_attr_set (struct attr_desc *, int, rtx,
+static void write_attr_set (FILE *, struct attr_desc *, int, rtx,
const char *, const char *, rtx,
int, int, unsigned int);
-static void write_attr_case (struct attr_desc *, struct attr_value *,
+static void write_attr_case (FILE *, struct attr_desc *,
+ struct attr_value *,
int, const char *, const char *, int, rtx);
-static void write_attr_value (struct attr_desc *, rtx);
-static void write_upcase (const char *);
-static void write_indent (int);
+static void write_attr_value (FILE *, struct attr_desc *, rtx);
+static void write_upcase (FILE *, const char *);
+static void write_indent (FILE *, int);
static rtx identity_fn (rtx);
static rtx zero_fn (rtx);
static rtx one_fn (rtx);
@@ -294,6 +295,23 @@ static rtx min_fn (rtx);
#define oballoc(T) XOBNEW (hash_obstack, T)
#define oballocvec(T, N) XOBNEWVEC (hash_obstack, T, (N))
+/* This gen* file is unique, in that it writes out multiple files.
+
+ Before GCC 4.8, insn-attrtab.c was written out containing many large
+ functions and tables. This made insn-attrtab.c _the_ bottle-neck in
+ a parallel build, and even made it impossible to build GCC on machines
+ with relatively small RAM space (PR other/29442). Therefore, the
+   attribute functions/tables are now written out to three separate
+ files: all "*insn_default_latency" functions go to LATENCY_FILE_NAME,
+ all "*internal_dfa_insn_code" functions go to DFA_FILE_NAME, and the
+ rest goes to ATTR_FILE_NAME. */
+
+static const char *attr_file_name = NULL;
+static const char *dfa_file_name = NULL;
+static const char *latency_file_name = NULL;
+
+static FILE *attr_file, *dfa_file, *latency_file;
+
/* Hash table for sharing RTL and strings. */
/* Each hash table slot is a bucket containing a chain of these structures.
@@ -1610,7 +1628,7 @@ min_fn (rtx exp)
}
static void
-write_length_unit_log (void)
+write_length_unit_log (FILE *outf)
{
struct attr_desc *length_attr = find_attr (&length_str, 0);
struct attr_value *av;
@@ -1633,7 +1651,7 @@ write_length_unit_log (void)
for (length_unit_log = 0; length_or & 1; length_or >>= 1)
length_unit_log++;
}
- printf ("EXPORTED_CONST int length_unit_log = %u;\n", length_unit_log);
+ fprintf (outf, "EXPORTED_CONST int length_unit_log = %u;\n", length_unit_log);
}
/* Take a COND expression and see if any of the conditions in it can be
@@ -3247,7 +3265,7 @@ find_attrs_to_cache (rtx exp, bool create)
}
}
-/* Given a piece of RTX, print a C expression to test its truth value.
+/* Given a piece of RTX, print a C expression to test its truth value to OUTF.
We use AND and IOR both for logical and bit-wise operations, so
interpret them as logical unless they are inside a comparison expression. */
@@ -3265,7 +3283,7 @@ find_attrs_to_cache (rtx exp, bool create)
#define FLG_OUTSIDE_AND 8
static unsigned int
-write_test_expr (rtx exp, unsigned int attrs_cached, int flags)
+write_test_expr (FILE *outf, rtx exp, unsigned int attrs_cached, int flags)
{
int comparison_operator = 0;
RTX_CODE code;
@@ -3274,14 +3292,14 @@ write_test_expr (rtx exp, unsigned int attrs_cached, int flags)
/* In order not to worry about operator precedence, surround our part of
the expression with parentheses. */
- printf ("(");
+ fprintf (outf, "(");
code = GET_CODE (exp);
switch (code)
{
/* Binary operators. */
case GEU: case GTU:
case LEU: case LTU:
- printf ("(unsigned) ");
+ fprintf (outf, "(unsigned) ");
/* Fall through. */
case EQ: case NE:
@@ -3295,7 +3313,7 @@ write_test_expr (rtx exp, unsigned int attrs_cached, int flags)
if ((code != AND && code != IOR) || (flags & FLG_BITWISE))
{
flags &= ~(FLG_AFTER | FLG_INSIDE | FLG_OUTSIDE_AND);
- write_test_expr (XEXP (exp, 0), attrs_cached,
+ write_test_expr (outf, XEXP (exp, 0), attrs_cached,
flags | comparison_operator);
}
else
@@ -3307,78 +3325,78 @@ write_test_expr (rtx exp, unsigned int attrs_cached, int flags)
|| (GET_CODE (XEXP (exp, 0)) == NOT
&& GET_CODE (XEXP (XEXP (exp, 0), 0)) == EQ_ATTR))
attrs_cached
- = write_test_expr (XEXP (exp, 0), attrs_cached, flags);
+ = write_test_expr (outf, XEXP (exp, 0), attrs_cached, flags);
else
- write_test_expr (XEXP (exp, 0), attrs_cached, flags);
+ write_test_expr (outf, XEXP (exp, 0), attrs_cached, flags);
}
switch (code)
{
case EQ:
- printf (" == ");
+ fprintf (outf, " == ");
break;
case NE:
- printf (" != ");
+ fprintf (outf, " != ");
break;
case GE:
- printf (" >= ");
+ fprintf (outf, " >= ");
break;
case GT:
- printf (" > ");
+ fprintf (outf, " > ");
break;
case GEU:
- printf (" >= (unsigned) ");
+ fprintf (outf, " >= (unsigned) ");
break;
case GTU:
- printf (" > (unsigned) ");
+ fprintf (outf, " > (unsigned) ");
break;
case LE:
- printf (" <= ");
+ fprintf (outf, " <= ");
break;
case LT:
- printf (" < ");
+ fprintf (outf, " < ");
break;
case LEU:
- printf (" <= (unsigned) ");
+ fprintf (outf, " <= (unsigned) ");
break;
case LTU:
- printf (" < (unsigned) ");
+ fprintf (outf, " < (unsigned) ");
break;
case PLUS:
- printf (" + ");
+ fprintf (outf, " + ");
break;
case MINUS:
- printf (" - ");
+ fprintf (outf, " - ");
break;
case MULT:
- printf (" * ");
+ fprintf (outf, " * ");
break;
case DIV:
- printf (" / ");
+ fprintf (outf, " / ");
break;
case MOD:
- printf (" %% ");
+ fprintf (outf, " %% ");
break;
case AND:
if (flags & FLG_BITWISE)
- printf (" & ");
+ fprintf (outf, " & ");
else
- printf (" && ");
+ fprintf (outf, " && ");
break;
case IOR:
if (flags & FLG_BITWISE)
- printf (" | ");
+ fprintf (outf, " | ");
else
- printf (" || ");
+ fprintf (outf, " || ");
break;
case XOR:
- printf (" ^ ");
+ fprintf (outf, " ^ ");
break;
case ASHIFT:
- printf (" << ");
+ fprintf (outf, " << ");
break;
case LSHIFTRT:
case ASHIFTRT:
- printf (" >> ");
+ fprintf (outf, " >> ");
break;
default:
gcc_unreachable ();
@@ -3409,9 +3427,9 @@ write_test_expr (rtx exp, unsigned int attrs_cached, int flags)
|| (GET_CODE (XEXP (exp, 1)) == NOT
&& GET_CODE (XEXP (XEXP (exp, 1), 0)) == EQ_ATTR)))
attrs_cached
- = write_test_expr (XEXP (exp, 1), attrs_cached, flags);
+ = write_test_expr (outf, XEXP (exp, 1), attrs_cached, flags);
else
- write_test_expr (XEXP (exp, 1), attrs_cached,
+ write_test_expr (outf, XEXP (exp, 1), attrs_cached,
flags | comparison_operator);
break;
@@ -3421,12 +3439,14 @@ write_test_expr (rtx exp, unsigned int attrs_cached, int flags)
{
if (XSTR (XEXP (exp, 0), 0) == alternative_name)
{
- printf ("which_alternative != %s", XSTR (XEXP (exp, 0), 1));
+ fprintf (outf, "which_alternative != %s",
+ XSTR (XEXP (exp, 0), 1));
break;
}
- printf ("! ");
- attrs_cached = write_test_expr (XEXP (exp, 0), attrs_cached, flags);
+ fprintf (outf, "! ");
+ attrs_cached =
+ write_test_expr (outf, XEXP (exp, 0), attrs_cached, flags);
break;
}
@@ -3438,22 +3458,22 @@ write_test_expr (rtx exp, unsigned int attrs_cached, int flags)
{
case NOT:
if (flags & FLG_BITWISE)
- printf ("~ ");
+ fprintf (outf, "~ ");
else
- printf ("! ");
+ fprintf (outf, "! ");
break;
case ABS:
- printf ("abs ");
+ fprintf (outf, "abs ");
break;
case NEG:
- printf ("-");
+ fprintf (outf, "-");
break;
default:
gcc_unreachable ();
}
flags &= ~(FLG_AFTER | FLG_INSIDE | FLG_OUTSIDE_AND);
- write_test_expr (XEXP (exp, 0), attrs_cached, flags);
+ write_test_expr (outf, XEXP (exp, 0), attrs_cached, flags);
break;
case EQ_ATTR_ALT:
@@ -3491,13 +3511,13 @@ write_test_expr (rtx exp, unsigned int attrs_cached, int flags)
if (!(set & 1))
bit++;
- printf ("which_alternative %s= %d",
- XINT (exp, 1) ? "!" : "=", bit);
+ fprintf (outf, "which_alternative %s= %d",
+ XINT (exp, 1) ? "!" : "=", bit);
}
else
{
- printf ("%s((1 << which_alternative) & %#x)",
- XINT (exp, 1) ? "!" : "", set);
+ fprintf (outf, "%s((1 << which_alternative) & %#x)",
+ XINT (exp, 1) ? "!" : "", set);
}
}
break;
@@ -3511,7 +3531,7 @@ write_test_expr (rtx exp, unsigned int attrs_cached, int flags)
if (XSTR (exp, 0) == alternative_name)
{
- printf ("which_alternative == %s", XSTR (exp, 1));
+ fprintf (outf, "which_alternative == %s", XSTR (exp, 1));
break;
}
@@ -3521,8 +3541,10 @@ write_test_expr (rtx exp, unsigned int attrs_cached, int flags)
/* Now is the time to expand the value of a constant attribute. */
if (attr->is_const)
{
- write_test_expr (evaluate_eq_attr (exp, attr,
- attr->default_val->value, -2, -2),
+ write_test_expr (outf,
+ evaluate_eq_attr (exp, attr,
+ attr->default_val->value,
+ -2, -2),
attrs_cached, 0);
}
else
@@ -3532,10 +3554,10 @@ write_test_expr (rtx exp, unsigned int attrs_cached, int flags)
if (attr->name == cached_attrs[i])
break;
if (i < cached_attr_count && (attrs_cached & (1U << i)) != 0)
- printf ("cached_%s", attr->name);
+ fprintf (outf, "cached_%s", attr->name);
else if (i < cached_attr_count && (attrs_to_cache & (1U << i)) != 0)
{
- printf ("(cached_%s = get_attr_%s (insn))",
+ fprintf (outf, "(cached_%s = get_attr_%s (insn))",
attr->name, attr->name);
if (flags & FLG_AFTER)
attrs_cached_after |= (1U << i);
@@ -3544,9 +3566,9 @@ write_test_expr (rtx exp, unsigned int attrs_cached, int flags)
attrs_cached |= (1U << i);
}
else
- printf ("get_attr_%s (insn)", attr->name);
- printf (" == ");
- write_attr_valueq (attr, XSTR (exp, 1));
+ fprintf (outf, "get_attr_%s (insn)", attr->name);
+ fprintf (outf, " == ");
+ write_attr_valueq (outf, attr, XSTR (exp, 1));
}
break;
@@ -3554,7 +3576,7 @@ write_test_expr (rtx exp, unsigned int attrs_cached, int flags)
case ATTR_FLAG:
if (flags & FLG_BITWISE)
fatal ("ATTR_FLAG not valid inside comparison");
- printf ("(flags & ATTR_FLAG_%s) != 0", XSTR (exp, 0));
+ fprintf (outf, "(flags & ATTR_FLAG_%s) != 0", XSTR (exp, 0));
break;
/* See if an operand matches a predicate. */
@@ -3566,34 +3588,35 @@ write_test_expr (rtx exp, unsigned int attrs_cached, int flags)
if (GET_MODE (exp) == VOIDmode)
fatal ("null MATCH_OPERAND specified as test");
else
- printf ("GET_MODE (operands[%d]) == %smode",
- XINT (exp, 0), GET_MODE_NAME (GET_MODE (exp)));
+ fprintf (outf, "GET_MODE (operands[%d]) == %smode",
+ XINT (exp, 0), GET_MODE_NAME (GET_MODE (exp)));
}
else
- printf ("%s (operands[%d], %smode)",
- XSTR (exp, 1), XINT (exp, 0), GET_MODE_NAME (GET_MODE (exp)));
+ fprintf (outf, "%s (operands[%d], %smode)",
+ XSTR (exp, 1), XINT (exp, 0), GET_MODE_NAME (GET_MODE (exp)));
break;
/* Constant integer. */
case CONST_INT:
- printf (HOST_WIDE_INT_PRINT_DEC, XWINT (exp, 0));
+ fprintf (outf, HOST_WIDE_INT_PRINT_DEC, XWINT (exp, 0));
break;
case MATCH_TEST:
- print_c_condition (XSTR (exp, 0));
+ fprint_c_condition (outf, XSTR (exp, 0));
if (flags & FLG_BITWISE)
- printf (" != 0");
+ fprintf (outf, " != 0");
break;
/* A random C expression. */
case SYMBOL_REF:
- print_c_condition (XSTR (exp, 0));
+ fprint_c_condition (outf, XSTR (exp, 0));
break;
/* The address of the branch target. */
case MATCH_DUP:
- printf ("INSN_ADDRESSES_SET_P () ? INSN_ADDRESSES (INSN_UID (GET_CODE (operands[%d]) == LABEL_REF ? XEXP (operands[%d], 0) : operands[%d])) : 0",
- XINT (exp, 0), XINT (exp, 0), XINT (exp, 0));
+ fprintf (outf,
+ "INSN_ADDRESSES_SET_P () ? INSN_ADDRESSES (INSN_UID (GET_CODE (operands[%d]) == LABEL_REF ? XEXP (operands[%d], 0) : operands[%d])) : 0",
+ XINT (exp, 0), XINT (exp, 0), XINT (exp, 0));
break;
case PC:
@@ -3602,19 +3625,19 @@ write_test_expr (rtx exp, unsigned int attrs_cached, int flags)
address of the next insn for forward branches, and both with
adjustments that account for the worst-case possible stretching of
intervening alignments between this insn and its destination. */
- printf ("insn_current_reference_address (insn)");
+ fprintf (outf, "insn_current_reference_address (insn)");
break;
case CONST_STRING:
- printf ("%s", XSTR (exp, 0));
+ fprintf (outf, "%s", XSTR (exp, 0));
break;
case IF_THEN_ELSE:
- write_test_expr (XEXP (exp, 0), attrs_cached, 0);
- printf (" ? ");
- write_test_expr (XEXP (exp, 1), attrs_cached, FLG_BITWISE);
- printf (" : ");
- write_test_expr (XEXP (exp, 2), attrs_cached, FLG_BITWISE);
+ write_test_expr (outf, XEXP (exp, 0), attrs_cached, 0);
+ fprintf (outf, " ? ");
+ write_test_expr (outf, XEXP (exp, 1), attrs_cached, FLG_BITWISE);
+ fprintf (outf, " : ");
+ write_test_expr (outf, XEXP (exp, 2), attrs_cached, FLG_BITWISE);
break;
default:
@@ -3622,7 +3645,7 @@ write_test_expr (rtx exp, unsigned int attrs_cached, int flags)
GET_RTX_NAME (code));
}
- printf (")");
+ fprintf (outf, ")");
return attrs_cached;
}
@@ -3826,7 +3849,7 @@ walk_attr_value (rtx exp)
/* Write out a function to obtain the attribute for a given INSN. */
static void
-write_attr_get (struct attr_desc *attr)
+write_attr_get (FILE *outf, struct attr_desc *attr)
{
struct attr_value *av, *common_av;
int i, j;
@@ -3838,37 +3861,37 @@ write_attr_get (struct attr_desc *attr)
/* Write out start of function, then all values with explicit `case' lines,
then a `default', then the value with the most uses. */
if (attr->enum_name)
- printf ("enum %s\n", attr->enum_name);
+ fprintf (outf, "enum %s\n", attr->enum_name);
else if (!attr->is_numeric)
- printf ("enum attr_%s\n", attr->name);
+ fprintf (outf, "enum attr_%s\n", attr->name);
else
- printf ("int\n");
+ fprintf (outf, "int\n");
/* If the attribute name starts with a star, the remainder is the name of
the subroutine to use, instead of `get_attr_...'. */
if (attr->name[0] == '*')
- printf ("%s (rtx insn ATTRIBUTE_UNUSED)\n", &attr->name[1]);
+ fprintf (outf, "%s (rtx insn ATTRIBUTE_UNUSED)\n", &attr->name[1]);
else if (attr->is_const == 0)
- printf ("get_attr_%s (rtx insn ATTRIBUTE_UNUSED)\n", attr->name);
+ fprintf (outf, "get_attr_%s (rtx insn ATTRIBUTE_UNUSED)\n", attr->name);
else
{
- printf ("get_attr_%s (void)\n", attr->name);
- printf ("{\n");
+ fprintf (outf, "get_attr_%s (void)\n", attr->name);
+ fprintf (outf, "{\n");
for (av = attr->first_value; av; av = av->next)
if (av->num_insns == 1)
- write_attr_set (attr, 2, av->value, "return", ";",
+ write_attr_set (outf, attr, 2, av->value, "return", ";",
true_rtx, av->first_insn->def->insn_code,
av->first_insn->def->insn_index, 0);
else if (av->num_insns != 0)
- write_attr_set (attr, 2, av->value, "return", ";",
+ write_attr_set (outf, attr, 2, av->value, "return", ";",
true_rtx, -2, 0, 0);
- printf ("}\n\n");
+ fprintf (outf, "}\n\n");
return;
}
- printf ("{\n");
+ fprintf (outf, "{\n");
/* Find attributes that are worth caching in the conditions. */
cached_attr_count = 0;
@@ -3889,27 +3912,27 @@ write_attr_get (struct attr_desc *attr)
cached_attr = find_attr (&name, 0);
gcc_assert (cached_attr && cached_attr->is_const == 0);
if (cached_attr->enum_name)
- printf (" enum %s", cached_attr->enum_name);
+ fprintf (outf, " enum %s", cached_attr->enum_name);
else if (!cached_attr->is_numeric)
- printf (" enum attr_%s", cached_attr->name);
+ fprintf (outf, " enum attr_%s", cached_attr->name);
else
- printf (" int");
- printf (" cached_%s ATTRIBUTE_UNUSED;\n", name);
+ fprintf (outf, " int");
+ fprintf (outf, " cached_%s ATTRIBUTE_UNUSED;\n", name);
j++;
}
cached_attr_count = j;
if (cached_attr_count)
- printf ("\n");
+ fprintf (outf, "\n");
- printf (" switch (recog_memoized (insn))\n");
- printf (" {\n");
+ fprintf (outf, " switch (recog_memoized (insn))\n");
+ fprintf (outf, " {\n");
for (av = attr->first_value; av; av = av->next)
if (av != common_av)
- write_attr_case (attr, av, 1, "return", ";", 4, true_rtx);
+ write_attr_case (outf, attr, av, 1, "return", ";", 4, true_rtx);
- write_attr_case (attr, common_av, 0, "return", ";", 4, true_rtx);
- printf (" }\n}\n\n");
+ write_attr_case (outf, attr, common_av, 0, "return", ";", 4, true_rtx);
+ fprintf (outf, " }\n}\n\n");
cached_attr_count = 0;
}
@@ -3947,7 +3970,7 @@ eliminate_known_true (rtx known_true, rtx exp, int insn_code, int insn_index)
and ";"). */
static void
-write_attr_set (struct attr_desc *attr, int indent, rtx value,
+write_attr_set (FILE *outf, struct attr_desc *attr, int indent, rtx value,
const char *prefix, const char *suffix, rtx known_true,
int insn_code, int insn_index, unsigned int attrs_cached)
{
@@ -4002,49 +4025,49 @@ write_attr_set (struct attr_desc *attr, int indent, rtx value,
attrs_cached_inside = attrs_cached;
attrs_cached_after = attrs_cached;
- write_indent (indent);
- printf ("%sif ", first_if ? "" : "else ");
+ write_indent (outf, indent);
+ fprintf (outf, "%sif ", first_if ? "" : "else ");
first_if = 0;
- write_test_expr (testexp, attrs_cached,
+ write_test_expr (outf, testexp, attrs_cached,
(FLG_AFTER | FLG_INSIDE | FLG_OUTSIDE_AND));
attrs_cached = attrs_cached_after;
- printf ("\n");
- write_indent (indent + 2);
- printf ("{\n");
+ fprintf (outf, "\n");
+ write_indent (outf, indent + 2);
+ fprintf (outf, "{\n");
- write_attr_set (attr, indent + 4,
+ write_attr_set (outf, attr, indent + 4,
XVECEXP (value, 0, i + 1), prefix, suffix,
inner_true, insn_code, insn_index,
attrs_cached_inside);
- write_indent (indent + 2);
- printf ("}\n");
+ write_indent (outf, indent + 2);
+ fprintf (outf, "}\n");
our_known_true = newexp;
}
if (! first_if)
{
- write_indent (indent);
- printf ("else\n");
- write_indent (indent + 2);
- printf ("{\n");
+ write_indent (outf, indent);
+ fprintf (outf, "else\n");
+ write_indent (outf, indent + 2);
+ fprintf (outf, "{\n");
}
- write_attr_set (attr, first_if ? indent : indent + 4, default_val,
+ write_attr_set (outf, attr, first_if ? indent : indent + 4, default_val,
prefix, suffix, our_known_true, insn_code, insn_index,
attrs_cached);
if (! first_if)
{
- write_indent (indent + 2);
- printf ("}\n");
+ write_indent (outf, indent + 2);
+ fprintf (outf, "}\n");
}
}
else
{
- write_indent (indent);
- printf ("%s ", prefix);
- write_attr_value (attr, value);
- printf ("%s\n", suffix);
+ write_indent (outf, indent);
+ fprintf (outf, "%s ", prefix);
+ write_attr_value (outf, attr, value);
+ fprintf (outf, "%s\n", suffix);
}
}
@@ -4052,25 +4075,25 @@ write_attr_set (struct attr_desc *attr, int indent, rtx value,
INDENT is the amount of indentation to write before each case. */
static void
-write_insn_cases (struct insn_ent *ie, int indent)
+write_insn_cases (FILE *outf, struct insn_ent *ie, int indent)
{
for (; ie != 0; ie = ie->next)
if (ie->def->insn_code != -1)
{
- write_indent (indent);
+ write_indent (outf, indent);
if (GET_CODE (ie->def->def) == DEFINE_PEEPHOLE)
- printf ("case %d: /* define_peephole, line %d */\n",
- ie->def->insn_code, ie->def->lineno);
+ fprintf (outf, "case %d: /* define_peephole, line %d */\n",
+ ie->def->insn_code, ie->def->lineno);
else
- printf ("case %d: /* %s */\n",
- ie->def->insn_code, XSTR (ie->def->def, 0));
+ fprintf (outf, "case %d: /* %s */\n",
+ ie->def->insn_code, XSTR (ie->def->def, 0));
}
}
/* Write out the computation for one attribute value. */
static void
-write_attr_case (struct attr_desc *attr, struct attr_value *av,
+write_attr_case (FILE *outf, struct attr_desc *attr, struct attr_value *av,
int write_case_lines, const char *prefix, const char *suffix,
int indent, rtx known_true)
{
@@ -4079,22 +4102,22 @@ write_attr_case (struct attr_desc *attr, struct attr_value *av,
if (av->has_asm_insn)
{
- write_indent (indent);
- printf ("case -1:\n");
- write_indent (indent + 2);
- printf ("if (GET_CODE (PATTERN (insn)) != ASM_INPUT\n");
- write_indent (indent + 2);
- printf (" && asm_noperands (PATTERN (insn)) < 0)\n");
- write_indent (indent + 2);
- printf (" fatal_insn_not_found (insn);\n");
+ write_indent (outf, indent);
+ fprintf (outf, "case -1:\n");
+ write_indent (outf, indent + 2);
+ fprintf (outf, "if (GET_CODE (PATTERN (insn)) != ASM_INPUT\n");
+ write_indent (outf, indent + 2);
+ fprintf (outf, " && asm_noperands (PATTERN (insn)) < 0)\n");
+ write_indent (outf, indent + 2);
+ fprintf (outf, " fatal_insn_not_found (insn);\n");
}
if (write_case_lines)
- write_insn_cases (av->first_insn, indent);
+ write_insn_cases (outf, av->first_insn, indent);
else
{
- write_indent (indent);
- printf ("default:\n");
+ write_indent (outf, indent);
+ fprintf (outf, "default:\n");
}
/* See what we have to do to output this value. */
@@ -4103,85 +4126,85 @@ write_attr_case (struct attr_desc *attr, struct attr_value *av,
if (must_constrain)
{
- write_indent (indent + 2);
- printf ("extract_constrain_insn_cached (insn);\n");
+ write_indent (outf, indent + 2);
+ fprintf (outf, "extract_constrain_insn_cached (insn);\n");
}
else if (must_extract)
{
- write_indent (indent + 2);
- printf ("extract_insn_cached (insn);\n");
+ write_indent (outf, indent + 2);
+ fprintf (outf, "extract_insn_cached (insn);\n");
}
attrs_to_cache = 0;
if (av->num_insns == 1)
- write_attr_set (attr, indent + 2, av->value, prefix, suffix,
+ write_attr_set (outf, attr, indent + 2, av->value, prefix, suffix,
known_true, av->first_insn->def->insn_code,
av->first_insn->def->insn_index, 0);
else
- write_attr_set (attr, indent + 2, av->value, prefix, suffix,
+ write_attr_set (outf, attr, indent + 2, av->value, prefix, suffix,
known_true, -2, 0, 0);
if (strncmp (prefix, "return", 6))
{
- write_indent (indent + 2);
- printf ("break;\n");
+ write_indent (outf, indent + 2);
+ fprintf (outf, "break;\n");
}
- printf ("\n");
+ fprintf (outf, "\n");
}
/* Utilities to write in various forms. */
static void
-write_attr_valueq (struct attr_desc *attr, const char *s)
+write_attr_valueq (FILE *outf, struct attr_desc *attr, const char *s)
{
if (attr->is_numeric)
{
int num = atoi (s);
- printf ("%d", num);
+ fprintf (outf, "%d", num);
if (num > 9 || num < 0)
- printf (" /* %#x */", num);
+ fprintf (outf, " /* %#x */", num);
}
else
{
- write_upcase (attr->enum_name ? attr->enum_name : attr->name);
- printf ("_");
- write_upcase (s);
+ write_upcase (outf, attr->enum_name ? attr->enum_name : attr->name);
+ fprintf (outf, "_");
+ write_upcase (outf, s);
}
}
static void
-write_attr_value (struct attr_desc *attr, rtx value)
+write_attr_value (FILE *outf, struct attr_desc *attr, rtx value)
{
int op;
switch (GET_CODE (value))
{
case CONST_STRING:
- write_attr_valueq (attr, XSTR (value, 0));
+ write_attr_valueq (outf, attr, XSTR (value, 0));
break;
case CONST_INT:
- printf (HOST_WIDE_INT_PRINT_DEC, INTVAL (value));
+ fprintf (outf, HOST_WIDE_INT_PRINT_DEC, INTVAL (value));
break;
case SYMBOL_REF:
- print_c_condition (XSTR (value, 0));
+ fprint_c_condition (outf, XSTR (value, 0));
break;
case ATTR:
{
struct attr_desc *attr2 = find_attr (&XSTR (value, 0), 0);
if (attr->enum_name)
- printf ("(enum %s)", attr->enum_name);
+ fprintf (outf, "(enum %s)", attr->enum_name);
else if (!attr->is_numeric)
- printf ("(enum attr_%s)", attr->name);
+ fprintf (outf, "(enum attr_%s)", attr->name);
else if (!attr2->is_numeric)
- printf ("(int)");
+ fprintf (outf, "(int)");
- printf ("get_attr_%s (%s)", attr2->name,
- (attr2->is_const ? "" : "insn"));
+ fprintf (outf, "get_attr_%s (%s)", attr2->name,
+ (attr2->is_const ? "" : "insn"));
}
break;
@@ -4202,11 +4225,11 @@ write_attr_value (struct attr_desc *attr, rtx value)
goto do_operator;
do_operator:
- write_attr_value (attr, XEXP (value, 0));
- putchar (' ');
- putchar (op);
- putchar (' ');
- write_attr_value (attr, XEXP (value, 1));
+ write_attr_value (outf, attr, XEXP (value, 0));
+ fputc (' ', outf);
+ fputc (op, outf);
+ fputc (' ', outf);
+ write_attr_value (outf, attr, XEXP (value, 1));
break;
default:
@@ -4215,24 +4238,24 @@ write_attr_value (struct attr_desc *attr, rtx value)
}
static void
-write_upcase (const char *str)
+write_upcase (FILE *outf, const char *str)
{
while (*str)
{
/* The argument of TOUPPER should not have side effects. */
- putchar (TOUPPER(*str));
+ fputc (TOUPPER(*str), outf);
str++;
}
}
static void
-write_indent (int indent)
+write_indent (FILE *outf, int indent)
{
for (; indent > 8; indent -= 8)
- printf ("\t");
+ fprintf (outf, "\t");
for (; indent; indent--)
- printf (" ");
+ fprintf (outf, " ");
}
/* Write a subroutine that is given an insn that requires a delay slot, a
@@ -4248,7 +4271,7 @@ write_indent (int indent)
or "annul_false"). */
static void
-write_eligible_delay (const char *kind)
+write_eligible_delay (FILE *outf, const char *kind)
{
struct delay_desc *delay;
int max_slots;
@@ -4268,19 +4291,20 @@ write_eligible_delay (const char *kind)
/* Write function prelude. */
- printf ("int\n");
- printf ("eligible_for_%s (rtx delay_insn ATTRIBUTE_UNUSED, int slot, rtx candidate_insn, int flags ATTRIBUTE_UNUSED)\n",
- kind);
- printf ("{\n");
- printf (" rtx insn;\n");
- printf ("\n");
- printf (" gcc_assert (slot < %d);\n", max_slots);
- printf ("\n");
+ fprintf (outf, "int\n");
+ fprintf (outf, "eligible_for_%s (rtx delay_insn ATTRIBUTE_UNUSED, int slot, \n"
+ " rtx candidate_insn, int flags ATTRIBUTE_UNUSED)\n",
+ kind);
+ fprintf (outf, "{\n");
+ fprintf (outf, " rtx insn;\n");
+ fprintf (outf, "\n");
+ fprintf (outf, " gcc_assert (slot < %d);\n", max_slots);
+ fprintf (outf, "\n");
/* Allow dbr_schedule to pass labels, etc. This can happen if try_split
converts a compound instruction into a loop. */
- printf (" if (!INSN_P (candidate_insn))\n");
- printf (" return 0;\n");
- printf ("\n");
+ fprintf (outf, " if (!INSN_P (candidate_insn))\n");
+ fprintf (outf, " return 0;\n");
+ fprintf (outf, "\n");
/* If more than one delay type, find out which type the delay insn is. */
@@ -4290,28 +4314,28 @@ write_eligible_delay (const char *kind)
gcc_assert (attr);
common_av = find_most_used (attr);
- printf (" insn = delay_insn;\n");
- printf (" switch (recog_memoized (insn))\n");
- printf (" {\n");
+ fprintf (outf, " insn = delay_insn;\n");
+ fprintf (outf, " switch (recog_memoized (insn))\n");
+ fprintf (outf, " {\n");
sprintf (str, " * %d;\n break;", max_slots);
for (av = attr->first_value; av; av = av->next)
if (av != common_av)
- write_attr_case (attr, av, 1, "slot +=", str, 4, true_rtx);
+ write_attr_case (outf, attr, av, 1, "slot +=", str, 4, true_rtx);
- write_attr_case (attr, common_av, 0, "slot +=", str, 4, true_rtx);
- printf (" }\n\n");
+ write_attr_case (outf, attr, common_av, 0, "slot +=", str, 4, true_rtx);
+ fprintf (outf, " }\n\n");
/* Ensure matched. Otherwise, shouldn't have been called. */
- printf (" gcc_assert (slot >= %d);\n\n", max_slots);
+ fprintf (outf, " gcc_assert (slot >= %d);\n\n", max_slots);
}
/* If just one type of delay slot, write simple switch. */
if (num_delays == 1 && max_slots == 1)
{
- printf (" insn = candidate_insn;\n");
- printf (" switch (recog_memoized (insn))\n");
- printf (" {\n");
+ fprintf (outf, " insn = candidate_insn;\n");
+ fprintf (outf, " switch (recog_memoized (insn))\n");
+ fprintf (outf, " {\n");
attr = find_attr (&delay_1_0_str, 0);
gcc_assert (attr);
@@ -4319,27 +4343,27 @@ write_eligible_delay (const char *kind)
for (av = attr->first_value; av; av = av->next)
if (av != common_av)
- write_attr_case (attr, av, 1, "return", ";", 4, true_rtx);
+ write_attr_case (outf, attr, av, 1, "return", ";", 4, true_rtx);
- write_attr_case (attr, common_av, 0, "return", ";", 4, true_rtx);
- printf (" }\n");
+ write_attr_case (outf, attr, common_av, 0, "return", ";", 4, true_rtx);
+ fprintf (outf, " }\n");
}
else
{
/* Write a nested CASE. The first indicates which condition we need to
test, and the inner CASE tests the condition. */
- printf (" insn = candidate_insn;\n");
- printf (" switch (slot)\n");
- printf (" {\n");
+ fprintf (outf, " insn = candidate_insn;\n");
+ fprintf (outf, " switch (slot)\n");
+ fprintf (outf, " {\n");
for (delay = delays; delay; delay = delay->next)
for (i = 0; i < XVECLEN (delay->def, 1); i += 3)
{
- printf (" case %d:\n",
- (i / 3) + (num_delays == 1 ? 0 : delay->num * max_slots));
- printf (" switch (recog_memoized (insn))\n");
- printf ("\t{\n");
+ fprintf (outf, " case %d:\n",
+ (i / 3) + (num_delays == 1 ? 0 : delay->num * max_slots));
+ fprintf (outf, " switch (recog_memoized (insn))\n");
+ fprintf (outf, "\t{\n");
sprintf (str, "*%s_%d_%d", kind, delay->num, i / 3);
pstr = str;
@@ -4349,18 +4373,18 @@ write_eligible_delay (const char *kind)
for (av = attr->first_value; av; av = av->next)
if (av != common_av)
- write_attr_case (attr, av, 1, "return", ";", 8, true_rtx);
+ write_attr_case (outf, attr, av, 1, "return", ";", 8, true_rtx);
- write_attr_case (attr, common_av, 0, "return", ";", 8, true_rtx);
- printf (" }\n");
+ write_attr_case (outf, attr, common_av, 0, "return", ";", 8, true_rtx);
+ fprintf (outf, " }\n");
}
- printf (" default:\n");
- printf (" gcc_unreachable ();\n");
- printf (" }\n");
+ fprintf (outf, " default:\n");
+ fprintf (outf, " gcc_unreachable ();\n");
+ fprintf (outf, " }\n");
}
- printf ("}\n\n");
+ fprintf (outf, "}\n\n");
}
/* This page contains miscellaneous utility routines. */
@@ -4499,29 +4523,29 @@ copy_rtx_unchanging (rtx orig)
number of delay slots is not a function of the length of the insn. */
static void
-write_const_num_delay_slots (void)
+write_const_num_delay_slots (FILE *outf)
{
struct attr_desc *attr = find_attr (&num_delay_slots_str, 0);
struct attr_value *av;
if (attr)
{
- printf ("int\nconst_num_delay_slots (rtx insn)\n");
- printf ("{\n");
- printf (" switch (recog_memoized (insn))\n");
- printf (" {\n");
+ fprintf (outf, "int\nconst_num_delay_slots (rtx insn)\n");
+ fprintf (outf, "{\n");
+ fprintf (outf, " switch (recog_memoized (insn))\n");
+ fprintf (outf, " {\n");
for (av = attr->first_value; av; av = av->next)
{
length_used = 0;
walk_attr_value (av->value);
if (length_used)
- write_insn_cases (av->first_insn, 4);
+ write_insn_cases (outf, av->first_insn, 4);
}
- printf (" default:\n");
- printf (" return 1;\n");
- printf (" }\n}\n\n");
+ fprintf (outf, " default:\n");
+ fprintf (outf, " return 1;\n");
+ fprintf (outf, " }\n}\n\n");
}
}
@@ -4697,7 +4721,10 @@ find_tune_attr (rtx exp)
}
}
-/* Create all of the attributes that describe automaton properties. */
+/* Create all of the attributes that describe automaton properties.
+ Write the DFA and latency function prototypes to the files that
+ need to have them, and write the init_sched_attrs(). */
+
static void
make_automaton_attrs (void)
{
@@ -4719,23 +4746,49 @@ make_automaton_attrs (void)
gcc_assert (tune_attr->is_const
&& !tune_attr->is_special
&& !tune_attr->is_numeric);
+
+ /* Write the prototypes for all DFA functions. */
+ for (val = tune_attr->first_value; val; val = val->next)
+ {
+ if (val == tune_attr->default_val)
+ continue;
+ gcc_assert (GET_CODE (val->value) == CONST_STRING);
+ fprintf (dfa_file,
+ "extern int internal_dfa_insn_code_%s (rtx);\n",
+ XSTR (val->value, 0));
+ }
+ fprintf (dfa_file, "\n");
+
+ /* Write the prototypes for all latency functions. */
for (val = tune_attr->first_value; val; val = val->next)
{
if (val == tune_attr->default_val)
continue;
gcc_assert (GET_CODE (val->value) == CONST_STRING);
- printf ("static int internal_dfa_insn_code_%s (rtx);\n"
- "static int insn_default_latency_%s (rtx);\n",
- XSTR (val->value, 0), XSTR (val->value, 0));
+ fprintf (latency_file,
+ "extern int insn_default_latency_%s (rtx);\n",
+ XSTR (val->value, 0));
}
+ fprintf (latency_file, "\n");
- printf ("\n");
- printf ("int (*internal_dfa_insn_code) (rtx);\n");
- printf ("int (*insn_default_latency) (rtx);\n");
- printf ("\n");
- printf ("void\n");
- printf ("init_sched_attrs (void)\n");
- printf ("{\n");
+ /* Write the prototypes for all automaton functions. */
+ for (val = tune_attr->first_value; val; val = val->next)
+ {
+ if (val == tune_attr->default_val)
+ continue;
+ gcc_assert (GET_CODE (val->value) == CONST_STRING);
+ fprintf (attr_file,
+ "extern int internal_dfa_insn_code_%s (rtx);\n"
+ "extern int insn_default_latency_%s (rtx);\n",
+ XSTR (val->value, 0), XSTR (val->value, 0));
+ }
+ fprintf (attr_file, "\n");
+ fprintf (attr_file, "int (*internal_dfa_insn_code) (rtx);\n");
+ fprintf (attr_file, "int (*insn_default_latency) (rtx);\n");
+ fprintf (attr_file, "\n");
+ fprintf (attr_file, "void\n");
+ fprintf (attr_file, "init_sched_attrs (void)\n");
+ fprintf (attr_file, "{\n");
for (val = tune_attr->first_value; val; val = val->next)
{
@@ -4804,27 +4857,27 @@ make_automaton_attrs (void)
if (first)
{
- printf (" if (");
+ fprintf (attr_file, " if (");
first = false;
}
else
- printf (" else if (");
- write_test_expr (test, 0, 0);
- printf (")\n");
- printf (" {\n");
- printf (" internal_dfa_insn_code\n");
- printf (" = internal_dfa_insn_code_%s;\n",
- XSTR (val->value, 0));
- printf (" insn_default_latency\n");
- printf (" = insn_default_latency_%s;\n",
- XSTR (val->value, 0));
- printf (" }\n");
+ fprintf (attr_file, " else if (");
+ write_test_expr (attr_file, test, 0, 0);
+ fprintf (attr_file, ")\n");
+ fprintf (attr_file, " {\n");
+ fprintf (attr_file, " internal_dfa_insn_code\n");
+ fprintf (attr_file, " = internal_dfa_insn_code_%s;\n",
+ XSTR (val->value, 0));
+ fprintf (attr_file, " insn_default_latency\n");
+ fprintf (attr_file, " = insn_default_latency_%s;\n",
+ XSTR (val->value, 0));
+ fprintf (attr_file, " }\n");
}
- printf (" else\n");
- printf (" gcc_unreachable ();\n");
- printf ("}\n");
- printf ("\n");
+ fprintf (attr_file, " else\n");
+ fprintf (attr_file, " gcc_unreachable ();\n");
+ fprintf (attr_file, "}\n");
+ fprintf (attr_file, "\n");
XDELETEVEC (condexps);
}
@@ -4874,7 +4927,62 @@ make_automaton_attrs (void)
}
}
- make_internal_attr ("*bypass_p", byps_exp, ATTR_NONE);
+ make_internal_attr ("*bypass_p", byps_exp, ATTR_NONE);
+}
+
+static void
+write_header (FILE *outf)
+{
+ fprintf (outf, "/* Generated automatically by the program `genattrtab'\n"
+ " from the machine description file `md'. */\n\n");
+
+ fprintf (outf, "#include \"config.h\"\n");
+ fprintf (outf, "#include \"system.h\"\n");
+ fprintf (outf, "#include \"coretypes.h\"\n");
+ fprintf (outf, "#include \"tm.h\"\n");
+ fprintf (outf, "#include \"rtl.h\"\n");
+ fprintf (outf, "#include \"insn-attr.h\"\n");
+ fprintf (outf, "#include \"tm_p.h\"\n");
+ fprintf (outf, "#include \"insn-config.h\"\n");
+ fprintf (outf, "#include \"recog.h\"\n");
+ fprintf (outf, "#include \"regs.h\"\n");
+ fprintf (outf, "#include \"real.h\"\n");
+ fprintf (outf, "#include \"output.h\"\n");
+ fprintf (outf, "#include \"toplev.h\"\n");
+ fprintf (outf, "#include \"flags.h\"\n");
+ fprintf (outf, "#include \"function.h\"\n");
+ fprintf (outf, "\n");
+ fprintf (outf, "#define operands recog_data.operand\n\n");
+}
+
+static FILE *
+open_outfile (const char *file_name)
+{
+ FILE *outf;
+ outf = fopen (file_name, "w");
+ if (! outf)
+ fatal ("cannot open file %s: %s", file_name, xstrerror (errno));
+ write_header (outf);
+ return outf;
+}
+
+static bool
+handle_arg (const char *arg)
+{
+ switch (arg[1])
+ {
+ case 'A':
+ attr_file_name = &arg[2];
+ return true;
+ case 'D':
+ dfa_file_name = &arg[2];
+ return true;
+ case 'L':
+ latency_file_name = &arg[2];
+ return true;
+ default:
+ return false;
+ }
}
int
@@ -4888,8 +4996,12 @@ main (int argc, char **argv)
progname = "genattrtab";
- if (!init_rtx_reader_args (argc, argv))
- return (FATAL_EXIT_CODE);
+ if (!init_rtx_reader_args_cb (argc, argv, handle_arg))
+ return FATAL_EXIT_CODE;
+
+ attr_file = open_outfile (attr_file_name);
+ dfa_file = open_outfile (dfa_file_name);
+ latency_file = open_outfile (latency_file_name);
obstack_init (hash_obstack);
obstack_init (temp_obstack);
@@ -4908,9 +5020,6 @@ main (int argc, char **argv)
delay_1_0_str = DEF_ATTR_STRING ("*delay_1_0");
num_delay_slots_str = DEF_ATTR_STRING ("*num_delay_slots");
- printf ("/* Generated automatically by the program `genattrtab'\n\
-from the machine description file `md'. */\n\n");
-
/* Read the machine description. */
while (1)
@@ -4970,23 +5079,6 @@ from the machine description file `md'. */\n\n");
if (num_delays)
expand_delays ();
- printf ("#include \"config.h\"\n");
- printf ("#include \"system.h\"\n");
- printf ("#include \"coretypes.h\"\n");
- printf ("#include \"tm.h\"\n");
- printf ("#include \"rtl.h\"\n");
- printf ("#include \"insn-attr.h\"\n");
- printf ("#include \"tm_p.h\"\n");
- printf ("#include \"insn-config.h\"\n");
- printf ("#include \"recog.h\"\n");
- printf ("#include \"regs.h\"\n");
- printf ("#include \"output.h\"\n");
- printf ("#include \"diagnostic-core.h\"\n");
- printf ("#include \"flags.h\"\n");
- printf ("#include \"function.h\"\n");
- printf ("\n");
- printf ("#define operands recog_data.operand\n\n");
-
/* Make `insn_alternatives'. */
insn_alternatives = oballocvec (int, insn_code_number);
for (id = defs; id; id = id->next)
@@ -5031,8 +5123,19 @@ from the machine description file `md'. */\n\n");
for (i = 0; i < MAX_ATTRS_INDEX; i++)
for (attr = attrs[i]; attr; attr = attr->next)
{
+ FILE *outf;
+
+#define IS_ATTR_GROUP(X) (!strncmp(attr->name,X,strlen(X)))
+ if (IS_ATTR_GROUP ("*internal_dfa_insn_code"))
+ outf = dfa_file;
+ else if (IS_ATTR_GROUP ("*insn_default_latency"))
+ outf = latency_file;
+ else
+ outf = attr_file;
+#undef IS_ATTR_GROUP
+
if (! attr->is_special && ! attr->is_const)
- write_attr_get (attr);
+ write_attr_get (outf, attr);
}
/* Write out delay eligibility information, if DEFINE_DELAY present.
@@ -5040,18 +5143,25 @@ from the machine description file `md'. */\n\n");
below.) */
if (num_delays)
{
- write_eligible_delay ("delay");
+ write_eligible_delay (attr_file, "delay");
if (have_annul_true)
- write_eligible_delay ("annul_true");
+ write_eligible_delay (attr_file, "annul_true");
if (have_annul_false)
- write_eligible_delay ("annul_false");
+ write_eligible_delay (attr_file, "annul_false");
}
/* Write out constant delay slot info. */
- write_const_num_delay_slots ();
+ write_const_num_delay_slots (attr_file);
- write_length_unit_log ();
+ write_length_unit_log (attr_file);
- fflush (stdout);
- return (ferror (stdout) != 0 ? FATAL_EXIT_CODE : SUCCESS_EXIT_CODE);
+ if (fclose (attr_file) != 0)
+ fatal ("cannot close file %s: %s", attr_file_name, xstrerror (errno));
+ if (fclose (dfa_file) != 0)
+ fatal ("cannot close file %s: %s", dfa_file_name, xstrerror (errno));
+ if (fclose (latency_file) != 0)
+ fatal ("cannot close file %s: %s", latency_file_name, xstrerror (errno));
+
+ return SUCCESS_EXIT_CODE;
}
+
diff --git a/gcc/gengtype.c b/gcc/gengtype.c
index c0101b08ef9..d95ef09b5cc 100644
--- a/gcc/gengtype.c
+++ b/gcc/gengtype.c
@@ -3291,7 +3291,7 @@ write_types (outf_p output_header, type_p structures, type_p param_structs,
if (stru->u.s.line.file == NULL)
{
fprintf (stderr, "warning: structure `%s' used but not defined\n",
- s->u.s.tag);
+ stru->u.s.tag);
continue;
}
}
@@ -3527,7 +3527,7 @@ write_local (outf_p output_header, type_p structures, type_p param_structs)
if (stru->u.s.line.file == NULL)
{
fprintf (stderr, "warning: structure `%s' used but not defined\n",
- s->u.s.tag);
+ stru->u.s.tag);
continue;
}
diff --git a/gcc/genmultilib b/gcc/genmultilib
index 270de2b2a2c..dc4751b0439 100644
--- a/gcc/genmultilib
+++ b/gcc/genmultilib
@@ -1,6 +1,6 @@
#!/bin/sh
# Generates multilib.h.
-# Copyright (C) 1994, 1995, 1996, 1997, 1999, 2002, 2007
+# Copyright (C) 1994, 1995, 1996, 1997, 1999, 2002, 2007, 2012
# Free Software Foundation, Inc.
#This file is part of GCC.
@@ -73,6 +73,17 @@
# the os directory names are used exclusively. Use the mapping when
# there is no one-to-one equivalence between GCC levels and the OS.
+# The optional eighth argument which intends to reduce the effort to write
+# so many MULTILIB_EXCEPTIONS rules. This option defines a series of option
+# combinations that we actually required.
+# For some cases, the generated option combinations are far more than what
+# we need, we have to write a lot of rules to screen out combinations we
+# don't need. If we missed some rules, the unexpected libraries will be built.
+# Now with this argument, one can simply give what combinations are needed.
+# It is pretty straigtforward.
+# This argument can be used together with MULTILIB_EXCEPTIONS and will take
+# effect after the MULTILIB_EXCEPTIONS.
+
# The last option should be "yes" if multilibs are enabled. If it is not
# "yes", all GCC multilib dir names will be ".".
@@ -93,7 +104,7 @@
# genmultilib 'm64/m32 mno-app-regs|mcmodel=medany' '64 32 alt'
# 'mcmodel?medany=mcmodel?medmid' 'm32/mno-app-regs* m32/mcmodel=*'
# '' 'm32/!m64/mno-app-regs m32/!m64/mcmodel=medany'
-# '../lib64 ../lib32 alt' yes
+# '../lib64 ../lib32 alt' '' yes
# This produces:
# ". !m64 !m32 !mno-app-regs !mcmodel=medany;",
# "64:../lib64 m64 !m32 !mno-app-regs !mcmodel=medany;",
@@ -121,7 +132,8 @@ exceptions=$4
extra=$5
exclusions=$6
osdirnames=$7
-enable_multilib=$8
+multilib_required=$8
+enable_multilib=$9
echo "static const char *const multilib_raw[] = {"
@@ -195,6 +207,33 @@ EOF
combinations=`./tmpmultilib2 ${combinations}`
fi
+# If the MULTILIB_REQUIRED list are provided,
+# filter out combinations not in this list.
+if [ -n "${multilib_required}" ]; then
+ cat >tmpmultilib2 <<\EOF
+#!/bin/sh
+# This recursive script weeds out any combination of multilib
+# switches that not in the expected list.
+
+ for opt in $@; do
+ case "$opt" in
+EOF
+
+ for expect in ${multilib_required}; do
+ echo " /${expect}/) echo \${opt};;" >> tmpmultilib2
+ done
+
+cat >>tmpmultilib2 <<\EOF
+ *) ;;
+ esac
+ done
+EOF
+
+ chmod +x tmpmultilib2
+ combinations=`./tmpmultilib2 ${combinations}`
+
+fi
+
# Construct a sed pattern which will convert option names to directory
# names.
todirnames=
diff --git a/gcc/gimple-fold.c b/gcc/gimple-fold.c
index 49505ee9494..55e7344043c 100644
--- a/gcc/gimple-fold.c
+++ b/gcc/gimple-fold.c
@@ -549,9 +549,8 @@ gimplify_and_update_call_from_tree (gimple_stmt_iterator *si_p, tree expr)
tree lhs;
gimple stmt, new_stmt;
gimple_stmt_iterator i;
- gimple_seq stmts = gimple_seq_alloc();
+ gimple_seq stmts = NULL;
struct gimplify_ctx gctx;
- gimple last;
gimple laststore;
tree reaching_vuse;
@@ -620,17 +619,9 @@ gimplify_and_update_call_from_tree (gimple_stmt_iterator *si_p, tree expr)
/* Second iterate over the statements forward, assigning virtual
operands to their uses. */
- last = NULL;
reaching_vuse = gimple_vuse (stmt);
for (i = gsi_start (stmts); !gsi_end_p (i); gsi_next (&i))
{
- /* Do not insert the last stmt in this loop but remember it
- for replacing the original statement. */
- if (last)
- {
- gsi_insert_before (si_p, last, GSI_NEW_STMT);
- gsi_next (si_p);
- }
new_stmt = gsi_stmt (i);
/* The replacement can expose previously unreferenced variables. */
if (gimple_in_ssa_p (cfun))
@@ -642,7 +633,6 @@ gimplify_and_update_call_from_tree (gimple_stmt_iterator *si_p, tree expr)
gimple_set_modified (new_stmt, true);
if (gimple_vdef (new_stmt))
reaching_vuse = gimple_vdef (new_stmt);
- last = new_stmt;
}
/* If the new sequence does not do a store release the virtual
@@ -659,8 +649,8 @@ gimplify_and_update_call_from_tree (gimple_stmt_iterator *si_p, tree expr)
}
}
- /* Finally replace rhe original statement with the last. */
- gsi_replace (si_p, last, false);
+ /* Finally replace the original statement with the sequence. */
+ gsi_replace_with_seq (si_p, stmts, false);
}
/* Return the string length, maximum string length or maximum value of
@@ -2755,7 +2745,7 @@ fold_array_ctor_reference (tree type, tree ctor,
double_int low_bound, elt_size;
double_int index, max_index;
double_int access_index;
- tree domain_type = NULL_TREE;
+ tree domain_type = NULL_TREE, index_type = NULL_TREE;
HOST_WIDE_INT inner_offset;
/* Compute low bound and elt size. */
@@ -2765,6 +2755,7 @@ fold_array_ctor_reference (tree type, tree ctor,
{
/* Static constructors for variably sized objects makes no sense. */
gcc_assert (TREE_CODE (TYPE_MIN_VALUE (domain_type)) == INTEGER_CST);
+ index_type = TREE_TYPE (TYPE_MIN_VALUE (domain_type));
low_bound = tree_to_double_int (TYPE_MIN_VALUE (domain_type));
}
else
@@ -2788,6 +2779,10 @@ fold_array_ctor_reference (tree type, tree ctor,
access_index = double_int_udiv (uhwi_to_double_int (offset / BITS_PER_UNIT),
elt_size, TRUNC_DIV_EXPR);
access_index = double_int_add (access_index, low_bound);
+ if (index_type)
+ access_index = double_int_ext (access_index,
+ TYPE_PRECISION (index_type),
+ TYPE_UNSIGNED (index_type));
/* And offset within the access. */
inner_offset = offset % (double_int_to_uhwi (elt_size) * BITS_PER_UNIT);
@@ -2798,6 +2793,11 @@ fold_array_ctor_reference (tree type, tree ctor,
return NULL_TREE;
index = double_int_sub (low_bound, double_int_one);
+ if (index_type)
+ index = double_int_ext (index,
+ TYPE_PRECISION (index_type),
+ TYPE_UNSIGNED (index_type));
+
FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (ctor), cnt, cfield, cval)
{
/* Array constructor might explicitely set index, or specify range
@@ -2815,7 +2815,14 @@ fold_array_ctor_reference (tree type, tree ctor,
}
}
else
- max_index = index = double_int_add (index, double_int_one);
+ {
+ index = double_int_add (index, double_int_one);
+ if (index_type)
+ index = double_int_ext (index,
+ TYPE_PRECISION (index_type),
+ TYPE_UNSIGNED (index_type));
+ max_index = index;
+ }
/* Do we have match? */
if (double_int_cmp (access_index, index, 1) >= 0
@@ -2970,18 +2977,23 @@ fold_const_aggregate_ref_1 (tree t, tree (*valueize) (tree))
if (TREE_CODE (TREE_OPERAND (t, 1)) == SSA_NAME
&& valueize
&& (idx = (*valueize) (TREE_OPERAND (t, 1)))
- && host_integerp (idx, 0))
+ && TREE_CODE (idx) == INTEGER_CST)
{
tree low_bound, unit_size;
+ double_int doffset;
/* If the resulting bit-offset is constant, track it. */
if ((low_bound = array_ref_low_bound (t),
- host_integerp (low_bound, 0))
+ TREE_CODE (low_bound) == INTEGER_CST)
&& (unit_size = array_ref_element_size (t),
- host_integerp (unit_size, 1)))
+ host_integerp (unit_size, 1))
+ && (doffset = double_int_sext
+ (double_int_sub (TREE_INT_CST (idx),
+ TREE_INT_CST (low_bound)),
+ TYPE_PRECISION (TREE_TYPE (idx))),
+ double_int_fits_in_shwi_p (doffset)))
{
- offset = TREE_INT_CST_LOW (idx);
- offset -= TREE_INT_CST_LOW (low_bound);
+ offset = double_int_to_shwi (doffset);
offset *= TREE_INT_CST_LOW (unit_size);
offset *= BITS_PER_UNIT;
diff --git a/gcc/gimple-iterator.c b/gcc/gimple-iterator.c
index e387c16e31a..0f1d4975961 100644
--- a/gcc/gimple-iterator.c
+++ b/gcc/gimple-iterator.c
@@ -57,12 +57,17 @@ update_modified_stmts (gimple_seq seq)
starting at FIRST and LAST. */
static void
-update_bb_for_stmts (gimple_seq_node first, basic_block bb)
+update_bb_for_stmts (gimple_seq_node first, gimple_seq_node last,
+ basic_block bb)
{
gimple_seq_node n;
- for (n = first; n; n = n->next)
- gimple_set_bb (n->stmt, bb);
+ for (n = first; n; n = n->gsbase.next)
+ {
+ gimple_set_bb (n, bb);
+ if (n == last)
+ break;
+ }
}
/* Set the frequencies for the cgraph_edges for each of the calls
@@ -75,8 +80,8 @@ update_call_edge_frequencies (gimple_seq_node first, basic_block bb)
int bb_freq = 0;
gimple_seq_node n;
- for (n = first; n ; n = n->next)
- if (is_gimple_call (n->stmt))
+ for (n = first; n ; n = n->gsbase.next)
+ if (is_gimple_call (n))
{
struct cgraph_edge *e;
@@ -89,7 +94,7 @@ update_call_edge_frequencies (gimple_seq_node first, basic_block bb)
(current_function_decl, bb));
}
- e = cgraph_edge (cfun_node, n->stmt);
+ e = cgraph_edge (cfun_node, n);
if (e != NULL)
e->frequency = bb_freq;
}
@@ -113,32 +118,37 @@ gsi_insert_seq_nodes_before (gimple_stmt_iterator *i,
basic_block bb;
gimple_seq_node cur = i->ptr;
+ gcc_assert (!cur || cur->gsbase.prev);
+
if ((bb = gsi_bb (*i)) != NULL)
- update_bb_for_stmts (first, bb);
+ update_bb_for_stmts (first, last, bb);
/* Link SEQ before CUR in the sequence. */
if (cur)
{
- first->prev = cur->prev;
- if (first->prev)
- first->prev->next = first;
+ first->gsbase.prev = cur->gsbase.prev;
+ if (first->gsbase.prev->gsbase.next)
+ first->gsbase.prev->gsbase.next = first;
else
gimple_seq_set_first (i->seq, first);
- last->next = cur;
- cur->prev = last;
+ last->gsbase.next = cur;
+ cur->gsbase.prev = last;
}
else
{
- gimple_seq_node itlast = gimple_seq_last (i->seq);
+ gimple_seq_node itlast = gimple_seq_last (*i->seq);
/* If CUR is NULL, we link at the end of the sequence (this case happens
when gsi_after_labels is called for a basic block that contains only
labels, so it returns an iterator after the end of the block, and
we need to insert before it; it might be cleaner to add a flag to the
iterator saying whether we are at the start or end of the list). */
- first->prev = itlast;
+ last->gsbase.next = NULL;
if (itlast)
- itlast->next = first;
+ {
+ first->gsbase.prev = itlast;
+ itlast->gsbase.next = first;
+ }
else
gimple_seq_set_first (i->seq, first);
gimple_seq_set_last (i->seq, last);
@@ -178,15 +188,11 @@ gsi_insert_seq_before_without_update (gimple_stmt_iterator *i, gimple_seq seq,
return;
/* Don't allow inserting a sequence into itself. */
- gcc_assert (seq != i->seq);
+ gcc_assert (seq != *i->seq);
first = gimple_seq_first (seq);
last = gimple_seq_last (seq);
- gimple_seq_set_first (seq, NULL);
- gimple_seq_set_last (seq, NULL);
- gimple_seq_free (seq);
-
/* Empty sequences need no work. */
if (!first || !last)
{
@@ -230,25 +236,30 @@ gsi_insert_seq_nodes_after (gimple_stmt_iterator *i,
basic_block bb;
gimple_seq_node cur = i->ptr;
+ gcc_assert (!cur || cur->gsbase.prev);
+
/* If the iterator is inside a basic block, we need to update the
basic block information for all the nodes between FIRST and LAST. */
if ((bb = gsi_bb (*i)) != NULL)
- update_bb_for_stmts (first, bb);
+ update_bb_for_stmts (first, last, bb);
/* Link SEQ after CUR. */
if (cur)
{
- last->next = cur->next;
- if (last->next)
- last->next->prev = last;
+ last->gsbase.next = cur->gsbase.next;
+ if (last->gsbase.next)
+ {
+ last->gsbase.next->gsbase.prev = last;
+ }
else
gimple_seq_set_last (i->seq, last);
- first->prev = cur;
- cur->next = first;
+ first->gsbase.prev = cur;
+ cur->gsbase.next = first;
}
else
{
- gcc_assert (!gimple_seq_last (i->seq));
+ gcc_assert (!gimple_seq_last (*i->seq));
+ last->gsbase.next = NULL;
gimple_seq_set_first (i->seq, first);
gimple_seq_set_last (i->seq, last);
}
@@ -289,15 +300,11 @@ gsi_insert_seq_after_without_update (gimple_stmt_iterator *i, gimple_seq seq,
return;
/* Don't allow inserting a sequence into itself. */
- gcc_assert (seq != i->seq);
+ gcc_assert (seq != *i->seq);
first = gimple_seq_first (seq);
last = gimple_seq_last (seq);
- gimple_seq_set_first (seq, NULL);
- gimple_seq_set_last (seq, NULL);
- gimple_seq_free (seq);
-
/* Empty sequences need no work. */
if (!first || !last)
{
@@ -329,59 +336,81 @@ gimple_seq
gsi_split_seq_after (gimple_stmt_iterator i)
{
gimple_seq_node cur, next;
- gimple_seq old_seq, new_seq;
+ gimple_seq *pold_seq, new_seq;
cur = i.ptr;
/* How can we possibly split after the end, or before the beginning? */
- gcc_assert (cur && cur->next);
- next = cur->next;
+ gcc_assert (cur && cur->gsbase.next);
+ next = cur->gsbase.next;
- old_seq = i.seq;
- new_seq = gimple_seq_alloc ();
+ pold_seq = i.seq;
- gimple_seq_set_first (new_seq, next);
- gimple_seq_set_last (new_seq, gimple_seq_last (old_seq));
- gimple_seq_set_last (old_seq, cur);
- cur->next = NULL;
- next->prev = NULL;
+ gimple_seq_set_first (&new_seq, next);
+ gimple_seq_set_last (&new_seq, gimple_seq_last (*pold_seq));
+ gimple_seq_set_last (pold_seq, cur);
+ cur->gsbase.next = NULL;
return new_seq;
}
+/* Set the statement to which GSI points to STMT. This only updates
+ the iterator and the gimple sequence, it doesn't do the bookkeeping
+ of gsi_replace. */
+
+void
+gsi_set_stmt (gimple_stmt_iterator *gsi, gimple stmt)
+{
+ gimple orig_stmt = gsi_stmt (*gsi);
+ gimple prev, next;
+
+ stmt->gsbase.next = next = orig_stmt->gsbase.next;
+ stmt->gsbase.prev = prev = orig_stmt->gsbase.prev;
+ /* Note how we don't clear next/prev of orig_stmt. This is so that
+ copies of *GSI our callers might still hold (to orig_stmt)
+ can be advanced as if they too were replaced. */
+ if (prev->gsbase.next)
+ prev->gsbase.next = stmt;
+ else
+ gimple_seq_set_first (gsi->seq, stmt);
+ if (next)
+ next->gsbase.prev = stmt;
+ else
+ gimple_seq_set_last (gsi->seq, stmt);
+
+ gsi->ptr = stmt;
+}
+
+
/* Move all statements in the sequence before I to a new sequence.
Return this new sequence. I is set to the head of the new list. */
-gimple_seq
-gsi_split_seq_before (gimple_stmt_iterator *i)
+void
+gsi_split_seq_before (gimple_stmt_iterator *i, gimple_seq *pnew_seq)
{
gimple_seq_node cur, prev;
- gimple_seq old_seq, new_seq;
+ gimple_seq old_seq;
cur = i->ptr;
/* How can we possibly split after the end? */
gcc_assert (cur);
- prev = cur->prev;
+ prev = cur->gsbase.prev;
- old_seq = i->seq;
- new_seq = gimple_seq_alloc ();
- i->seq = new_seq;
+ old_seq = *i->seq;
+ if (!prev->gsbase.next)
+ *i->seq = NULL;
+ i->seq = pnew_seq;
/* Set the limits on NEW_SEQ. */
- gimple_seq_set_first (new_seq, cur);
- gimple_seq_set_last (new_seq, gimple_seq_last (old_seq));
+ gimple_seq_set_first (pnew_seq, cur);
+ gimple_seq_set_last (pnew_seq, gimple_seq_last (old_seq));
/* Cut OLD_SEQ before I. */
- gimple_seq_set_last (old_seq, prev);
- cur->prev = NULL;
- if (prev)
- prev->next = NULL;
- else
- gimple_seq_set_first (old_seq, NULL);
-
- return new_seq;
+ gimple_seq_set_last (&old_seq, prev);
+ if (prev->gsbase.next)
+ prev->gsbase.next = NULL;
}
@@ -416,12 +445,38 @@ gsi_replace (gimple_stmt_iterator *gsi, gimple stmt, bool update_eh_info)
gimple_remove_stmt_histograms (cfun, orig_stmt);
delink_stmt_imm_use (orig_stmt);
- *gsi_stmt_ptr (gsi) = stmt;
+ gsi_set_stmt (gsi, stmt);
gimple_set_modified (stmt, true);
update_modified_stmt (stmt);
}
+/* Replace the statement pointed-to by GSI with the sequence SEQ.
+ If UPDATE_EH_INFO is true, the exception handling information of
+ the original statement is moved to the last statement of the new
+ sequence. If the old statement is an assignment, then so must
+ be the last statement of the new sequence, and they must have the
+ same LHS. */
+
+void
+gsi_replace_with_seq (gimple_stmt_iterator *gsi, gimple_seq seq,
+ bool update_eh_info)
+{
+ gimple_stmt_iterator seqi;
+ gimple last;
+ if (gimple_seq_empty_p (seq))
+ {
+ gsi_remove (gsi, true);
+ return;
+ }
+ seqi = gsi_last (seq);
+ last = gsi_stmt (seqi);
+ gsi_remove (&seqi, false);
+ gsi_insert_seq_before (gsi, seq, GSI_SAME_STMT);
+ gsi_replace (gsi, last, update_eh_info);
+}
+
+
/* Insert statement STMT before the statement pointed-to by iterator I.
M specifies how to update iterator I after insertion (see enum
gsi_iterator_update).
@@ -435,12 +490,7 @@ void
gsi_insert_before_without_update (gimple_stmt_iterator *i, gimple stmt,
enum gsi_iterator_update m)
{
- gimple_seq_node n;
-
- n = ggc_alloc_gimple_seq_node_d ();
- n->prev = n->next = NULL;
- n->stmt = stmt;
- gsi_insert_seq_nodes_before (i, n, n, m);
+ gsi_insert_seq_nodes_before (i, stmt, stmt, m);
}
/* Insert statement STMT before the statement pointed-to by iterator I.
@@ -470,12 +520,7 @@ void
gsi_insert_after_without_update (gimple_stmt_iterator *i, gimple stmt,
enum gsi_iterator_update m)
{
- gimple_seq_node n;
-
- n = ggc_alloc_gimple_seq_node_d ();
- n->prev = n->next = NULL;
- n->stmt = stmt;
- gsi_insert_seq_nodes_after (i, n, n, m);
+ gsi_insert_seq_nodes_after (i, stmt, stmt, m);
}
@@ -525,19 +570,24 @@ gsi_remove (gimple_stmt_iterator *i, bool remove_permanently)
/* Update the iterator and re-wire the links in I->SEQ. */
cur = i->ptr;
- next = cur->next;
- prev = cur->prev;
-
- if (prev)
- prev->next = next;
- else
- gimple_seq_set_first (i->seq, next);
+ next = cur->gsbase.next;
+ prev = cur->gsbase.prev;
+ /* See gsi_set_stmt for why we don't reset prev/next of STMT. */
if (next)
- next->prev = prev;
- else
+ /* Cur is not last. */
+ next->gsbase.prev = prev;
+ else if (prev->gsbase.next)
+ /* Cur is last but not first. */
gimple_seq_set_last (i->seq, prev);
+ if (prev->gsbase.next)
+ /* Cur is not first. */
+ prev->gsbase.next = next;
+ else
+ /* Cur is first. */
+ *i->seq = next;
+
i->ptr = next;
return require_eh_edge_purge;
@@ -557,11 +607,8 @@ gsi_for_stmt (gimple stmt)
else
i = gsi_start_bb (bb);
- for (; !gsi_end_p (i); gsi_next (&i))
- if (gsi_stmt (i) == stmt)
- return i;
-
- gcc_unreachable ();
+ i.ptr = stmt;
+ return i;
}
@@ -727,7 +774,6 @@ basic_block
gsi_insert_on_edge_immediate (edge e, gimple stmt)
{
gimple_stmt_iterator gsi;
- struct gimple_seq_node_d node;
basic_block new_bb = NULL;
bool ins_after;
@@ -735,9 +781,7 @@ gsi_insert_on_edge_immediate (edge e, gimple stmt)
ins_after = gimple_find_edge_insert_loc (e, &gsi, &new_bb);
- node.stmt = stmt;
- node.prev = node.next = NULL;
- update_call_edge_frequencies (&node, gsi.bb);
+ update_call_edge_frequencies (stmt, gsi.bb);
if (ins_after)
gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
@@ -820,5 +864,6 @@ gsi_commit_one_edge_insert (edge e, basic_block *new_bb)
gimple_stmt_iterator
gsi_start_phis (basic_block bb)
{
- return gsi_start (phi_nodes (bb));
+ gimple_seq *pseq = phi_nodes_ptr (bb);
+ return gsi_start_1 (pseq);
}
diff --git a/gcc/gimple-low.c b/gcc/gimple-low.c
index 4a1ae0bfa9a..04d4275f75b 100644
--- a/gcc/gimple-low.c
+++ b/gcc/gimple-low.c
@@ -112,10 +112,6 @@ lower_function_body (void)
i = gsi_start (lowered_body);
lower_gimple_bind (&i, &data);
- /* Once the old body has been lowered, replace it with the new
- lowered sequence. */
- gimple_set_body (current_function_decl, lowered_body);
-
i = gsi_last (lowered_body);
/* If the function falls off the end, we need a null return statement.
@@ -179,6 +175,10 @@ lower_function_body (void)
gsi_insert_after (&i, x, GSI_CONTINUE_LINKING);
}
+ /* Once the old body has been lowered, replace it with the new
+ lowered sequence. */
+ gimple_set_body (current_function_decl, lowered_body);
+
gcc_assert (data.block == DECL_INITIAL (current_function_decl));
BLOCK_SUBBLOCKS (data.block)
= blocks_nreverse (BLOCK_SUBBLOCKS (data.block));
@@ -305,11 +305,11 @@ gimple_check_call_matching_types (gimple call_stmt, tree callee)
do it explicitly. DATA is passed through the recursion. */
static void
-lower_sequence (gimple_seq seq, struct lower_data *data)
+lower_sequence (gimple_seq *seq, struct lower_data *data)
{
gimple_stmt_iterator gsi;
- for (gsi = gsi_start (seq); !gsi_end_p (gsi); )
+ for (gsi = gsi_start (*seq); !gsi_end_p (gsi); )
lower_stmt (&gsi, data);
}
@@ -324,11 +324,10 @@ lower_omp_directive (gimple_stmt_iterator *gsi, struct lower_data *data)
stmt = gsi_stmt (*gsi);
- lower_sequence (gimple_omp_body (stmt), data);
- gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
- gsi_insert_seq_before (gsi, gimple_omp_body (stmt), GSI_SAME_STMT);
+ lower_sequence (gimple_omp_body_ptr (stmt), data);
+ gsi_insert_seq_after (gsi, gimple_omp_body (stmt), GSI_CONTINUE_LINKING);
gimple_omp_set_body (stmt, NULL);
- gsi_remove (gsi, false);
+ gsi_next (gsi);
}
@@ -376,10 +375,10 @@ lower_stmt (gimple_stmt_iterator *gsi, struct lower_data *data)
case GIMPLE_TRY:
{
bool try_cannot_fallthru;
- lower_sequence (gimple_try_eval (stmt), data);
+ lower_sequence (gimple_try_eval_ptr (stmt), data);
try_cannot_fallthru = data->cannot_fallthru;
data->cannot_fallthru = false;
- lower_sequence (gimple_try_cleanup (stmt), data);
+ lower_sequence (gimple_try_cleanup_ptr (stmt), data);
/* See gimple_stmt_may_fallthru for the rationale. */
if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
{
@@ -392,17 +391,17 @@ lower_stmt (gimple_stmt_iterator *gsi, struct lower_data *data)
case GIMPLE_CATCH:
data->cannot_fallthru = false;
- lower_sequence (gimple_catch_handler (stmt), data);
+ lower_sequence (gimple_catch_handler_ptr (stmt), data);
break;
case GIMPLE_EH_FILTER:
data->cannot_fallthru = false;
- lower_sequence (gimple_eh_filter_failure (stmt), data);
+ lower_sequence (gimple_eh_filter_failure_ptr (stmt), data);
break;
case GIMPLE_EH_ELSE:
- lower_sequence (gimple_eh_else_n_body (stmt), data);
- lower_sequence (gimple_eh_else_e_body (stmt), data);
+ lower_sequence (gimple_eh_else_n_body_ptr (stmt), data);
+ lower_sequence (gimple_eh_else_e_body_ptr (stmt), data);
break;
case GIMPLE_NOP:
@@ -456,7 +455,7 @@ lower_stmt (gimple_stmt_iterator *gsi, struct lower_data *data)
return;
case GIMPLE_TRANSACTION:
- lower_sequence (gimple_transaction_body (stmt), data);
+ lower_sequence (gimple_transaction_body_ptr (stmt), data);
break;
default:
@@ -505,7 +504,7 @@ lower_gimple_bind (gimple_stmt_iterator *gsi, struct lower_data *data)
}
record_vars (gimple_bind_vars (stmt));
- lower_sequence (gimple_bind_body (stmt), data);
+ lower_sequence (gimple_bind_body_ptr (stmt), data);
if (new_block)
{
@@ -585,7 +584,7 @@ gimple_try_catch_may_fallthru (gimple stmt)
if (gimple_seq_may_fallthru (gimple_try_eval (stmt)))
return true;
- i = gsi_start (gimple_try_cleanup (stmt));
+ i = gsi_start (*gimple_try_cleanup_ptr (stmt));
switch (gimple_code (gsi_stmt (i)))
{
case GIMPLE_CATCH:
diff --git a/gcc/gimple-pretty-print.c b/gcc/gimple-pretty-print.c
index 7b29aa6c17e..2e3cb0ca872 100644
--- a/gcc/gimple-pretty-print.c
+++ b/gcc/gimple-pretty-print.c
@@ -1590,14 +1590,14 @@ dump_gimple_phi (pretty_printer *buffer, gimple phi, int spc, int flags)
&& POINTER_TYPE_P (TREE_TYPE (lhs))
&& SSA_NAME_PTR_INFO (lhs))
{
+ unsigned int align, misalign;
struct ptr_info_def *pi = SSA_NAME_PTR_INFO (lhs);
pp_string (buffer, "PT = ");
pp_points_to_solution (buffer, &pi->pt);
newline_and_indent (buffer, spc);
- if (pi->align != 1)
+ if (get_ptr_info_alignment (pi, &align, &misalign))
{
- pp_printf (buffer, "# ALIGN = %u, MISALIGN = %u",
- pi->align, pi->misalign);
+ pp_printf (buffer, "# ALIGN = %u, MISALIGN = %u", align, misalign);
newline_and_indent (buffer, spc);
}
pp_string (buffer, "# ");
@@ -1889,14 +1889,15 @@ dump_gimple_stmt (pretty_printer *buffer, gimple gs, int spc, int flags)
&& POINTER_TYPE_P (TREE_TYPE (lhs))
&& SSA_NAME_PTR_INFO (lhs))
{
+ unsigned int align, misalign;
struct ptr_info_def *pi = SSA_NAME_PTR_INFO (lhs);
pp_string (buffer, "# PT = ");
pp_points_to_solution (buffer, &pi->pt);
newline_and_indent (buffer, spc);
- if (pi->align != 1)
+ if (get_ptr_info_alignment (pi, &align, &misalign))
{
pp_printf (buffer, "# ALIGN = %u, MISALIGN = %u",
- pi->align, pi->misalign);
+ align, misalign);
newline_and_indent (buffer, spc);
}
}
diff --git a/gcc/gimple.c b/gcc/gimple.c
index 95d79dc1e02..79da12cde51 100644
--- a/gcc/gimple.c
+++ b/gcc/gimple.c
@@ -90,16 +90,11 @@ static const char * const gimple_alloc_kind_names[] = {
"assignments",
"phi nodes",
"conditionals",
- "sequences",
"everything else"
};
#endif /* GATHER_STATISTICS */
-/* A cache of gimple_seq objects. Sequences are created and destroyed
- fairly often during gimplification. */
-static GTY ((deletable)) struct gimple_seq_d *gimple_seq_cache;
-
/* Private API manipulation functions shared only with some
other files. */
extern void gimple_set_stored_syms (gimple, bitmap, bitmap_obstack *);
@@ -154,6 +149,7 @@ gimple_alloc_stat (enum gimple_code code, unsigned num_ops MEM_STAT_DECL)
/* Do not call gimple_set_modified here as it has other side
effects and this tuple is still not completely built. */
stmt->gsbase.modified = 1;
+ gimple_init_singleton (stmt);
return stmt;
}
@@ -1201,53 +1197,6 @@ gimple_check_failed (const_gimple gs, const char *file, int line,
#endif /* ENABLE_GIMPLE_CHECKING */
-/* Allocate a new GIMPLE sequence in GC memory and return it. If
- there are free sequences in GIMPLE_SEQ_CACHE return one of those
- instead. */
-
-gimple_seq
-gimple_seq_alloc (void)
-{
- gimple_seq seq = gimple_seq_cache;
- if (seq)
- {
- gimple_seq_cache = gimple_seq_cache->next_free;
- gcc_assert (gimple_seq_cache != seq);
- memset (seq, 0, sizeof (*seq));
- }
- else
- {
- seq = ggc_alloc_cleared_gimple_seq_d ();
-#ifdef GATHER_STATISTICS
- gimple_alloc_counts[(int) gimple_alloc_kind_seq]++;
- gimple_alloc_sizes[(int) gimple_alloc_kind_seq] += sizeof (*seq);
-#endif
- }
-
- return seq;
-}
-
-/* Return SEQ to the free pool of GIMPLE sequences. */
-
-void
-gimple_seq_free (gimple_seq seq)
-{
- if (seq == NULL)
- return;
-
- gcc_assert (gimple_seq_first (seq) == NULL);
- gcc_assert (gimple_seq_last (seq) == NULL);
-
- /* If this triggers, it's a sign that the same list is being freed
- twice. */
- gcc_assert (seq != gimple_seq_cache || gimple_seq_cache == NULL);
-
- /* Add SEQ to the pool of free sequences. */
- seq->next_free = gimple_seq_cache;
- gimple_seq_cache = seq;
-}
-
-
/* Link gimple statement GS to the end of the sequence *SEQ_P. If
*SEQ_P is NULL, a new sequence is allocated. */
@@ -1255,13 +1204,9 @@ void
gimple_seq_add_stmt (gimple_seq *seq_p, gimple gs)
{
gimple_stmt_iterator si;
-
if (gs == NULL)
return;
- if (*seq_p == NULL)
- *seq_p = gimple_seq_alloc ();
-
si = gsi_last (*seq_p);
gsi_insert_after (&si, gs, GSI_NEW_STMT);
}
@@ -1274,13 +1219,9 @@ void
gimple_seq_add_seq (gimple_seq *dst_p, gimple_seq src)
{
gimple_stmt_iterator si;
-
if (src == NULL)
return;
- if (*dst_p == NULL)
- *dst_p = gimple_seq_alloc ();
-
si = gsi_last (*dst_p);
gsi_insert_seq_after (&si, src, GSI_NEW_STMT);
}
@@ -1324,7 +1265,7 @@ gimple_seq
gimple_seq_copy (gimple_seq src)
{
gimple_stmt_iterator gsi;
- gimple_seq new_seq = gimple_seq_alloc ();
+ gimple_seq new_seq = NULL;
gimple stmt;
for (gsi = gsi_start (src); !gsi_end_p (gsi); gsi_next (&gsi))
@@ -1337,7 +1278,7 @@ gimple_seq_copy (gimple_seq src)
}
-/* Walk all the statements in the sequence SEQ calling walk_gimple_stmt
+/* Walk all the statements in the sequence *PSEQ calling walk_gimple_stmt
on each one. WI is as in walk_gimple_stmt.
If walk_gimple_stmt returns non-NULL, the walk is stopped, and the
@@ -1349,12 +1290,12 @@ gimple_seq_copy (gimple_seq src)
Otherwise, all the statements are walked and NULL returned. */
gimple
-walk_gimple_seq (gimple_seq seq, walk_stmt_fn callback_stmt,
- walk_tree_fn callback_op, struct walk_stmt_info *wi)
+walk_gimple_seq_mod (gimple_seq *pseq, walk_stmt_fn callback_stmt,
+ walk_tree_fn callback_op, struct walk_stmt_info *wi)
{
gimple_stmt_iterator gsi;
- for (gsi = gsi_start (seq); !gsi_end_p (gsi); )
+ for (gsi = gsi_start (*pseq); !gsi_end_p (gsi); )
{
tree ret = walk_gimple_stmt (&gsi, callback_stmt, callback_op, wi);
if (ret)
@@ -1378,6 +1319,20 @@ walk_gimple_seq (gimple_seq seq, walk_stmt_fn callback_stmt,
}
+/* Like walk_gimple_seq_mod, but ensure that the head of SEQ isn't
+ changed by the callbacks. */
+
+gimple
+walk_gimple_seq (gimple_seq seq, walk_stmt_fn callback_stmt,
+ walk_tree_fn callback_op, struct walk_stmt_info *wi)
+{
+ gimple_seq seq2 = seq;
+ gimple ret = walk_gimple_seq_mod (&seq2, callback_stmt, callback_op, wi);
+ gcc_assert (seq2 == seq);
+ return ret;
+}
+
+
/* Helper function for walk_gimple_stmt. Walk operands of a GIMPLE_ASM. */
static tree
@@ -1808,51 +1763,51 @@ walk_gimple_stmt (gimple_stmt_iterator *gsi, walk_stmt_fn callback_stmt,
switch (gimple_code (stmt))
{
case GIMPLE_BIND:
- ret = walk_gimple_seq (gimple_bind_body (stmt), callback_stmt,
- callback_op, wi);
+ ret = walk_gimple_seq_mod (gimple_bind_body_ptr (stmt), callback_stmt,
+ callback_op, wi);
if (ret)
return wi->callback_result;
break;
case GIMPLE_CATCH:
- ret = walk_gimple_seq (gimple_catch_handler (stmt), callback_stmt,
- callback_op, wi);
+ ret = walk_gimple_seq_mod (gimple_catch_handler_ptr (stmt), callback_stmt,
+ callback_op, wi);
if (ret)
return wi->callback_result;
break;
case GIMPLE_EH_FILTER:
- ret = walk_gimple_seq (gimple_eh_filter_failure (stmt), callback_stmt,
+ ret = walk_gimple_seq_mod (gimple_eh_filter_failure_ptr (stmt), callback_stmt,
callback_op, wi);
if (ret)
return wi->callback_result;
break;
case GIMPLE_EH_ELSE:
- ret = walk_gimple_seq (gimple_eh_else_n_body (stmt),
+ ret = walk_gimple_seq_mod (gimple_eh_else_n_body_ptr (stmt),
callback_stmt, callback_op, wi);
if (ret)
return wi->callback_result;
- ret = walk_gimple_seq (gimple_eh_else_e_body (stmt),
+ ret = walk_gimple_seq_mod (gimple_eh_else_e_body_ptr (stmt),
callback_stmt, callback_op, wi);
if (ret)
return wi->callback_result;
break;
case GIMPLE_TRY:
- ret = walk_gimple_seq (gimple_try_eval (stmt), callback_stmt, callback_op,
+ ret = walk_gimple_seq_mod (gimple_try_eval_ptr (stmt), callback_stmt, callback_op,
wi);
if (ret)
return wi->callback_result;
- ret = walk_gimple_seq (gimple_try_cleanup (stmt), callback_stmt,
+ ret = walk_gimple_seq_mod (gimple_try_cleanup_ptr (stmt), callback_stmt,
callback_op, wi);
if (ret)
return wi->callback_result;
break;
case GIMPLE_OMP_FOR:
- ret = walk_gimple_seq (gimple_omp_for_pre_body (stmt), callback_stmt,
+ ret = walk_gimple_seq_mod (gimple_omp_for_pre_body_ptr (stmt), callback_stmt,
callback_op, wi);
if (ret)
return wi->callback_result;
@@ -1866,21 +1821,21 @@ walk_gimple_stmt (gimple_stmt_iterator *gsi, walk_stmt_fn callback_stmt,
case GIMPLE_OMP_TASK:
case GIMPLE_OMP_SECTIONS:
case GIMPLE_OMP_SINGLE:
- ret = walk_gimple_seq (gimple_omp_body (stmt), callback_stmt,
+ ret = walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), callback_stmt,
callback_op, wi);
if (ret)
return wi->callback_result;
break;
case GIMPLE_WITH_CLEANUP_EXPR:
- ret = walk_gimple_seq (gimple_wce_cleanup (stmt), callback_stmt,
+ ret = walk_gimple_seq_mod (gimple_wce_cleanup_ptr (stmt), callback_stmt,
callback_op, wi);
if (ret)
return wi->callback_result;
break;
case GIMPLE_TRANSACTION:
- ret = walk_gimple_seq (gimple_transaction_body (stmt),
+ ret = walk_gimple_seq_mod (gimple_transaction_body_ptr (stmt),
callback_stmt, callback_op, wi);
if (ret)
return wi->callback_result;
@@ -2181,6 +2136,7 @@ gimple_assign_set_rhs_with_ops_1 (gimple_stmt_iterator *gsi, enum tree_code code
tree lhs = gimple_assign_lhs (stmt);
gimple new_stmt = gimple_alloc (gimple_code (stmt), new_rhs_ops + 1);
memcpy (new_stmt, stmt, gimple_size (gimple_code (stmt)));
+ gimple_init_singleton (new_stmt);
gsi_replace (gsi, new_stmt, true);
stmt = new_stmt;
@@ -2270,7 +2226,8 @@ gimple_replace_lhs (gimple stmt, tree nlhs)
/* Return a deep copy of statement STMT. All the operands from STMT
are reallocated and copied using unshare_expr. The DEF, USE, VDEF
- and VUSE operand arrays are set to empty in the new copy. */
+ and VUSE operand arrays are set to empty in the new copy. The new
+ copy isn't part of any sequence. */
gimple
gimple_copy (gimple stmt)
@@ -2282,6 +2239,7 @@ gimple_copy (gimple stmt)
/* Shallow copy all the fields from STMT. */
memcpy (copy, stmt, gimple_size (code));
+ gimple_init_singleton (copy);
/* If STMT has sub-statements, deep-copy them as well. */
if (gimple_has_substatements (stmt))
@@ -3368,8 +3326,7 @@ gtc_visit (tree t1, tree t2,
return false;
if (TREE_CODE (t1) == INTEGER_TYPE
- && (TYPE_IS_SIZETYPE (t1) != TYPE_IS_SIZETYPE (t2)
- || TYPE_STRING_FLAG (t1) != TYPE_STRING_FLAG (t2)))
+ && TYPE_STRING_FLAG (t1) != TYPE_STRING_FLAG (t2))
return false;
/* That's all we need to check for float and fixed-point types. */
@@ -3798,8 +3755,7 @@ gimple_types_compatible_p (tree t1, tree t2)
return false;
if (TREE_CODE (t1) == INTEGER_TYPE
- && (TYPE_IS_SIZETYPE (t1) != TYPE_IS_SIZETYPE (t2)
- || TYPE_STRING_FLAG (t1) != TYPE_STRING_FLAG (t2)))
+ && TYPE_STRING_FLAG (t1) != TYPE_STRING_FLAG (t2))
return false;
/* That's all we need to check for float and fixed-point types. */
@@ -4248,10 +4204,7 @@ iterative_hash_canonical_type (tree type, hashval_t val)
/* For integer types hash the types min/max values and the string flag. */
if (TREE_CODE (type) == INTEGER_TYPE)
- {
- v = iterative_hash_hashval_t (TYPE_STRING_FLAG (type), v);
- v = iterative_hash_hashval_t (TYPE_IS_SIZETYPE (type), v);
- }
+ v = iterative_hash_hashval_t (TYPE_STRING_FLAG (type), v);
/* For array types hash their domain and the string flag. */
if (TREE_CODE (type) == ARRAY_TYPE
@@ -4467,8 +4420,7 @@ gimple_canonical_types_compatible_p (tree t1, tree t2)
return false;
if (TREE_CODE (t1) == INTEGER_TYPE
- && (TYPE_IS_SIZETYPE (t1) != TYPE_IS_SIZETYPE (t2)
- || TYPE_STRING_FLAG (t1) != TYPE_STRING_FLAG (t2)))
+ && TYPE_STRING_FLAG (t1) != TYPE_STRING_FLAG (t2))
return false;
/* For canonical type comparisons we do not want to build SCCs
diff --git a/gcc/gimple.h b/gcc/gimple.h
index c3e07983d73..80271a1e358 100644
--- a/gcc/gimple.h
+++ b/gcc/gimple.h
@@ -32,9 +32,7 @@ along with GCC; see the file COPYING3. If not see
#include "tree-ssa-alias.h"
#include "internal-fn.h"
-struct gimple_seq_node_d;
-typedef struct gimple_seq_node_d *gimple_seq_node;
-typedef const struct gimple_seq_node_d *const_gimple_seq_node;
+typedef gimple gimple_seq_node;
/* For each block, the PHI nodes that need to be rewritten are stored into
these vectors. */
@@ -133,130 +131,6 @@ enum plf_mask {
GF_PLF_2 = 1 << 1
};
-/* A node in a gimple_seq_d. */
-struct GTY((chain_next ("%h.next"), chain_prev ("%h.prev"))) gimple_seq_node_d {
- gimple stmt;
- struct gimple_seq_node_d *prev;
- struct gimple_seq_node_d *next;
-};
-
-/* A double-linked sequence of gimple statements. */
-struct GTY ((chain_next ("%h.next_free"))) gimple_seq_d {
- /* First and last statements in the sequence. */
- gimple_seq_node first;
- gimple_seq_node last;
-
- /* Sequences are created/destroyed frequently. To minimize
- allocation activity, deallocated sequences are kept in a pool of
- available sequences. This is the pointer to the next free
- sequence in the pool. */
- gimple_seq next_free;
-};
-
-
-/* Return the first node in GIMPLE sequence S. */
-
-static inline gimple_seq_node
-gimple_seq_first (const_gimple_seq s)
-{
- return s ? s->first : NULL;
-}
-
-
-/* Return the first statement in GIMPLE sequence S. */
-
-static inline gimple
-gimple_seq_first_stmt (const_gimple_seq s)
-{
- gimple_seq_node n = gimple_seq_first (s);
- return (n) ? n->stmt : NULL;
-}
-
-
-/* Return the last node in GIMPLE sequence S. */
-
-static inline gimple_seq_node
-gimple_seq_last (const_gimple_seq s)
-{
- return s ? s->last : NULL;
-}
-
-
-/* Return the last statement in GIMPLE sequence S. */
-
-static inline gimple
-gimple_seq_last_stmt (const_gimple_seq s)
-{
- gimple_seq_node n = gimple_seq_last (s);
- return (n) ? n->stmt : NULL;
-}
-
-
-/* Set the last node in GIMPLE sequence S to LAST. */
-
-static inline void
-gimple_seq_set_last (gimple_seq s, gimple_seq_node last)
-{
- s->last = last;
-}
-
-
-/* Set the first node in GIMPLE sequence S to FIRST. */
-
-static inline void
-gimple_seq_set_first (gimple_seq s, gimple_seq_node first)
-{
- s->first = first;
-}
-
-
-/* Return true if GIMPLE sequence S is empty. */
-
-static inline bool
-gimple_seq_empty_p (const_gimple_seq s)
-{
- return s == NULL || s->first == NULL;
-}
-
-
-void gimple_seq_add_stmt (gimple_seq *, gimple);
-
-/* Link gimple statement GS to the end of the sequence *SEQ_P. If
- *SEQ_P is NULL, a new sequence is allocated. This function is
- similar to gimple_seq_add_stmt, but does not scan the operands.
- During gimplification, we need to manipulate statement sequences
- before the def/use vectors have been constructed. */
-void gimple_seq_add_stmt_without_update (gimple_seq *, gimple);
-
-/* Allocate a new sequence and initialize its first element with STMT. */
-
-static inline gimple_seq
-gimple_seq_alloc_with_stmt (gimple stmt)
-{
- gimple_seq seq = NULL;
- gimple_seq_add_stmt (&seq, stmt);
- return seq;
-}
-
-
-/* Returns the sequence of statements in BB. */
-
-static inline gimple_seq
-bb_seq (const_basic_block bb)
-{
- return (!(bb->flags & BB_RTL) && bb->il.gimple) ? bb->il.gimple->seq : NULL;
-}
-
-
-/* Sets the sequence of statements in BB to SEQ. */
-
-static inline void
-set_bb_seq (basic_block bb, gimple_seq seq)
-{
- gcc_checking_assert (!(bb->flags & BB_RTL));
- bb->il.gimple->seq = seq;
-}
-
/* Iterator object for GIMPLE statement sequences. */
typedef struct
@@ -268,7 +142,7 @@ typedef struct
are necessary to handle edge cases such as when statement is
added to an empty basic block or when the last statement of a
block/sequence is removed. */
- gimple_seq seq;
+ gimple_seq *seq;
basic_block bb;
} gimple_stmt_iterator;
@@ -332,7 +206,16 @@ struct GTY(()) gimple_statement_base {
Basic block holding this statement. */
struct basic_block_def *bb;
- /* [ WORD 4 ]
+ /* [ WORD 4-5 ]
+ Linked lists of gimple statements. The next pointers form
+ a NULL terminated list, the prev pointers are a cyclic list.
+ A gimple statement is hence also a double-ended list of
+ statements, with the pointer itself being the first element,
+ and the prev pointer being the last. */
+ gimple next;
+ gimple GTY((skip)) prev;
+
+ /* [ WORD 6 ]
Lexical block holding this statement. */
tree block;
};
@@ -342,10 +225,10 @@ struct GTY(()) gimple_statement_base {
struct GTY(()) gimple_statement_with_ops_base
{
- /* [ WORD 1-4 ] */
+ /* [ WORD 1-6 ] */
struct gimple_statement_base gsbase;
- /* [ WORD 5-6 ]
+ /* [ WORD 7-8 ]
SSA operand vectors. NOTE: It should be possible to
amalgamate these vectors with the operand vector OP. However,
the SSA operand vectors are organized differently and contain
@@ -359,10 +242,10 @@ struct GTY(()) gimple_statement_with_ops_base
struct GTY(()) gimple_statement_with_ops
{
- /* [ WORD 1-6 ] */
+ /* [ WORD 1-8 ] */
struct gimple_statement_with_ops_base opbase;
- /* [ WORD 7 ]
+ /* [ WORD 9 ]
Operand vector. NOTE! This must always be the last field
of this structure. In particular, this means that this
structure cannot be embedded inside another one. */
@@ -374,10 +257,10 @@ struct GTY(()) gimple_statement_with_ops
struct GTY(()) gimple_statement_with_memory_ops_base
{
- /* [ WORD 1-6 ] */
+ /* [ WORD 1-8 ] */
struct gimple_statement_with_ops_base opbase;
- /* [ WORD 7-8 ]
+ /* [ WORD 9-10 ]
Virtual operands for this statement. The GC will pick them
up via the ssa_names array. */
tree GTY((skip (""))) vdef;
@@ -389,10 +272,10 @@ struct GTY(()) gimple_statement_with_memory_ops_base
struct GTY(()) gimple_statement_with_memory_ops
{
- /* [ WORD 1-8 ] */
+ /* [ WORD 1-10 ] */
struct gimple_statement_with_memory_ops_base membase;
- /* [ WORD 9 ]
+ /* [ WORD 11 ]
Operand vector. NOTE! This must always be the last field
of this structure. In particular, this means that this
structure cannot be embedded inside another one. */
@@ -404,20 +287,20 @@ struct GTY(()) gimple_statement_with_memory_ops
struct GTY(()) gimple_statement_call
{
- /* [ WORD 1-8 ] */
+ /* [ WORD 1-10 ] */
struct gimple_statement_with_memory_ops_base membase;
- /* [ WORD 9-12 ] */
+ /* [ WORD 11-14 ] */
struct pt_solution call_used;
struct pt_solution call_clobbered;
- /* [ WORD 13 ] */
+ /* [ WORD 15 ] */
union GTY ((desc ("%1.membase.opbase.gsbase.subcode & GF_CALL_INTERNAL"))) {
tree GTY ((tag ("0"))) fntype;
enum internal_fn GTY ((tag ("GF_CALL_INTERNAL"))) internal_fn;
} u;
- /* [ WORD 14 ]
+ /* [ WORD 16 ]
Operand vector. NOTE! This must always be the last field
of this structure. In particular, this means that this
structure cannot be embedded inside another one. */
@@ -428,10 +311,10 @@ struct GTY(()) gimple_statement_call
/* OpenMP statements (#pragma omp). */
struct GTY(()) gimple_statement_omp {
- /* [ WORD 1-4 ] */
+ /* [ WORD 1-6 ] */
struct gimple_statement_base gsbase;
- /* [ WORD 5 ] */
+ /* [ WORD 7 ] */
gimple_seq body;
};
@@ -439,14 +322,14 @@ struct GTY(()) gimple_statement_omp {
/* GIMPLE_BIND */
struct GTY(()) gimple_statement_bind {
- /* [ WORD 1-4 ] */
+ /* [ WORD 1-6 ] */
struct gimple_statement_base gsbase;
- /* [ WORD 5 ]
+ /* [ WORD 7 ]
Variables declared in this scope. */
tree vars;
- /* [ WORD 6 ]
+ /* [ WORD 8 ]
This is different than the BLOCK field in gimple_statement_base,
which is analogous to TREE_BLOCK (i.e., the lexical block holding
this statement). This field is the equivalent of BIND_EXPR_BLOCK
@@ -454,7 +337,7 @@ struct GTY(()) gimple_statement_bind {
gimple-low.c. */
tree block;
- /* [ WORD 7 ] */
+ /* [ WORD 9 ] */
gimple_seq body;
};
@@ -462,13 +345,13 @@ struct GTY(()) gimple_statement_bind {
/* GIMPLE_CATCH */
struct GTY(()) gimple_statement_catch {
- /* [ WORD 1-4 ] */
+ /* [ WORD 1-6 ] */
struct gimple_statement_base gsbase;
- /* [ WORD 5 ] */
+ /* [ WORD 7 ] */
tree types;
- /* [ WORD 6 ] */
+ /* [ WORD 8 ] */
gimple_seq handler;
};
@@ -476,14 +359,14 @@ struct GTY(()) gimple_statement_catch {
/* GIMPLE_EH_FILTER */
struct GTY(()) gimple_statement_eh_filter {
- /* [ WORD 1-4 ] */
+ /* [ WORD 1-6 ] */
struct gimple_statement_base gsbase;
- /* [ WORD 5 ]
+ /* [ WORD 7 ]
Filter types. */
tree types;
- /* [ WORD 6 ]
+ /* [ WORD 8 ]
Failure actions. */
gimple_seq failure;
};
@@ -491,37 +374,37 @@ struct GTY(()) gimple_statement_eh_filter {
/* GIMPLE_EH_ELSE */
struct GTY(()) gimple_statement_eh_else {
- /* [ WORD 1-4 ] */
+ /* [ WORD 1-6 ] */
struct gimple_statement_base gsbase;
- /* [ WORD 5,6 ] */
+ /* [ WORD 7,8 ] */
gimple_seq n_body, e_body;
};
/* GIMPLE_EH_MUST_NOT_THROW */
struct GTY(()) gimple_statement_eh_mnt {
- /* [ WORD 1-4 ] */
+ /* [ WORD 1-6 ] */
struct gimple_statement_base gsbase;
- /* [ WORD 5 ] Abort function decl. */
+ /* [ WORD 7 ] Abort function decl. */
tree fndecl;
};
/* GIMPLE_PHI */
struct GTY(()) gimple_statement_phi {
- /* [ WORD 1-4 ] */
+ /* [ WORD 1-6 ] */
struct gimple_statement_base gsbase;
- /* [ WORD 5 ] */
+ /* [ WORD 7 ] */
unsigned capacity;
unsigned nargs;
- /* [ WORD 6 ] */
+ /* [ WORD 8 ] */
tree result;
- /* [ WORD 7 ] */
+ /* [ WORD 9 ] */
struct phi_arg_d GTY ((length ("%h.nargs"))) args[1];
};
@@ -530,10 +413,10 @@ struct GTY(()) gimple_statement_phi {
struct GTY(()) gimple_statement_eh_ctrl
{
- /* [ WORD 1-4 ] */
+ /* [ WORD 1-6 ] */
struct gimple_statement_base gsbase;
- /* [ WORD 5 ]
+ /* [ WORD 7 ]
Exception region number. */
int region;
};
@@ -542,14 +425,14 @@ struct GTY(()) gimple_statement_eh_ctrl
/* GIMPLE_TRY */
struct GTY(()) gimple_statement_try {
- /* [ WORD 1-4 ] */
+ /* [ WORD 1-6 ] */
struct gimple_statement_base gsbase;
- /* [ WORD 5 ]
+ /* [ WORD 7 ]
Expression to evaluate. */
gimple_seq eval;
- /* [ WORD 6 ]
+ /* [ WORD 8 ]
Cleanup expression. */
gimple_seq cleanup;
};
@@ -571,7 +454,7 @@ enum gimple_try_flags
/* GIMPLE_WITH_CLEANUP_EXPR */
struct GTY(()) gimple_statement_wce {
- /* [ WORD 1-4 ] */
+ /* [ WORD 1-6 ] */
struct gimple_statement_base gsbase;
/* Subcode: CLEANUP_EH_ONLY. True if the cleanup should only be
@@ -579,7 +462,7 @@ struct GTY(()) gimple_statement_wce {
scope. This flag is analogous to the CLEANUP_EH_ONLY flag
in TARGET_EXPRs. */
- /* [ WORD 5 ]
+ /* [ WORD 7 ]
Cleanup expression. */
gimple_seq cleanup;
};
@@ -589,21 +472,21 @@ struct GTY(()) gimple_statement_wce {
struct GTY(()) gimple_statement_asm
{
- /* [ WORD 1-8 ] */
+ /* [ WORD 1-10 ] */
struct gimple_statement_with_memory_ops_base membase;
- /* [ WORD 9 ]
+ /* [ WORD 11 ]
__asm__ statement. */
const char *string;
- /* [ WORD 10 ]
+ /* [ WORD 12 ]
Number of inputs, outputs, clobbers, labels. */
unsigned char ni;
unsigned char no;
unsigned char nc;
unsigned char nl;
- /* [ WORD 11 ]
+ /* [ WORD 13 ]
Operand vector. NOTE! This must always be the last field
of this structure. In particular, this means that this
structure cannot be embedded inside another one. */
@@ -613,10 +496,10 @@ struct GTY(()) gimple_statement_asm
/* GIMPLE_OMP_CRITICAL */
struct GTY(()) gimple_statement_omp_critical {
- /* [ WORD 1-5 ] */
+ /* [ WORD 1-7 ] */
struct gimple_statement_omp omp;
- /* [ WORD 6 ]
+ /* [ WORD 8 ]
Critical section name. */
tree name;
};
@@ -642,20 +525,20 @@ struct GTY(()) gimple_omp_for_iter {
/* GIMPLE_OMP_FOR */
struct GTY(()) gimple_statement_omp_for {
- /* [ WORD 1-5 ] */
+ /* [ WORD 1-7 ] */
struct gimple_statement_omp omp;
- /* [ WORD 6 ] */
+ /* [ WORD 8 ] */
tree clauses;
- /* [ WORD 7 ]
+ /* [ WORD 9 ]
Number of elements in iter array. */
size_t collapse;
- /* [ WORD 8 ] */
+ /* [ WORD 10 ] */
struct gimple_omp_for_iter * GTY((length ("%h.collapse"))) iter;
- /* [ WORD 9 ]
+ /* [ WORD 11 ]
Pre-body evaluated before the loop body begins. */
gimple_seq pre_body;
};
@@ -664,18 +547,18 @@ struct GTY(()) gimple_statement_omp_for {
/* GIMPLE_OMP_PARALLEL */
struct GTY(()) gimple_statement_omp_parallel {
- /* [ WORD 1-5 ] */
+ /* [ WORD 1-7 ] */
struct gimple_statement_omp omp;
- /* [ WORD 6 ]
+ /* [ WORD 8 ]
Clauses. */
tree clauses;
- /* [ WORD 7 ]
+ /* [ WORD 9 ]
Child function holding the body of the parallel region. */
tree child_fn;
- /* [ WORD 8 ]
+ /* [ WORD 10 ]
Shared data argument. */
tree data_arg;
};
@@ -684,14 +567,14 @@ struct GTY(()) gimple_statement_omp_parallel {
/* GIMPLE_OMP_TASK */
struct GTY(()) gimple_statement_omp_task {
- /* [ WORD 1-8 ] */
+ /* [ WORD 1-10 ] */
struct gimple_statement_omp_parallel par;
- /* [ WORD 9 ]
+ /* [ WORD 11 ]
Child function holding firstprivate initialization if needed. */
tree copy_fn;
- /* [ WORD 10-11 ]
+ /* [ WORD 12-13 ]
Size and alignment in bytes of the argument data block. */
tree arg_size;
tree arg_align;
@@ -705,13 +588,13 @@ struct GTY(()) gimple_statement_omp_task {
/* GIMPLE_OMP_SECTIONS */
struct GTY(()) gimple_statement_omp_sections {
- /* [ WORD 1-5 ] */
+ /* [ WORD 1-7 ] */
struct gimple_statement_omp omp;
- /* [ WORD 6 ] */
+ /* [ WORD 8 ] */
tree clauses;
- /* [ WORD 7 ]
+ /* [ WORD 9 ]
The control variable used for deciding which of the sections to
execute. */
tree control;
@@ -723,23 +606,23 @@ struct GTY(()) gimple_statement_omp_sections {
do not need the body field. */
struct GTY(()) gimple_statement_omp_continue {
- /* [ WORD 1-4 ] */
+ /* [ WORD 1-6 ] */
struct gimple_statement_base gsbase;
- /* [ WORD 5 ] */
+ /* [ WORD 7 ] */
tree control_def;
- /* [ WORD 6 ] */
+ /* [ WORD 8 ] */
tree control_use;
};
/* GIMPLE_OMP_SINGLE */
struct GTY(()) gimple_statement_omp_single {
- /* [ WORD 1-5 ] */
+ /* [ WORD 1-7 ] */
struct gimple_statement_omp omp;
- /* [ WORD 6 ] */
+ /* [ WORD 8 ] */
tree clauses;
};
@@ -749,10 +632,10 @@ struct GTY(()) gimple_statement_omp_single {
contains a sequence, which we don't need here. */
struct GTY(()) gimple_statement_omp_atomic_load {
- /* [ WORD 1-4 ] */
+ /* [ WORD 1-6 ] */
struct gimple_statement_base gsbase;
- /* [ WORD 5-6 ] */
+ /* [ WORD 7-8 ] */
tree rhs, lhs;
};
@@ -760,10 +643,10 @@ struct GTY(()) gimple_statement_omp_atomic_load {
See note on GIMPLE_OMP_ATOMIC_LOAD. */
struct GTY(()) gimple_statement_omp_atomic_store {
- /* [ WORD 1-4 ] */
+ /* [ WORD 1-6 ] */
struct gimple_statement_base gsbase;
- /* [ WORD 5 ] */
+ /* [ WORD 7 ] */
tree val;
};
@@ -815,7 +698,8 @@ enum gimple_statement_structure_enum {
/* Define the overall contents of a gimple tuple. It may be any of the
structures declared above for various types of tuples. */
-union GTY ((desc ("gimple_statement_structure (&%h)"), variable_size)) gimple_statement_d {
+union GTY ((desc ("gimple_statement_structure (&%h)"),
+ chain_next ("%h.gsbase.next"), variable_size)) gimple_statement_d {
struct gimple_statement_base GTY ((tag ("GSS_BASE"))) gsbase;
struct gimple_statement_with_ops GTY ((tag ("GSS_WITH_OPS"))) gsops;
struct gimple_statement_with_memory_ops_base GTY ((tag ("GSS_WITH_MEM_OPS_BASE"))) gsmembase;
@@ -922,6 +806,7 @@ gimple gimple_build_transaction (gimple_seq, tree);
gimple gimple_build_predict (enum br_predictor, enum prediction);
enum gimple_statement_structure_enum gss_for_assign (enum tree_code);
void sort_case_labels (VEC(tree,heap) *);
+void preprocess_case_label_vec_for_gimple (VEC(tree,heap) *, tree, tree *);
void gimple_set_body (tree, gimple_seq);
gimple_seq gimple_body (tree);
bool gimple_has_body_p (tree);
@@ -1135,6 +1020,115 @@ extern tree tree_ssa_strip_useless_type_conversions (tree);
extern bool useless_type_conversion_p (tree, tree);
extern bool types_compatible_p (tree, tree);
+/* Return the first node in GIMPLE sequence S. */
+
+static inline gimple_seq_node
+gimple_seq_first (gimple_seq s)
+{
+ return s;
+}
+
+
+/* Return the first statement in GIMPLE sequence S. */
+
+static inline gimple
+gimple_seq_first_stmt (gimple_seq s)
+{
+ gimple_seq_node n = gimple_seq_first (s);
+ return n;
+}
+
+
+/* Return the last node in GIMPLE sequence S. */
+
+static inline gimple_seq_node
+gimple_seq_last (gimple_seq s)
+{
+ return s ? s->gsbase.prev : NULL;
+}
+
+
+/* Return the last statement in GIMPLE sequence S. */
+
+static inline gimple
+gimple_seq_last_stmt (gimple_seq s)
+{
+ gimple_seq_node n = gimple_seq_last (s);
+ return n;
+}
+
+
+/* Set the last node in GIMPLE sequence *PS to LAST. */
+
+static inline void
+gimple_seq_set_last (gimple_seq *ps, gimple_seq_node last)
+{
+ (*ps)->gsbase.prev = last;
+}
+
+
+/* Set the first node in GIMPLE sequence *PS to FIRST. */
+
+static inline void
+gimple_seq_set_first (gimple_seq *ps, gimple_seq_node first)
+{
+ *ps = first;
+}
+
+
+/* Return true if GIMPLE sequence S is empty. */
+
+static inline bool
+gimple_seq_empty_p (gimple_seq s)
+{
+ return s == NULL;
+}
+
+
+void gimple_seq_add_stmt (gimple_seq *, gimple);
+
+/* Link gimple statement GS to the end of the sequence *SEQ_P. If
+ *SEQ_P is NULL, a new sequence is allocated. This function is
+ similar to gimple_seq_add_stmt, but does not scan the operands.
+ During gimplification, we need to manipulate statement sequences
+ before the def/use vectors have been constructed. */
+void gimple_seq_add_stmt_without_update (gimple_seq *, gimple);
+
+/* Allocate a new sequence and initialize its first element with STMT. */
+
+static inline gimple_seq
+gimple_seq_alloc_with_stmt (gimple stmt)
+{
+ gimple_seq seq = NULL;
+ gimple_seq_add_stmt (&seq, stmt);
+ return seq;
+}
+
+
+/* Returns the sequence of statements in BB. */
+
+static inline gimple_seq
+bb_seq (const_basic_block bb)
+{
+ return (!(bb->flags & BB_RTL)) ? bb->il.gimple.seq : NULL;
+}
+
+static inline gimple_seq *
+bb_seq_addr (basic_block bb)
+{
+ return (!(bb->flags & BB_RTL)) ? &bb->il.gimple.seq : NULL;
+}
+
+/* Sets the sequence of statements in BB to SEQ. */
+
+static inline void
+set_bb_seq (basic_block bb, gimple_seq seq)
+{
+ gcc_checking_assert (!(bb->flags & BB_RTL));
+ bb->il.gimple.seq = seq;
+}
+
+
/* Return the code for GIMPLE statement G. */
static inline enum gimple_code
@@ -1357,6 +1351,16 @@ gimple_uid (const_gimple g)
}
+/* Make statement G a singleton sequence. */
+
+static inline void
+gimple_init_singleton (gimple g)
+{
+ g->gsbase.next = NULL;
+ g->gsbase.prev = g;
+}
+
+
/* Return true if GIMPLE statement G has register or memory operands. */
static inline bool
@@ -2818,13 +2822,19 @@ gimple_bind_append_vars (gimple gs, tree vars)
}
+static inline gimple_seq *
+gimple_bind_body_ptr (gimple gs)
+{
+ GIMPLE_CHECK (gs, GIMPLE_BIND);
+ return &gs->gimple_bind.body;
+}
+
/* Return the GIMPLE sequence contained in the GIMPLE_BIND statement GS. */
static inline gimple_seq
gimple_bind_body (gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_BIND);
- return gs->gimple_bind.body;
+ return *gimple_bind_body_ptr (gs);
}
@@ -3107,17 +3117,6 @@ gimple_catch_types_ptr (gimple gs)
}
-/* Return the GIMPLE sequence representing the body of the handler of
- GIMPLE_CATCH statement GS. */
-
-static inline gimple_seq
-gimple_catch_handler (gimple gs)
-{
- GIMPLE_CHECK (gs, GIMPLE_CATCH);
- return gs->gimple_catch.handler;
-}
-
-
/* Return a pointer to the GIMPLE sequence representing the body of
the handler of GIMPLE_CATCH statement GS. */
@@ -3129,6 +3128,16 @@ gimple_catch_handler_ptr (gimple gs)
}
+/* Return the GIMPLE sequence representing the body of the handler of
+ GIMPLE_CATCH statement GS. */
+
+static inline gimple_seq
+gimple_catch_handler (gimple gs)
+{
+ return *gimple_catch_handler_ptr (gs);
+}
+
+
/* Set T to be the set of types handled by GIMPLE_CATCH GS. */
static inline void
@@ -3170,14 +3179,24 @@ gimple_eh_filter_types_ptr (gimple gs)
}
+/* Return a pointer to the sequence of statement to execute when
+ GIMPLE_EH_FILTER statement fails. */
+
+static inline gimple_seq *
+gimple_eh_filter_failure_ptr (gimple gs)
+{
+ GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
+ return &gs->gimple_eh_filter.failure;
+}
+
+
/* Return the sequence of statement to execute when GIMPLE_EH_FILTER
statement fails. */
static inline gimple_seq
gimple_eh_filter_failure (gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
- return gs->gimple_eh_filter.failure;
+ return *gimple_eh_filter_failure_ptr (gs);
}
@@ -3221,18 +3240,30 @@ gimple_eh_must_not_throw_set_fndecl (gimple gs, tree decl)
/* GIMPLE_EH_ELSE accessors. */
+static inline gimple_seq *
+gimple_eh_else_n_body_ptr (gimple gs)
+{
+ GIMPLE_CHECK (gs, GIMPLE_EH_ELSE);
+ return &gs->gimple_eh_else.n_body;
+}
+
static inline gimple_seq
gimple_eh_else_n_body (gimple gs)
{
+ return *gimple_eh_else_n_body_ptr (gs);
+}
+
+static inline gimple_seq *
+gimple_eh_else_e_body_ptr (gimple gs)
+{
GIMPLE_CHECK (gs, GIMPLE_EH_ELSE);
- return gs->gimple_eh_else.n_body;
+ return &gs->gimple_eh_else.e_body;
}
static inline gimple_seq
gimple_eh_else_e_body (gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_EH_ELSE);
- return gs->gimple_eh_else.e_body;
+ return *gimple_eh_else_e_body_ptr (gs);
}
static inline void
@@ -3285,13 +3316,34 @@ gimple_try_catch_is_cleanup (const_gimple gs)
}
+/* Return a pointer to the sequence of statements used as the
+ body for GIMPLE_TRY GS. */
+
+static inline gimple_seq *
+gimple_try_eval_ptr (gimple gs)
+{
+ GIMPLE_CHECK (gs, GIMPLE_TRY);
+ return &gs->gimple_try.eval;
+}
+
+
/* Return the sequence of statements used as the body for GIMPLE_TRY GS. */
static inline gimple_seq
gimple_try_eval (gimple gs)
{
+ return *gimple_try_eval_ptr (gs);
+}
+
+
+/* Return a pointer to the sequence of statements used as the cleanup body for
+ GIMPLE_TRY GS. */
+
+static inline gimple_seq *
+gimple_try_cleanup_ptr (gimple gs)
+{
GIMPLE_CHECK (gs, GIMPLE_TRY);
- return gs->gimple_try.eval;
+ return &gs->gimple_try.cleanup;
}
@@ -3301,8 +3353,7 @@ gimple_try_eval (gimple gs)
static inline gimple_seq
gimple_try_cleanup (gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_TRY);
- return gs->gimple_try.cleanup;
+ return *gimple_try_cleanup_ptr (gs);
}
@@ -3341,13 +3392,22 @@ gimple_try_set_cleanup (gimple gs, gimple_seq cleanup)
}
+/* Return a pointer to the cleanup sequence for cleanup statement GS. */
+
+static inline gimple_seq *
+gimple_wce_cleanup_ptr (gimple gs)
+{
+ GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
+ return &gs->gimple_wce.cleanup;
+}
+
+
/* Return the cleanup sequence for cleanup statement GS. */
static inline gimple_seq
gimple_wce_cleanup (gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
- return gs->gimple_wce.cleanup;
+ return *gimple_wce_cleanup_ptr (gs);
}
@@ -3746,12 +3806,20 @@ gimple_debug_source_bind_set_value (gimple dbg, tree value)
gimple_set_op (dbg, 1, value);
}
+/* Return a pointer to the body for the OMP statement GS. */
+
+static inline gimple_seq *
+gimple_omp_body_ptr (gimple gs)
+{
+ return &gs->omp.body;
+}
+
/* Return the body for the OMP statement GS. */
static inline gimple_seq
gimple_omp_body (gimple gs)
{
- return gs->omp.body;
+ return *gimple_omp_body_ptr (gs);
}
/* Set BODY to be the body for the OMP statement GS. */
@@ -3965,14 +4033,24 @@ gimple_omp_for_set_incr (gimple gs, size_t i, tree incr)
}
+/* Return a pointer to the sequence of statements to execute before the OMP_FOR
+ statement GS starts. */
+
+static inline gimple_seq *
+gimple_omp_for_pre_body_ptr (gimple gs)
+{
+ GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
+ return &gs->gimple_omp_for.pre_body;
+}
+
+
/* Return the sequence of statements to execute before the OMP_FOR
statement GS starts. */
static inline gimple_seq
gimple_omp_for_pre_body (gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
- return gs->gimple_omp_for.pre_body;
+ return *gimple_omp_for_pre_body_ptr (gs);
}
@@ -4626,13 +4704,21 @@ gimple_omp_continue_set_control_use (gimple g, tree use)
g->gimple_omp_continue.control_use = use;
}
+/* Return a pointer to the body for the GIMPLE_TRANSACTION statement GS. */
+
+static inline gimple_seq *
+gimple_transaction_body_ptr (gimple gs)
+{
+ GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
+ return &gs->gimple_transaction.body;
+}
+
/* Return the body for the GIMPLE_TRANSACTION statement GS. */
static inline gimple_seq
gimple_transaction_body (gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
- return gs->gimple_transaction.body;
+ return *gimple_transaction_body_ptr (gs);
}
/* Return the label associated with a GIMPLE_TRANSACTION. */
@@ -4857,17 +4943,28 @@ is_gimple_reg_type (tree type)
/* Return a new iterator pointing to GIMPLE_SEQ's first statement. */
static inline gimple_stmt_iterator
-gsi_start (gimple_seq seq)
+gsi_start_1 (gimple_seq *seq)
{
gimple_stmt_iterator i;
- i.ptr = gimple_seq_first (seq);
+ i.ptr = gimple_seq_first (*seq);
i.seq = seq;
- i.bb = (i.ptr && i.ptr->stmt) ? gimple_bb (i.ptr->stmt) : NULL;
+ i.bb = i.ptr ? gimple_bb (i.ptr) : NULL;
return i;
}
+#define gsi_start(x) gsi_start_1(&(x))
+
+static inline gimple_stmt_iterator
+gsi_none (void)
+{
+ gimple_stmt_iterator i;
+ i.ptr = NULL;
+ i.seq = NULL;
+ i.bb = NULL;
+ return i;
+}
/* Return a new iterator pointing to the first statement in basic block BB. */
@@ -4875,10 +4972,10 @@ static inline gimple_stmt_iterator
gsi_start_bb (basic_block bb)
{
gimple_stmt_iterator i;
- gimple_seq seq;
+ gimple_seq *seq;
- seq = bb_seq (bb);
- i.ptr = gimple_seq_first (seq);
+ seq = bb_seq_addr (bb);
+ i.ptr = gimple_seq_first (*seq);
i.seq = seq;
i.bb = bb;
@@ -4889,17 +4986,18 @@ gsi_start_bb (basic_block bb)
/* Return a new iterator initially pointing to GIMPLE_SEQ's last statement. */
static inline gimple_stmt_iterator
-gsi_last (gimple_seq seq)
+gsi_last_1 (gimple_seq *seq)
{
gimple_stmt_iterator i;
- i.ptr = gimple_seq_last (seq);
+ i.ptr = gimple_seq_last (*seq);
i.seq = seq;
- i.bb = (i.ptr && i.ptr->stmt) ? gimple_bb (i.ptr->stmt) : NULL;
+ i.bb = i.ptr ? gimple_bb (i.ptr) : NULL;
return i;
}
+#define gsi_last(x) gsi_last_1(&(x))
/* Return a new iterator pointing to the last statement in basic block BB. */
@@ -4907,10 +5005,10 @@ static inline gimple_stmt_iterator
gsi_last_bb (basic_block bb)
{
gimple_stmt_iterator i;
- gimple_seq seq;
+ gimple_seq *seq;
- seq = bb_seq (bb);
- i.ptr = gimple_seq_last (seq);
+ seq = bb_seq_addr (bb);
+ i.ptr = gimple_seq_last (*seq);
i.seq = seq;
i.bb = bb;
@@ -4932,7 +5030,7 @@ gsi_end_p (gimple_stmt_iterator i)
static inline bool
gsi_one_before_end_p (gimple_stmt_iterator i)
{
- return i.ptr != NULL && i.ptr->next == NULL;
+ return i.ptr != NULL && i.ptr->gsbase.next == NULL;
}
@@ -4941,7 +5039,7 @@ gsi_one_before_end_p (gimple_stmt_iterator i)
static inline void
gsi_next (gimple_stmt_iterator *i)
{
- i->ptr = i->ptr->next;
+ i->ptr = i->ptr->gsbase.next;
}
/* Advance the iterator to the previous gimple statement. */
@@ -4949,7 +5047,11 @@ gsi_next (gimple_stmt_iterator *i)
static inline void
gsi_prev (gimple_stmt_iterator *i)
{
- i->ptr = i->ptr->prev;
+ gimple prev = i->ptr->gsbase.prev;
+ if (prev->gsbase.next)
+ i->ptr = prev;
+ else
+ i->ptr = NULL;
}
/* Return the current stmt. */
@@ -4957,7 +5059,7 @@ gsi_prev (gimple_stmt_iterator *i)
static inline gimple
gsi_stmt (gimple_stmt_iterator i)
{
- return i.ptr->stmt;
+ return i.ptr;
}
/* Return a block statement iterator that points to the first non-label
@@ -5026,18 +5128,6 @@ gsi_last_nondebug_bb (basic_block bb)
return i;
}
-/* Return a pointer to the current stmt.
-
- NOTE: You may want to use gsi_replace on the iterator itself,
- as this performs additional bookkeeping that will not be done
- if you simply assign through a pointer returned by gsi_stmt_ptr. */
-
-static inline gimple *
-gsi_stmt_ptr (gimple_stmt_iterator *i)
-{
- return &i->ptr->stmt;
-}
-
/* Return the basic block associated with this iterator. */
@@ -5053,7 +5143,7 @@ gsi_bb (gimple_stmt_iterator i)
static inline gimple_seq
gsi_seq (gimple_stmt_iterator i)
{
- return i.seq;
+ return *i.seq;
}
@@ -5070,8 +5160,10 @@ enum gsi_iterator_update
/* In gimple-iterator.c */
gimple_stmt_iterator gsi_start_phis (basic_block);
gimple_seq gsi_split_seq_after (gimple_stmt_iterator);
-gimple_seq gsi_split_seq_before (gimple_stmt_iterator *);
+void gsi_split_seq_before (gimple_stmt_iterator *, gimple_seq *);
+void gsi_set_stmt (gimple_stmt_iterator *, gimple);
void gsi_replace (gimple_stmt_iterator *, gimple, bool);
+void gsi_replace_with_seq (gimple_stmt_iterator *, gimple_seq, bool);
void gsi_insert_before (gimple_stmt_iterator *, gimple,
enum gsi_iterator_update);
void gsi_insert_before_without_update (gimple_stmt_iterator *, gimple,
@@ -5166,6 +5258,8 @@ typedef tree (*walk_stmt_fn) (gimple_stmt_iterator *, bool *,
gimple walk_gimple_seq (gimple_seq, walk_stmt_fn, walk_tree_fn,
struct walk_stmt_info *);
+gimple walk_gimple_seq_mod (gimple_seq *, walk_stmt_fn, walk_tree_fn,
+ struct walk_stmt_info *);
tree walk_gimple_stmt (gimple_stmt_iterator *, walk_stmt_fn, walk_tree_fn,
struct walk_stmt_info *);
tree walk_gimple_op (gimple, walk_tree_fn, struct walk_stmt_info *);
@@ -5178,7 +5272,6 @@ enum gimple_alloc_kind
gimple_alloc_kind_assign, /* Assignments. */
gimple_alloc_kind_phi, /* PHI nodes. */
gimple_alloc_kind_cond, /* Conditionals. */
- gimple_alloc_kind_seq, /* Sequences. */
gimple_alloc_kind_rest, /* Everything else. */
gimple_alloc_kind_all
};
diff --git a/gcc/gimplify.c b/gcc/gimplify.c
index 9c58a38b927..ca38a0e2675 100644
--- a/gcc/gimplify.c
+++ b/gcc/gimplify.c
@@ -169,11 +169,7 @@ gimple_seq_add_stmt_without_update (gimple_seq *seq_p, gimple gs)
if (gs == NULL)
return;
- if (*seq_p == NULL)
- *seq_p = gimple_seq_alloc ();
-
si = gsi_last (*seq_p);
-
gsi_insert_after_without_update (&si, gs, GSI_NEW_STMT);
}
@@ -200,9 +196,6 @@ gimplify_seq_add_seq (gimple_seq *dst_p, gimple_seq src)
if (src == NULL)
return;
- if (*dst_p == NULL)
- *dst_p = gimple_seq_alloc ();
-
si = gsi_last (*dst_p);
gsi_insert_seq_after_without_update (&si, src, GSI_NEW_STMT);
}
@@ -1448,6 +1441,13 @@ gimplify_decl_expr (tree *stmt_p, gimple_seq *seq_p)
&& !TYPE_SIZES_GIMPLIFIED (TREE_TYPE (decl)))
gimplify_type_sizes (TREE_TYPE (decl), seq_p);
+ /* ??? DECL_ORIGINAL_TYPE is streamed for LTO so it needs to be gimplified
+ in case its size expressions contain problematic nodes like CALL_EXPR. */
+ if (TREE_CODE (decl) == TYPE_DECL
+ && DECL_ORIGINAL_TYPE (decl)
+ && !TYPE_SIZES_GIMPLIFIED (DECL_ORIGINAL_TYPE (decl)))
+ gimplify_type_sizes (DECL_ORIGINAL_TYPE (decl), seq_p);
+
if (TREE_CODE (decl) == VAR_DECL && !DECL_EXTERNAL (decl))
{
tree init = DECL_INITIAL (decl);
@@ -1538,7 +1538,7 @@ gimplify_statement_list (tree *expr_p, gimple_seq *pre_p)
return GS_ALL_DONE;
}
-
+
/* Compare two case labels. Because the front end should already have
made sure that case ranges do not overlap, it is enough to only compare
the CASE_LOW values of each case label. */
@@ -1565,8 +1565,183 @@ sort_case_labels (VEC(tree,heap)* label_vec)
{
VEC_qsort (tree, label_vec, compare_case_labels);
}
+
+/* Prepare a vector of case labels to be used in a GIMPLE_SWITCH statement.
+
+ LABELS is a vector that contains all case labels to look at.
+
+ INDEX_TYPE is the type of the switch index expression. Case labels
+ in LABELS are discarded if their values are not in the value range
+ covered by INDEX_TYPE. The remaining case label values are folded
+ to INDEX_TYPE.
+
+ If a default case exists in LABELS, it is removed from LABELS and
+ returned in DEFAULT_CASEP. If no default case exists, but the
+ case labels already cover the whole range of INDEX_TYPE, a default
+ case is returned pointing to one of the existing case labels.
+ Otherwise DEFAULT_CASEP is set to NULL_TREE.
+
+ DEFAULT_CASEP may be NULL, in which case the above comment doesn't
+ apply and no action is taken regardless of whether a default case is
+ found or not. */
+
+void
+preprocess_case_label_vec_for_gimple (VEC(tree,heap) *labels,
+ tree index_type,
+ tree *default_casep)
+{
+ tree min_value, max_value;
+ tree default_case = NULL_TREE;
+ size_t i, len;
+
+ i = 0;
+ min_value = TYPE_MIN_VALUE (index_type);
+ max_value = TYPE_MAX_VALUE (index_type);
+ while (i < VEC_length (tree, labels))
+ {
+ tree elt = VEC_index (tree, labels, i);
+ tree low = CASE_LOW (elt);
+ tree high = CASE_HIGH (elt);
+ bool remove_element = FALSE;
+
+ if (low)
+ {
+ gcc_checking_assert (TREE_CODE (low) == INTEGER_CST);
+ gcc_checking_assert (!high || TREE_CODE (high) == INTEGER_CST);
+
+ /* This is a non-default case label, i.e. it has a value.
+
+ See if the case label is reachable within the range of
+ the index type. Remove out-of-range case values. Turn
+ case ranges into a canonical form (high > low strictly)
+ and convert the case label values to the index type.
+
+ NB: The type of gimple_switch_index() may be the promoted
+ type, but the case labels retain the original type. */
+
+ if (high)
+ {
+ /* This is a case range. Discard empty ranges.
+ If the bounds or the range are equal, turn this
+ into a simple (one-value) case. */
+ int cmp = tree_int_cst_compare (high, low);
+ if (cmp < 0)
+ remove_element = TRUE;
+ else if (cmp == 0)
+ high = NULL_TREE;
+ }
+
+ if (! high)
+ {
+ /* If the simple case value is unreachable, ignore it. */
+ if ((TREE_CODE (min_value) == INTEGER_CST
+ && tree_int_cst_compare (low, min_value) < 0)
+ || (TREE_CODE (max_value) == INTEGER_CST
+ && tree_int_cst_compare (low, max_value) > 0))
+ remove_element = TRUE;
+ else
+ low = fold_convert (index_type, low);
+ }
+ else
+ {
+ /* If the entire case range is unreachable, ignore it. */
+ if ((TREE_CODE (min_value) == INTEGER_CST
+ && tree_int_cst_compare (high, min_value) < 0)
+ || (TREE_CODE (max_value) == INTEGER_CST
+ && tree_int_cst_compare (low, max_value) > 0))
+ remove_element = TRUE;
+ else
+ {
+ /* If the lower bound is less than the index type's
+ minimum value, truncate the range bounds. */
+ if (TREE_CODE (min_value) == INTEGER_CST
+ && tree_int_cst_compare (low, min_value) < 0)
+ low = min_value;
+ low = fold_convert (index_type, low);
+
+ /* If the upper bound is greater than the index type's
+ maximum value, truncate the range bounds. */
+ if (TREE_CODE (max_value) == INTEGER_CST
+ && tree_int_cst_compare (high, max_value) > 0)
+ high = max_value;
+ high = fold_convert (index_type, high);
+
+ /* We may have folded a case range to a one-value case. */
+ if (tree_int_cst_equal (low, high))
+ high = NULL_TREE;
+ }
+ }
+
+ CASE_LOW (elt) = low;
+ CASE_HIGH (elt) = high;
+ }
+ else
+ {
+ gcc_assert (!default_case);
+ default_case = elt;
+ /* The default case must be passed separately to the
+ gimple_build_switch routines. But if DEFAULT_CASEP
+ is NULL, we do not remove the default case (it would
+ be completely lost). */
+ if (default_casep)
+ remove_element = TRUE;
+ }
+
+ if (remove_element)
+ VEC_ordered_remove (tree, labels, i);
+ else
+ i++;
+ }
+ len = i;
+
+ if (!VEC_empty (tree, labels))
+ sort_case_labels (labels);
+
+ if (default_casep && !default_case)
+ {
+ /* If the switch has no default label, add one, so that we jump
+ around the switch body. If the labels already cover the whole
+ range of the switch index_type, add the default label pointing
+ to one of the existing labels. */
+ if (len
+ && TYPE_MIN_VALUE (index_type)
+ && TYPE_MAX_VALUE (index_type)
+ && tree_int_cst_equal (CASE_LOW (VEC_index (tree, labels, 0)),
+ TYPE_MIN_VALUE (index_type)))
+ {
+ tree low, high = CASE_HIGH (VEC_index (tree, labels, len - 1));
+ if (!high)
+ high = CASE_LOW (VEC_index (tree, labels, len - 1));
+ if (tree_int_cst_equal (high, TYPE_MAX_VALUE (index_type)))
+ {
+ for (i = 1; i < len; i++)
+ {
+ high = CASE_LOW (VEC_index (tree, labels, i));
+ low = CASE_HIGH (VEC_index (tree, labels, i - 1));
+ if (!low)
+ low = CASE_LOW (VEC_index (tree, labels, i - 1));
+ if ((TREE_INT_CST_LOW (low) + 1
+ != TREE_INT_CST_LOW (high))
+ || (TREE_INT_CST_HIGH (low)
+ + (TREE_INT_CST_LOW (high) == 0)
+ != TREE_INT_CST_HIGH (high)))
+ break;
+ }
+ if (i == len)
+ {
+ tree label = CASE_LABEL (VEC_index (tree, labels, 0));
+ default_case = build_case_label (NULL_TREE, NULL_TREE,
+ label);
+ }
+ }
+ }
+ }
-/* Gimplify a SWITCH_EXPR, and collect a TREE_VEC of the labels it can
+ if (default_casep)
+ *default_casep = default_case;
+}
+
+/* Gimplify a SWITCH_EXPR, and collect the vector of labels it can
branch to. */
static enum gimplify_status
@@ -1588,9 +1763,7 @@ gimplify_switch_expr (tree *expr_p, gimple_seq *pre_p)
{
VEC (tree,heap) *labels;
VEC (tree,heap) *saved_labels;
- tree min_value, max_value;
tree default_case = NULL_TREE;
- size_t i, len;
gimple gimple_switch;
/* If someone can be bothered to fill in the labels, they can
@@ -1606,155 +1779,22 @@ gimplify_switch_expr (tree *expr_p, gimple_seq *pre_p)
labels = gimplify_ctxp->case_labels;
gimplify_ctxp->case_labels = saved_labels;
- i = 0;
- min_value = TYPE_MIN_VALUE (index_type);
- max_value = TYPE_MAX_VALUE (index_type);
- while (i < VEC_length (tree, labels))
- {
- tree elt = VEC_index (tree, labels, i);
- tree low = CASE_LOW (elt);
- tree high = CASE_HIGH (elt);
- bool remove_element = FALSE;
-
-
- if (low)
- {
- gcc_checking_assert (TREE_CODE (low) == INTEGER_CST);
- gcc_checking_assert (!high || TREE_CODE (high) == INTEGER_CST);
-
- /* This is a non-default case label, i.e. it has a value.
-
- See if the case label is reachable within the range of
- the index type. Remove out-of-range case values. Turn
- case ranges into a canonical form (high > low strictly)
- and convert the case label values to the index type.
-
- NB: The type of gimple_switch_index() may be the promoted
- type, but the case labels retain the original type. */
-
- if (high)
- {
- /* This is a case range. Discard empty ranges.
- If the bounds or the range are equal, turn this
- into a simple (one-value) case. */
- int cmp = tree_int_cst_compare (high, low);
- if (cmp < 0)
- remove_element = TRUE;
- else if (cmp == 0)
- high = NULL_TREE;
- }
-
- if (! high)
- {
- /* If the simple case value is unreachable, ignore it. */
- if ((TREE_CODE (min_value) == INTEGER_CST
- && tree_int_cst_compare (low, min_value) < 0)
- || (TREE_CODE (max_value) == INTEGER_CST
- && tree_int_cst_compare (low, max_value) > 0))
- remove_element = TRUE;
- else
- low = fold_convert (index_type, low);
- }
- else
- {
- /* If the entire case range is unreachable, ignore it. */
- if ((TREE_CODE (min_value) == INTEGER_CST
- && tree_int_cst_compare (high, min_value) < 0)
- || (TREE_CODE (max_value) == INTEGER_CST
- && tree_int_cst_compare (low, max_value) > 0))
- remove_element = TRUE;
- else
- {
- /* If the lower bound is less than the index type's
- minimum value, truncate the range bounds. */
- if (TREE_CODE (min_value) == INTEGER_CST
- && tree_int_cst_compare (low, min_value) < 0)
- low = min_value;
- low = fold_convert (index_type, low);
-
- /* If the upper bound is greater than the index type's
- maximum value, truncate the range bounds. */
- if (TREE_CODE (max_value) == INTEGER_CST
- && tree_int_cst_compare (high, max_value) > 0)
- high = max_value;
- high = fold_convert (index_type, high);
- }
- }
-
- CASE_LOW (elt) = low;
- CASE_HIGH (elt) = high;
- }
- else
- {
- /* The default case must be the last label in the list. */
- gcc_assert (!default_case);
- default_case = elt;
- remove_element = TRUE;
- }
-
- if (remove_element)
- VEC_ordered_remove (tree, labels, i);
- else
- i++;
- }
- len = i;
-
- if (!VEC_empty (tree, labels))
- sort_case_labels (labels);
+ preprocess_case_label_vec_for_gimple (labels, index_type,
+ &default_case);
if (!default_case)
{
- /* If the switch has no default label, add one, so that we jump
- around the switch body. If the labels already cover the whole
- range of the switch index_type, add the default label pointing
- to one of the existing labels. */
- if (len
- && TYPE_MIN_VALUE (index_type)
- && TYPE_MAX_VALUE (index_type)
- && tree_int_cst_equal (CASE_LOW (VEC_index (tree, labels, 0)),
- TYPE_MIN_VALUE (index_type)))
- {
- tree low, high = CASE_HIGH (VEC_index (tree, labels, len - 1));
- if (!high)
- high = CASE_LOW (VEC_index (tree, labels, len - 1));
- if (tree_int_cst_equal (high, TYPE_MAX_VALUE (index_type)))
- {
- for (i = 1; i < len; i++)
- {
- high = CASE_LOW (VEC_index (tree, labels, i));
- low = CASE_HIGH (VEC_index (tree, labels, i - 1));
- if (!low)
- low = CASE_LOW (VEC_index (tree, labels, i - 1));
- if ((TREE_INT_CST_LOW (low) + 1
- != TREE_INT_CST_LOW (high))
- || (TREE_INT_CST_HIGH (low)
- + (TREE_INT_CST_LOW (high) == 0)
- != TREE_INT_CST_HIGH (high)))
- break;
- }
- if (i == len)
- {
- tree label = CASE_LABEL (VEC_index (tree, labels, 0));
- default_case = build_case_label (NULL_TREE, NULL_TREE,
- label);
- }
- }
- }
+ gimple new_default;
- if (!default_case)
- {
- gimple new_default;
-
- default_case
- = build_case_label (NULL_TREE, NULL_TREE,
- create_artificial_label (UNKNOWN_LOCATION));
- new_default = gimple_build_label (CASE_LABEL (default_case));
- gimplify_seq_add_stmt (&switch_body_seq, new_default);
- }
+ default_case
+ = build_case_label (NULL_TREE, NULL_TREE,
+ create_artificial_label (UNKNOWN_LOCATION));
+ new_default = gimple_build_label (CASE_LABEL (default_case));
+ gimplify_seq_add_stmt (&switch_body_seq, new_default);
}
gimple_switch = gimple_build_switch_vec (SWITCH_COND (switch_expr),
- default_case, labels);
+ default_case, labels);
gimplify_seq_add_stmt (pre_p, gimple_switch);
gimplify_seq_add_seq (pre_p, switch_body_seq);
VEC_free(tree, heap, labels);
@@ -5431,8 +5471,8 @@ gimplify_cleanup_point_expr (tree *expr_p, gimple_seq *pre_p)
gtry = gimple_build_try (seq, gimple_wce_cleanup (wce), kind);
/* Do not use gsi_replace here, as it may scan operands.
We want to do a simple structural modification only. */
- *gsi_stmt_ptr (&iter) = gtry;
- iter = gsi_start (seq);
+ gsi_set_stmt (&iter, gtry);
+ iter = gsi_start (gtry->gimple_try.eval);
}
}
else
@@ -5614,9 +5654,6 @@ gimplify_stmt (tree *stmt_p, gimple_seq *seq_p)
{
gimple_seq_node last;
- if (!*seq_p)
- *seq_p = gimple_seq_alloc ();
-
last = gimple_seq_last (*seq_p);
gimplify_expr (stmt_p, seq_p, NULL, is_gimple_stmt, fb_none);
return last != gimple_seq_last (*seq_p);
@@ -6113,8 +6150,8 @@ gimplify_scan_omp_clauses (tree *list_p, gimple_seq *pre_p,
gimplify_omp_ctxp = ctx;
push_gimplify_context (&gctx);
- OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = gimple_seq_alloc ();
- OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = gimple_seq_alloc ();
+ OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
+ OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
gimplify_and_add (OMP_CLAUSE_REDUCTION_INIT (c),
&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
@@ -6450,7 +6487,7 @@ gimplify_omp_for (tree *expr_p, gimple_seq *pre_p)
gimplify_and_add (OMP_FOR_PRE_BODY (for_stmt), &for_pre_body);
OMP_FOR_PRE_BODY (for_stmt) = NULL_TREE;
- for_body = gimple_seq_alloc ();
+ for_body = NULL;
gcc_assert (TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt))
== TREE_VEC_LENGTH (OMP_FOR_COND (for_stmt)));
gcc_assert (TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt))
@@ -8068,7 +8105,7 @@ gimplify_type_sizes (tree type, gimple_seq *list_p)
void
gimplify_one_sizepos (tree *expr_p, gimple_seq *stmt_p)
{
- tree type, expr = *expr_p;
+ tree expr = *expr_p;
/* We don't do anything if the value isn't there, is constant, or contains
A PLACEHOLDER_EXPR. We also don't want to do anything if it's already
@@ -8080,30 +8117,10 @@ gimplify_one_sizepos (tree *expr_p, gimple_seq *stmt_p)
|| CONTAINS_PLACEHOLDER_P (expr))
return;
- type = TREE_TYPE (expr);
*expr_p = unshare_expr (expr);
gimplify_expr (expr_p, stmt_p, NULL, is_gimple_val, fb_rvalue);
expr = *expr_p;
-
- /* Verify that we've an exact type match with the original expression.
- In particular, we do not wish to drop a "sizetype" in favour of a
- type of similar dimensions. We don't want to pollute the generic
- type-stripping code with this knowledge because it doesn't matter
- for the bulk of GENERIC/GIMPLE. It only matters that TYPE_SIZE_UNIT
- and friends retain their "sizetype-ness". */
- if (TREE_TYPE (expr) != type
- && TREE_CODE (type) == INTEGER_TYPE
- && TYPE_IS_SIZETYPE (type))
- {
- tree tmp;
- gimple stmt;
-
- *expr_p = create_tmp_var (type, NULL);
- tmp = build1 (NOP_EXPR, type, expr);
- stmt = gimplify_assign (*expr_p, tmp, stmt_p);
- gimple_set_location (stmt, EXPR_LOC_OR_HERE (expr));
- }
}
/* Gimplify the body of statements of FNDECL and return a GIMPLE_BIND node
@@ -8286,7 +8303,7 @@ gimplify_function_tree (tree fndecl)
/* The tree body of the function is no longer needed, replace it
with the new GIMPLE body. */
- seq = gimple_seq_alloc ();
+ seq = NULL;
gimple_seq_add_stmt (&seq, bind);
gimple_set_body (fndecl, seq);
@@ -8335,7 +8352,7 @@ gimplify_function_tree (tree fndecl)
/* Replace the current function body with the body
wrapped in the try/finally TF. */
- seq = gimple_seq_alloc ();
+ seq = NULL;
gimple_seq_add_stmt (&seq, new_bind);
gimple_set_body (fndecl, seq);
}
diff --git a/gcc/go/ChangeLog b/gcc/go/ChangeLog
index 951d3f9a137..3980213e516 100644
--- a/gcc/go/ChangeLog
+++ b/gcc/go/ChangeLog
@@ -1,6 +1,18 @@
-2012-04-30 Jan Hubicka <jh@suse.cz>
+2012-05-09 Ian Lance Taylor <iant@google.com>
- * gogo-tree.cc (Gogo::write_globals): Use finalize_compilation_unit.
+ * lang.opt: Add -fgo-pkgpath.
+ * go-lang.c (go_pkgpath): New static variable.
+ (go_prefix): New static variable.
+ (go_langhook_init): Pass go_pkgpath and go_prefix to
+ go_create_gogo.
+ (go_langhook_handle_option): Handle -fgo-pkgpath. Change
+ -fgo-prefix handling to just set go_prefix.
+ * go-c.h (go_set_prefix): Don't declare.
+ (go_create_gogo): Add pkgpath and prefix to declaration.
+ * go-gcc.cc (Gcc_backend::global_variable): Change unique_prefix
+ to pkgpath. Don't include the package name in the asm name.
+ * gccgo.texi (Invoking gccgo): Document -fgo-pkgpath. Update the
+ docs for -fgo-prefix.
2012-04-23 Ian Lance Taylor <iant@google.com>
diff --git a/gcc/go/gccgo.texi b/gcc/go/gccgo.texi
index 5d0efb44fdb..a5e37e76e80 100644
--- a/gcc/go/gccgo.texi
+++ b/gcc/go/gccgo.texi
@@ -157,14 +157,32 @@ compile time.
When linking, specify a library search directory, as with
@command{gcc}.
+@item -fgo-pkgpath=@var{string}
+@cindex @option{-fgo-pkgpath}
+Set the package path to use. This sets the value returned by the
+PkgPath method of reflect.Type objects. It is also used for the names
+of globally visible symbols. The argument to this option should
+normally be the string that will be used to import this package after
+it has been installed; in other words, a pathname within the
+directories specified by the @option{-I} option.
+
@item -fgo-prefix=@var{string}
@cindex @option{-fgo-prefix}
+An alternative to @option{-fgo-pkgpath}. The argument will be
+combined with the package name from the source file to produce the
+package path. If @option{-fgo-pkgpath} is used, @option{-fgo-prefix}
+will be ignored.
+
Go permits a single program to include more than one package with the
-same name. This option is required to make this work with
-@command{gccgo}. The argument to this option may be any string. Each
-package with the same name must use a distinct @option{-fgo-prefix}
-option. The argument is typically the full path under which the
-package will be installed, as that must obviously be unique.
+same name in the @code{package} clause in the source file, though
+obviously the two packages must be imported using different pathnames.
+In order for this to work with @command{gccgo}, either
+@option{-fgo-pkgpath} or @option{-fgo-prefix} must be specified when
+compiling a package.
+
+Using either @option{-fgo-pkgpath} or @option{-fgo-prefix} disables
+the special treatment of the @code{main} package and permits that
+package to be imported like any other.
@item -frequire-return-statement
@itemx -fno-require-return-statement
diff --git a/gcc/go/go-c.h b/gcc/go/go-c.h
index e123d52d8d1..d46a08796e3 100644
--- a/gcc/go/go-c.h
+++ b/gcc/go/go-c.h
@@ -38,11 +38,11 @@ extern "C"
extern int go_enable_dump (const char*);
extern int go_enable_optimize (const char*);
-extern void go_set_prefix (const char*);
extern void go_add_search_path (const char*);
-extern void go_create_gogo (int int_type_size, int pointer_size);
+extern void go_create_gogo (int int_type_size, int pointer_size,
+ const char* pkgpath, const char *prefix);
extern void go_parse_input_files (const char**, unsigned int,
bool only_check_syntax,
diff --git a/gcc/go/go-gcc.cc b/gcc/go/go-gcc.cc
index 08950b8e1d9..4729a3bdbd6 100644
--- a/gcc/go/go-gcc.cc
+++ b/gcc/go/go-gcc.cc
@@ -271,7 +271,7 @@ class Gcc_backend : public Backend
Bvariable*
global_variable(const std::string& package_name,
- const std::string& unique_prefix,
+ const std::string& pkgpath,
const std::string& name,
Btype* btype,
bool is_external,
@@ -1281,7 +1281,7 @@ Gcc_backend::non_zero_size_type(tree type)
Bvariable*
Gcc_backend::global_variable(const std::string& package_name,
- const std::string& unique_prefix,
+ const std::string& pkgpath,
const std::string& name,
Btype* btype,
bool is_external,
@@ -1310,9 +1310,9 @@ Gcc_backend::global_variable(const std::string& package_name,
{
TREE_PUBLIC(decl) = 1;
- std::string asm_name(unique_prefix);
+ std::string asm_name(pkgpath);
asm_name.push_back('.');
- asm_name.append(var_name);
+ asm_name.append(name);
SET_DECL_ASSEMBLER_NAME(decl, get_identifier_from_string(asm_name));
}
TREE_USED(decl) = 1;
diff --git a/gcc/go/go-lang.c b/gcc/go/go-lang.c
index 895e39d12e3..f02f769252b 100644
--- a/gcc/go/go-lang.c
+++ b/gcc/go/go-lang.c
@@ -81,6 +81,11 @@ struct GTY(()) language_function
int dummy;
};
+/* Option information we need to pass to go_create_gogo. */
+
+static const char *go_pkgpath = NULL;
+static const char *go_prefix = NULL;
+
/* Language hooks. */
static bool
@@ -96,7 +101,7 @@ go_langhook_init (void)
to, e.g., unsigned_char_type_node) but before calling
build_common_builtin_nodes (because it calls, indirectly,
go_type_for_size). */
- go_create_gogo (INT_TYPE_SIZE, POINTER_SIZE);
+ go_create_gogo (INT_TYPE_SIZE, POINTER_SIZE, go_pkgpath, go_prefix);
build_common_builtin_nodes ();
@@ -227,8 +232,12 @@ go_langhook_handle_option (
ret = go_enable_optimize (arg) ? true : false;
break;
+ case OPT_fgo_pkgpath_:
+ go_pkgpath = arg;
+ break;
+
case OPT_fgo_prefix_:
- go_set_prefix (arg);
+ go_prefix = arg;
break;
default:
diff --git a/gcc/go/gofrontend/backend.h b/gcc/go/gofrontend/backend.h
index d31404555e6..2b14132804f 100644
--- a/gcc/go/gofrontend/backend.h
+++ b/gcc/go/gofrontend/backend.h
@@ -321,16 +321,16 @@ class Backend
error_variable() = 0;
// Create a global variable. PACKAGE_NAME is the name of the
- // package where the variable is defined. UNIQUE_PREFIX is the
- // prefix for that package, from the -fgo-prefix option. NAME is
- // the name of the variable. BTYPE is the type of the variable.
- // IS_EXTERNAL is true if the variable is defined in some other
- // package. IS_HIDDEN is true if the variable is not exported (name
- // begins with a lower case letter). LOCATION is where the variable
- // was defined.
+ // package where the variable is defined. PKGPATH is the package
+ // path for that package, from the -fgo-pkgpath or -fgo-prefix
+ // option. NAME is the name of the variable. BTYPE is the type of
+ // the variable. IS_EXTERNAL is true if the variable is defined in
+ // some other package. IS_HIDDEN is true if the variable is not
+ // exported (name begins with a lower case letter). LOCATION is
+ // where the variable was defined.
virtual Bvariable*
global_variable(const std::string& package_name,
- const std::string& unique_prefix,
+ const std::string& pkgpath,
const std::string& name,
Btype* btype,
bool is_external,
diff --git a/gcc/go/gofrontend/export.cc b/gcc/go/gofrontend/export.cc
index 174596753ef..13c61a589fe 100644
--- a/gcc/go/gofrontend/export.cc
+++ b/gcc/go/gofrontend/export.cc
@@ -33,7 +33,7 @@ const int Export::v1_checksum_len;
// Constructor.
Export::Export(Stream* stream)
- : stream_(stream), type_refs_(), type_index_(1)
+ : stream_(stream), type_refs_(), type_index_(1), packages_()
{
}
@@ -91,7 +91,7 @@ should_export(Named_object* no)
void
Export::export_globals(const std::string& package_name,
- const std::string& unique_prefix,
+ const std::string& pkgpath,
int package_priority,
const std::map<std::string, Package*>& imports,
const std::string& import_init_fn,
@@ -140,9 +140,9 @@ Export::export_globals(const std::string& package_name,
this->write_string(package_name);
this->write_c_string(";\n");
- // The unique prefix. This prefix is used for all global symbols.
- this->write_c_string("prefix ");
- this->write_string(unique_prefix);
+ // The package path, used for all global symbols.
+ this->write_c_string("pkgpath ");
+ this->write_string(pkgpath);
this->write_c_string(";\n");
// The package priority.
@@ -209,12 +209,14 @@ Export::write_imports(const std::map<std::string, Package*>& imports)
++p)
{
this->write_c_string("import ");
- this->write_string(p->second->name());
+ this->write_string(p->second->package_name());
this->write_c_string(" ");
- this->write_string(p->second->unique_prefix());
+ this->write_string(p->second->pkgpath());
this->write_c_string(" \"");
this->write_string(p->first);
this->write_c_string("\";\n");
+
+ this->packages_.insert(p->second);
}
}
@@ -333,7 +335,7 @@ Export::write_type(const Type* type)
{
// The builtin types should have been predefined.
go_assert(!Linemap::is_predeclared_location(named_type->location())
- || (named_type->named_object()->package()->name()
+ || (named_type->named_object()->package()->package_name()
== "unsafe"));
named_object = named_type->named_object();
}
@@ -345,15 +347,26 @@ Export::write_type(const Type* type)
std::string s = "\"";
if (package != NULL && !Gogo::is_hidden_name(named_object->name()))
{
- s += package->unique_prefix();
- s += '.';
- s += package->name();
+ s += package->pkgpath();
s += '.';
}
s += named_object->name();
s += "\" ";
this->write_string(s);
+ // It is possible that this type was imported indirectly, and is
+ // not in a package in the import list. If we have not
+ // mentioned this package before, write out the package name
+ // here so that any package importing this one will know it.
+ if (package != NULL
+ && this->packages_.find(package) == this->packages_.end())
+ {
+ this->write_c_string("\"");
+ this->write_string(package->package_name());
+ this->packages_.insert(package);
+ this->write_c_string("\" ");
+ }
+
// We must add a named type to the table now, since the
// definition of the type may refer to the named type via a
// pointer.
diff --git a/gcc/go/gofrontend/export.h b/gcc/go/gofrontend/export.h
index 0e03f4853d6..c6a4810510a 100644
--- a/gcc/go/gofrontend/export.h
+++ b/gcc/go/gofrontend/export.h
@@ -117,7 +117,7 @@ class Export : public String_dump
// Export the identifiers in BINDINGS which are marked for export.
// The exporting is done via a series of calls to THIS->STREAM_. If
// is nothing to export, this->stream_->write will not be called.
- // UNIQUE_PREFIX is a prefix for all global symbols.
+ // PKGPATH is the package path.
// PACKAGE_PRIORITY is the priority to use for this package.
// IMPORT_INIT_FN is the name of the import initialization function
// for this package; it will be empty if none is needed.
@@ -125,7 +125,7 @@ class Export : public String_dump
// imported packages.
void
export_globals(const std::string& package_name,
- const std::string& unique_prefix,
+ const std::string& pkgpath,
int package_priority,
const std::map<std::string, Package*>& imports,
const std::string& import_init_fn,
@@ -182,6 +182,8 @@ class Export : public String_dump
Type_refs type_refs_;
// Index number of next type.
int type_index_;
+ // Packages we have written out.
+ Unordered_set(const Package*) packages_;
};
// An export streamer which puts the export stream in a named section.
diff --git a/gcc/go/gofrontend/expressions.cc b/gcc/go/gofrontend/expressions.cc
index a266694d24c..f6fb65866f0 100644
--- a/gcc/go/gofrontend/expressions.cc
+++ b/gcc/go/gofrontend/expressions.cc
@@ -3606,8 +3606,7 @@ Unary_expression::do_lower(Gogo*, Named_object*, Statement_inserter*, int)
return Expression::make_error(this->location());
}
- if (op == OPERATOR_PLUS || op == OPERATOR_MINUS
- || op == OPERATOR_NOT || op == OPERATOR_XOR)
+ if (op == OPERATOR_PLUS || op == OPERATOR_MINUS || op == OPERATOR_XOR)
{
Numeric_constant nc;
if (expr->numeric_constant_value(&nc))
@@ -3697,10 +3696,10 @@ Unary_expression::eval_constant(Operator op, const Numeric_constant* unc,
else
go_unreachable();
- case OPERATOR_NOT:
case OPERATOR_XOR:
break;
+ case OPERATOR_NOT:
case OPERATOR_AND:
case OPERATOR_MULT:
return false;
@@ -3713,7 +3712,10 @@ Unary_expression::eval_constant(Operator op, const Numeric_constant* unc,
return false;
mpz_t uval;
- unc->get_int(&uval);
+ if (unc->is_rune())
+ unc->get_rune(&uval);
+ else
+ unc->get_int(&uval);
mpz_t val;
mpz_init(val);
@@ -3911,6 +3913,10 @@ Unary_expression::do_check_types(Gogo*)
break;
case OPERATOR_NOT:
+ if (!type->is_boolean_type())
+ this->report_error(_("expected boolean type"));
+ break;
+
case OPERATOR_XOR:
if (type->integer_type() == NULL
&& !type->is_boolean_type())
diff --git a/gcc/go/gofrontend/go.cc b/gcc/go/gofrontend/go.cc
index bfa3afdfc3c..1f2ce8adcde 100644
--- a/gcc/go/gofrontend/go.cc
+++ b/gcc/go/gofrontend/go.cc
@@ -13,11 +13,6 @@
#include "backend.h"
#include "gogo.h"
-// The unique prefix to use for exported symbols. This is set during
-// option processing.
-
-static std::string unique_prefix;
-
// The data structures we build to represent the file.
static Gogo* gogo;
@@ -25,38 +20,22 @@ static Gogo* gogo;
GO_EXTERN_C
void
-go_create_gogo(int int_type_size, int pointer_size)
+go_create_gogo(int int_type_size, int pointer_size, const char *pkgpath,
+ const char *prefix)
{
go_assert(::gogo == NULL);
Linemap* linemap = go_get_linemap();
::gogo = new Gogo(go_get_backend(), linemap, int_type_size, pointer_size);
- if (!unique_prefix.empty())
- ::gogo->set_unique_prefix(unique_prefix);
+
+ if (pkgpath != NULL)
+ ::gogo->set_pkgpath(pkgpath);
+ else if (prefix != NULL)
+ ::gogo->set_prefix(prefix);
// FIXME: This should be in the gcc dependent code.
::gogo->define_builtin_function_trees();
}
-// Set the unique prefix we use for exported symbols.
-
-GO_EXTERN_C
-void
-go_set_prefix(const char* arg)
-{
- unique_prefix = arg;
- for (size_t i = 0; i < unique_prefix.length(); ++i)
- {
- char c = unique_prefix[i];
- if ((c >= 'a' && c <= 'z')
- || (c >= 'A' && c <= 'Z')
- || (c >= '0' && c <= '9')
- || c == '_')
- ;
- else
- unique_prefix[i] = '_';
- }
-}
-
// Parse the input files.
GO_EXTERN_C
diff --git a/gcc/go/gofrontend/gogo-tree.cc b/gcc/go/gofrontend/gogo-tree.cc
index 7f732386959..5f74de5a8b3 100644
--- a/gcc/go/gofrontend/gogo-tree.cc
+++ b/gcc/go/gofrontend/gogo-tree.cc
@@ -260,9 +260,7 @@ Gogo::get_init_fn_name()
}
else
{
- std::string s = this->unique_prefix();
- s.append(1, '.');
- s.append(this->package_name());
+ std::string s = this->pkgpath_symbol();
s.append("..import");
this->init_fn_name_ = s;
}
@@ -590,10 +588,11 @@ Find_var::expression(Expression** pexpr)
return TRAVERSE_CONTINUE;
}
-// Return true if EXPR refers to VAR.
+// Return true if EXPR, PREINIT, or DEP refers to VAR.
static bool
-expression_requires(Expression* expr, Block* preinit, Named_object* var)
+expression_requires(Expression* expr, Block* preinit, Named_object* dep,
+ Named_object* var)
{
Find_var::Seen_objects seen_objects;
Find_var find_var(var, &seen_objects);
@@ -601,7 +600,15 @@ expression_requires(Expression* expr, Block* preinit, Named_object* var)
Expression::traverse(&expr, &find_var);
if (preinit != NULL)
preinit->traverse(&find_var);
-
+ if (dep != NULL)
+ {
+ Expression* init = dep->var_value()->init();
+ if (init != NULL)
+ Expression::traverse(&init, &find_var);
+ if (dep->var_value()->has_pre_init())
+ dep->var_value()->preinit()->traverse(&find_var);
+ }
+
return find_var.found();
}
@@ -658,7 +665,7 @@ typedef std::list<Var_init> Var_inits;
// variable V2 then we initialize V1 after V2.
static void
-sort_var_inits(Var_inits* var_inits)
+sort_var_inits(Gogo* gogo, Var_inits* var_inits)
{
Var_inits ready;
while (!var_inits->empty())
@@ -667,6 +674,7 @@ sort_var_inits(Var_inits* var_inits)
Named_object* var = p1->var();
Expression* init = var->var_value()->init();
Block* preinit = var->var_value()->preinit();
+ Named_object* dep = gogo->var_depends_on(var->var_value());
// Start walking through the list to see which variables VAR
// needs to wait for. We can skip P1->WAITING variables--that
@@ -678,20 +686,22 @@ sort_var_inits(Var_inits* var_inits)
for (; p2 != var_inits->end(); ++p2)
{
- if (expression_requires(init, preinit, p2->var()))
+ Named_object* p2var = p2->var();
+ if (expression_requires(init, preinit, dep, p2var))
{
// Check for cycles.
- if (expression_requires(p2->var()->var_value()->init(),
- p2->var()->var_value()->preinit(),
+ if (expression_requires(p2var->var_value()->init(),
+ p2var->var_value()->preinit(),
+ gogo->var_depends_on(p2var->var_value()),
var))
{
error_at(var->location(),
("initialization expressions for %qs and "
"%qs depend upon each other"),
var->message_name().c_str(),
- p2->var()->message_name().c_str());
+ p2var->message_name().c_str());
inform(p2->var()->location(), "%qs defined here",
- p2->var()->message_name().c_str());
+ p2var->message_name().c_str());
p2 = var_inits->end();
}
else
@@ -714,9 +724,11 @@ sort_var_inits(Var_inits* var_inits)
// VAR does not depends upon any other initialization expressions.
// Check for a loop of VAR on itself. We only do this if
- // INIT is not NULL; when INIT is NULL, it means that
- // PREINIT sets VAR, which we will interpret as a loop.
- if (init != NULL && expression_requires(init, preinit, var))
+ // INIT is not NULL and there is no dependency; when INIT is
+ // NULL, it means that PREINIT sets VAR, which we will
+ // interpret as a loop.
+ if (init != NULL && dep == NULL
+ && expression_requires(init, preinit, NULL, var))
error_at(var->location(),
"initialization expression for %qs depends upon itself",
var->message_name().c_str());
@@ -783,7 +795,7 @@ Gogo::write_globals()
}
// There is nothing useful we can output for constants which
- // have ideal or non-integeral type.
+ // have ideal or non-integral type.
if (no->is_const())
{
Type* type = no->const_value()->type();
@@ -834,7 +846,9 @@ Gogo::write_globals()
;
else if (TREE_CONSTANT(init))
{
- if (expression_requires(no->var_value()->init(), NULL, no))
+ if (expression_requires(no->var_value()->init(), NULL,
+ this->var_depends_on(no->var_value()),
+ no))
error_at(no->location(),
"initialization expression for %qs depends "
"upon itself",
@@ -879,6 +893,14 @@ Gogo::write_globals()
else
var_inits.push_back(Var_init(no, var_init_tree));
}
+ else if (this->var_depends_on(no->var_value()) != NULL)
+ {
+ // This variable is initialized from something that is
+ // not in its init or preinit. This variable needs to
+ // participate in dependency analysis sorting, in case
+ // some other variable depends on this one.
+ var_inits.push_back(Var_init(no, integer_zero_node));
+ }
if (!is_sink && no->var_value()->type()->has_pointer())
var_gc.push_back(no);
@@ -896,7 +918,7 @@ Gogo::write_globals()
// workable order.
if (!var_inits.empty())
{
- sort_var_inits(&var_inits);
+ sort_var_inits(this, &var_inits);
for (Var_inits::const_iterator p = var_inits.begin();
p != var_inits.end();
++p)
@@ -960,7 +982,7 @@ Named_object::get_id(Gogo* gogo)
if (this->package_ == NULL)
package_name = gogo->package_name();
else
- package_name = this->package_->name();
+ package_name = this->package_->package_name();
decl_name = package_name + '.' + Gogo::unpack_hidden_name(this->name_);
@@ -1253,9 +1275,15 @@ Function::get_or_make_decl(Gogo* gogo, Named_object* no, tree id)
|| this->type_->is_method())
{
TREE_PUBLIC(decl) = 1;
- std::string asm_name = gogo->unique_prefix();
+ std::string asm_name = gogo->pkgpath_symbol();
asm_name.append(1, '.');
- asm_name.append(IDENTIFIER_POINTER(id), IDENTIFIER_LENGTH(id));
+ asm_name.append(Gogo::unpack_hidden_name(no->name()));
+ if (this->type_->is_method())
+ {
+ asm_name.append(1, '.');
+ Type* rtype = this->type_->receiver()->type();
+ asm_name.append(rtype->mangled_name(gogo));
+ }
SET_DECL_ASSEMBLER_NAME(decl,
get_identifier_from_string(asm_name));
}
@@ -1358,10 +1386,16 @@ Function_declaration::get_or_make_decl(Gogo* gogo, Named_object* no, tree id)
if (this->asm_name_.empty())
{
std::string asm_name = (no->package() == NULL
- ? gogo->unique_prefix()
- : no->package()->unique_prefix());
+ ? gogo->pkgpath_symbol()
+ : no->package()->pkgpath_symbol());
asm_name.append(1, '.');
- asm_name.append(IDENTIFIER_POINTER(id), IDENTIFIER_LENGTH(id));
+ asm_name.append(Gogo::unpack_hidden_name(no->name()));
+ if (this->fntype_->is_method())
+ {
+ asm_name.append(1, '.');
+ Type* rtype = this->fntype_->receiver()->type();
+ asm_name.append(rtype->mangled_name(gogo));
+ }
SET_DECL_ASSEMBLER_NAME(decl,
get_identifier_from_string(asm_name));
}
diff --git a/gcc/go/gofrontend/gogo.cc b/gcc/go/gofrontend/gogo.cc
index 7bc0b557c9f..80ffe240c7f 100644
--- a/gcc/go/gofrontend/gogo.cc
+++ b/gcc/go/gofrontend/gogo.cc
@@ -32,11 +32,16 @@ Gogo::Gogo(Backend* backend, Linemap* linemap, int int_type_size,
imported_unsafe_(false),
packages_(),
init_functions_(),
+ var_deps_(),
need_init_fn_(false),
init_fn_name_(),
imported_init_fns_(),
- unique_prefix_(),
- unique_prefix_specified_(false),
+ pkgpath_(),
+ pkgpath_symbol_(),
+ prefix_(),
+ pkgpath_set_(false),
+ pkgpath_from_option_(false),
+ prefix_from_option_(false),
verify_types_(),
interface_types_(),
specific_type_functions_(),
@@ -232,6 +237,72 @@ Gogo::Gogo(Backend* backend, Linemap* linemap, int int_type_size,
this->globals_->add_function_declaration("delete", NULL, delete_type, loc);
}
+// Convert a pkgpath into a string suitable for a symbol. Note that
+// this transformation is convenient but imperfect. A -fgo-pkgpath
+// option of a/b_c will conflict with a -fgo-pkgpath option of a_b/c,
+// possibly leading to link time errors.
+
+std::string
+Gogo::pkgpath_for_symbol(const std::string& pkgpath)
+{
+ std::string s = pkgpath;
+ for (size_t i = 0; i < s.length(); ++i)
+ {
+ char c = s[i];
+ if ((c >= 'a' && c <= 'z')
+ || (c >= 'A' && c <= 'Z')
+ || (c >= '0' && c <= '9')
+ || c == '_'
+ || c == '.'
+ || c == '$')
+ ;
+ else
+ s[i] = '_';
+ }
+ return s;
+}
+
+// Get the package path to use for type reflection data. This should
+// ideally be unique across the entire link.
+
+const std::string&
+Gogo::pkgpath() const
+{
+ go_assert(this->pkgpath_set_);
+ return this->pkgpath_;
+}
+
+// Set the package path from the -fgo-pkgpath command line option.
+
+void
+Gogo::set_pkgpath(const std::string& arg)
+{
+ go_assert(!this->pkgpath_set_);
+ this->pkgpath_ = arg;
+ this->pkgpath_set_ = true;
+ this->pkgpath_from_option_ = true;
+}
+
+// Get the package path to use for symbol names.
+
+const std::string&
+Gogo::pkgpath_symbol() const
+{
+ go_assert(this->pkgpath_set_);
+ return this->pkgpath_symbol_;
+}
+
+// Set the unique prefix to use to determine the package path, from
+// the -fgo-prefix command line option.
+
+void
+Gogo::set_prefix(const std::string& arg)
+{
+ go_assert(!this->prefix_from_option_);
+ this->prefix_ = arg;
+ this->prefix_from_option_ = true;
+}
+
// Munge name for use in an error message.
std::string
@@ -246,7 +317,7 @@ const std::string&
Gogo::package_name() const
{
go_assert(this->package_ != NULL);
- return this->package_->name();
+ return this->package_->package_name();
}
// Set the package name.
@@ -255,24 +326,29 @@ void
Gogo::set_package_name(const std::string& package_name,
Location location)
{
- if (this->package_ != NULL && this->package_->name() != package_name)
+ if (this->package_ != NULL)
{
- error_at(location, "expected package %<%s%>",
- Gogo::message_name(this->package_->name()).c_str());
+ if (this->package_->package_name() != package_name)
+ error_at(location, "expected package %<%s%>",
+ Gogo::message_name(this->package_->package_name()).c_str());
return;
}
- // If the user did not specify a unique prefix, we always use "go".
- // This in effect requires that the package name be unique.
- if (this->unique_prefix_.empty())
- this->unique_prefix_ = "go";
+ // Now that we know the name of the package we are compiling, set
+ // the package path to use for reflect.Type.PkgPath and global
+ // symbol names.
+ if (!this->pkgpath_set_)
+ {
+ if (!this->prefix_from_option_)
+ this->prefix_ = "go";
+ this->pkgpath_ = this->prefix_ + '.' + package_name;
+ this->pkgpath_set_ = true;
+ }
- this->package_ = this->register_package(package_name, this->unique_prefix_,
- location);
+ this->pkgpath_symbol_ = Gogo::pkgpath_for_symbol(this->pkgpath_);
- // We used to permit people to qualify symbols with the current
- // package name (e.g., P.x), but we no longer do.
- // this->globals_->add_package(package_name, this->package_);
+ this->package_ = this->register_package(this->pkgpath_, location);
+ this->package_->set_package_name(package_name, location);
if (this->is_main_package())
{
@@ -286,12 +362,14 @@ Gogo::set_package_name(const std::string& package_name,
}
// Return whether this is the "main" package. This is not true if
-// -fgo-prefix was used.
+// -fgo-pkgpath or -fgo-prefix was used.
bool
Gogo::is_main_package() const
{
- return this->package_name() == "main" && !this->unique_prefix_specified_;
+ return (this->package_name() == "main"
+ && !this->pkgpath_from_option_
+ && !this->prefix_from_option_);
}
// Import a package.
@@ -318,7 +396,8 @@ Gogo::import_package(const std::string& filename,
bool is_ln_exported = is_local_name_exported;
if (ln.empty())
{
- ln = package->name();
+ ln = package->package_name();
+ go_assert(!ln.empty());
is_ln_exported = Lex::is_exported_name(ln);
}
if (ln == ".")
@@ -352,11 +431,10 @@ Gogo::import_package(const std::string& filename,
Package* package = imp.import(this, local_name, is_local_name_exported);
if (package != NULL)
{
- if (package->name() == this->package_name()
- && package->unique_prefix() == this->unique_prefix())
+ if (package->pkgpath() == this->pkgpath())
error_at(location,
- ("imported package uses same package name and prefix "
- "as package being compiled (see -fgo-prefix option)"));
+ ("imported package uses same package path as package "
+ "being compiled (see -fgo-pkgpath option)"));
this->imports_.insert(std::make_pair(filename, package));
package->set_is_imported();
@@ -509,38 +587,21 @@ Package*
Gogo::add_imported_package(const std::string& real_name,
const std::string& alias_arg,
bool is_alias_exported,
- const std::string& unique_prefix,
+ const std::string& pkgpath,
Location location,
bool* padd_to_globals)
{
- // FIXME: Now that we compile packages as a whole, should we permit
- // importing the current package?
- if (this->package_name() == real_name
- && this->unique_prefix() == unique_prefix)
- {
- *padd_to_globals = false;
- if (!alias_arg.empty() && alias_arg != ".")
- {
- std::string alias = this->pack_hidden_name(alias_arg,
- is_alias_exported);
- this->package_->bindings()->add_package(alias, this->package_);
- }
- return this->package_;
- }
- else if (alias_arg == ".")
- {
- *padd_to_globals = true;
- return this->register_package(real_name, unique_prefix, location);
- }
+ Package* ret = this->register_package(pkgpath, location);
+ ret->set_package_name(real_name, location);
+
+ *padd_to_globals = false;
+
+ if (alias_arg == ".")
+ *padd_to_globals = true;
else if (alias_arg == "_")
- {
- Package* ret = this->register_package(real_name, unique_prefix, location);
- ret->set_uses_sink_alias();
- return ret;
- }
+ ret->set_uses_sink_alias();
else
{
- *padd_to_globals = false;
std::string alias = alias_arg;
if (alias.empty())
{
@@ -548,57 +609,37 @@ Gogo::add_imported_package(const std::string& real_name,
is_alias_exported = Lex::is_exported_name(alias);
}
alias = this->pack_hidden_name(alias, is_alias_exported);
- Named_object* no = this->add_package(real_name, alias, unique_prefix,
- location);
+ Named_object* no = this->package_->bindings()->add_package(alias, ret);
if (!no->is_package())
return NULL;
- return no->package_value();
}
-}
-// Add a package.
-
-Named_object*
-Gogo::add_package(const std::string& real_name, const std::string& alias,
- const std::string& unique_prefix, Location location)
-{
- go_assert(this->in_global_scope());
-
- // Register the package. Note that we might have already seen it in
- // an earlier import.
- Package* package = this->register_package(real_name, unique_prefix, location);
-
- return this->package_->bindings()->add_package(alias, package);
+ return ret;
}
// Register a package. This package may or may not be imported. This
// returns the Package structure for the package, creating if it
-// necessary.
+// necessary. LOCATION is the location of the import statement that
+// led us to see this package.
Package*
-Gogo::register_package(const std::string& package_name,
- const std::string& unique_prefix,
- Location location)
+Gogo::register_package(const std::string& pkgpath, Location location)
{
- go_assert(!unique_prefix.empty() && !package_name.empty());
- std::string name = unique_prefix + '.' + package_name;
Package* package = NULL;
std::pair<Packages::iterator, bool> ins =
- this->packages_.insert(std::make_pair(name, package));
+ this->packages_.insert(std::make_pair(pkgpath, package));
if (!ins.second)
{
// We have seen this package name before.
package = ins.first->second;
- go_assert(package != NULL);
- go_assert(package->name() == package_name
- && package->unique_prefix() == unique_prefix);
+ go_assert(package != NULL && package->pkgpath() == pkgpath);
if (Linemap::is_unknown_location(package->location()))
package->set_location(location);
}
else
{
// First time we have seen this package name.
- package = new Package(package_name, unique_prefix, location);
+ package = new Package(pkgpath, location);
go_assert(ins.first->second == NULL);
ins.first->second = package;
}
@@ -1150,7 +1191,7 @@ Gogo::clear_file_scope()
&& !package->uses_sink_alias()
&& !saw_errors())
error_at(package->location(), "imported and not used: %s",
- Gogo::message_name(package->name()).c_str());
+ Gogo::message_name(package->package_name()).c_str());
package->clear_is_imported();
package->clear_uses_sink_alias();
package->clear_used();
@@ -2821,27 +2862,6 @@ Gogo::check_return_statements()
this->traverse(&traverse);
}
-// Get the unique prefix to use before all exported symbols. This
-// must be unique across the entire link.
-
-const std::string&
-Gogo::unique_prefix() const
-{
- go_assert(!this->unique_prefix_.empty());
- return this->unique_prefix_;
-}
-
-// Set the unique prefix to use before all exported symbols. This
-// comes from the command line option -fgo-prefix=XXX.
-
-void
-Gogo::set_unique_prefix(const std::string& arg)
-{
- go_assert(this->unique_prefix_.empty());
- this->unique_prefix_ = arg;
- this->unique_prefix_specified_ = true;
-}
-
// Work out the package priority. It is one more than the maximum
// priority of an imported package.
@@ -2869,7 +2889,7 @@ Gogo::do_exports()
Export exp(&stream);
exp.register_builtin_types(this);
exp.export_globals(this->package_name(),
- this->unique_prefix(),
+ this->pkgpath(),
this->package_priority(),
this->imports_,
(this->need_init_fn_ && !this->is_main_package()
@@ -3820,6 +3840,10 @@ void
Variable::lower_init_expression(Gogo* gogo, Named_object* function,
Statement_inserter* inserter)
{
+ Named_object* dep = gogo->var_depends_on(this);
+ if (dep != NULL && dep->is_variable())
+ dep->var_value()->lower_init_expression(gogo, function, inserter);
+
if (this->init_ != NULL && !this->init_is_lowered_)
{
if (this->seen_)
@@ -4194,10 +4218,10 @@ Variable::get_backend_variable(Gogo* gogo, Named_object* function,
if (this->is_global_)
bvar = backend->global_variable((package == NULL
? gogo->package_name()
- : package->name()),
+ : package->package_name()),
(package == NULL
- ? gogo->unique_prefix()
- : package->unique_prefix()),
+ ? gogo->pkgpath_symbol()
+ : package->pkgpath_symbol()),
n,
btype,
package != NULL,
@@ -4551,7 +4575,12 @@ Named_object::message_name() const
{
if (this->package_ == NULL)
return Gogo::message_name(this->name_);
- std::string ret = Gogo::message_name(this->package_->name());
+ std::string ret;
+ if (this->package_->has_package_name())
+ ret = this->package_->package_name();
+ else
+ ret = this->package_->pkgpath();
+ ret = Gogo::message_name(ret);
ret += '.';
ret += Gogo::message_name(this->name_);
return ret;
@@ -4941,11 +4970,6 @@ Bindings::new_definition(Named_object* old_object, Named_object* new_object)
break;
case Named_object::NAMED_OBJECT_PACKAGE:
- if (new_object->is_package()
- && (old_object->package_value()->name()
- == new_object->package_value()->name()))
- return old_object;
-
break;
}
@@ -5213,13 +5237,29 @@ Unnamed_label::get_goto(Translate_context* context, Location location)
// Class Package.
-Package::Package(const std::string& name, const std::string& unique_prefix,
- Location location)
- : name_(name), unique_prefix_(unique_prefix), bindings_(new Bindings(NULL)),
- priority_(0), location_(location), used_(false), is_imported_(false),
+Package::Package(const std::string& pkgpath, Location location)
+ : pkgpath_(pkgpath), pkgpath_symbol_(Gogo::pkgpath_for_symbol(pkgpath)),
+ package_name_(), bindings_(new Bindings(NULL)), priority_(0),
+ location_(location), used_(false), is_imported_(false),
uses_sink_alias_(false)
{
- go_assert(!name.empty() && !unique_prefix.empty());
+ go_assert(!pkgpath.empty());
+
+}
+
+// Set the package name.
+
+void
+Package::set_package_name(const std::string& package_name, Location location)
+{
+ go_assert(!package_name.empty());
+ if (this->package_name_.empty())
+ this->package_name_ = package_name;
+ else if (this->package_name_ != package_name)
+ error_at(location,
+ "saw two different packages with the same package path %s: %s, %s",
+ this->pkgpath_.c_str(), this->package_name_.c_str(),
+ package_name.c_str());
}
// Set the priority. We may see multiple priorities for an imported
diff --git a/gcc/go/gofrontend/gogo.h b/gcc/go/gofrontend/gogo.h
index 9c5f8cb4a6b..deb9968e84f 100644
--- a/gcc/go/gofrontend/gogo.h
+++ b/gcc/go/gofrontend/gogo.h
@@ -138,16 +138,14 @@ class Gogo
is_main_package() const;
// If necessary, adjust the name to use for a hidden symbol. We add
- // a prefix of the package name, so that hidden symbols in different
- // packages do not collide.
+ // the package name, so that hidden symbols in different packages do
+ // not collide.
std::string
pack_hidden_name(const std::string& name, bool is_exported) const
{
return (is_exported
? name
- : ('.' + this->unique_prefix()
- + '.' + this->package_name()
- + '.' + name));
+ : '.' + this->pkgpath() + '.' + name);
}
// Unpack a name which may have been hidden. Returns the
@@ -161,9 +159,9 @@ class Gogo
is_hidden_name(const std::string& name)
{ return name[0] == '.'; }
- // Return the package prefix of a hidden name.
+ // Return the package path of a hidden name.
static std::string
- hidden_name_prefix(const std::string& name)
+ hidden_name_pkgpath(const std::string& name)
{
go_assert(Gogo::is_hidden_name(name));
return name.substr(1, name.rfind('.') - 1);
@@ -183,13 +181,30 @@ class Gogo
&& name[name.length() - 2] == '.');
}
- // Return the unique prefix to use for all exported symbols.
+ // Convert a pkgpath into a string suitable for a symbol
+ static std::string
+ pkgpath_for_symbol(const std::string& pkgpath);
+
+ // Return the package path to use for reflect.Type.PkgPath.
+ const std::string&
+ pkgpath() const;
+
+ // Return the package path to use for a symbol name.
const std::string&
- unique_prefix() const;
+ pkgpath_symbol() const;
- // Set the unique prefix.
+ // Set the package path from a command line option.
void
- set_unique_prefix(const std::string&);
+ set_pkgpath(const std::string&);
+
+ // Set the prefix from a command line option.
+ void
+ set_prefix(const std::string&);
+
+ // Return whether pkgpath was set from a command line option.
+ bool
+ pkgpath_from_option() const
+ { return this->pkgpath_from_option_; }
// Return the priority to use for the package we are compiling.
// This is two more than the largest priority of any package we
@@ -229,7 +244,7 @@ class Gogo
Package*
add_imported_package(const std::string& real_name, const std::string& alias,
bool is_alias_exported,
- const std::string& unique_prefix,
+ const std::string& pkgpath,
Location location,
bool* padd_to_globals);
@@ -237,8 +252,7 @@ class Gogo
// This returns the Package structure for the package, creating if
// it necessary.
Package*
- register_package(const std::string& name, const std::string& unique_prefix,
- Location);
+ register_package(const std::string& pkgpath, Location);
// Start compiling a function. ADD_METHOD_TO_TYPE is true if a
// method function should be added to the type of its receiver.
@@ -384,6 +398,23 @@ class Gogo
void
clear_file_scope();
+ // Record that VAR1 must be initialized after VAR2. This is used
+ // when VAR2 does not appear in VAR1's INIT or PREINIT.
+ void
+ record_var_depends_on(Variable* var1, Named_object* var2)
+ {
+ go_assert(this->var_deps_.find(var1) == this->var_deps_.end());
+ this->var_deps_[var1] = var2;
+ }
+
+ // Return the variable that VAR depends on, or NULL if none.
+ Named_object*
+ var_depends_on(Variable* var) const
+ {
+ Var_deps::const_iterator p = this->var_deps_.find(var);
+ return p != this->var_deps_.end() ? p->second : NULL;
+ }
+
// Queue up a type-specific function to be written out. This is
// used when a type-specific function is needed when not at the top
// level.
@@ -592,11 +623,6 @@ class Gogo
void
import_unsafe(const std::string&, bool is_exported, Location);
- // Add a new imported package.
- Named_object*
- add_package(const std::string& real_name, const std::string& alias,
- const std::string& unique_prefix, Location location);
-
// Return the current binding contour.
Bindings*
current_bindings();
@@ -639,8 +665,9 @@ class Gogo
// Type used to map package names to packages.
typedef std::map<std::string, Package*> Packages;
- // Type used to map special names in the sys package.
- typedef std::map<std::string, std::string> Sys_names;
+ // Type used to map variables to the function calls that set them.
+ // This is used for initialization dependency analysis.
+ typedef std::map<Variable*, Named_object*> Var_deps;
// Type used to queue writing a type specific function.
struct Specific_type_function
@@ -683,16 +710,28 @@ class Gogo
Packages packages_;
// The functions named "init", if there are any.
std::vector<Named_object*> init_functions_;
+ // A mapping from variables to the function calls that initialize
+ // them, if it is not stored in the variable's init or preinit.
+ // This is used for dependency analysis.
+ Var_deps var_deps_;
// Whether we need a magic initialization function.
bool need_init_fn_;
// The name of the magic initialization function.
std::string init_fn_name_;
// A list of import control variables for packages that we import.
std::set<Import_init> imported_init_fns_;
- // The unique prefix used for all global symbols.
- std::string unique_prefix_;
- // Whether an explicit unique prefix was set by -fgo-prefix.
- bool unique_prefix_specified_;
+ // The package path used for reflection data.
+ std::string pkgpath_;
+ // The package path to use for a symbol name.
+ std::string pkgpath_symbol_;
+ // The prefix to use for symbols, from the -fgo-prefix option.
+ std::string prefix_;
+ // Whether pkgpath_ has been set.
+ bool pkgpath_set_;
+ // Whether an explicit package path was set by -fgo-pkgpath.
+ bool pkgpath_from_option_;
+ // Whether an explicit prefix was set by -fgo-prefix.
+ bool prefix_from_option_;
// A list of types to verify.
std::vector<Type*> verify_types_;
// A list of interface types defined while parsing.
@@ -2387,28 +2426,37 @@ class Unnamed_label
class Package
{
public:
- Package(const std::string& name, const std::string& unique_prefix,
- Location location);
+ Package(const std::string& pkgpath, Location location);
- // The real name of this package. This may be different from the
- // name in the associated Named_object if the import statement used
- // an alias.
+ // Get the package path used for all symbols exported from this
+ // package.
const std::string&
- name() const
- { return this->name_; }
+ pkgpath() const
+ { return this->pkgpath_; }
+
+ // Return the package path to use for a symbol name.
+ const std::string&
+ pkgpath_symbol() const
+ { return this->pkgpath_symbol_; }
// Return the location of the import statement.
Location
location() const
{ return this->location_; }
- // Get the unique prefix used for all symbols exported from this
- // package.
+ // Return whether we know the name of this package yet.
+ bool
+ has_package_name() const
+ { return !this->package_name_.empty(); }
+
+ // The name that this package uses in its package clause. This may
+ // be different from the name in the associated Named_object if the
+ // import statement used an alias.
const std::string&
- unique_prefix() const
+ package_name() const
{
- go_assert(!this->unique_prefix_.empty());
- return this->unique_prefix_;
+ go_assert(!this->package_name_.empty());
+ return this->package_name_;
}
// The priority of this package. The init function of packages with
@@ -2478,8 +2526,12 @@ class Package
lookup(const std::string& name) const
{ return this->bindings_->lookup(name); }
- // Set the location of the package. This is used if it is seen in a
- // different import before it is really imported.
+ // Set the name of the package.
+ void
+ set_package_name(const std::string& name, Location);
+
+ // Set the location of the package. This is used to record the most
+ // recent import location.
void
set_location(Location location)
{ this->location_ = location; }
@@ -2515,10 +2567,13 @@ class Package
determine_types();
private:
- // The real name of this package.
- std::string name_;
- // The unique prefix for all exported global symbols.
- std::string unique_prefix_;
+ // The package path for type reflection data.
+ std::string pkgpath_;
+ // The package path for symbol names.
+ std::string pkgpath_symbol_;
+ // The name that this package uses in the package clause. This may
+ // be the empty string if it is not yet known.
+ std::string package_name_;
// The names in this package.
Bindings* bindings_;
// The priority of this package. A package has a priority higher
diff --git a/gcc/go/gofrontend/import.cc b/gcc/go/gofrontend/import.cc
index 58b0355c6c6..9febf231897 100644
--- a/gcc/go/gofrontend/import.cc
+++ b/gcc/go/gofrontend/import.cc
@@ -281,13 +281,24 @@ Import::import(Gogo* gogo, const std::string& local_name,
std::string package_name = this->read_identifier();
this->require_c_string(";\n");
- this->require_c_string("prefix ");
- std::string unique_prefix = this->read_identifier();
- this->require_c_string(";\n");
+ std::string pkgpath;
+ if (this->match_c_string("prefix "))
+ {
+ this->advance(7);
+ std::string unique_prefix = this->read_identifier();
+ this->require_c_string(";\n");
+ pkgpath = unique_prefix + '.' + package_name;
+ }
+ else
+ {
+ this->require_c_string("pkgpath ");
+ pkgpath = this->read_identifier();
+ this->require_c_string(";\n");
+ }
this->package_ = gogo->add_imported_package(package_name, local_name,
is_local_name_exported,
- unique_prefix,
+ pkgpath,
this->location_,
&this->add_to_globals_);
if (this->package_ == NULL)
@@ -353,10 +364,18 @@ void
Import::read_one_import()
{
this->require_c_string("import ");
+ std::string package_name = this->read_identifier();
+ this->require_c_string(" ");
+ std::string pkgpath = this->read_identifier();
+ this->require_c_string(" \"");
Stream* stream = this->stream_;
- while (stream->peek_char() != ';')
+ while (stream->peek_char() != '"')
stream->advance(1);
- this->require_c_string(";\n");
+ this->require_c_string("\";\n");
+
+ Package* p = this->gogo_->register_package(pkgpath,
+ Linemap::unknown_location());
+ p->set_package_name(package_name, this->location());
}
// Read the list of import control functions.
@@ -572,55 +591,50 @@ Import::read_type()
while ((c = stream->get_char()) != '"')
type_name += c;
- // If this type is in the current package, the name will be
- // .PREFIX.PACKAGE.NAME or simply NAME with no dots. Otherwise, a
- // non-hidden symbol will be PREFIX.PACKAGE.NAME and a hidden symbol
- // will be .PREFIX.PACKAGE.NAME.
- std::string package_name;
- std::string unique_prefix;
+ // If this type is in the package we are currently importing, the
+ // name will be .PKGPATH.NAME or simply NAME with no dots.
+ // Otherwise, a non-hidden symbol will be PKGPATH.NAME and a hidden
+ // symbol will be .PKGPATH.NAME.
+ std::string pkgpath;
if (type_name.find('.') != std::string::npos)
{
- bool is_hidden = false;
size_t start = 0;
if (type_name[0] == '.')
- {
- ++start;
- is_hidden = true;
- }
- size_t dot1 = type_name.find('.', start);
- size_t dot2;
- if (dot1 == std::string::npos)
- dot2 = std::string::npos;
- else
- dot2 = type_name.find('.', dot1 + 1);
- if (dot1 == std::string::npos || dot2 == std::string::npos)
- {
- error_at(this->location_,
- ("error at import data at %d: missing dot in type name"),
- stream->pos());
- stream->set_saw_error();
- }
- else
- {
- unique_prefix = type_name.substr(start, dot1 - start);
- package_name = type_name.substr(dot1 + 1, dot2 - (dot1 + 1));
- }
- if (!is_hidden)
- type_name.erase(0, dot2 + 1);
+ start = 1;
+ size_t dot = type_name.rfind('.');
+ pkgpath = type_name.substr(start, dot - start);
+ if (type_name[0] != '.')
+ type_name.erase(0, dot + 1);
}
this->require_c_string(" ");
+ // The package name may follow. This is the name of the package in
+ // the package clause of that package. The type name will include
+ // the pkgpath, which may be different.
+ std::string package_name;
+ if (stream->peek_char() == '"')
+ {
+ stream->advance(1);
+ while ((c = stream->get_char()) != '"')
+ package_name += c;
+ this->require_c_string(" ");
+ }
+
// Declare the type in the appropriate package. If we haven't seen
// it before, mark it as invisible. We declare it before we read
// the actual definition of the type, since the definition may refer
// to the type itself.
Package* package;
- if (package_name.empty())
+ if (pkgpath.empty() || pkgpath == this->gogo_->pkgpath())
package = this->package_;
else
- package = this->gogo_->register_package(package_name, unique_prefix,
- Linemap::unknown_location());
+ {
+ package = this->gogo_->register_package(pkgpath,
+ Linemap::unknown_location());
+ if (!package_name.empty())
+ package->set_package_name(package_name, this->location());
+ }
Named_object* no = package->bindings()->lookup(type_name);
if (no == NULL)
@@ -628,8 +642,7 @@ Import::read_type()
else if (!no->is_type_declaration() && !no->is_type())
{
error_at(this->location_, "imported %<%s.%s%> both type and non-type",
- Gogo::message_name(package->name()).c_str(),
- Gogo::message_name(type_name).c_str());
+ pkgpath.c_str(), Gogo::message_name(type_name).c_str());
stream->set_saw_error();
return Type::make_error_type();
}
@@ -669,6 +682,9 @@ Import::read_type()
// This type has not yet been imported.
ntype->clear_is_visible();
+ if (!type->is_undefined() && type->interface_type() != NULL)
+ this->gogo_->record_interface_type(type->interface_type());
+
type = ntype;
}
else if (no->is_type())
@@ -769,9 +785,7 @@ Import::read_name()
if (ret == "?")
ret.clear();
else if (!Lex::is_exported_name(ret))
- ret = ('.' + this->package_->unique_prefix()
- + '.' + this->package_->name()
- + '.' + ret);
+ ret = '.' + this->package_->pkgpath() + '.' + ret;
return ret;
}
diff --git a/gcc/go/gofrontend/lex.cc b/gcc/go/gofrontend/lex.cc
index 53618fc72ca..5b7ce6869e6 100644
--- a/gcc/go/gofrontend/lex.cc
+++ b/gcc/go/gofrontend/lex.cc
@@ -1012,7 +1012,9 @@ Lex::gather_number()
}
}
- if (*p != '.' && *p != 'i' && !Lex::could_be_exponent(p, pend))
+ // A partial token that looks like an octal literal might actually be the
+ // beginning of a floating-point or imaginary literal.
+ if (base == 16 || (*p != '.' && *p != 'i' && !Lex::could_be_exponent(p, pend)))
{
std::string s(pnum, p - pnum);
mpz_t val;
diff --git a/gcc/go/gofrontend/parse.cc b/gcc/go/gofrontend/parse.cc
index 7a567a1cd14..29323f05c6c 100644
--- a/gcc/go/gofrontend/parse.cc
+++ b/gcc/go/gofrontend/parse.cc
@@ -126,18 +126,22 @@ Parse::identifier_list(Typed_identifier_list* til)
// ExpressionList = Expression { "," Expression } .
+// If MAY_BE_COMPOSITE_LIT is true, an expression may be a composite
+// literal.
+
// If MAY_BE_SINK is true, the expressions in the list may be "_".
Expression_list*
-Parse::expression_list(Expression* first, bool may_be_sink)
+Parse::expression_list(Expression* first, bool may_be_sink,
+ bool may_be_composite_lit)
{
Expression_list* ret = new Expression_list();
if (first != NULL)
ret->push_back(first);
while (true)
{
- ret->push_back(this->expression(PRECEDENCE_NORMAL, may_be_sink, true,
- NULL));
+ ret->push_back(this->expression(PRECEDENCE_NORMAL, may_be_sink,
+ may_be_composite_lit, NULL));
const Token* token = this->peek_token();
if (!token->is_op(OPERATOR_COMMA))
@@ -319,13 +323,13 @@ Parse::type_name(bool issue_error)
&& package->name() != this->gogo_->package_name())
{
// Check whether the name is there but hidden.
- std::string s = ('.' + package->package_value()->unique_prefix()
- + '.' + package->package_value()->name()
+ std::string s = ('.' + package->package_value()->pkgpath()
+ '.' + name);
named_object = package->package_value()->lookup(s);
if (named_object != NULL)
{
- const std::string& packname(package->package_value()->name());
+ Package* p = package->package_value();
+ const std::string& packname(p->package_name());
error_at(location, "invalid reference to hidden type %<%s.%s%>",
Gogo::message_name(packname).c_str(),
Gogo::message_name(name).c_str());
@@ -341,7 +345,7 @@ Parse::type_name(bool issue_error)
named_object = this->gogo_->add_unknown_name(name, location);
else
{
- const std::string& packname(package->package_value()->name());
+ const std::string& packname(package->package_value()->package_name());
error_at(location, "reference to undefined identifier %<%s.%s%>",
Gogo::message_name(packname).c_str(),
Gogo::message_name(name).c_str());
@@ -1425,7 +1429,7 @@ Parse::const_spec(Type** last_type, Expression_list** last_expr_list)
else
{
this->advance_token();
- expr_list = this->expression_list(NULL, false);
+ expr_list = this->expression_list(NULL, false, true);
*last_type = type;
if (*last_expr_list != NULL)
delete *last_expr_list;
@@ -1575,13 +1579,13 @@ Parse::var_spec(void*)
if (this->peek_token()->is_op(OPERATOR_EQ))
{
this->advance_token();
- init = this->expression_list(NULL, false);
+ init = this->expression_list(NULL, false, true);
}
}
else
{
this->advance_token();
- init = this->expression_list(NULL, false);
+ init = this->expression_list(NULL, false, true);
}
this->init_vars(&til, type, init, false, location);
@@ -1667,6 +1671,7 @@ Parse::init_vars_from_call(const Typed_identifier_list* vars, Type* type,
// the right number of values, but it might. Declare the variables,
// and then assign the results of the call to them.
+ Named_object* first_var = NULL;
unsigned int index = 0;
bool any_new = false;
for (Typed_identifier_list::const_iterator pv = vars->begin();
@@ -1674,7 +1679,22 @@ Parse::init_vars_from_call(const Typed_identifier_list* vars, Type* type,
++pv, ++index)
{
Expression* init = Expression::make_call_result(call, index);
- this->init_var(*pv, type, init, is_coloneq, false, &any_new);
+ Named_object* no = this->init_var(*pv, type, init, is_coloneq, false,
+ &any_new);
+
+ if (this->gogo_->in_global_scope() && no->is_variable())
+ {
+ if (first_var == NULL)
+ first_var = no;
+ else
+ {
+ // The subsequent vars have an implicit dependency on
+ // the first one, so that everything gets initialized in
+ // the right order and so that we detect cycles
+ // correctly.
+ this->gogo_->record_var_depends_on(no->var_value(), first_var);
+ }
+ }
}
if (is_coloneq && !any_new)
@@ -1972,6 +1992,9 @@ Parse::create_dummy_global(Type* type, Expression* init,
// In order to support both "a, b := 1, 0" and "a, b = 1, 0" we accept
// tuple assignments here as well.
+// If MAY_BE_COMPOSITE_LIT is true, the expression on the right hand
+// side may be a composite literal.
+
// If P_RANGE_CLAUSE is not NULL, then this will recognize a
// RangeClause.
@@ -1981,6 +2004,7 @@ Parse::create_dummy_global(Type* type, Expression* init,
void
Parse::simple_var_decl_or_assignment(const std::string& name,
Location location,
+ bool may_be_composite_lit,
Range_clause* p_range_clause,
Type_switch* p_type_switch)
{
@@ -2037,14 +2061,15 @@ Parse::simple_var_decl_or_assignment(const std::string& name,
exprs->push_back(this->id_to_expression(p->name(),
p->location()));
- Expression_list* more_exprs = this->expression_list(NULL, true);
+ Expression_list* more_exprs =
+ this->expression_list(NULL, true, may_be_composite_lit);
for (Expression_list::const_iterator p = more_exprs->begin();
p != more_exprs->end();
++p)
exprs->push_back(*p);
delete more_exprs;
- this->tuple_assignment(exprs, p_range_clause);
+ this->tuple_assignment(exprs, may_be_composite_lit, p_range_clause);
return;
}
}
@@ -2060,11 +2085,12 @@ Parse::simple_var_decl_or_assignment(const std::string& name,
Expression_list* init;
if (p_type_switch == NULL)
- init = this->expression_list(NULL, false);
+ init = this->expression_list(NULL, false, may_be_composite_lit);
else
{
bool is_type_switch = false;
- Expression* expr = this->expression(PRECEDENCE_NORMAL, false, true,
+ Expression* expr = this->expression(PRECEDENCE_NORMAL, false,
+ may_be_composite_lit,
&is_type_switch);
if (is_type_switch)
{
@@ -2083,7 +2109,7 @@ Parse::simple_var_decl_or_assignment(const std::string& name,
else
{
this->advance_token();
- init = this->expression_list(expr, false);
+ init = this->expression_list(expr, false, may_be_composite_lit);
}
}
@@ -2358,7 +2384,7 @@ Parse::operand(bool may_be_sink)
{
go_assert(package != NULL);
error_at(location, "invalid reference to hidden type %<%s.%s%>",
- Gogo::message_name(package->name()).c_str(),
+ Gogo::message_name(package->package_name()).c_str(),
Gogo::message_name(id).c_str());
return Expression::make_error(location);
}
@@ -2368,7 +2394,7 @@ Parse::operand(bool may_be_sink)
{
if (package != NULL)
{
- std::string n1 = Gogo::message_name(package->name());
+ std::string n1 = Gogo::message_name(package->package_name());
std::string n2 = Gogo::message_name(id);
if (!is_exported)
error_at(location,
@@ -3049,7 +3075,7 @@ Parse::call(Expression* func)
const Token* token = this->advance_token();
if (!token->is_op(OPERATOR_RPAREN))
{
- args = this->expression_list(NULL, false);
+ args = this->expression_list(NULL, false, true);
token = this->peek_token();
if (token->is_op(OPERATOR_ELLIPSIS))
{
@@ -3562,6 +3588,7 @@ Parse::simple_stat(bool may_be_composite_lit, bool* return_exp,
{
identifier = this->gogo_->pack_hidden_name(identifier, is_exported);
this->simple_var_decl_or_assignment(identifier, location,
+ may_be_composite_lit,
p_range_clause,
(token->is_op(OPERATOR_COLONEQ)
? p_type_switch
@@ -3597,7 +3624,7 @@ Parse::simple_stat(bool may_be_composite_lit, bool* return_exp,
this->inc_dec_stat(this->verify_not_sink(exp));
else if (token->is_op(OPERATOR_COMMA)
|| token->is_op(OPERATOR_EQ))
- this->assignment(exp, p_range_clause);
+ this->assignment(exp, may_be_composite_lit, p_range_clause);
else if (token->is_op(OPERATOR_PLUSEQ)
|| token->is_op(OPERATOR_MINUSEQ)
|| token->is_op(OPERATOR_OREQ)
@@ -3609,7 +3636,8 @@ Parse::simple_stat(bool may_be_composite_lit, bool* return_exp,
|| token->is_op(OPERATOR_RSHIFTEQ)
|| token->is_op(OPERATOR_ANDEQ)
|| token->is_op(OPERATOR_BITCLEAREQ))
- this->assignment(this->verify_not_sink(exp), p_range_clause);
+ this->assignment(this->verify_not_sink(exp), may_be_composite_lit,
+ p_range_clause);
else if (return_exp != NULL)
return this->verify_not_sink(exp);
else
@@ -3715,11 +3743,15 @@ Parse::inc_dec_stat(Expression* exp)
// EXP is an expression that we have already parsed.
+// If MAY_BE_COMPOSITE_LIT is true, an expression on the right hand
+// side may be a composite literal.
+
// If RANGE_CLAUSE is not NULL, then this will recognize a
// RangeClause.
void
-Parse::assignment(Expression* expr, Range_clause* p_range_clause)
+Parse::assignment(Expression* expr, bool may_be_composite_lit,
+ Range_clause* p_range_clause)
{
Expression_list* vars;
if (!this->peek_token()->is_op(OPERATOR_COMMA))
@@ -3730,20 +3762,24 @@ Parse::assignment(Expression* expr, Range_clause* p_range_clause)
else
{
this->advance_token();
- vars = this->expression_list(expr, true);
+ vars = this->expression_list(expr, true, may_be_composite_lit);
}
- this->tuple_assignment(vars, p_range_clause);
+ this->tuple_assignment(vars, may_be_composite_lit, p_range_clause);
}
// An assignment statement. LHS is the list of expressions which
// appear on the left hand side.
+// If MAY_BE_COMPOSITE_LIT is true, an expression on the right hand
+// side may be a composite literal.
+
// If RANGE_CLAUSE is not NULL, then this will recognize a
// RangeClause.
void
-Parse::tuple_assignment(Expression_list* lhs, Range_clause* p_range_clause)
+Parse::tuple_assignment(Expression_list* lhs, bool may_be_composite_lit,
+ Range_clause* p_range_clause)
{
const Token* token = this->peek_token();
if (!token->is_op(OPERATOR_EQ)
@@ -3775,7 +3811,8 @@ Parse::tuple_assignment(Expression_list* lhs, Range_clause* p_range_clause)
return;
}
- Expression_list* vals = this->expression_list(NULL, false);
+ Expression_list* vals = this->expression_list(NULL, false,
+ may_be_composite_lit);
// We've parsed everything; check for errors.
if (lhs == NULL || vals == NULL)
@@ -3944,7 +3981,7 @@ Parse::return_stat()
this->advance_token();
Expression_list* vals = NULL;
if (this->expression_may_start_here())
- vals = this->expression_list(NULL, false);
+ vals = this->expression_list(NULL, false, true);
this->gogo_->add_statement(Statement::make_return_statement(vals, location));
if (vals == NULL
@@ -4305,7 +4342,7 @@ Parse::expr_switch_case(bool* is_default)
if (token->is_keyword(KEYWORD_CASE))
{
this->advance_token();
- return this->expression_list(NULL, false);
+ return this->expression_list(NULL, false, true);
}
else if (token->is_keyword(KEYWORD_DEFAULT))
{
diff --git a/gcc/go/gofrontend/parse.h b/gcc/go/gofrontend/parse.h
index a838e4bcbbf..3139f7e8908 100644
--- a/gcc/go/gofrontend/parse.h
+++ b/gcc/go/gofrontend/parse.h
@@ -162,7 +162,8 @@ class Parse
// Parser nonterminals.
void identifier_list(Typed_identifier_list*);
- Expression_list* expression_list(Expression*, bool may_be_sink);
+ Expression_list* expression_list(Expression*, bool may_be_sink,
+ bool may_be_composite_lit);
bool qualified_ident(std::string*, Named_object**);
Type* type();
bool type_may_start_here();
@@ -207,6 +208,7 @@ class Parse
bool is_coloneq, bool type_from_init, bool* is_new);
Named_object* create_dummy_global(Type*, Expression*, Location);
void simple_var_decl_or_assignment(const std::string&, Location,
+ bool may_be_composite_lit,
Range_clause*, Type_switch*);
void function_decl();
Typed_identifier* receiver();
@@ -239,8 +241,9 @@ class Parse
void expression_stat(Expression*);
void send_stmt(Expression*);
void inc_dec_stat(Expression*);
- void assignment(Expression*, Range_clause*);
- void tuple_assignment(Expression_list*, Range_clause*);
+ void assignment(Expression*, bool may_be_composite_lit, Range_clause*);
+ void tuple_assignment(Expression_list*, bool may_be_composite_lit,
+ Range_clause*);
void send();
void go_or_defer_stat();
void return_stat();
diff --git a/gcc/go/gofrontend/types.cc b/gcc/go/gofrontend/types.cc
index 74bab411ac0..35770c76572 100644
--- a/gcc/go/gofrontend/types.cc
+++ b/gcc/go/gofrontend/types.cc
@@ -1301,15 +1301,10 @@ Type::type_descriptor_var_name(Gogo* gogo, Named_type* nt)
go_assert(in_function == NULL);
else
{
- const std::string& unique_prefix(no->package() == NULL
- ? gogo->unique_prefix()
- : no->package()->unique_prefix());
- const std::string& package_name(no->package() == NULL
- ? gogo->package_name()
- : no->package()->name());
- ret.append(unique_prefix);
- ret.append(1, '.');
- ret.append(package_name);
+ const std::string& pkgpath(no->package() == NULL
+ ? gogo->pkgpath_symbol()
+ : no->package()->pkgpath_symbol());
+ ret.append(pkgpath);
ret.append(1, '.');
if (in_function != NULL)
{
@@ -1317,7 +1312,20 @@ Type::type_descriptor_var_name(Gogo* gogo, Named_type* nt)
ret.append(1, '.');
}
}
- ret.append(no->name());
+
+ // FIXME: This adds in pkgpath twice for hidden symbols, which is
+ // pointless.
+ const std::string& name(no->name());
+ if (!Gogo::is_hidden_name(name))
+ ret.append(name);
+ else
+ {
+ ret.append(1, '.');
+ ret.append(Gogo::pkgpath_for_symbol(Gogo::hidden_name_pkgpath(name)));
+ ret.append(1, '.');
+ ret.append(Gogo::unpack_hidden_name(name));
+ }
+
return ret;
}
@@ -1977,15 +1985,10 @@ Type::uncommon_type_constructor(Gogo* gogo, Type* uncommon_type,
else
{
const Package* package = no->package();
- const std::string& unique_prefix(package == NULL
- ? gogo->unique_prefix()
- : package->unique_prefix());
- const std::string& package_name(package == NULL
- ? gogo->package_name()
- : package->name());
- n.assign(unique_prefix);
- n.append(1, '.');
- n.append(package_name);
+ const std::string& pkgpath(package == NULL
+ ? gogo->pkgpath()
+ : package->pkgpath());
+ n.assign(pkgpath);
if (name->in_function() != NULL)
{
n.append(1, '.');
@@ -2096,7 +2099,8 @@ Type::method_constructor(Gogo*, Type* method_type,
vals->push_back(Expression::make_nil(bloc));
else
{
- s = Expression::make_string(Gogo::hidden_name_prefix(method_name), bloc);
+ s = Expression::make_string(Gogo::hidden_name_pkgpath(method_name),
+ bloc);
vals->push_back(Expression::make_unary(OPERATOR_AND, s, bloc));
}
@@ -4668,7 +4672,7 @@ Struct_type::do_type_descriptor(Gogo* gogo, Named_type* name)
fvals->push_back(Expression::make_nil(bloc));
else
{
- std::string n = Gogo::hidden_name_prefix(pf->field_name());
+ std::string n = Gogo::hidden_name_pkgpath(pf->field_name());
Expression* s = Expression::make_string(n, bloc);
fvals->push_back(Expression::make_unary(OPERATOR_AND, s, bloc));
}
@@ -7056,7 +7060,7 @@ Interface_type::do_type_descriptor(Gogo* gogo, Named_type* name)
mvals->push_back(Expression::make_nil(bloc));
else
{
- s = Gogo::hidden_name_prefix(pm->name());
+ s = Gogo::hidden_name_pkgpath(pm->name());
e = Expression::make_string(s, bloc);
mvals->push_back(Expression::make_unary(OPERATOR_AND, e, bloc));
}
@@ -7105,11 +7109,15 @@ Interface_type::do_reflection(Gogo* gogo, std::string* ret) const
{
if (!Gogo::is_hidden_name(p->name()))
ret->append(p->name());
+ else if (gogo->pkgpath_from_option())
+ ret->append(p->name().substr(1));
else
{
- // This matches what the gc compiler does.
- std::string prefix = Gogo::hidden_name_prefix(p->name());
- ret->append(prefix.substr(prefix.find('.') + 1));
+ // If no -fgo-pkgpath option, backward compatibility
+ // for how this used to work before -fgo-pkgpath was
+ // introduced.
+ std::string pkgpath = Gogo::hidden_name_pkgpath(p->name());
+ ret->append(pkgpath.substr(pkgpath.find('.') + 1));
ret->push_back('.');
ret->append(Gogo::unpack_hidden_name(p->name()));
}
@@ -7939,20 +7947,14 @@ Named_type::do_hash_for_method(Gogo* gogo) const
// where we are going to be comparing named types for equality. In
// other cases, which are cases where the runtime is going to
// compare hash codes to see if the types are the same, we need to
- // include the package prefix and name in the hash.
+ // include the pkgpath in the hash.
if (gogo != NULL && !Gogo::is_hidden_name(name) && !this->is_builtin())
{
const Package* package = this->named_object()->package();
if (package == NULL)
- {
- ret = Type::hash_string(gogo->unique_prefix(), ret);
- ret = Type::hash_string(gogo->package_name(), ret);
- }
+ ret = Type::hash_string(gogo->pkgpath(), ret);
else
- {
- ret = Type::hash_string(package->unique_prefix(), ret);
- ret = Type::hash_string(package->name(), ret);
- }
+ ret = Type::hash_string(package->pkgpath(), ret);
}
return ret;
@@ -8324,11 +8326,16 @@ Named_type::do_reflection(Gogo* gogo, std::string* ret) const
}
if (!this->is_builtin())
{
+ // We handle -fgo-prefix and -fgo-pkgpath differently here for
+ // compatibility with how the compiler worked before
+ // -fgo-pkgpath was introduced.
const Package* package = this->named_object_->package();
- if (package != NULL)
- ret->append(package->name());
+ if (gogo->pkgpath_from_option())
+ ret->append(package != NULL ? package->pkgpath() : gogo->pkgpath());
else
- ret->append(gogo->package_name());
+ ret->append(package != NULL
+ ? package->package_name()
+ : gogo->package_name());
ret->push_back('.');
}
if (this->in_function_ != NULL)
@@ -8355,15 +8362,10 @@ Named_type::do_mangled_name(Gogo* gogo, std::string* ret) const
go_assert(this->in_function_ == NULL);
else
{
- const std::string& unique_prefix(no->package() == NULL
- ? gogo->unique_prefix()
- : no->package()->unique_prefix());
- const std::string& package_name(no->package() == NULL
- ? gogo->package_name()
- : no->package()->name());
- name = unique_prefix;
- name.append(1, '.');
- name.append(package_name);
+ const std::string& pkgpath(no->package() == NULL
+ ? gogo->pkgpath_symbol()
+ : no->package()->pkgpath_symbol());
+ name = pkgpath;
name.append(1, '.');
if (this->in_function_ != NULL)
{
@@ -9487,9 +9489,9 @@ Forward_declaration_type::do_mangled_name(Gogo* gogo, std::string* ret) const
const Named_object* no = this->named_object();
std::string name;
if (no->package() == NULL)
- name = gogo->package_name();
+ name = gogo->pkgpath_symbol();
else
- name = no->package()->name();
+ name = no->package()->pkgpath_symbol();
name += '.';
name += Gogo::unpack_hidden_name(no->name());
char buf[20];
diff --git a/gcc/go/gofrontend/unsafe.cc b/gcc/go/gofrontend/unsafe.cc
index 930723e0908..5d0c658d523 100644
--- a/gcc/go/gofrontend/unsafe.cc
+++ b/gcc/go/gofrontend/unsafe.cc
@@ -22,7 +22,7 @@ Gogo::import_unsafe(const std::string& local_name, bool is_local_name_exported,
bool add_to_globals;
Package* package = this->add_imported_package("unsafe", local_name,
is_local_name_exported,
- "libgo_unsafe",
+ "libgo_unsafe.unsafe",
location, &add_to_globals);
if (package == NULL)
diff --git a/gcc/go/lang.opt b/gcc/go/lang.opt
index c14df9c6107..eb9ed9a63a0 100644
--- a/gcc/go/lang.opt
+++ b/gcc/go/lang.opt
@@ -53,6 +53,10 @@ fgo-optimize-
Go Joined RejectNegative
-fgo-optimize-<type> Turn on optimization passes in the frontend
+fgo-pkgpath=
+Go Joined RejectNegative
+-fgo-pkgpath=<string> Set Go package path
+
fgo-prefix=
Go Joined RejectNegative
-fgo-prefix=<string> Set package-specific prefix for exported Go names
diff --git a/gcc/graphite-sese-to-poly.c b/gcc/graphite-sese-to-poly.c
index 4a2ca40ba13..cdabd738dc6 100644
--- a/gcc/graphite-sese-to-poly.c
+++ b/gcc/graphite-sese-to-poly.c
@@ -87,8 +87,7 @@ remove_invariant_phi (sese region, gimple_stmt_iterator *psi)
edge e = gimple_phi_arg_edge (phi, entry);
tree var;
gimple stmt;
- gimple_seq stmts;
- gimple_stmt_iterator gsi;
+ gimple_seq stmts = NULL;
if (tree_contains_chrecs (scev, NULL))
scev = gimple_phi_arg_def (phi, entry);
@@ -97,11 +96,7 @@ remove_invariant_phi (sese region, gimple_stmt_iterator *psi)
stmt = gimple_build_assign (res, var);
remove_phi_node (psi, false);
- if (!stmts)
- stmts = gimple_seq_alloc ();
-
- gsi = gsi_last (stmts);
- gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
+ gimple_seq_add_stmt (&stmts, stmt);
gsi_insert_seq_on_edge (e, stmts);
gsi_commit_edge_inserts ();
SSA_NAME_DEF_STMT (res) = stmt;
@@ -2088,11 +2083,7 @@ insert_stmts (scop_p scop, gimple stmt, gimple_seq stmts,
gimple_stmt_iterator gsi;
VEC (gimple, heap) *x = VEC_alloc (gimple, heap, 3);
- if (!stmts)
- stmts = gimple_seq_alloc ();
-
- gsi = gsi_last (stmts);
- gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
+ gimple_seq_add_stmt (&stmts, stmt);
for (gsi = gsi_start (stmts); !gsi_end_p (gsi); gsi_next (&gsi))
VEC_safe_push (gimple, heap, x, gsi_stmt (gsi));
@@ -2107,16 +2098,12 @@ static void
insert_out_of_ssa_copy (scop_p scop, tree res, tree expr, gimple after_stmt)
{
gimple_seq stmts;
- gimple_stmt_iterator si;
gimple_stmt_iterator gsi;
tree var = force_gimple_operand (expr, &stmts, true, NULL_TREE);
gimple stmt = gimple_build_assign (res, var);
VEC (gimple, heap) *x = VEC_alloc (gimple, heap, 3);
- if (!stmts)
- stmts = gimple_seq_alloc ();
- si = gsi_last (stmts);
- gsi_insert_after (&si, stmt, GSI_NEW_STMT);
+ gimple_seq_add_stmt (&stmts, stmt);
for (gsi = gsi_start (stmts); !gsi_end_p (gsi); gsi_next (&gsi))
VEC_safe_push (gimple, heap, x, gsi_stmt (gsi));
@@ -2167,17 +2154,13 @@ static void
insert_out_of_ssa_copy_on_edge (scop_p scop, edge e, tree res, tree expr)
{
gimple_stmt_iterator gsi;
- gimple_seq stmts;
+ gimple_seq stmts = NULL;
tree var = force_gimple_operand (expr, &stmts, true, NULL_TREE);
gimple stmt = gimple_build_assign (res, var);
basic_block bb;
VEC (gimple, heap) *x = VEC_alloc (gimple, heap, 3);
- if (!stmts)
- stmts = gimple_seq_alloc ();
-
- gsi = gsi_last (stmts);
- gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
+ gimple_seq_add_stmt (&stmts, stmt);
for (gsi = gsi_start (stmts); !gsi_end_p (gsi); gsi_next (&gsi))
VEC_safe_push (gimple, heap, x, gsi_stmt (gsi));
diff --git a/gcc/ifcvt.c b/gcc/ifcvt.c
index 79e27380283..6f2101ef80b 100644
--- a/gcc/ifcvt.c
+++ b/gcc/ifcvt.c
@@ -1520,8 +1520,7 @@ noce_try_cmove_arith (struct noce_if_info *if_info)
&& MEM_ADDR_SPACE (a) == MEM_ADDR_SPACE (b)
&& if_info->branch_cost >= 5)
{
- enum machine_mode address_mode
- = targetm.addr_space.address_mode (MEM_ADDR_SPACE (a));
+ enum machine_mode address_mode = get_address_mode (a);
a = XEXP (a, 0);
b = XEXP (b, 0);
diff --git a/gcc/ipa-cp.c b/gcc/ipa-cp.c
index eb8d20d94f6..533398b4f7b 100644
--- a/gcc/ipa-cp.c
+++ b/gcc/ipa-cp.c
@@ -2445,7 +2445,6 @@ ipcp_driver (void)
struct cgraph_2edge_hook_list *edge_duplication_hook_holder;
struct topo_info topo;
- cgraph_remove_unreachable_nodes (true,dump_file);
ipa_check_create_node_params ();
ipa_check_create_edge_args ();
grow_next_edge_clone_vector ();
diff --git a/gcc/ipa-inline.c b/gcc/ipa-inline.c
index eb3d42d25d6..c3482edf087 100644
--- a/gcc/ipa-inline.c
+++ b/gcc/ipa-inline.c
@@ -1097,45 +1097,6 @@ update_callee_keys (fibheap_t heap, struct cgraph_node *node,
}
}
-/* Recompute heap nodes for each of caller edges of each of callees.
- Walk recursively into all inline clones. */
-
-static void
-update_all_callee_keys (fibheap_t heap, struct cgraph_node *node,
- bitmap updated_nodes)
-{
- struct cgraph_edge *e = node->callees;
- if (!e)
- return;
- while (true)
- if (!e->inline_failed && e->callee->callees)
- e = e->callee->callees;
- else
- {
- struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee,
- NULL);
-
- /* We inlined and thus callees might have different number of calls.
- Reset their caches */
- reset_node_growth_cache (callee);
- if (e->inline_failed)
- update_caller_keys (heap, callee, updated_nodes, e);
- if (e->next_callee)
- e = e->next_callee;
- else
- {
- do
- {
- if (e->caller == node)
- return;
- e = e->caller->callers;
- }
- while (!e->next_callee);
- e = e->next_callee;
- }
- }
-}
-
/* Enqueue all recursive calls from NODE into priority queue depending on
how likely we want to recursively inline the call. */
@@ -1488,7 +1449,7 @@ inline_small_functions (void)
at once. Consequently we need to update all callee keys. */
if (flag_indirect_inlining)
add_new_edges_to_heap (heap, new_indirect_edges);
- update_all_callee_keys (heap, where, updated_nodes);
+ update_callee_keys (heap, where, updated_nodes);
}
else
{
@@ -1527,18 +1488,7 @@ inline_small_functions (void)
reset_edge_caches (edge->callee);
reset_node_growth_cache (callee);
- /* We inlined last offline copy to the body. This might lead
- to callees of function having fewer call sites and thus they
- may need updating.
-
- FIXME: the callee size could also shrink because more information
- is propagated from caller. We don't track when this happen and
- thus we need to recompute everything all the time. Once this is
- solved, "|| 1" should go away. */
- if (callee->global.inlined_to || 1)
- update_all_callee_keys (heap, callee, updated_nodes);
- else
- update_callee_keys (heap, edge->callee, updated_nodes);
+ update_callee_keys (heap, edge->callee, updated_nodes);
}
where = edge->caller;
if (where->global.inlined_to)
@@ -1551,11 +1501,6 @@ inline_small_functions (void)
called by function we inlined (since number of it inlinable callers
might change). */
update_caller_keys (heap, where, updated_nodes, NULL);
-
- /* We removed one call of the function we just inlined. If offline
- copy is still needed, be sure to update the keys. */
- if (callee != where && !callee->global.inlined_to)
- update_caller_keys (heap, callee, updated_nodes, NULL);
bitmap_clear (updated_nodes);
if (dump_file)
@@ -1717,7 +1662,7 @@ ipa_inline (void)
}
inline_small_functions ();
- cgraph_remove_unreachable_nodes (true, dump_file);
+ symtab_remove_unreachable_nodes (true, dump_file);
free (order);
/* We already perform some inlining of functions called once during
diff --git a/gcc/ipa-prop.c b/gcc/ipa-prop.c
index 02d6c316276..af0f335a993 100644
--- a/gcc/ipa-prop.c
+++ b/gcc/ipa-prop.c
@@ -1626,7 +1626,7 @@ ipa_analyze_params_uses (struct cgraph_node *node,
visit_ref_for_mod_analysis,
visit_ref_for_mod_analysis);
}
- for (gsi = gsi_start (phi_nodes (bb)); !gsi_end_p (gsi); gsi_next (&gsi))
+ for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
walk_stmt_load_store_addr_ops (gsi_stmt (gsi), info,
visit_ref_for_mod_analysis,
visit_ref_for_mod_analysis,
@@ -2513,7 +2513,8 @@ ipa_modify_call_arguments (struct cgraph_edge *cs, gimple stmt,
tree type = adj->type;
unsigned int align;
unsigned HOST_WIDE_INT misalign;
- align = get_pointer_alignment_1 (base, &misalign);
+
+ get_pointer_alignment_1 (base, &align, &misalign);
misalign += (double_int_sext (tree_to_double_int (off),
TYPE_PRECISION (TREE_TYPE (off))).low
* BITS_PER_UNIT);
diff --git a/gcc/ipa.c b/gcc/ipa.c
index a722386934b..a107c6f0ec5 100644
--- a/gcc/ipa.c
+++ b/gcc/ipa.c
@@ -33,6 +33,7 @@ along with GCC; see the file COPYING3. If not see
#include "tree-iterator.h"
#include "ipa-utils.h"
#include "pointer-set.h"
+#include "ipa-inline.h"
/* Look for all functions inlined to NODE and update their inlined_to pointers
to INLINED_TO. */
@@ -49,7 +50,7 @@ update_inlined_to_pointer (struct cgraph_node *node, struct cgraph_node *inlined
}
}
-/* Add cgraph NODE to queue starting at FIRST.
+/* Add symtab NODE to queue starting at FIRST.
The queue is linked via AUX pointers and terminated by pointer to 1.
We enqueue nodes at two occasions: when we find them reachable or when we find
@@ -58,8 +59,8 @@ update_inlined_to_pointer (struct cgraph_node *node, struct cgraph_node *inlined
reachable. */
static void
-enqueue_cgraph_node (struct cgraph_node *node, struct cgraph_node **first,
- struct pointer_set_t *reachable)
+enqueue_node (symtab_node node, symtab_node *first,
+ struct pointer_set_t *reachable)
{
/* Node is still in queue; do nothing. */
if (node->symbol.aux && node->symbol.aux != (void *) 2)
@@ -72,21 +73,11 @@ enqueue_cgraph_node (struct cgraph_node *node, struct cgraph_node **first,
*first = node;
}
-/* Add varpool NODE to queue starting at FIRST. */
-
-static void
-enqueue_varpool_node (struct varpool_node *node, struct varpool_node **first)
-{
- node->symbol.aux = *first;
- *first = node;
-}
-
/* Process references. */
static void
process_references (struct ipa_ref_list *list,
- struct cgraph_node **first,
- struct varpool_node **first_varpool,
+ symtab_node *first,
bool before_inlining_p,
struct pointer_set_t *reachable)
{
@@ -97,18 +88,21 @@ process_references (struct ipa_ref_list *list,
if (symtab_function_p (ref->referred))
{
struct cgraph_node *node = ipa_ref_node (ref);
+
if (node->analyzed
&& (!DECL_EXTERNAL (node->symbol.decl)
|| node->alias
|| before_inlining_p))
pointer_set_insert (reachable, node);
- enqueue_cgraph_node (node, first, reachable);
+ enqueue_node ((symtab_node) node, first, reachable);
}
else
{
struct varpool_node *node = ipa_ref_varpool_node (ref);
- if (!pointer_set_insert (reachable, node))
- enqueue_varpool_node (node, first_varpool);
+
+ if (node->analyzed)
+ pointer_set_insert (reachable, node);
+ enqueue_node ((symtab_node) node, first, reachable);
}
}
}
@@ -162,19 +156,63 @@ has_addr_references_p (struct cgraph_node *node,
}
/* Perform reachability analysis and reclaim all unreachable nodes.
- If BEFORE_INLINING_P is true this function is called before inlining
- decisions has been made. If BEFORE_INLINING_P is false this function also
- removes unneeded bodies of extern inline functions. */
+
+ The algorithm is basically mark&sweep but with some extra refinements:
+
+ - reachable extern inline functions need special handling; the bodies need
+ to stay in memory until inlining in hope that they will be inlined.
+ After inlining we release their bodies and turn them into unanalyzed
+ nodes even when they are reachable.
+
+ BEFORE_INLINING_P specifies whether we are before or after inlining.
+
+ - virtual functions are kept in callgraph even if they seem unreachable in
+ hope calls to them will be devirtualized.
+
+ Again we remove them after inlining. In late optimization some
+ devirtualization may happen, but it is not important since we won't inline
+ the call. In theory early opts and IPA should work out all important cases.
+
+ - virtual clones need bodies of their origins for later materialization;
+ this means that we want to keep the body even if the origin is unreachable
+ otherwise. To avoid origin from sitting in the callgraph and being
+ walked by IPA passes, we turn them into unanalyzed nodes with body
+ defined.
+
+ We maintain a set of function declarations whose bodies need to stay in
+ body_needed_for_clonning
+
+ Inline clones represent a special case: their declarations match the
+ declaration of origin and cgraph_remove_node already knows how to
+ reshape callgraph and preserve body when offline copy of function or
+ inline clone is being removed.
+
+ We maintain a queue of both reachable symbols (i.e. defined symbols that need
+ to stay) and symbols that are in boundary (i.e. external symbols referenced
+ by reachable symbols or origins of clones). The queue is represented
+ as linked list by AUX pointer terminated by 1.
+
+ At the end we keep all reachable symbols. For symbols in the boundary we always
+ turn definition into a declaration, but we may keep function body around
+ based on body_needed_for_clonning
+
+ All symbols that enter the queue have AUX pointer non-zero and are in the
+ boundary. Pointer set REACHABLE is used to track reachable symbols.
+
+ Every symbol can be visited twice - once as part of boundary and once
+ as real reachable symbol. enqueue_node needs to decide whether the
+ node needs to be re-queued for second processing. For this purpose
+ we set AUX pointer of processed symbols in the boundary to constant 2. */
bool
-cgraph_remove_unreachable_nodes (bool before_inlining_p, FILE *file)
+symtab_remove_unreachable_nodes (bool before_inlining_p, FILE *file)
{
- struct cgraph_node *first = (struct cgraph_node *) (void *) 1;
- struct varpool_node *first_varpool = (struct varpool_node *) (void *) 1;
+ symtab_node first = (symtab_node) (void *) 1;
struct cgraph_node *node, *next;
struct varpool_node *vnode, *vnext;
bool changed = false;
struct pointer_set_t *reachable = pointer_set_create ();
+ struct pointer_set_t *body_needed_for_clonning = pointer_set_create ();
#ifdef ENABLE_CHECKING
verify_symtab ();
@@ -191,8 +229,8 @@ cgraph_remove_unreachable_nodes (bool before_inlining_p, FILE *file)
This is mostly when they can be referenced externally. Inline clones
are special since their declarations are shared with master clone and thus
cgraph_can_remove_if_no_direct_calls_and_refs_p should not be called on them. */
- FOR_EACH_FUNCTION (node)
- if (node->analyzed && !node->global.inlined_to
+ FOR_EACH_DEFINED_FUNCTION (node)
+ if (!node->global.inlined_to
&& (!cgraph_can_remove_if_no_direct_calls_and_refs_p (node)
/* Keep around virtual functions for possible devirtualization. */
|| (before_inlining_p
@@ -200,198 +238,126 @@ cgraph_remove_unreachable_nodes (bool before_inlining_p, FILE *file)
&& (DECL_COMDAT (node->symbol.decl) || DECL_EXTERNAL (node->symbol.decl)))))
{
gcc_assert (!node->global.inlined_to);
- enqueue_cgraph_node (node, &first, reachable);
pointer_set_insert (reachable, node);
+ enqueue_node ((symtab_node)node, &first, reachable);
}
else
gcc_assert (!node->symbol.aux);
/* Mark variables that are obviously needed. */
- FOR_EACH_VARIABLE (vnode)
+ FOR_EACH_DEFINED_VARIABLE (vnode)
+ if (!varpool_can_remove_if_no_refs (vnode))
+ {
+ pointer_set_insert (reachable, vnode);
+ enqueue_node ((symtab_node)vnode, &first, reachable);
+ }
+
+ /* Perform reachability analysis. */
+ while (first != (symtab_node) (void *) 1)
{
- if ((vnode->analyzed || vnode->symbol.force_output)
- && !varpool_can_remove_if_no_refs (vnode))
- {
- pointer_set_insert (reachable, vnode);
- enqueue_varpool_node (vnode, &first_varpool);
- }
- }
+ bool in_boundary_p = !pointer_set_contains (reachable, first);
+ symtab_node node = first;
- /* Perform reachability analysis. As a special case do not consider
- extern inline functions not inlined as live because we won't output
- them at all.
+ first = (symtab_node)first->symbol.aux;
- We maintain two worklist, one for cgraph nodes other for varpools and
- are finished once both are empty. */
+ /* If we are processing symbol in boundary, mark its AUX pointer for
+ possible later re-processing in enqueue_node. */
+ if (in_boundary_p)
+ node->symbol.aux = (void *)2;
+ else
+ {
+ /* If any symbol in a comdat group is reachable, force
+ all other in the same comdat group to be also reachable. */
+ if (node->symbol.same_comdat_group)
+ {
+ symtab_node next;
+ for (next = node->symbol.same_comdat_group;
+ next != node;
+ next = next->symbol.same_comdat_group)
+ if (!pointer_set_insert (reachable, next))
+ enqueue_node ((symtab_node) next, &first, reachable);
+ }
+ /* Mark references as reachable. */
+ process_references (&node->symbol.ref_list, &first,
+ before_inlining_p, reachable);
+ }
- while (first != (struct cgraph_node *) (void *) 1
- || first_varpool != (struct varpool_node *) (void *) 1)
- {
- if (first != (struct cgraph_node *) (void *) 1)
+ if (symtab_function_p (node))
{
- struct cgraph_edge *e;
- node = first;
- first = (struct cgraph_node *) first->symbol.aux;
- if (!pointer_set_contains (reachable, node))
- node->symbol.aux = (void *)2;
- /* If we found this node reachable, first mark on the callees
- reachable too, unless they are direct calls to extern inline functions
- we decided to not inline. */
- else
+ struct cgraph_node *cnode = cgraph (node);
+
+ /* Mark the callees reachable unless they are direct calls to extern
+ inline functions we decided to not inline. */
+ if (!in_boundary_p)
{
- for (e = node->callees; e; e = e->next_callee)
+ struct cgraph_edge *e;
+ for (e = cnode->callees; e; e = e->next_callee)
{
- if (node->analyzed
+ if (e->callee->analyzed
&& (!e->inline_failed
|| !DECL_EXTERNAL (e->callee->symbol.decl)
- || node->alias
+ || cnode->alias
|| before_inlining_p))
pointer_set_insert (reachable, e->callee);
- enqueue_cgraph_node (e->callee, &first, reachable);
- }
- process_references (&node->symbol.ref_list, &first,
- &first_varpool, before_inlining_p,
- reachable);
-
- /* If any function in a comdat group is reachable, force
- all other functions in the same comdat group to be
- also reachable. */
- if (node->symbol.same_comdat_group
- && !node->global.inlined_to)
- {
- for (next = cgraph (node->symbol.same_comdat_group);
- next != node;
- next = cgraph (next->symbol.same_comdat_group))
- if (!pointer_set_insert (reachable, next))
- enqueue_cgraph_node (next, &first, reachable);
+ enqueue_node ((symtab_node) e->callee, &first, reachable);
}
+
+ /* When inline clone exists, mark body to be preserved so when removing
+ offline copy of the function we don't kill it. */
+ if (!cnode->alias && cnode->global.inlined_to)
+ pointer_set_insert (body_needed_for_clonning, cnode->symbol.decl);
}
- /* We can freely remove inline clones even if they are cloned, however if
- function is clone of real clone, we must keep it around in order to
- make materialize_clones produce function body with the changes
- applied. */
- while (node->clone_of && !node->clone_of->symbol.aux
- && !gimple_has_body_p (node->symbol.decl))
+ /* For non-inline clones, force their origins to the boundary and ensure
+ that body is not removed. */
+ while (cnode->clone_of
+ && !gimple_has_body_p (cnode->symbol.decl))
{
- bool noninline = node->clone_of->symbol.decl != node->symbol.decl;
- node = node->clone_of;
- if (noninline && !pointer_set_insert (reachable, node) && !node->symbol.aux)
+ bool noninline = cnode->clone_of->symbol.decl != cnode->symbol.decl;
+ cnode = cnode->clone_of;
+ if (noninline)
{
- enqueue_cgraph_node (node, &first, reachable);
+ pointer_set_insert (body_needed_for_clonning, cnode->symbol.decl);
+ enqueue_node ((symtab_node)cnode, &first, reachable);
break;
}
}
}
- if (first_varpool != (struct varpool_node *) (void *) 1)
- {
- vnode = first_varpool;
- first_varpool = (struct varpool_node *)first_varpool->symbol.aux;
- vnode->symbol.aux = NULL;
- process_references (&vnode->symbol.ref_list, &first,
- &first_varpool, before_inlining_p,
- reachable);
- /* If any function in a comdat group is reachable, force
- all other functions in the same comdat group to be
- also reachable. */
- if (vnode->symbol.same_comdat_group)
- {
- struct varpool_node *next;
- for (next = varpool (vnode->symbol.same_comdat_group);
- next != vnode;
- next = varpool (next->symbol.same_comdat_group))
- if (!pointer_set_insert (reachable, next))
- enqueue_varpool_node (next, &first_varpool);
- }
- }
}
- /* Remove unreachable nodes.
-
- Completely unreachable functions can be fully removed from the callgraph.
- Extern inline functions that we decided to not inline need to become unanalyzed nodes of
- callgraph (so we still have edges to them). We remove function body then.
-
- Also we need to care functions that are unreachable but we need to keep them around
- for later clonning. In this case we also turn them to unanalyzed nodes, but
- keep the body around. */
+ /* Remove unreachable functions. */
for (node = cgraph_first_function (); node; node = next)
{
next = cgraph_next_function (node);
- if (node->symbol.aux && !pointer_set_contains (reachable, node))
- {
- cgraph_node_remove_callees (node);
- ipa_remove_all_references (&node->symbol.ref_list);
- node->analyzed = false;
- }
if (!node->symbol.aux)
{
- struct cgraph_edge *e;
- bool found = false;
- int i;
- struct ipa_ref *ref;
-
- node->global.inlined_to = NULL;
if (file)
fprintf (file, " %s", cgraph_node_name (node));
- /* See if there is reachable caller. */
- for (e = node->callers; e && !found; e = e->next_caller)
- if (pointer_set_contains (reachable, e->caller))
- found = true;
- for (i = 0; (ipa_ref_list_referring_iterate (&node->symbol.ref_list,
- i, ref)
- && !found); i++)
- if (pointer_set_contains (reachable, ref->referring))
- found = true;
-
- /* If so, we need to keep node in the callgraph. */
- if (found)
- {
- if (node->analyzed)
- {
- struct cgraph_node *clone;
-
- /* If there are still clones, we must keep body around.
- Otherwise we can just remove the body but keep the clone. */
- for (clone = node->clones; clone;
- clone = clone->next_sibling_clone)
- if (clone->symbol.aux)
- break;
- if (!clone)
- {
- cgraph_release_function_body (node);
- if (node->prev_sibling_clone)
- node->prev_sibling_clone->next_sibling_clone = node->next_sibling_clone;
- else if (node->clone_of)
- node->clone_of->clones = node->next_sibling_clone;
- if (node->next_sibling_clone)
- node->next_sibling_clone->prev_sibling_clone = node->prev_sibling_clone;
- if (node->clone_of)
- node->former_clone_of = node->clone_of->symbol.decl;
- node->clone_of = NULL;
- node->next_sibling_clone = NULL;
- node->prev_sibling_clone = NULL;
- }
- else
- gcc_assert (!clone->symbol.in_other_partition);
- node->analyzed = false;
- changed = true;
- cgraph_node_remove_callees (node);
- ipa_remove_all_references (&node->symbol.ref_list);
- }
- }
- else
+ cgraph_remove_node (node);
+ changed = true;
+ }
+ else if (!pointer_set_contains (reachable, node))
+ {
+ if (node->analyzed)
{
- cgraph_remove_node (node);
+ if (file)
+ fprintf (file, " %s", cgraph_node_name (node));
+ cgraph_node_remove_callees (node);
+ ipa_remove_all_references (&node->symbol.ref_list);
changed = true;
}
+ if (!pointer_set_contains (body_needed_for_clonning, node->symbol.decl)
+ && !DECL_ARTIFICIAL (node->symbol.decl))
+ cgraph_release_function_body (node);
+ node->analyzed = false;
}
}
+
+ /* Inline clones might be kept around so their materializing allows further
+ cloning. If the function the clone is inlined into is removed, we need
+ to turn it into normal clone. */
FOR_EACH_FUNCTION (node)
{
- /* Inline clones might be kept around so their materializing allows further
- cloning. If the function the clone is inlined into is removed, we need
- to turn it into normal cone. */
if (node->global.inlined_to
&& !node->callers)
{
@@ -402,25 +368,38 @@ cgraph_remove_unreachable_nodes (bool before_inlining_p, FILE *file)
node->symbol.aux = NULL;
}
+ /* Remove unreachable variables. */
if (file)
- fprintf (file, "\n");
-
- if (file)
- fprintf (file, "Reclaiming variables:");
+ fprintf (file, "\nReclaiming variables:");
for (vnode = varpool_first_variable (); vnode; vnode = vnext)
{
vnext = varpool_next_variable (vnode);
- if (!pointer_set_contains (reachable, vnode))
- {
+ if (!vnode->symbol.aux)
+ {
if (file)
fprintf (file, " %s", varpool_node_name (vnode));
varpool_remove_node (vnode);
changed = true;
}
+ else if (!pointer_set_contains (reachable, vnode))
+ {
+ if (vnode->analyzed)
+ {
+ if (file)
+ fprintf (file, " %s", varpool_node_name (vnode));
+ changed = true;
+ }
+ vnode->analyzed = false;
+ vnode->symbol.aux = NULL;
+ }
+ else
+ vnode->symbol.aux = NULL;
}
- /* Now update address_taken flags and try to promote functions to be local. */
+ pointer_set_destroy (reachable);
+ pointer_set_destroy (body_needed_for_clonning);
+ /* Now update address_taken flags and try to promote functions to be local. */
if (file)
fprintf (file, "\nClearing address taken flags:");
FOR_EACH_DEFINED_FUNCTION (node)
@@ -444,18 +423,18 @@ cgraph_remove_unreachable_nodes (bool before_inlining_p, FILE *file)
if (file)
fprintf (file, "\n");
- /* Rest of transformations are undesirable at -O0. */
- if (!optimize)
- return changed;
-
#ifdef ENABLE_CHECKING
verify_symtab ();
#endif
+ /* If we removed something, perhaps profile could be improved. */
+ if (changed && optimize && inline_edge_summary_vec)
+ FOR_EACH_DEFINED_FUNCTION (node)
+ cgraph_propagate_frequency (node);
+
/* Reclaim alias pairs for functions that have disappeared from the
call graph. */
remove_unreachable_alias_pairs ();
- pointer_set_destroy (reachable);
return changed;
}
diff --git a/gcc/ira.c b/gcc/ira.c
index 456c5f0bcb3..f0d885c8813 100644
--- a/gcc/ira.c
+++ b/gcc/ira.c
@@ -4125,7 +4125,12 @@ ira (FILE *f)
}
allocated_reg_info_size = max_reg_num ();
- find_moveable_pseudos ();
+
+ /* It is not worth doing such an improvement when we use a simple
+ allocation because of -O0 usage or because the function is too
+ big. */
+ if (ira_conflicts_p)
+ find_moveable_pseudos ();
max_regno_before_ira = max_reg_num ();
ira_setup_eliminable_regset ();
@@ -4234,7 +4239,10 @@ ira (FILE *f)
max_regno * sizeof (struct ira_spilled_reg_stack_slot));
}
allocate_initial_values (reg_equivs);
- move_unallocated_pseudos ();
+
+ /* See comment for find_moveable_pseudos call. */
+ if (ira_conflicts_p)
+ move_unallocated_pseudos ();
}
static void
diff --git a/gcc/jump.c b/gcc/jump.c
index 4c4b00118f4..246fab02ff3 100644
--- a/gcc/jump.c
+++ b/gcc/jump.c
@@ -275,13 +275,13 @@ mark_all_labels (rtx f)
/* In cfglayout mode, there may be non-insns between the
basic blocks. If those non-insns represent tablejump data,
they contain label references that we must record. */
- for (insn = bb->il.rtl->header; insn; insn = NEXT_INSN (insn))
+ for (insn = BB_HEADER (bb); insn; insn = NEXT_INSN (insn))
if (INSN_P (insn))
{
gcc_assert (JUMP_TABLE_DATA_P (insn));
mark_jump_label (PATTERN (insn), insn, 0);
}
- for (insn = bb->il.rtl->footer; insn; insn = NEXT_INSN (insn))
+ for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
if (INSN_P (insn))
{
gcc_assert (JUMP_TABLE_DATA_P (insn));
diff --git a/gcc/langhooks.c b/gcc/langhooks.c
index cb5da8c36fe..340cc99fa29 100644
--- a/gcc/langhooks.c
+++ b/gcc/langhooks.c
@@ -39,6 +39,7 @@ along with GCC; see the file COPYING3. If not see
#include "diagnostic.h"
#include "tree-diagnostic.h"
#include "cgraph.h"
+#include "timevar.h"
#include "output.h"
/* Do nothing; in many cases the default hook. */
@@ -298,10 +299,7 @@ write_global_declarations (void)
tree globals, decl, *vec;
int len, i;
- /* This lang hook is dual-purposed, and also finalizes the
- compilation unit. */
- finalize_compilation_unit ();
-
+ timevar_start (TV_PHASE_DEFERRED);
/* Really define vars that have had only a tentative definition.
Really output inline functions that must actually be callable
and have not been output so far. */
@@ -318,7 +316,17 @@ write_global_declarations (void)
wrapup_global_declarations (vec, len);
check_global_declarations (vec, len);
+ timevar_stop (TV_PHASE_DEFERRED);
+
+ timevar_start (TV_PHASE_CGRAPH);
+ /* This lang hook is dual-purposed, and also finalizes the
+ compilation unit. */
+ finalize_compilation_unit ();
+ timevar_stop (TV_PHASE_CGRAPH);
+
+ timevar_start (TV_PHASE_CHECK_DBGINFO);
emit_debug_global_declarations (vec, len);
+ timevar_stop (TV_PHASE_CHECK_DBGINFO);
/* Clean up. */
free (vec);
diff --git a/gcc/lower-subreg.c b/gcc/lower-subreg.c
index a11b33d1391..13442064c52 100644
--- a/gcc/lower-subreg.c
+++ b/gcc/lower-subreg.c
@@ -135,13 +135,11 @@ static int
shift_cost (bool speed_p, struct cost_rtxes *rtxes, enum rtx_code code,
enum machine_mode mode, int op1)
{
- PUT_MODE (rtxes->target, mode);
PUT_CODE (rtxes->shift, code);
PUT_MODE (rtxes->shift, mode);
PUT_MODE (rtxes->source, mode);
XEXP (rtxes->shift, 1) = GEN_INT (op1);
- SET_SRC (rtxes->set) = rtxes->shift;
- return insn_rtx_cost (rtxes->set, speed_p);
+ return set_src_cost (rtxes->shift, speed_p);
}
/* For each X in the range [0, BITS_PER_WORD), set SPLITTING[X]
@@ -189,11 +187,12 @@ compute_costs (bool speed_p, struct cost_rtxes *rtxes)
unsigned int i;
int word_move_zero_cost, word_move_cost;
+ PUT_MODE (rtxes->target, word_mode);
SET_SRC (rtxes->set) = CONST0_RTX (word_mode);
- word_move_zero_cost = insn_rtx_cost (rtxes->set, speed_p);
+ word_move_zero_cost = set_rtx_cost (rtxes->set, speed_p);
SET_SRC (rtxes->set) = rtxes->source;
- word_move_cost = insn_rtx_cost (rtxes->set, speed_p);
+ word_move_cost = set_rtx_cost (rtxes->set, speed_p);
if (LOG_COSTS)
fprintf (stderr, "%s move: from zero cost %d, from reg cost %d\n",
@@ -209,7 +208,7 @@ compute_costs (bool speed_p, struct cost_rtxes *rtxes)
PUT_MODE (rtxes->target, mode);
PUT_MODE (rtxes->source, mode);
- mode_move_cost = insn_rtx_cost (rtxes->set, speed_p);
+ mode_move_cost = set_rtx_cost (rtxes->set, speed_p);
if (LOG_COSTS)
fprintf (stderr, "%s move: original cost %d, split cost %d * %d\n",
@@ -236,10 +235,8 @@ compute_costs (bool speed_p, struct cost_rtxes *rtxes)
/* The only case here to check to see if moving the upper part with a
zero is cheaper than doing the zext itself. */
- PUT_MODE (rtxes->target, twice_word_mode);
PUT_MODE (rtxes->source, word_mode);
- SET_SRC (rtxes->set) = rtxes->zext;
- zext_cost = insn_rtx_cost (rtxes->set, speed_p);
+ zext_cost = set_src_cost (rtxes->zext, speed_p);
if (LOG_COSTS)
fprintf (stderr, "%s %s: original cost %d, split cost %d + %d\n",
@@ -1490,9 +1487,7 @@ decompose_multiword_subregs (void)
FOR_EACH_BB (bb)
{
rtx insn;
- bool speed_p;
- speed_p = optimize_bb_for_speed_p (bb);
FOR_BB_INSNS (bb, insn)
{
rtx pat;
diff --git a/gcc/lto-streamer-in.c b/gcc/lto-streamer-in.c
index 24d8d4f6025..5d617755ac4 100644
--- a/gcc/lto-streamer-in.c
+++ b/gcc/lto-streamer-in.c
@@ -599,10 +599,7 @@ make_new_block (struct function *fn, unsigned int index)
basic_block bb = alloc_block ();
bb->index = index;
SET_BASIC_BLOCK_FOR_FUNCTION (fn, index, bb);
- bb->il.gimple = ggc_alloc_cleared_gimple_bb_info ();
n_basic_blocks_for_function (fn)++;
- bb->flags = 0;
- set_bb_seq (bb, gimple_seq_alloc ());
return bb;
}
diff --git a/gcc/lto-wrapper.c b/gcc/lto-wrapper.c
index 1c9aa833dbc..b656db251fa 100644
--- a/gcc/lto-wrapper.c
+++ b/gcc/lto-wrapper.c
@@ -414,6 +414,16 @@ merge_and_complain (struct cl_decoded_option **decoded_options,
if (j == *decoded_options_count)
append_option (decoded_options, decoded_options_count, foption);
break;
+
+ case OPT_freg_struct_return:
+ case OPT_fpcc_struct_return:
+ for (j = 0; j < *decoded_options_count; ++j)
+ if ((*decoded_options)[j].opt_index == foption->opt_index)
+ break;
+ if (j == *decoded_options_count)
+ fatal ("Option %s not used consistently in all LTO input files",
+ foption->orig_option_with_args_text);
+ break;
}
}
}
@@ -558,6 +568,8 @@ run_gcc (unsigned argc, char *argv[])
case OPT_fcommon:
case OPT_fexceptions:
case OPT_fgnu_tm:
+ case OPT_freg_struct_return:
+ case OPT_fpcc_struct_return:
break;
default:
@@ -619,6 +631,12 @@ run_gcc (unsigned argc, char *argv[])
/* We've handled these LTO options, do not pass them on. */
continue;
+ case OPT_freg_struct_return:
+ case OPT_fpcc_struct_return:
+ /* Ignore these, they are determined by the input files.
+ ??? We fail to diagnose a possible mismatch here. */
+ continue;
+
default:
break;
}
diff --git a/gcc/lto/ChangeLog b/gcc/lto/ChangeLog
index f9c5d3028bb..4be2f2b04c5 100644
--- a/gcc/lto/ChangeLog
+++ b/gcc/lto/ChangeLog
@@ -1,7 +1,18 @@
+2012-05-04 Jan Hubicka <jh@suse.cz>
+
+ * lto.c (do_whole_program_analysis): Set timevars correctly.
+ (lto_main): Likewise.
+
+2012-05-04 Richard Guenther <rguenther@suse.de>
+
+ * lang.opt (fwpa): Do not mark as Optimization.
+ (fltrans): Likewise.
+
2012-04-30 Jan Hubicka <jh@suse.cz>
* lto.c (lto_main): Use compile ().
- * lto-partition.c (partition_cgraph_node_p): Use symtab_used_from_object_file_p.
+ * lto-partition.c (partition_cgraph_node_p): Use
+ symtab_used_from_object_file_p.
(partition_varpool_node_p): Likewise.
2012-04-20 Jan Hubicka <jh@suse.cz>
diff --git a/gcc/lto/lang.opt b/gcc/lto/lang.opt
index 82857fa604b..f5e9e39026d 100644
--- a/gcc/lto/lang.opt
+++ b/gcc/lto/lang.opt
@@ -25,7 +25,7 @@ Language
LTO
fltrans
-LTO Report Var(flag_ltrans) Optimization
+LTO Report Var(flag_ltrans)
Run the link-time optimizer in local transformation (LTRANS) mode.
fltrans-output-list=
@@ -33,7 +33,7 @@ LTO Joined Var(ltrans_output_list)
Specify a file to which a list of files output by LTRANS is written.
fwpa
-LTO Driver Report Var(flag_wpa) Optimization
+LTO Driver Report Var(flag_wpa)
Run the link-time optimizer in whole program analysis (WPA) mode.
fresolution=
diff --git a/gcc/lto/lto.c b/gcc/lto/lto.c
index 32fc869e983..00f623f6a0d 100644
--- a/gcc/lto/lto.c
+++ b/gcc/lto/lto.c
@@ -1958,6 +1958,7 @@ materialize_cgraph (void)
static void
do_whole_program_analysis (void)
{
+ timevar_start (TV_PHASE_CGRAPH);
/* Note that since we are in WPA mode, materialize_cgraph will not
actually read in all the function bodies. It only materializes
the decls and cgraph nodes so that analysis can be performed. */
@@ -2017,6 +2018,7 @@ do_whole_program_analysis (void)
dump_memory_report (false);
}
+ timevar_stop (TV_PHASE_CGRAPH);
/* Show the LTO report before launching LTRANS. */
if (flag_lto_report)
print_lto_report ();
@@ -2116,7 +2118,9 @@ lto_main (void)
/* Let the middle end know that we have read and merged all of
the input files. */
+ timevar_start (TV_PHASE_CGRAPH);
compile ();
+ timevar_stop (TV_PHASE_CGRAPH);
/* FIXME lto, if the processes spawned by WPA fail, we miss
the chance to print WPA's report, so WPA will call
diff --git a/gcc/melt-run.proto.h b/gcc/melt-run.proto.h
index 5ded95b3be4..5dfed40949d 100644
--- a/gcc/melt-run.proto.h
+++ b/gcc/melt-run.proto.h
@@ -56,6 +56,9 @@ along with GCC; see the file COPYING3. If not see
#include "ggc.h"
#include "cgraph.h"
+/* Notice that gtype-desc.h is included thru ggc.h so all the
+ gt_ggc_mx_* marking routines are visible. */
+
#ifndef MELT_GCC_VERSION
/* Actually, the generated melt-run.h contains a number like 4007 for
GCC 4.7 etc. This is the version of the GCC using this MELT. */
diff --git a/gcc/melt-runtime.c b/gcc/melt-runtime.c
index 0476e4ca234..83332271fdb 100644
--- a/gcc/melt-runtime.c
+++ b/gcc/melt-runtime.c
@@ -13670,4 +13670,16 @@ void melt_clear_flag_debug (void)
melt_flag_debug = 0;
}
+/* With GCC 4.8, the gimple_seq are disappearing because they are the
+same as gimple (with file "coretypes.h" having the definition `typedef
+gimple gimple_seq;`), but our generated runtime support might still
+want their old marking routine. */
+
+#if MELT_GCC_VERSION >= 4008
+void melt_gt_ggc_mx_gimple_seq_d(void*p)
+{
+ gt_ggc_mx_gimple_statement_d (p);
+}
+#endif /* GCC 4.8 */
+
/* eof $Id$ */
diff --git a/gcc/melt-runtime.h b/gcc/melt-runtime.h
index 6c04a60f119..660cd1c8c8d 100644
--- a/gcc/melt-runtime.h
+++ b/gcc/melt-runtime.h
@@ -290,8 +290,8 @@ melt_need_debug_limit (int depth, int lim) {
}
/* unspecified flexible dimension in structure */
-#if defined(__STDC__) && __STDC__VERSION >= 199901L
-#define MELT_FLEXIBLE_DIM /*flexible */
+#if (defined(__STDC__) && __STDC__VERSION >= 199901L) || __cplusplus
+#define MELT_FLEXIBLE_DIM /*C99 or better or C++ flexible*/
#define MELT_HAVE_FLEXIBLE_DIM 1
#elsif __GNUC__>=4
#define MELT_FLEXIBLE_DIM /*gcc flexible*/
@@ -3264,6 +3264,17 @@ extern const char melt_run_preprocessed_md5[]; /* defined in generated file melt
#define flag_melt_bootstrapping melt_flag_bootstrapping
#define flag_melt_debug melt_flag_debug
+
+/* With GCC 4.8, the gimple_seq are disappearing because they are the
+same as gimple (with file "coretypes.h" having the definition `typedef
+gimple gimple_seq;`), but our generated runtime support might still
+want their old marking routine. */
+
+#if MELT_GCC_VERSION >= 4008
+extern void melt_gt_ggc_mx_gimple_seq_d(void*);
+#define gt_ggc_mx_gimple_seq_d melt_gt_ggc_mx_gimple_seq_d
+#endif /* GCC 4.8 */
+
#endif /*MELT_INCLUDED_ */
/* eof $Id$ */
diff --git a/gcc/melt/warmelt-debug.melt b/gcc/melt/warmelt-debug.melt
index f213a9d94df..e0c030c2e0c 100644
--- a/gcc/melt/warmelt-debug.melt
+++ b/gcc/melt/warmelt-debug.melt
@@ -269,7 +269,7 @@ See $MELT_NEED_DBGLIM.}#
:disc_super discr_closure
:named_name '"DISCR_DEBUG_CLOSURE")
-;;; the debug_fun is usually called thru the debug macro.
+;;; the melt_debug_fun is usually called thru the debug macro.
(defun melt_debug_fun (nothing :long count :cstring filename :long lineno :rest)
(if (melt_has_flag_debug_set)
(let ( (:long dbgcounter 0)
@@ -421,12 +421,11 @@ See $MELT_NEED_DBGLIM.}#
(let ( (vctyp (variadic_ctype 0))
(vctypname (get_field :named_name vctyp))
)
- (code_chunk warnbadctype
- #{/* $WARNBADCTYPE */
+ (code_chunk warnbadctype #{/* $WARNBADCTYPE */
warning (0,
"MELT invalid ctype %s in (DEBUG ...) file %s line %d",
melt_string_str ((melt_ptr_t) $VCTYPNAME),
- $FILENAME, $LINENO) ;
+ $FILENAME, (int) $LINENO) ;
}#)
(add2out out "??:" (get_field :ctype_keyword vctyp) "?? ")
)))
diff --git a/gcc/omp-low.c b/gcc/omp-low.c
index ec1a5522d7f..21a5188d214 100644
--- a/gcc/omp-low.c
+++ b/gcc/omp-low.c
@@ -158,7 +158,7 @@ scan_omp_op (tree *tp, omp_context *ctx)
return walk_tree (tp, scan_omp_1_op, &wi, NULL);
}
-static void lower_omp (gimple_seq, omp_context *);
+static void lower_omp (gimple_seq *, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
@@ -336,9 +336,11 @@ extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
switch (TREE_CODE (t))
{
case PLUS_EXPR:
- case POINTER_PLUS_EXPR:
loop->step = TREE_OPERAND (t, 1);
break;
+ case POINTER_PLUS_EXPR:
+ loop->step = fold_convert (ssizetype, TREE_OPERAND (t, 1));
+ break;
case MINUS_EXPR:
loop->step = TREE_OPERAND (t, 1);
loop->step = fold_build1_loc (loc,
@@ -1231,7 +1233,7 @@ finalize_task_copyfn (gimple task_stmt)
{
struct function *child_cfun;
tree child_fn, old_fn;
- gimple_seq seq, new_seq;
+ gimple_seq seq = NULL, new_seq;
gimple bind;
child_fn = gimple_omp_task_copy_fn (task_stmt);
@@ -1248,13 +1250,12 @@ finalize_task_copyfn (gimple task_stmt)
push_cfun (child_cfun);
current_function_decl = child_fn;
bind = gimplify_body (child_fn, false);
- seq = gimple_seq_alloc ();
gimple_seq_add_stmt (&seq, bind);
new_seq = maybe_catch_exception (seq);
if (new_seq != seq)
{
bind = gimple_build_bind (NULL, new_seq, NULL);
- seq = gimple_seq_alloc ();
+ seq = NULL;
gimple_seq_add_stmt (&seq, bind);
}
gimple_set_body (child_fn, seq);
@@ -2229,14 +2230,11 @@ static void
lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
omp_context *ctx)
{
- gimple_stmt_iterator diter;
tree c, dtor, copyin_seq, x, ptr;
bool copyin_by_ref = false;
bool lastprivate_firstprivate = false;
int pass;
- *dlist = gimple_seq_alloc ();
- diter = gsi_start (*dlist);
copyin_seq = NULL;
/* Do all the fixed sized types in the first pass, and the variable sized
@@ -2425,7 +2423,7 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
dtor = x;
gimplify_stmt (&dtor, &tseq);
- gsi_insert_seq_before (&diter, tseq, GSI_SAME_STMT);
+ gimple_seq_add_seq (dlist, tseq);
}
break;
@@ -2468,7 +2466,7 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
x = build_fold_addr_expr_loc (clause_loc, x);
SET_DECL_VALUE_EXPR (placeholder, x);
DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
- lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
+ lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
gimple_seq_add_seq (ilist,
OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
@@ -2572,7 +2570,7 @@ lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
{
- lower_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
+ lower_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
gimple_seq_add_seq (stmt_list,
OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
}
@@ -2676,7 +2674,7 @@ lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
ref = build_fold_addr_expr_loc (clause_loc, ref);
SET_DECL_VALUE_EXPR (placeholder, ref);
DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
- lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
+ lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
@@ -3499,7 +3497,8 @@ expand_omp_taskreg (struct omp_region *region)
&& !DECL_EXTERNAL (t))
varpool_finalize_decl (t);
DECL_SAVED_TREE (child_fn) = NULL;
- gimple_set_body (child_fn, bb_seq (single_succ (entry_bb)));
+ /* We'll create a CFG for child_fn, so no gimple body is needed. */
+ gimple_set_body (child_fn, NULL);
TREE_USED (block) = 1;
/* Reset DECL_CONTEXT on function arguments. */
@@ -5787,9 +5786,8 @@ lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
tree block, control;
gimple_stmt_iterator tgsi;
- unsigned i, len;
gimple stmt, new_stmt, bind, t;
- gimple_seq ilist, dlist, olist, new_body, body;
+ gimple_seq ilist, dlist, olist, new_body;
struct gimplify_ctx gctx;
stmt = gsi_stmt (*gsi_p);
@@ -5801,13 +5799,10 @@ lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
&ilist, &dlist, ctx);
- tgsi = gsi_start (gimple_omp_body (stmt));
- for (len = 0; !gsi_end_p (tgsi); len++, gsi_next (&tgsi))
- continue;
-
- tgsi = gsi_start (gimple_omp_body (stmt));
- body = NULL;
- for (i = 0; i < len; i++, gsi_next (&tgsi))
+ new_body = gimple_omp_body (stmt);
+ gimple_omp_set_body (stmt, NULL);
+ tgsi = gsi_start (new_body);
+ for (; !gsi_end_p (tgsi); gsi_next (&tgsi))
{
omp_context *sctx;
gimple sec_start;
@@ -5816,32 +5811,33 @@ lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
sctx = maybe_lookup_ctx (sec_start);
gcc_assert (sctx);
- gimple_seq_add_stmt (&body, sec_start);
-
- lower_omp (gimple_omp_body (sec_start), sctx);
- gimple_seq_add_seq (&body, gimple_omp_body (sec_start));
+ lower_omp (gimple_omp_body_ptr (sec_start), sctx);
+ gsi_insert_seq_after (&tgsi, gimple_omp_body (sec_start),
+ GSI_CONTINUE_LINKING);
gimple_omp_set_body (sec_start, NULL);
- if (i == len - 1)
+ if (gsi_one_before_end_p (tgsi))
{
gimple_seq l = NULL;
lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
&l, ctx);
- gimple_seq_add_seq (&body, l);
+ gsi_insert_seq_after (&tgsi, l, GSI_CONTINUE_LINKING);
gimple_omp_section_set_last (sec_start);
}
- gimple_seq_add_stmt (&body, gimple_build_omp_return (false));
+ gsi_insert_after (&tgsi, gimple_build_omp_return (false),
+ GSI_CONTINUE_LINKING);
}
block = make_node (BLOCK);
- bind = gimple_build_bind (NULL, body, block);
+ bind = gimple_build_bind (NULL, new_body, block);
olist = NULL;
lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
block = make_node (BLOCK);
new_stmt = gimple_build_bind (NULL, NULL, block);
+ gsi_replace (gsi_p, new_stmt, true);
pop_gimplify_context (new_stmt);
gimple_bind_append_vars (new_stmt, ctx->block_vars);
@@ -5871,9 +5867,6 @@ lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
gimple_seq_add_stmt (&new_body, t);
gimple_bind_set_body (new_stmt, new_body);
- gimple_omp_set_body (stmt, NULL);
-
- gsi_replace (gsi_p, new_stmt, true);
}
@@ -6006,10 +5999,14 @@ lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
push_gimplify_context (&gctx);
+ block = make_node (BLOCK);
+ bind = gimple_build_bind (NULL, NULL, block);
+ gsi_replace (gsi_p, bind, true);
bind_body = NULL;
+ dlist = NULL;
lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
&bind_body, &dlist, ctx);
- lower_omp (gimple_omp_body (single_stmt), ctx);
+ lower_omp (gimple_omp_body_ptr (single_stmt), ctx);
gimple_seq_add_stmt (&bind_body, single_stmt);
@@ -6028,15 +6025,12 @@ lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
(!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
OMP_CLAUSE_NOWAIT));
gimple_seq_add_stmt (&bind_body, t);
-
- block = make_node (BLOCK);
- bind = gimple_build_bind (NULL, bind_body, block);
+ gimple_bind_set_body (bind, bind_body);
pop_gimplify_context (bind);
gimple_bind_append_vars (bind, ctx->block_vars);
BLOCK_VARS (block) = ctx->block_vars;
- gsi_replace (gsi_p, bind, true);
if (BLOCK_VARS (block))
TREE_USED (block) = 1;
}
@@ -6056,8 +6050,9 @@ lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
push_gimplify_context (&gctx);
block = make_node (BLOCK);
- bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
- block);
+ bind = gimple_build_bind (NULL, NULL, block);
+ gsi_replace (gsi_p, bind, true);
+ gimple_bind_add_stmt (bind, stmt);
bfn_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
x = build_call_expr_loc (loc, bfn_decl, 0);
@@ -6067,7 +6062,7 @@ lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
gimplify_and_add (x, &tseq);
gimple_bind_add_seq (bind, tseq);
- lower_omp (gimple_omp_body (stmt), ctx);
+ lower_omp (gimple_omp_body_ptr (stmt), ctx);
gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
gimple_bind_add_seq (bind, gimple_omp_body (stmt));
gimple_omp_set_body (stmt, NULL);
@@ -6080,7 +6075,6 @@ lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
gimple_bind_append_vars (bind, ctx->block_vars);
BLOCK_VARS (block) = ctx->block_vars;
- gsi_replace (gsi_p, bind, true);
}
@@ -6096,14 +6090,15 @@ lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
push_gimplify_context (&gctx);
block = make_node (BLOCK);
- bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
- block);
+ bind = gimple_build_bind (NULL, NULL, block);
+ gsi_replace (gsi_p, bind, true);
+ gimple_bind_add_stmt (bind, stmt);
x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START),
0);
gimple_bind_add_stmt (bind, x);
- lower_omp (gimple_omp_body (stmt), ctx);
+ lower_omp (gimple_omp_body_ptr (stmt), ctx);
gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
gimple_bind_add_seq (bind, gimple_omp_body (stmt));
gimple_omp_set_body (stmt, NULL);
@@ -6117,7 +6112,6 @@ lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
gimple_bind_append_vars (bind, ctx->block_vars);
BLOCK_VARS (block) = gimple_bind_vars (bind);
- gsi_replace (gsi_p, bind, true);
}
@@ -6193,13 +6187,15 @@ lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
push_gimplify_context (&gctx);
block = make_node (BLOCK);
- bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt), block);
+ bind = gimple_build_bind (NULL, NULL, block);
+ gsi_replace (gsi_p, bind, true);
+ gimple_bind_add_stmt (bind, stmt);
tbody = gimple_bind_body (bind);
gimplify_and_add (lock, &tbody);
gimple_bind_set_body (bind, tbody);
- lower_omp (gimple_omp_body (stmt), ctx);
+ lower_omp (gimple_omp_body_ptr (stmt), ctx);
gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
gimple_bind_add_seq (bind, gimple_omp_body (stmt));
gimple_omp_set_body (stmt, NULL);
@@ -6213,7 +6209,6 @@ lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
pop_gimplify_context (bind);
gimple_bind_append_vars (bind, ctx->block_vars);
BLOCK_VARS (block) = gimple_bind_vars (bind);
- gsi_replace (gsi_p, bind, true);
}
@@ -6281,11 +6276,15 @@ lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
push_gimplify_context (&gctx);
- lower_omp (gimple_omp_for_pre_body (stmt), ctx);
- lower_omp (gimple_omp_body (stmt), ctx);
+ lower_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
+ lower_omp (gimple_omp_body_ptr (stmt), ctx);
block = make_node (BLOCK);
new_stmt = gimple_build_bind (NULL, NULL, block);
+ /* Replace at gsi right away, so that 'stmt' is no longer a member
+ of a sequence, as we're going to add it to a different
+ one below. */
+ gsi_replace (gsi_p, new_stmt, true);
/* Move declaration of temporaries in the loop body before we make
it go away. */
@@ -6355,7 +6354,6 @@ lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
gimple_bind_set_body (new_stmt, body);
gimple_omp_set_body (stmt, NULL);
gimple_omp_for_set_pre_body (stmt, NULL);
- gsi_replace (gsi_p, new_stmt, true);
}
/* Callback for walk_stmts. Check if the current statement only contains
@@ -6708,7 +6706,7 @@ lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
par_olist = NULL;
par_ilist = NULL;
lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
- lower_omp (par_body, ctx);
+ lower_omp (&par_body, ctx);
if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
lower_reduction_clauses (clauses, &par_olist, ctx);
@@ -6754,15 +6752,10 @@ lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
gimple_omp_set_body (stmt, new_body);
bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
- gimple_bind_add_stmt (bind, stmt);
- if (ilist || olist)
- {
- gimple_seq_add_stmt (&ilist, bind);
- gimple_seq_add_seq (&ilist, olist);
- bind = gimple_build_bind (NULL, ilist, NULL);
- }
-
gsi_replace (gsi_p, bind, true);
+ gimple_bind_add_seq (bind, ilist);
+ gimple_bind_add_stmt (bind, stmt);
+ gimple_bind_add_seq (bind, olist);
pop_gimplify_context (NULL);
}
@@ -6827,17 +6820,17 @@ lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
gimple_regimplify_operands (stmt, gsi_p);
break;
case GIMPLE_CATCH:
- lower_omp (gimple_catch_handler (stmt), ctx);
+ lower_omp (gimple_catch_handler_ptr (stmt), ctx);
break;
case GIMPLE_EH_FILTER:
- lower_omp (gimple_eh_filter_failure (stmt), ctx);
+ lower_omp (gimple_eh_filter_failure_ptr (stmt), ctx);
break;
case GIMPLE_TRY:
- lower_omp (gimple_try_eval (stmt), ctx);
- lower_omp (gimple_try_cleanup (stmt), ctx);
+ lower_omp (gimple_try_eval_ptr (stmt), ctx);
+ lower_omp (gimple_try_cleanup_ptr (stmt), ctx);
break;
case GIMPLE_BIND:
- lower_omp (gimple_bind_body (stmt), ctx);
+ lower_omp (gimple_bind_body_ptr (stmt), ctx);
break;
case GIMPLE_OMP_PARALLEL:
case GIMPLE_OMP_TASK:
@@ -6890,11 +6883,11 @@ lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
}
static void
-lower_omp (gimple_seq body, omp_context *ctx)
+lower_omp (gimple_seq *body, omp_context *ctx)
{
location_t saved_location = input_location;
- gimple_stmt_iterator gsi = gsi_start (body);
- for (gsi = gsi_start (body); !gsi_end_p (gsi); gsi_next (&gsi))
+ gimple_stmt_iterator gsi;
+ for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
lower_omp_1 (&gsi, ctx);
input_location = saved_location;
}
@@ -6924,7 +6917,7 @@ execute_lower_omp (void)
if (task_shared_vars)
push_gimplify_context (&gctx);
- lower_omp (body, NULL);
+ lower_omp (&body, NULL);
if (task_shared_vars)
pop_gimplify_context (NULL);
}
@@ -7102,7 +7095,7 @@ diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
case GIMPLE_OMP_ORDERED:
case GIMPLE_OMP_CRITICAL:
wi->info = stmt;
- walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
+ walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
wi->info = context;
break;
@@ -7110,9 +7103,9 @@ diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
wi->info = stmt;
/* gimple_omp_for_{index,initial,final} are all DECLs; no need to
walk them. */
- walk_gimple_seq (gimple_omp_for_pre_body (stmt),
- diagnose_sb_2, NULL, wi);
- walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
+ walk_gimple_seq_mod (gimple_omp_for_pre_body_ptr (stmt),
+ diagnose_sb_2, NULL, wi);
+ walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
wi->info = context;
break;
@@ -7185,7 +7178,9 @@ diagnose_omp_structured_block_errors (void)
memset (&wi, 0, sizeof (wi));
wi.want_locations = true;
- walk_gimple_seq (body, diagnose_sb_2, NULL, &wi);
+ walk_gimple_seq_mod (&body, diagnose_sb_2, NULL, &wi);
+
+ gimple_set_body (current_function_decl, body);
splay_tree_delete (all_labels);
all_labels = NULL;
diff --git a/gcc/optabs.c b/gcc/optabs.c
index 7ef513acae6..9a549ff0667 100644
--- a/gcc/optabs.c
+++ b/gcc/optabs.c
@@ -4152,6 +4152,7 @@ prepare_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
if (!SCALAR_FLOAT_MODE_P (mode))
{
rtx result;
+ enum machine_mode ret_mode;
/* Handle a libcall just for the mode we are using. */
libfunc = optab_libfunc (cmp_optab, mode);
@@ -4166,9 +4167,9 @@ prepare_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
libfunc = ulibfunc;
}
+ ret_mode = targetm.libgcc_cmp_return_mode ();
result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
- targetm.libgcc_cmp_return_mode (),
- 2, x, mode, y, mode);
+ ret_mode, 2, x, mode, y, mode);
/* There are two kinds of comparison routines. Biased routines
return 0/1/2, and unbiased routines return -1/0/1. Other parts
@@ -4186,7 +4187,7 @@ prepare_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
if (!TARGET_LIB_INT_CMP_BIASED && !ALL_FIXED_POINT_MODE_P (mode))
{
if (unsignedp)
- x = plus_constant (result, 1);
+ x = plus_constant (ret_mode, result, 1);
else
y = const0_rtx;
}
@@ -8252,7 +8253,7 @@ maybe_legitimize_operand_same_code (enum insn_code icode, unsigned int opno,
enum machine_mode mode;
last = get_last_insn ();
- mode = targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
+ mode = get_address_mode (mem);
mem = replace_equiv_address (mem, copy_to_mode_reg (mode, addr));
if (insn_operand_matches (icode, opno, mem))
{
diff --git a/gcc/opts.c b/gcc/opts.c
index 22c7590f1bd..b6c786f950e 100644
--- a/gcc/opts.c
+++ b/gcc/opts.c
@@ -835,6 +835,10 @@ finish_options (struct gcc_options *opts, struct gcc_options *opts_set,
if (opts->x_warn_unused_value == -1)
opts->x_warn_unused_value = opts->x_warn_unused;
+ /* Wunused-local-typedefs is enabled by -Wunused or -Wall. */
+ if (opts->x_warn_unused_local_typedefs == -1)
+ opts->x_warn_unused_local_typedefs = opts->x_warn_unused;
+
/* This replaces set_Wextra. */
if (opts->x_warn_uninitialized == -1)
opts->x_warn_uninitialized = opts->x_extra_warnings;
diff --git a/gcc/passes.c b/gcc/passes.c
index 0ab775ce6ad..0b6f7e204ad 100644
--- a/gcc/passes.c
+++ b/gcc/passes.c
@@ -186,6 +186,7 @@ rest_of_decl_compilation (tree decl,
if ((at_end
|| !DECL_DEFER_OUTPUT (decl)
|| DECL_INITIAL (decl))
+ && (TREE_CODE (decl) != VAR_DECL || !DECL_HAS_VALUE_EXPR_P (decl))
&& !DECL_EXTERNAL (decl))
{
/* When reading LTO unit, we also read varpool, so do not
@@ -1864,7 +1865,7 @@ execute_todo (unsigned int flags)
if (flags & TODO_remove_functions)
{
gcc_assert (!cfun);
- cgraph_remove_unreachable_nodes (true, dump_file);
+ symtab_remove_unreachable_nodes (true, dump_file);
}
if ((flags & TODO_dump_symtab) && dump_file && !current_function_decl)
@@ -2149,7 +2150,7 @@ execute_one_pass (struct opt_pass *pass)
bool applied = false;
do_per_function (apply_ipa_transforms, (void *)&applied);
if (applied)
- cgraph_remove_unreachable_nodes (true, dump_file);
+ symtab_remove_unreachable_nodes (true, dump_file);
/* Restore current_pass. */
current_pass = pass;
}
diff --git a/gcc/postreload.c b/gcc/postreload.c
index 5c189129445..751483e26fb 100644
--- a/gcc/postreload.c
+++ b/gcc/postreload.c
@@ -64,7 +64,8 @@ static void move2add_note_store (rtx, const_rtx, void *);
/* Call cse / combine like post-reload optimization phases.
FIRST is the first instruction. */
-void
+
+static void
reload_cse_regs (rtx first ATTRIBUTE_UNUSED)
{
bool moves_converted;
diff --git a/gcc/predict.c b/gcc/predict.c
index 8ed2e833cf7..c93586bd502 100644
--- a/gcc/predict.c
+++ b/gcc/predict.c
@@ -946,6 +946,355 @@ combine_predictions_for_bb (basic_block bb)
}
}
+/* Check if T1 and T2 satisfy the IV_COMPARE condition.
+ Return the SSA_NAME if the condition satisfies, NULL otherwise.
+
+ T1 and T2 should be one of the following cases:
+ 1. T1 is SSA_NAME, T2 is NULL
+ 2. T1 is SSA_NAME, T2 is INTEGER_CST between [-4, 4]
+ 3. T2 is SSA_NAME, T1 is INTEGER_CST between [-4, 4] */
+
+static tree
+strips_small_constant (tree t1, tree t2)
+{
+ tree ret = NULL;
+ int value = 0;
+
+ if (!t1)
+ return NULL;
+ else if (TREE_CODE (t1) == SSA_NAME)
+ ret = t1;
+ else if (host_integerp (t1, 0))
+ value = tree_low_cst (t1, 0);
+ else
+ return NULL;
+
+ if (!t2)
+ return ret;
+ else if (host_integerp (t2, 0))
+ value = tree_low_cst (t2, 0);
+ else if (TREE_CODE (t2) == SSA_NAME)
+ {
+ if (ret)
+ return NULL;
+ else
+ ret = t2;
+ }
+
+ if (value <= 4 && value >= -4)
+ return ret;
+ else
+ return NULL;
+}
+
+/* Return the SSA_NAME in T or T's operands.
+ Return NULL if SSA_NAME cannot be found. */
+
+static tree
+get_base_value (tree t)
+{
+ if (TREE_CODE (t) == SSA_NAME)
+ return t;
+
+ if (!BINARY_CLASS_P (t))
+ return NULL;
+
+ switch (TREE_OPERAND_LENGTH (t))
+ {
+ case 1:
+ return strips_small_constant (TREE_OPERAND (t, 0), NULL);
+ case 2:
+ return strips_small_constant (TREE_OPERAND (t, 0),
+ TREE_OPERAND (t, 1));
+ default:
+ return NULL;
+ }
+}
+
+/* Check the compare STMT in LOOP. If it compares an induction
+ variable to a loop invariant, return true, and save
+ LOOP_INVARIANT, COMPARE_CODE and LOOP_STEP.
+ Otherwise return false and set LOOP_INVARIANT to NULL. */
+
+static bool
+is_comparison_with_loop_invariant_p (gimple stmt, struct loop *loop,
+ tree *loop_invariant,
+ enum tree_code *compare_code,
+ int *loop_step,
+ tree *loop_iv_base)
+{
+ tree op0, op1, bound, base;
+ affine_iv iv0, iv1;
+ enum tree_code code;
+ int step;
+
+ code = gimple_cond_code (stmt);
+ *loop_invariant = NULL;
+
+ switch (code)
+ {
+ case GT_EXPR:
+ case GE_EXPR:
+ case NE_EXPR:
+ case LT_EXPR:
+ case LE_EXPR:
+ case EQ_EXPR:
+ break;
+
+ default:
+ return false;
+ }
+
+ op0 = gimple_cond_lhs (stmt);
+ op1 = gimple_cond_rhs (stmt);
+
+ if ((TREE_CODE (op0) != SSA_NAME && TREE_CODE (op0) != INTEGER_CST)
+ || (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op1) != INTEGER_CST))
+ return false;
+ if (!simple_iv (loop, loop_containing_stmt (stmt), op0, &iv0, true))
+ return false;
+ if (!simple_iv (loop, loop_containing_stmt (stmt), op1, &iv1, true))
+ return false;
+ if (TREE_CODE (iv0.step) != INTEGER_CST
+ || TREE_CODE (iv1.step) != INTEGER_CST)
+ return false;
+ if ((integer_zerop (iv0.step) && integer_zerop (iv1.step))
+ || (!integer_zerop (iv0.step) && !integer_zerop (iv1.step)))
+ return false;
+
+ if (integer_zerop (iv0.step))
+ {
+ if (code != NE_EXPR && code != EQ_EXPR)
+ code = invert_tree_comparison (code, false);
+ bound = iv0.base;
+ base = iv1.base;
+ if (host_integerp (iv1.step, 0))
+ step = tree_low_cst (iv1.step, 0);
+ else
+ return false;
+ }
+ else
+ {
+ bound = iv1.base;
+ base = iv0.base;
+ if (host_integerp (iv0.step, 0))
+ step = tree_low_cst (iv0.step, 0);
+ else
+ return false;
+ }
+
+ if (TREE_CODE (bound) != INTEGER_CST)
+ bound = get_base_value (bound);
+ if (!bound)
+ return false;
+ if (TREE_CODE (base) != INTEGER_CST)
+ base = get_base_value (base);
+ if (!base)
+ return false;
+
+ *loop_invariant = bound;
+ *compare_code = code;
+ *loop_step = step;
+ *loop_iv_base = base;
+ return true;
+}
+
+/* Compare two SSA_NAMEs: returns TRUE if T1 and T2 are value coherent. */
+
+static bool
+expr_coherent_p (tree t1, tree t2)
+{
+ gimple stmt;
+ tree ssa_name_1 = NULL;
+ tree ssa_name_2 = NULL;
+
+ gcc_assert (TREE_CODE (t1) == SSA_NAME || TREE_CODE (t1) == INTEGER_CST);
+ gcc_assert (TREE_CODE (t2) == SSA_NAME || TREE_CODE (t2) == INTEGER_CST);
+
+ if (t1 == t2)
+ return true;
+
+ if (TREE_CODE (t1) == INTEGER_CST && TREE_CODE (t2) == INTEGER_CST)
+ return true;
+ if (TREE_CODE (t1) == INTEGER_CST || TREE_CODE (t2) == INTEGER_CST)
+ return false;
+
+ /* Check to see if t1 is expressed/defined with t2. */
+ stmt = SSA_NAME_DEF_STMT (t1);
+ gcc_assert (stmt != NULL);
+ if (is_gimple_assign (stmt))
+ {
+ ssa_name_1 = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
+ if (ssa_name_1 && ssa_name_1 == t2)
+ return true;
+ }
+
+ /* Check to see if t2 is expressed/defined with t1. */
+ stmt = SSA_NAME_DEF_STMT (t2);
+ gcc_assert (stmt != NULL);
+ if (is_gimple_assign (stmt))
+ {
+ ssa_name_2 = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
+ if (ssa_name_2 && ssa_name_2 == t1)
+ return true;
+ }
+
+ /* Compare if t1 and t2's def_stmts are identical. */
+ if (ssa_name_2 != NULL && ssa_name_1 == ssa_name_2)
+ return true;
+ else
+ return false;
+}
+
+/* Predict branch probability of BB when BB contains a branch that compares
+ an induction variable in LOOP with LOOP_IV_BASE_VAR to LOOP_BOUND_VAR. The
+ loop exit is compared using LOOP_BOUND_CODE, with step of LOOP_BOUND_STEP.
+
+ E.g.
+ for (int i = 0; i < bound; i++) {
+ if (i < bound - 2)
+ computation_1();
+ else
+ computation_2();
+ }
+
+ In this loop, we will predict the branch inside the loop to be taken. */
+
+static void
+predict_iv_comparison (struct loop *loop, basic_block bb,
+ tree loop_bound_var,
+ tree loop_iv_base_var,
+ enum tree_code loop_bound_code,
+ int loop_bound_step)
+{
+ gimple stmt;
+ tree compare_var, compare_base;
+ enum tree_code compare_code;
+ int compare_step;
+ edge then_edge;
+ edge_iterator ei;
+
+ if (predicted_by_p (bb, PRED_LOOP_ITERATIONS_GUESSED)
+ || predicted_by_p (bb, PRED_LOOP_ITERATIONS)
+ || predicted_by_p (bb, PRED_LOOP_EXIT))
+ return;
+
+ stmt = last_stmt (bb);
+ if (!stmt || gimple_code (stmt) != GIMPLE_COND)
+ return;
+ if (!is_comparison_with_loop_invariant_p (stmt, loop, &compare_var,
+ &compare_code,
+ &compare_step,
+ &compare_base))
+ return;
+
+ /* Find the taken edge. */
+ FOR_EACH_EDGE (then_edge, ei, bb->succs)
+ if (then_edge->flags & EDGE_TRUE_VALUE)
+ break;
+
+ /* When comparing an IV to a loop invariant, NE is more likely to be
+ taken while EQ is more likely to be not-taken. */
+ if (compare_code == NE_EXPR)
+ {
+ predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
+ return;
+ }
+ else if (compare_code == EQ_EXPR)
+ {
+ predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, NOT_TAKEN);
+ return;
+ }
+
+ if (!expr_coherent_p (loop_iv_base_var, compare_base))
+ return;
+
+ /* If loop bound, base and compare bound are all constants, we can
+ calculate the probability directly. */
+ if (host_integerp (loop_bound_var, 0)
+ && host_integerp (compare_var, 0)
+ && host_integerp (compare_base, 0))
+ {
+ int probability;
+ HOST_WIDE_INT compare_count;
+ HOST_WIDE_INT loop_bound = tree_low_cst (loop_bound_var, 0);
+ HOST_WIDE_INT compare_bound = tree_low_cst (compare_var, 0);
+ HOST_WIDE_INT base = tree_low_cst (compare_base, 0);
+ HOST_WIDE_INT loop_count = (loop_bound - base) / compare_step;
+
+ if ((compare_step > 0)
+ ^ (compare_code == LT_EXPR || compare_code == LE_EXPR))
+ compare_count = (loop_bound - compare_bound) / compare_step;
+ else
+ compare_count = (compare_bound - base) / compare_step;
+
+ if (compare_code == LE_EXPR || compare_code == GE_EXPR)
+ compare_count ++;
+ if (loop_bound_code == LE_EXPR || loop_bound_code == GE_EXPR)
+ loop_count ++;
+ if (compare_count < 0)
+ compare_count = 0;
+ if (loop_count < 0)
+ loop_count = 0;
+
+ if (loop_count == 0)
+ probability = 0;
+ else if (compare_count > loop_count)
+ probability = REG_BR_PROB_BASE;
+ else
+ probability = (double) REG_BR_PROB_BASE * compare_count / loop_count;
+ predict_edge (then_edge, PRED_LOOP_IV_COMPARE, probability);
+ return;
+ }
+
+ if (expr_coherent_p (loop_bound_var, compare_var))
+ {
+ if ((loop_bound_code == LT_EXPR || loop_bound_code == LE_EXPR)
+ && (compare_code == LT_EXPR || compare_code == LE_EXPR))
+ predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
+ else if ((loop_bound_code == GT_EXPR || loop_bound_code == GE_EXPR)
+ && (compare_code == GT_EXPR || compare_code == GE_EXPR))
+ predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
+ else if (loop_bound_code == NE_EXPR)
+ {
+ /* If the loop backedge condition is "(i != bound)", we do
+ the comparison based on the step of IV:
+ * step < 0 : backedge condition is like (i > bound)
+ * step > 0 : backedge condition is like (i < bound) */
+ gcc_assert (loop_bound_step != 0);
+ if (loop_bound_step > 0
+ && (compare_code == LT_EXPR
+ || compare_code == LE_EXPR))
+ predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
+ else if (loop_bound_step < 0
+ && (compare_code == GT_EXPR
+ || compare_code == GE_EXPR))
+ predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
+ else
+ predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, NOT_TAKEN);
+ }
+ else
+ /* The branch is predicted not-taken if loop_bound_code is
+ opposite with compare_code. */
+ predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, NOT_TAKEN);
+ }
+ else if (expr_coherent_p (loop_iv_base_var, compare_var))
+ {
+ /* For cases like:
+ for (i = s; i < h; i++)
+ if (i > s + 2) ....
+ The branch should be predicted taken. */
+ if (loop_bound_step > 0
+ && (compare_code == GT_EXPR || compare_code == GE_EXPR))
+ predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
+ else if (loop_bound_step < 0
+ && (compare_code == LT_EXPR || compare_code == LE_EXPR))
+ predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
+ else
+ predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, NOT_TAKEN);
+ }
+}
+
/* Predict edge probabilities by exploiting loop structure. */
static void
@@ -963,6 +1312,12 @@ predict_loops (void)
VEC (edge, heap) *exits;
struct tree_niter_desc niter_desc;
edge ex;
+ struct nb_iter_bound *nb_iter;
+ enum tree_code loop_bound_code = ERROR_MARK;
+ int loop_bound_step = 0;
+ tree loop_bound_var = NULL;
+ tree loop_iv_base = NULL;
+ gimple stmt = NULL;
exits = get_loop_exit_edges (loop);
n_exits = VEC_length (edge, exits);
@@ -1010,6 +1365,25 @@ predict_loops (void)
}
VEC_free (edge, heap, exits);
+ /* Find information about loop bound variables. */
+ for (nb_iter = loop->bounds; nb_iter;
+ nb_iter = nb_iter->next)
+ if (nb_iter->stmt
+ && gimple_code (nb_iter->stmt) == GIMPLE_COND)
+ {
+ stmt = nb_iter->stmt;
+ break;
+ }
+ if (!stmt && last_stmt (loop->header)
+ && gimple_code (last_stmt (loop->header)) == GIMPLE_COND)
+ stmt = last_stmt (loop->header);
+ if (stmt)
+ is_comparison_with_loop_invariant_p (stmt, loop,
+ &loop_bound_var,
+ &loop_bound_code,
+ &loop_bound_step,
+ &loop_iv_base);
+
bbs = get_loop_body (loop);
for (j = 0; j < loop->num_nodes; j++)
@@ -1071,6 +1445,10 @@ predict_loops (void)
|| !flow_bb_inside_loop_p (loop, e->dest))
predict_edge (e, PRED_LOOP_EXIT, probability);
}
+ if (loop_bound_var)
+ predict_iv_comparison (loop, bb, loop_bound_var, loop_iv_base,
+ loop_bound_code,
+ loop_bound_step);
}
/* Free basic blocks from get_loop_body. */
diff --git a/gcc/predict.def b/gcc/predict.def
index 4b3e87aa568..591bb1caf58 100644
--- a/gcc/predict.def
+++ b/gcc/predict.def
@@ -116,3 +116,13 @@ DEF_PREDICTOR (PRED_NULL_RETURN, "null return", HITRATE (90), 0)
/* Branches to a mudflap bounds check are extremely unlikely. */
DEF_PREDICTOR (PRED_MUDFLAP, "mudflap check", PROB_VERY_LIKELY, 0)
+
+/* Branches that compare an induction variable to a loop bound are
+ extremely likely to be taken. */
+DEF_PREDICTOR (PRED_LOOP_IV_COMPARE_GUESS, "guess loop iv compare",
+ PROB_VERY_LIKELY, 0)
+
+/* Use the number of loop iterations determined by the # of iterations analysis
+ to set the probability of branches that compare the IV to the loop bound variable. */
+DEF_PREDICTOR (PRED_LOOP_IV_COMPARE, "loop iv compare", PROB_VERY_LIKELY,
+ PRED_FLAG_FIRST_MATCH)
diff --git a/gcc/print-tree.c b/gcc/print-tree.c
index 466b7db7469..28f51b06352 100644
--- a/gcc/print-tree.c
+++ b/gcc/print-tree.c
@@ -611,9 +611,6 @@ print_node (FILE *file, const char *prefix, tree node, int indent)
|| code == QUAL_UNION_TYPE)
&& TYPE_NO_FORCE_BLK (node))
fputs (" no-force-blk", file);
- else if (code == INTEGER_TYPE
- && TYPE_IS_SIZETYPE (node))
- fputs (" sizetype", file);
if (TYPE_STRING_FLAG (node))
fputs (" string-flag", file);
diff --git a/gcc/read-md.c b/gcc/read-md.c
index 4f1933f447a..e5534d75d48 100644
--- a/gcc/read-md.c
+++ b/gcc/read-md.c
@@ -167,14 +167,21 @@ copy_md_ptr_loc (const void *new_ptr, const void *old_ptr)
}
/* If PTR is associated with a known file position, print a #line
- directive for it. */
+ directive for it to OUTF. */
void
-print_md_ptr_loc (const void *ptr)
+fprint_md_ptr_loc (FILE *outf, const void *ptr)
{
const struct ptr_loc *loc = get_md_ptr_loc (ptr);
if (loc != 0)
- printf ("#line %d \"%s\"\n", loc->lineno, loc->filename);
+ fprintf (outf, "#line %d \"%s\"\n", loc->lineno, loc->filename);
+}
+
+/* Special fprint_md_ptr_loc for writing to STDOUT. */
+void
+print_md_ptr_loc (const void *ptr)
+{
+ fprint_md_ptr_loc (stdout, ptr);
}
/* Return a condition that satisfies both COND1 and COND2. Either string
@@ -204,31 +211,39 @@ join_c_conditions (const char *cond1, const char *cond2)
return result;
}
-/* Print condition COND, wrapped in brackets. If COND was created by
- join_c_conditions, recursively invoke this function for the original
+/* Print condition COND to OUTF, wrapped in brackets. If COND was created
+ by join_c_conditions, recursively invoke this function for the original
conditions and join the result with "&&". Otherwise print a #line
directive for COND if its original file position is known. */
void
-print_c_condition (const char *cond)
+fprint_c_condition (FILE *outf, const char *cond)
{
const char **halves = (const char **) htab_find (joined_conditions, &cond);
if (halves != 0)
{
- printf ("(");
- print_c_condition (halves[1]);
- printf (" && ");
- print_c_condition (halves[2]);
- printf (")");
+ fprintf (outf, "(");
+ fprint_c_condition (outf, halves[1]);
+ fprintf (outf, " && ");
+ fprint_c_condition (outf, halves[2]);
+ fprintf (outf, ")");
}
else
{
- putc ('\n', stdout);
- print_md_ptr_loc (cond);
- printf ("(%s)", cond);
+ fputc ('\n', outf);
+ fprint_md_ptr_loc (outf, cond);
+ fprintf (outf, "(%s)", cond);
}
}
+/* Special fprint_c_condition for writing to STDOUT. */
+
+void
+print_c_condition (const char *cond)
+{
+ fprint_c_condition (stdout, cond);
+}
+
/* A vfprintf-like function for reporting an error against line LINENO
of the current MD file. */
diff --git a/gcc/read-md.h b/gcc/read-md.h
index abcca51ac72..43dfc44736d 100644
--- a/gcc/read-md.h
+++ b/gcc/read-md.h
@@ -118,8 +118,10 @@ extern hashval_t leading_string_hash (const void *);
extern int leading_string_eq_p (const void *, const void *);
extern void copy_md_ptr_loc (const void *, const void *);
extern void print_md_ptr_loc (const void *);
+extern void fprint_md_ptr_loc (FILE *, const void *);
extern const char *join_c_conditions (const char *, const char *);
extern void print_c_condition (const char *);
+extern void fprint_c_condition (FILE *, const char *);
extern void message_with_line (int, const char *, ...) ATTRIBUTE_PRINTF_2;
extern void error_with_line (int, const char *, ...) ATTRIBUTE_PRINTF_2;
extern void fatal_with_file_and_line (const char *, ...)
diff --git a/gcc/recog.c b/gcc/recog.c
index cb2bfd31701..c5725d2abdf 100644
--- a/gcc/recog.c
+++ b/gcc/recog.c
@@ -1969,7 +1969,7 @@ offsettable_address_addr_space_p (int strictp, enum machine_mode mode, rtx y,
int good;
y1 = *y2;
- *y2 = plus_constant (*y2, mode_sz - 1);
+ *y2 = plus_constant (GET_MODE (y), *y2, mode_sz - 1);
/* Use QImode because an odd displacement may be automatically invalid
for any wider mode. But it should be valid for a single byte. */
good = (*addressp) (QImode, y, as);
@@ -1991,9 +1991,10 @@ offsettable_address_addr_space_p (int strictp, enum machine_mode mode, rtx y,
&& mode != BLKmode
&& mode_sz <= GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT)
z = gen_rtx_LO_SUM (GET_MODE (y), XEXP (y, 0),
- plus_constant (XEXP (y, 1), mode_sz - 1));
+ plus_constant (GET_MODE (y), XEXP (y, 1),
+ mode_sz - 1));
else
- z = plus_constant (y, mode_sz - 1);
+ z = plus_constant (GET_MODE (y), y, mode_sz - 1);
/* Use QImode because an odd displacement may be automatically invalid
for any wider mode. But it should be valid for a single byte. */
@@ -2680,6 +2681,16 @@ constrain_operands (int strict)
/* Every address operand can be reloaded to fit. */
&& strict < 0)
win = 1;
+ /* Cater to architectures like IA-64 that define extra memory
+ constraints without using define_memory_constraint. */
+ else if (reload_in_progress
+ && REG_P (op)
+ && REGNO (op) >= FIRST_PSEUDO_REGISTER
+ && reg_renumber[REGNO (op)] < 0
+ && reg_equiv_mem (REGNO (op)) != 0
+ && EXTRA_CONSTRAINT_STR
+ (reg_equiv_mem (REGNO (op)), c, p))
+ win = 1;
#endif
break;
}
diff --git a/gcc/reload.c b/gcc/reload.c
index 9eddc4d414a..ed14e63fe71 100644
--- a/gcc/reload.c
+++ b/gcc/reload.c
@@ -3993,10 +3993,8 @@ find_reloads (rtx insn, int replace, int ind_levels, int live_known,
as would have been done by find_reloads_address. */
addr_space_t as = MEM_ADDR_SPACE (recog_data.operand[i]);
enum machine_mode address_mode;
- address_mode = GET_MODE (XEXP (recog_data.operand[i], 0));
- if (address_mode == VOIDmode)
- address_mode = targetm.addr_space.address_mode (as);
+ address_mode = get_address_mode (recog_data.operand[i]);
operand_reloadnum[i]
= push_reload (XEXP (recog_data.operand[i], 0), NULL_RTX,
&XEXP (recog_data.operand[i], 0), (rtx*) 0,
@@ -5196,7 +5194,8 @@ find_reloads_address (enum machine_mode mode, rtx *memrefloc, rtx ad,
rtx offset_reg;
enum reg_class cls;
- offset_reg = plus_constant (operand, INTVAL (XEXP (ad, 1)));
+ offset_reg = plus_constant (GET_MODE (ad), operand,
+ INTVAL (XEXP (ad, 1)));
/* Form the adjusted address. */
if (GET_CODE (XEXP (ad, 0)) == PLUS)
@@ -5363,9 +5362,9 @@ form_sum (enum machine_mode mode, rtx x, rtx y)
gcc_assert (GET_MODE (y) == mode || GET_MODE (y) == VOIDmode);
if (CONST_INT_P (x))
- return plus_constant (y, INTVAL (x));
+ return plus_constant (mode, y, INTVAL (x));
else if (CONST_INT_P (y))
- return plus_constant (x, INTVAL (y));
+ return plus_constant (mode, x, INTVAL (y));
else if (CONSTANT_P (x))
tem = x, x = y, y = tem;
@@ -6161,7 +6160,8 @@ find_reloads_subreg_address (rtx x, int force_replace, int opnum,
else
offset = SUBREG_BYTE (x);
- XEXP (tem, 0) = plus_constant (XEXP (tem, 0), offset);
+ XEXP (tem, 0) = plus_constant (GET_MODE (XEXP (tem, 0)),
+ XEXP (tem, 0), offset);
PUT_MODE (tem, GET_MODE (x));
if (MEM_OFFSET_KNOWN_P (tem))
set_mem_offset (tem, MEM_OFFSET (tem) + offset);
diff --git a/gcc/reload.h b/gcc/reload.h
index ad0cfcd82d8..f747099abed 100644
--- a/gcc/reload.h
+++ b/gcc/reload.h
@@ -411,9 +411,6 @@ extern int push_reload (rtx, rtx, rtx *, rtx *, enum reg_class,
enum machine_mode, enum machine_mode,
int, int, int, enum reload_type);
-/* Functions in postreload.c: */
-extern void reload_cse_regs (rtx);
-
/* Functions in reload1.c: */
/* Initialize the reload pass once per compilation. */
diff --git a/gcc/reload1.c b/gcc/reload1.c
index 71cea8171d4..c887614ce5b 100644
--- a/gcc/reload1.c
+++ b/gcc/reload1.c
@@ -462,7 +462,7 @@ init_reload (void)
gen_rtx_REG (Pmode, i));
/* This way, we make sure that reg+reg is an offsettable address. */
- tem = plus_constant (tem, 4);
+ tem = plus_constant (Pmode, tem, 4);
if (memory_address_p (QImode, tem))
{
@@ -2590,7 +2590,7 @@ eliminate_regs_1 (rtx x, enum machine_mode mem_mode, rtx insn,
for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS];
ep++)
if (ep->from_rtx == x && ep->can_eliminate)
- return plus_constant (ep->to_rtx, ep->previous_offset);
+ return plus_constant (Pmode, ep->to_rtx, ep->previous_offset);
}
else if (reg_renumber && reg_renumber[regno] < 0
@@ -2646,7 +2646,7 @@ eliminate_regs_1 (rtx x, enum machine_mode mem_mode, rtx insn,
return ep->to_rtx;
else
return gen_rtx_PLUS (Pmode, ep->to_rtx,
- plus_constant (XEXP (x, 1),
+ plus_constant (Pmode, XEXP (x, 1),
ep->previous_offset));
}
@@ -2723,7 +2723,8 @@ eliminate_regs_1 (rtx x, enum machine_mode mem_mode, rtx insn,
ep->ref_outside_mem = 1;
return
- plus_constant (gen_rtx_MULT (Pmode, ep->to_rtx, XEXP (x, 1)),
+ plus_constant (Pmode,
+ gen_rtx_MULT (Pmode, ep->to_rtx, XEXP (x, 1)),
ep->previous_offset * INTVAL (XEXP (x, 1)));
}
@@ -3297,8 +3298,8 @@ eliminate_regs_in_insn (rtx insn, int replace)
if (base == ep->to_rtx)
{
- rtx src
- = plus_constant (ep->to_rtx, offset - ep->offset);
+ rtx src = plus_constant (Pmode, ep->to_rtx,
+ offset - ep->offset);
new_body = old_body;
if (! replace)
@@ -3412,7 +3413,8 @@ eliminate_regs_in_insn (rtx insn, int replace)
had a PLUS before. */
if (offset == 0 || plus_src)
{
- rtx new_src = plus_constant (to_rtx, offset);
+ rtx new_src = plus_constant (GET_MODE (to_rtx),
+ to_rtx, offset);
new_body = old_body;
if (! replace)
@@ -5429,6 +5431,13 @@ reload_reg_reaches_end_p (unsigned int regno, int reloadnum)
if (TEST_HARD_REG_BIT (reload_reg_used_in_input[i], regno))
return 0;
+ /* Reload register of reload with type RELOAD_FOR_INPADDR_ADDRESS
+ could be killed if the register is also used by reload with type
+ RELOAD_FOR_INPUT_ADDRESS, so check it. */
+ if (type == RELOAD_FOR_INPADDR_ADDRESS
+ && TEST_HARD_REG_BIT (reload_reg_used_in_input_addr[opnum], regno))
+ return 0;
+
for (i = opnum + 1; i < reload_n_operands; i++)
if (TEST_HARD_REG_BIT (reload_reg_used_in_input_addr[i], regno)
|| TEST_HARD_REG_BIT (reload_reg_used_in_inpaddr_addr[i], regno))
@@ -5503,6 +5512,13 @@ reload_reg_reaches_end_p (unsigned int regno, int reloadnum)
|| TEST_HARD_REG_BIT (reload_reg_used_in_outaddr_addr[i], regno))
return 0;
+ /* Reload register of reload with type RELOAD_FOR_OUTADDR_ADDRESS
+ could be killed if the register is also used by reload with type
+ RELOAD_FOR_OUTPUT_ADDRESS, so check it. */
+ if (type == RELOAD_FOR_OUTADDR_ADDRESS
+ && TEST_HARD_REG_BIT (reload_reg_used_in_outaddr_addr[opnum], regno))
+ return 0;
+
return 1;
default:
diff --git a/gcc/reorg.c b/gcc/reorg.c
index dfc9747c5f7..e99fe02bd05 100644
--- a/gcc/reorg.c
+++ b/gcc/reorg.c
@@ -903,38 +903,6 @@ get_jump_flags (rtx insn, rtx label)
else
flags = 0;
- /* If insn is a conditional branch call mostly_true_jump to get
- determine the branch prediction.
-
- Non conditional branches are predicted as very likely taken. */
- if (JUMP_P (insn)
- && (condjump_p (insn) || condjump_in_parallel_p (insn)))
- {
- int prediction;
-
- prediction = mostly_true_jump (insn, get_branch_condition (insn, label));
- switch (prediction)
- {
- case 2:
- flags |= (ATTR_FLAG_very_likely | ATTR_FLAG_likely);
- break;
- case 1:
- flags |= ATTR_FLAG_likely;
- break;
- case 0:
- flags |= ATTR_FLAG_unlikely;
- break;
- case -1:
- flags |= (ATTR_FLAG_very_unlikely | ATTR_FLAG_unlikely);
- break;
-
- default:
- gcc_unreachable ();
- }
- }
- else
- flags |= (ATTR_FLAG_very_likely | ATTR_FLAG_likely);
-
return flags;
}
diff --git a/gcc/rtl.def b/gcc/rtl.def
index 955e8e4709b..0695d640fce 100644
--- a/gcc/rtl.def
+++ b/gcc/rtl.def
@@ -1268,9 +1268,7 @@ DEF_RTL_EXPR(EQ_ATTR_ALT, "eq_attr_alt", "ii", RTX_EXTRA)
true for the insn being scheduled in reorg.
genattr.c defines the following flags which can be tested by
- (attr_flag "foo") expressions in eligible_for_delay.
-
- forward, backward, very_likely, likely, very_unlikely, and unlikely. */
+ (attr_flag "foo") expressions in eligible_for_delay: forward, backward. */
DEF_RTL_EXPR (ATTR_FLAG, "attr_flag", "s", RTX_EXTRA)
diff --git a/gcc/rtl.h b/gcc/rtl.h
index 86c56acc197..6f7aabc32c2 100644
--- a/gcc/rtl.h
+++ b/gcc/rtl.h
@@ -1643,8 +1643,7 @@ extern int ceil_log2 (unsigned HOST_WIDE_INT);
/* In explow.c */
extern HOST_WIDE_INT trunc_int_for_mode (HOST_WIDE_INT, enum machine_mode);
-extern rtx plus_constant (rtx, HOST_WIDE_INT);
-extern rtx plus_constant_mode (enum machine_mode, rtx, HOST_WIDE_INT);
+extern rtx plus_constant (enum machine_mode, rtx, HOST_WIDE_INT);
/* In rtl.c */
extern rtx rtx_alloc_stat (RTX_CODE MEM_STAT_DECL);
@@ -1900,6 +1899,7 @@ typedef struct replace_label_data
bool update_label_nuses;
} replace_label_data;
+extern enum machine_mode get_address_mode (rtx mem);
extern int rtx_addr_can_trap_p (const_rtx);
extern bool nonzero_address_p (const_rtx);
extern int rtx_unstable_p (const_rtx);
diff --git a/gcc/rtlanal.c b/gcc/rtlanal.c
index 78365bdb9e1..beed221ef63 100644
--- a/gcc/rtlanal.c
+++ b/gcc/rtlanal.c
@@ -5279,3 +5279,17 @@ low_bitmask_len (enum machine_mode mode, unsigned HOST_WIDE_INT m)
return exact_log2 (m + 1);
}
+
+/* Return the mode of MEM's address. */
+
+enum machine_mode
+get_address_mode (rtx mem)
+{
+ enum machine_mode mode;
+
+ gcc_assert (MEM_P (mem));
+ mode = GET_MODE (XEXP (mem, 0));
+ if (mode != VOIDmode)
+ return mode;
+ return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
+}
diff --git a/gcc/sched-deps.c b/gcc/sched-deps.c
index 4a0212112f2..be45c6afa16 100644
--- a/gcc/sched-deps.c
+++ b/gcc/sched-deps.c
@@ -2445,8 +2445,7 @@ sched_analyze_1 (struct deps_desc *deps, rtx x, rtx insn)
if (sched_deps_info->use_cselib)
{
- enum machine_mode address_mode
- = targetm.addr_space.address_mode (MEM_ADDR_SPACE (dest));
+ enum machine_mode address_mode = get_address_mode (dest);
t = shallow_copy_rtx (dest);
cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
@@ -2607,8 +2606,7 @@ sched_analyze_2 (struct deps_desc *deps, rtx x, rtx insn)
if (sched_deps_info->use_cselib)
{
- enum machine_mode address_mode
- = targetm.addr_space.address_mode (MEM_ADDR_SPACE (t));
+ enum machine_mode address_mode = get_address_mode (t);
t = shallow_copy_rtx (t);
cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
diff --git a/gcc/sel-sched-dump.c b/gcc/sel-sched-dump.c
index 27b06ad1695..f0a6b075e08 100644
--- a/gcc/sel-sched-dump.c
+++ b/gcc/sel-sched-dump.c
@@ -957,7 +957,7 @@ debug_mem_addr_value (rtx x)
enum machine_mode address_mode;
gcc_assert (MEM_P (x));
- address_mode = targetm.addr_space.address_mode (MEM_ADDR_SPACE (x));
+ address_mode = get_address_mode (x);
t = shallow_copy_rtx (x);
if (cselib_lookup (XEXP (t, 0), address_mode, 0, GET_MODE (t)))
diff --git a/gcc/simplify-rtx.c b/gcc/simplify-rtx.c
index 3357ceb1024..6b0d56ed3ea 100644
--- a/gcc/simplify-rtx.c
+++ b/gcc/simplify-rtx.c
@@ -613,7 +613,7 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
/* Similarly, (not (neg X)) is (plus X -1). */
if (GET_CODE (op) == NEG)
- return plus_constant (XEXP (op, 0), -1);
+ return plus_constant (mode, XEXP (op, 0), -1);
/* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
if (GET_CODE (op) == XOR
@@ -713,7 +713,7 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
/* Similarly, (neg (not X)) is (plus X 1). */
if (GET_CODE (op) == NOT)
- return plus_constant (XEXP (op, 0), 1);
+ return plus_constant (mode, XEXP (op, 0), 1);
/* (neg (minus X Y)) can become (minus Y X). This transformation
isn't safe for modes with signed zeros, since if X and Y are
@@ -782,7 +782,7 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
if (GET_CODE (op) == XOR
&& XEXP (op, 1) == const1_rtx
&& nonzero_bits (XEXP (op, 0), mode) == 1)
- return plus_constant (XEXP (op, 0), -1);
+ return plus_constant (mode, XEXP (op, 0), -1);
/* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
/* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
@@ -1954,12 +1954,12 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
|| GET_CODE (op0) == SYMBOL_REF
|| GET_CODE (op0) == LABEL_REF)
&& CONST_INT_P (op1))
- return plus_constant (op0, INTVAL (op1));
+ return plus_constant (mode, op0, INTVAL (op1));
else if ((GET_CODE (op1) == CONST
|| GET_CODE (op1) == SYMBOL_REF
|| GET_CODE (op1) == LABEL_REF)
&& CONST_INT_P (op0))
- return plus_constant (op1, INTVAL (op0));
+ return plus_constant (mode, op1, INTVAL (op0));
/* See if this is something like X * C - X or vice versa or
if the multiplication is written as a shift. If so, we can
@@ -2557,7 +2557,8 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
if (mask >> count == INTVAL (trueop1)
&& (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
return simplify_gen_binary (ASHIFTRT, mode,
- plus_constant (XEXP (op0, 0), mask),
+ plus_constant (mode, XEXP (op0, 0),
+ mask),
XEXP (op0, 1));
}
@@ -4118,7 +4119,8 @@ simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
rtx value = ops[n_ops - 1].op;
if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
value = neg_const_int (mode, value);
- ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
+ ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
+ INTVAL (value));
n_ops--;
}
diff --git a/gcc/stmt.c b/gcc/stmt.c
index 8f7b1506eef..dd34890d462 100644
--- a/gcc/stmt.c
+++ b/gcc/stmt.c
@@ -2198,9 +2198,6 @@ expand_case (gimple stmt)
/* RANGE may be signed, and really large ranges will show up
as negative numbers. */
|| compare_tree_int (range, 0) < 0
-#ifndef ASM_OUTPUT_ADDR_DIFF_ELT
- || flag_pic
-#endif
|| !flag_jump_tables
|| TREE_CONSTANT (index_expr)
/* If neither casesi or tablejump is available, we can
diff --git a/gcc/stor-layout.c b/gcc/stor-layout.c
index e72e7f39091..cb47a52b715 100644
--- a/gcc/stor-layout.c
+++ b/gcc/stor-layout.c
@@ -785,8 +785,8 @@ start_record_layout (tree t)
return rli;
}
-/* These four routines perform computations that convert between
- the offset/bitpos forms and byte and bit offsets. */
+/* Return the combined bit position for the byte offset OFFSET and the
+ bit position BITPOS. */
tree
bit_from_pos (tree offset, tree bitpos)
@@ -797,25 +797,52 @@ bit_from_pos (tree offset, tree bitpos)
bitsize_unit_node));
}
+/* Return the combined truncated byte position for the byte offset OFFSET and
+ the bit position BITPOS.
+
+ These functions operate on byte and bit positions as present in FIELD_DECLs
+ and assume that these expressions result in no (intermediate) overflow.
+ This assumption is necessary to fold the expressions as much as possible,
+ so as to avoid creating artificially variable-sized types in languages
+ supporting variable-sized types like Ada. */
+
tree
byte_from_pos (tree offset, tree bitpos)
{
- return size_binop (PLUS_EXPR, offset,
- fold_convert (sizetype,
- size_binop (TRUNC_DIV_EXPR, bitpos,
- bitsize_unit_node)));
+ tree bytepos;
+ if (TREE_CODE (bitpos) == MULT_EXPR
+ && tree_int_cst_equal (TREE_OPERAND (bitpos, 1), bitsize_unit_node))
+ bytepos = TREE_OPERAND (bitpos, 0);
+ else
+ bytepos = size_binop (TRUNC_DIV_EXPR, bitpos, bitsize_unit_node);
+ return size_binop (PLUS_EXPR, offset, fold_convert (sizetype, bytepos));
}
+/* Split the bit position POS into a byte offset *POFFSET and a bit
+ position *PBITPOS with the byte offset aligned to OFF_ALIGN bits. */
+
void
pos_from_bit (tree *poffset, tree *pbitpos, unsigned int off_align,
tree pos)
{
- *poffset = size_binop (MULT_EXPR,
- fold_convert (sizetype,
- size_binop (FLOOR_DIV_EXPR, pos,
- bitsize_int (off_align))),
- size_int (off_align / BITS_PER_UNIT));
- *pbitpos = size_binop (FLOOR_MOD_EXPR, pos, bitsize_int (off_align));
+ tree toff_align = bitsize_int (off_align);
+ if (TREE_CODE (pos) == MULT_EXPR
+ && tree_int_cst_equal (TREE_OPERAND (pos, 1), toff_align))
+ {
+ *poffset = size_binop (MULT_EXPR,
+ fold_convert (sizetype, TREE_OPERAND (pos, 0)),
+ size_int (off_align / BITS_PER_UNIT));
+ *pbitpos = bitsize_zero_node;
+ }
+ else
+ {
+ *poffset = size_binop (MULT_EXPR,
+ fold_convert (sizetype,
+ size_binop (FLOOR_DIV_EXPR, pos,
+ toff_align)),
+ size_int (off_align / BITS_PER_UNIT));
+ *pbitpos = size_binop (FLOOR_MOD_EXPR, pos, toff_align);
+ }
}
/* Given a pointer to bit and byte offsets and an offset alignment,
@@ -828,17 +855,10 @@ normalize_offset (tree *poffset, tree *pbitpos, unsigned int off_align)
downwards. */
if (compare_tree_int (*pbitpos, off_align) >= 0)
{
- tree extra_aligns = size_binop (FLOOR_DIV_EXPR, *pbitpos,
- bitsize_int (off_align));
-
- *poffset
- = size_binop (PLUS_EXPR, *poffset,
- size_binop (MULT_EXPR,
- fold_convert (sizetype, extra_aligns),
- size_int (off_align / BITS_PER_UNIT)));
-
- *pbitpos
- = size_binop (FLOOR_MOD_EXPR, *pbitpos, bitsize_int (off_align));
+ tree offset, bitpos;
+ pos_from_bit (&offset, &bitpos, off_align, *pbitpos);
+ *poffset = size_binop (PLUS_EXPR, *poffset, offset);
+ *pbitpos = bitpos;
}
}
@@ -2182,11 +2202,37 @@ layout_type (tree type)
that (possible) negative values are handled appropriately
when determining overflow. */
else
- length
- = fold_convert (sizetype,
- size_binop (PLUS_EXPR,
- build_int_cst (TREE_TYPE (lb), 1),
- size_binop (MINUS_EXPR, ub, lb)));
+ {
+ /* ??? When it is obvious that the range is signed
+ represent it using ssizetype. */
+ if (TREE_CODE (lb) == INTEGER_CST
+ && TREE_CODE (ub) == INTEGER_CST
+ && TYPE_UNSIGNED (TREE_TYPE (lb))
+ && tree_int_cst_lt (ub, lb))
+ {
+ lb = double_int_to_tree
+ (ssizetype,
+ double_int_sext (tree_to_double_int (lb),
+ TYPE_PRECISION (TREE_TYPE (lb))));
+ ub = double_int_to_tree
+ (ssizetype,
+ double_int_sext (tree_to_double_int (ub),
+ TYPE_PRECISION (TREE_TYPE (ub))));
+ }
+ length
+ = fold_convert (sizetype,
+ size_binop (PLUS_EXPR,
+ build_int_cst (TREE_TYPE (lb), 1),
+ size_binop (MINUS_EXPR, ub, lb)));
+ }
+
+ /* If we arrived at a length of zero ignore any overflow
+ that occured as part of the calculation. There exists
+ an association of the plus one where that overflow would
+ not happen. */
+ if (integer_zerop (length)
+ && TREE_OVERFLOW (length))
+ length = size_zero_node;
TYPE_SIZE (type) = size_binop (MULT_EXPR, element_size,
fold_convert (bitsizetype,
@@ -2439,12 +2485,10 @@ initialize_sizetypes (void)
TYPE_NAME (sizetype) = get_identifier ("sizetype");
TYPE_PRECISION (sizetype) = precision;
TYPE_UNSIGNED (sizetype) = 1;
- TYPE_IS_SIZETYPE (sizetype) = 1;
bitsizetype = make_node (INTEGER_TYPE);
TYPE_NAME (bitsizetype) = get_identifier ("bitsizetype");
TYPE_PRECISION (bitsizetype) = bprecision;
TYPE_UNSIGNED (bitsizetype) = 1;
- TYPE_IS_SIZETYPE (bitsizetype) = 1;
/* Now layout both types manually. */
SET_TYPE_MODE (sizetype, smallest_mode_for_size (precision, MODE_INT));
@@ -2453,11 +2497,6 @@ initialize_sizetypes (void)
TYPE_SIZE_UNIT (sizetype) = size_int (GET_MODE_SIZE (TYPE_MODE (sizetype)));
set_min_and_max_values_for_integral_type (sizetype, precision,
/*is_unsigned=*/true);
- /* sizetype is unsigned but we need to fix TYPE_MAX_VALUE so that it is
- sign-extended in a way consistent with force_fit_type. */
- TYPE_MAX_VALUE (sizetype)
- = double_int_to_tree (sizetype,
- tree_to_double_int (TYPE_MAX_VALUE (sizetype)));
SET_TYPE_MODE (bitsizetype, smallest_mode_for_size (bprecision, MODE_INT));
TYPE_ALIGN (bitsizetype) = GET_MODE_ALIGNMENT (TYPE_MODE (bitsizetype));
@@ -2466,19 +2505,12 @@ initialize_sizetypes (void)
= size_int (GET_MODE_SIZE (TYPE_MODE (bitsizetype)));
set_min_and_max_values_for_integral_type (bitsizetype, bprecision,
/*is_unsigned=*/true);
- /* bitsizetype is unsigned but we need to fix TYPE_MAX_VALUE so that it is
- sign-extended in a way consistent with force_fit_type. */
- TYPE_MAX_VALUE (bitsizetype)
- = double_int_to_tree (bitsizetype,
- tree_to_double_int (TYPE_MAX_VALUE (bitsizetype)));
/* Create the signed variants of *sizetype. */
ssizetype = make_signed_type (TYPE_PRECISION (sizetype));
TYPE_NAME (ssizetype) = get_identifier ("ssizetype");
- TYPE_IS_SIZETYPE (ssizetype) = 1;
sbitsizetype = make_signed_type (TYPE_PRECISION (bitsizetype));
TYPE_NAME (sbitsizetype) = get_identifier ("sbitsizetype");
- TYPE_IS_SIZETYPE (sbitsizetype) = 1;
}
/* TYPE is an integral type, i.e., an INTEGRAL_TYPE, ENUMERAL_TYPE
diff --git a/gcc/target.def b/gcc/target.def
index d658b118c88..f5023542409 100644
--- a/gcc/target.def
+++ b/gcc/target.def
@@ -1940,6 +1940,12 @@ DEFHOOKPOD
"",
unsigned HOST_WIDE_INT, 0)
+/* Defines, which target-dependent bits (upper 16) are used by port */
+DEFHOOK
+(memmodel_check,
+ "",
+ unsigned HOST_WIDE_INT, (unsigned HOST_WIDE_INT val), NULL)
+
/* Functions relating to calls - argument passing, returns, etc. */
/* Members of struct call have no special macro prefix. */
HOOK_VECTOR (TARGET_CALLS, calls)
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index 72d9379d0d9..c0ed0e4090f 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,5 +1,331 @@
+2012-05-11 Paolo Carlini <paolo.carlini@oracle.com>
+
+ PR c++/53305
+ * g++.dg/cpp0x/variadic132.C: New.
+
+2012-05-10 Paolo Carlini <paolo.carlini@oracle.com>
+
+ PR c++/53158
+ * g++.dg/cpp0x/lambda/lambda-err2.C: New.
+ * g++.dg/parse/error26.C: Tweak dg-error column number.
+
+2012-05-10 Paolo Carlini <paolo.carlini@oracle.com>
+
+ PR c++/53301
+ * g++.dg/warn/Wzero-as-null-pointer-constant-6.C: New.
+
+2012-05-10 Eric Botcazou <ebotcazou@adacore.com>
+
+ * gnat.dg/lto11.ad[sb]: New test.
+
+2012-05-09 Uros Bizjak <ubizjak@gmail.com>
+
+ PR target/52908
+ * gcc.target/i386/xop-imul32widen-vector.c: Update scan-assembler
+ directive to Scan for vpmuldq, not vpmacsdql.
+
+2012-05-09 Michael Matz <matz@suse.de>
+
+ PR tree-optimization/53185
+ * gcc.dg/vect/pr53185.c: New test.
+
+2012-05-09 H.J. Lu <hongjiu.lu@intel.com>
+
+ PR middle-end/53249
+ * gcc.target/i386/pr53249.c: New.
+
+2012-05-09 Richard Guenther <rguenther@suse.de>
+
+ PR tree-optimization/18437
+ * gfortran.dg/vect/rnflow-trs2a2.f90: Move ...
+ * gfortran.dg/vect/fast-math-rnflow-trs2a2.f90: ... here.
+
+2012-05-09 Jakub Jelinek <jakub@redhat.com>
+
+ PR tree-optimization/53226
+ * gcc.c-torture/compile/pr53226.c: New test.
+
+2012-05-09 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
+
+ * gcc.target/i386/hle-add-acq-1.c: Allow for ; after lock.
+ * gcc.target/i386/hle-add-rel-1.c: Likewise.
+ * gcc.target/i386/hle-and-acq-1.c: Likewise.
+ * gcc.target/i386/hle-and-rel-1.c: Likewise.
+ * gcc.target/i386/hle-cmpxchg-acq-1.c: Likewise.
+ * gcc.target/i386/hle-cmpxchg-rel-1.c: Likewise.
+ * gcc.target/i386/hle-or-acq-1.c: Likewise.
+ * gcc.target/i386/hle-or-rel-1.c: Likewise.
+ * gcc.target/i386/hle-sub-acq-1.c: Likewise.
+ * gcc.target/i386/hle-sub-rel-1.c: Likewise.
+ * gcc.target/i386/hle-xadd-acq-1.c: Likewise.
+ * gcc.target/i386/hle-xadd-rel-1.c: Likewise.
+ * gcc.target/i386/hle-xor-acq-1.c: Likewise.
+ * gcc.target/i386/hle-xor-rel-1.c: Likewise.
+
+2012-05-09 Dehao Chen <dehao@google.com>
+
+ * gcc.dg/predict-1.c: Remove the replicated text in this test.
+ * gcc.dg/predict-2.c: Likewise.
+ * gcc.dg/predict-3.c: Likewise.
+ * gcc.dg/predict-4.c: Likewise.
+ * gcc.dg/predict-5.c: Likewise.
+ * gcc.dg/predict-6.c: Likewise.
+
+2012-05-08 Hans-Peter Nilsson <hp@axis.com>
+
+ PR target/53272
+ * gcc.dg/torture/pr53272-1.c, gcc.dg/torture/pr53272-2.c: New test.
+
+2012-05-08 Richard Guenther <rguenther@suse.de>
+
+ * gcc.dg/fold-bitand-4.c: New testcase.
+
+2012-05-08 Dehao Chen <dehao@google.com>
+
+ * gcc.dg/predict-1.c: Check if LOOP_IV_COMPARE static predict
+ heuristic is working properly.
+ * gcc.dg/predict-2.c: Likewise.
+ * gcc/dg/predict-3.c: Likewise.
+ * gcc/dg/predict-4.c: Likewise.
+ * gcc/dg/predict-5.c: Likewise.
+ * gcc/dg/predict-6.c: Likewise.
+
+2012-05-07 Jakub Jelinek <jakub@redhat.com>
+
+ PR tree-optimization/53239
+ * g++.dg/opt/vrp3.C: New test.
+ * g++.dg/opt/vrp3-aux.cc: New file.
+ * g++.dg/opt/vrp3.h: New file.
+
+2012-05-07 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
+
+ * g++.dg/debug/dwarf2/nested-3.C: Allow for / comments and missing
+ .uleb128 support in assembler output.
+
+2012-05-07 Richard Guenther <rguenther@suse.de>
+
+ PR lto/42987
+ * g++.dg/lto/pr42987_0.C: New testcase.
+ * g++.dg/lto/pr42987_1.C: Likewise.
+
+2012-05-07 Tobias Burnus <burnus@net-b.de>
+
+ PR fortran/53255
+ * gfortran.dg/typebound_operator_15.f90: New.
+
+2012-05-06 Tobias Burnus <burnus@net-b.de>
+
+ PR fortran/41587
+ * gfortran.dg/class_array_13.f90: New.
+
+2012-05-06 Tristan Gingold <gingold@adacore.com>
+
+ * gnat.dg/warn7.adb: New test.
+
+2012-05-06 Eric Botcazou <ebotcazou@adacore.com>
+
+ * gnat.dg/specs/renamings.ads: Rename to...
+ * gnat.dg/specs/renaming1.ads: ...this.
+ * gnat.dg/specs/renaming2.ads: New test.
+ * gnat.dg/specs/renaming2_pkg1.ads: New helper.
+ * gnat.dg/specs/renaming2_pkg2.ads: Likewise.
+ * gnat.dg/specs/renaming2_pkg3.ads: Likewise.
+ * gnat.dg/specs/renaming2_pkg4.ad[sb]: Likewise.
+
+2012-05-06 Eric Botcazou <ebotcazou@adacore.com>
+
+ * gnat.dg/discr36.ad[sb]: New test.
+ * gnat.dg/discr36_pkg.ad[sb]: New helper.
+
+2012-05-05 Manuel López-Ibáñez <manu@gcc.gnu.org>
+
+ PR c/43772
+ * c-c++-common/pr43772.c: New.
+
+2012-05-05 Paul Thomas <pault@gcc.gnu.org>
+
+ PR fortran/41600
+ * gfortran.dg/select_type_26.f03 : New test.
+ * gfortran.dg/select_type_27.f03 : New test.
+
+ PR fortran/53191
+ * gfortran.dg/select_type_28.f03 : New test.
+
+2012-05-05 Janne Blomqvist <jb@gcc.gnu.org>
+
+ PR fortran/49010
+ PR fortran/24518
+ * gfortran.dg/mod_sign0_1.f90: New test.
+ * gfortran.dg/mod_large_1.f90: New test.
+
+2012-05-04 Tobias Burnus <burnus@net-b.de>
+
+ PR fortran/53175
+ gfortran.dg/public_private_module_5.f90: New.
+
+2012-05-04 Tobias Burnus <burnus@net-b.de>
+
+ PR fortran/53111
+ * gfortran.dg/constructor_7.f90: New.
+ * gfortran.dg/constructor_8.f90: New.
+
+2012-05-04 Paolo Carlini <paolo.carlini@oracle.com>
+
+ PR c++/53166
+ * g++.dg/cpp0x/static_assert7.C: New.
+
+2012-05-04 Ulrich Weigand <ulrich.weigand@linaro.org>
+
+ PR tree-optimization/52633
+ * gcc.dg/vect/vect-over-widen-1.c: Two patterns should now be
+ recognized as widening shifts instead of over-widening.
+ * gcc.dg/vect/vect-over-widen-1-big-array.c: Likewise.
+ * gcc.dg/vect/vect-over-widen-4.c: Likewise.
+ * gcc.dg/vect/vect-over-widen-4-big-array.c: Likewise.
+ * gcc.target/arm/pr52633.c: New test.
+
+2012-05-04 Richard Guenther <rguenther@suse.de>
+
+ PR tree-optimization/53168
+ * gcc.dg/torture/pr53168.c: New testcase.
+ * gcc.dg/tree-ssa/ssa-pre-30.c: Likewise.
+
+2012-05-04 Richard Guenther <rguenther@suse.de>
+
+ * gcc.dg/lto/pr53214_0.c: New testcase.
+
+2012-05-04 Eric Botcazou <ebotcazou@adacore.com>
+
+ * gcc.target/ia64/pr48496.c: New test.
+ * gcc.target/ia64/pr52657.c: Likewise.
+
+2012-05-04 Manuel López-Ibáñez <manu@gcc.gnu.org>
+
+ PR c/51712
+ * c-c++-common/pr51712.c: New.
+
+2012-05-03 Michael Meissner <meissner@linux.vnet.ibm.com>
+
+ PR target/53199
+ * gcc.target/powwerpc/pr53199.c: New file.
+
+2012-05-03 Jason Merrill <jason@redhat.com>
+
+ * g++.dg/debug/dwarf2/nested-3.C: Adjust pattern.
+
+ * gcc.dg/debug/dwarf2/dups-types.c: New.
+ * gcc.dg/debug/dwarf2/dups-types.h: New.
+
+2012-05-03 Jason Merrill <jason@redhat.com>
+
+ * g++.dg/debug/dwarf2/namespace-2.C: New.
+ * g++.dg/debug/dwarf2/localclass3.C: New.
+
+2012-05-03 Jason Merrill <jason@redhat.com>
+
+ * g++.dg/debug/dwarf2/thunk1.C: New.
+
+2012-05-03 Paolo Carlini <paolo.carlini@oracle.com>
+
+ PR c++/53186
+ * g++.dg/other/final2.C: New.
+
+2012-05-03 Richard Guenther <rguenther@suse.de>
+
+ * gcc.dg/tree-ssa/ssa-pre-27.c: Remove XFAIL.
+
+2012-05-03 Uros Bizjak <ubizjak@gmail.com>
+
+ * gcc.target/i386/hle-cmpxchg-acq-1.c (dg-options): Add -march=x86-64.
+ * gcc.target/i386/hle-xadd-acq-1.c (dg-options): Ditto.
+ * gcc.target/i386/hle-cmpxchg-rel-1.c (dg-options): Ditto.
+ * gcc.target/i386/hle-xadd-rel-1.c (dg-options): Ditto.
+
+2012-05-03 Jakub Jelinek <jakub@redhat.com>
+
+ * gcc.target/i386/hle-xadd-rel-1.c: Match .byte 0xf3 instead of
+ .byte 0xf2.
+ * gcc.target/i386/hle-sub-rel-1.c: Likewise.
+ * gcc.target/i386/hle-xchg-rel-1.c: Likewise.
+ * gcc.target/i386/hle-add-rel-1.c: Likewise.
+
+ PR debug/53174
+ * gcc.dg/pr53174.c: New test.
+
+ PR target/53187
+ * gcc.target/arm/pr53187.c: New test.
+ * gcc.c-torture/compile/pr53187.c: New test.
+
+2012-05-03 Richard Guenther <rguenther@suse.de>
+
+ * gfortran.dg/pr52621.f90: Add -w to avoid diagnostic about
+ unsupported prefetching support.
+
+2012-05-03 Greta Yorsh <Greta.Yorsh@arm.com>
+
+ * gcc.dg/fixed-point/composite-type.c (dg-options): Add
+ option -ftrack-macro-expansion=0.
+ * gcc.dg/fixed-point/operator-bitwise.c (dg-options): Add
+ option -ftrack-macro-expansion=0.
+
+2012-05-03 Greta Yorsh <Greta.Yorsh@arm.com>
+
+ * gcc.dg/builtin-stringop-chk-1.c (dg-options): Replace
+ dg-options for target arm with dg-additional-options.
+
+2012-05-03 Richard Guenther <rguenther@suse.de>
+
+ PR tree-optimization/53144
+ * gcc.dg/torture/pr53144.c: New testcase.
+
+2012-05-03 Tobias Burnus <burnus@net-b.de>
+
+ PR fortran/52864
+ * gfortran.dg/pointer_intent_7.f90: New.
+ * gfortran.dg/pure_formal_3.f90: New.
+
+2012-05-02 Ulrich Weigand <ulrich.weigand@linaro.org>
+
+ * gcc.target/s390/20030123-1.c: Add missing "volatile".
+
+2012-05-02 Martin Jambor <mjambor@suse.cz>
+
+ PR lto/52605
+ * g++.dg/lto/pr52605_0.C: New test.
+
+2012-05-02 Kirill Yukhin <kirill.yukhin@intel.com>
+
+ * gcc.target/i386/hle-cmpxchg-acq-1.c: New.
+ * gcc.target/i386/hle-cmpxchg-rel-1.c: Ditto.
+ * gcc.target/i386/hle-add-acq-1.c: Ditto.
+ * gcc.target/i386/hle-add-rel-1.c: Ditto.
+ * gcc.target/i386/hle-and-acq-1.c: Ditto.
+ * gcc.target/i386/hle-and-rel-1.c: Ditto.
+ * gcc.target/i386/hle-or-acq-1.c: Ditto.
+ * gcc.target/i386/hle-or-rel-1.c: Ditto.
+ * gcc.target/i386/hle-sub-acq-1.c: Ditto.
+ * gcc.target/i386/hle-sub-rel-1.c: Ditto.
+ * gcc.target/i386/hle-xadd-acq-1.c: Ditto.
+ * gcc.target/i386/hle-xadd-rel-1.c: Ditto.
+ * gcc.target/i386/hle-xchg-acq-1.c: Ditto.
+ * gcc.target/i386/hle-xchg-rel-1.c: Ditto.
+ * gcc.target/i386/hle-xor-acq-1.c: Ditto.
+ * gcc.target/i386/hle-xor-rel-1.c: Ditto.
+
+2012-05-02 Steven Bosscher <steven@gcc.gnu.org>
+
+ PR middle-end/53153
+ * gcc.dg/pr53153.c: New test.
+
+2012-05-02 Richard Guenther <rguenther@suse.de>
+
+ * g++.dg/tree-ssa/pr19807.C: Adjust.
+
2012-05-02 Jakub Jelinek <jakub@redhat.com>
+ PR tree-optimization/53163
+ * gcc.c-torture/compile/pr53163.c: New test.
+
PR rtl-optimization/53160
* gcc.c-torture/execute/pr53160.c: New test.
@@ -105,11 +431,11 @@
Fix expansion point loc for macro-like tokens
* gcc.dg/debug/dwarf2/pr41445-5.c: Adjust.
- * gcc.dg/debug/dwarf2/pr41445-6.c: Likewise.
+ * gcc.dg/debug/dwarf2/pr41445-6.c: Likewise.
Fix token pasting with -ftrack-macro-expansion
* gcc.dg/cpp/paste17.c: New test case for
- -ftrack-macro-expansion=2 mode only.
+ -ftrack-macro-expansion=2 mode only.
* gcc.dg/cpp/macro-exp-tracking-5.c: Likewise.
2012-04-30 Eric Botcazou <ebotcazou@adacore.com>
@@ -119,7 +445,7 @@
2012-04-29 Manuel López-Ibáñez <manu@gcc.gnu.org>
PR 53149
- * gcc.dg/20011021-1.c: Adjust testcase.
+ * gcc.dg/20011021-1.c: Adjust testcase.
2012-04-29 Thomas Koenig <tkoenig@gcc.gnu.org>
diff --git a/gcc/testsuite/c-c++-common/pr43772.c b/gcc/testsuite/c-c++-common/pr43772.c
new file mode 100644
index 00000000000..8bf09a420f3
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/pr43772.c
@@ -0,0 +1,45 @@
+/* { dg-do compile } */
+/* { dg-options "-Wlogical-op -Wtype-limits" } */
+#include <limits.h>
+long long
+emacs_lseek (long long offset)
+{
+ return -1-9223372036854775807LL <= offset && offset <= 9223372036854775807LL;
+}
+
+long long
+foo (long long offset)
+{
+ return -1-9223372036854775807LL > offset && offset > 9223372036854775807LL;
+}
+
+long long
+foo3 (long long offset)
+{
+ return -1-9223372036854775807LL > offset && offset < 9223372036854775807LL;
+}
+
+long long
+foo2 (long long offset)
+{
+ if (-1-9223372036854775807LL <= offset) return 0;
+ if (offset <= 9223372036854775807LL) return 0;
+ if (-1-9223372036854775807LL > offset) return 0;
+ if (offset > 9223372036854775807LL) return 0;
+ return 1;
+}
+
+# define BOT INT_MIN
+# define TOP INT_MAX
+
+long long get_intmax(void);
+int get_int(void);
+extern void do_something(void);
+int main(void)
+{
+ int i = get_int();
+ long long x = get_intmax();
+ i = (i > BOT && i < TOP); //OK
+ i = (i >= BOT+1 && i <= TOP-1); //OK
+ i = (i >= BOT && i <= TOP); //Oops!
+}
diff --git a/gcc/testsuite/c-c++-common/pr51712.c b/gcc/testsuite/c-c++-common/pr51712.c
new file mode 100644
index 00000000000..4d9eba33afe
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/pr51712.c
@@ -0,0 +1,18 @@
+/* PR c/51712 */
+/* { dg-do compile } */
+/* { dg-options "-Wtype-limits" } */
+
+enum test_enum {
+ FOO,
+ BAR
+};
+
+int valid(enum test_enum arg)
+{
+ return arg >= 0 && arg <= BAR;
+}
+
+int valid2(unsigned int arg2)
+{
+ return arg2 >= FOO && arg2 <= BAR; /* { dg-bogus "comparison of unsigned expression" "" { xfail *-*-* } } */
+}
diff --git a/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-err2.C b/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-err2.C
new file mode 100644
index 00000000000..aaa80f47000
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-err2.C
@@ -0,0 +1,12 @@
+// PR c++/53158
+// { dg-do compile { target c++11 } }
+
+int main()
+{
+ auto a = []() { return true; };
+ auto b = []() { return a(); }; // { dg-error "'a' is not captured" }
+ int c, d;
+ while (b() && c < d) // { dg-error "could not convert" }
+ {
+ }
+}
diff --git a/gcc/testsuite/g++.dg/cpp0x/static_assert7.C b/gcc/testsuite/g++.dg/cpp0x/static_assert7.C
new file mode 100644
index 00000000000..28793e43400
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp0x/static_assert7.C
@@ -0,0 +1,20 @@
+// PR c++/53166
+// { dg-options "-std=c++11 -Waddress" }
+
+template <typename X, X a>
+struct A
+{
+ static_assert (a != nullptr, "oops");
+ static_assert (nullptr != a, "oops");
+
+ int f()
+ {
+ static_assert (a != nullptr, "oops");
+ static_assert (nullptr != a, "oops");
+ return 1;
+ }
+};
+
+int i1;
+A<int*, &i1> a1;
+int i2 = a1.f();
diff --git a/gcc/testsuite/g++.dg/cpp0x/variadic132.C b/gcc/testsuite/g++.dg/cpp0x/variadic132.C
new file mode 100644
index 00000000000..f50c7a659f5
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp0x/variadic132.C
@@ -0,0 +1,27 @@
+// PR c++/53305
+// { dg-do compile { target c++11 } }
+
+template<class... Ts> struct tuple { };
+
+struct funct
+{
+ template<class... argTs>
+ int operator()(argTs...);
+};
+
+template<class...> class test;
+
+template<template <class...> class tp,
+ class... arg1Ts, class... arg2Ts>
+class test<tp<arg1Ts...>, tp<arg2Ts...>>
+{
+ template<class func, class...arg3Ts>
+ auto test2(func fun, arg1Ts... arg1s, arg3Ts... arg3s)
+ -> decltype(fun(arg1s..., arg3s...));
+};
+
+int main()
+{
+ test<tuple<>, tuple<char,int>> t2;
+ t2.test2(funct(), 'a', 2); // { dg-error "no matching function" }
+}
diff --git a/gcc/testsuite/g++.dg/debug/dwarf2/localclass3.C b/gcc/testsuite/g++.dg/debug/dwarf2/localclass3.C
new file mode 100644
index 00000000000..be28a197168
--- /dev/null
+++ b/gcc/testsuite/g++.dg/debug/dwarf2/localclass3.C
@@ -0,0 +1,11 @@
+// Test that the A* pointer_type is also within the debug info for f.
+// Currently GCC emits it immediately before A, which is simple to test for.
+// { dg-options "-g -dA" }
+
+void f()
+{
+ struct A { int i; } *ap;
+ ap->i = 42;
+}
+
+// { dg-final { scan-assembler "DW_TAG_pointer_type.\[^)\]*. DW_TAG_structure_type" } }
diff --git a/gcc/testsuite/g++.dg/debug/dwarf2/namespace-2.C b/gcc/testsuite/g++.dg/debug/dwarf2/namespace-2.C
new file mode 100644
index 00000000000..0289e9022d3
--- /dev/null
+++ b/gcc/testsuite/g++.dg/debug/dwarf2/namespace-2.C
@@ -0,0 +1,10 @@
+// Test that we define A inside the namespace rather than declaring it
+// there and then defining it at CU scope.
+// { dg-options "-g -dA" }
+// { dg-final { scan-assembler-not "DW_AT_declaration" } }
+
+namespace N {
+ struct A;
+}
+
+struct N::A { } a;
diff --git a/gcc/testsuite/g++.dg/debug/dwarf2/nested-3.C b/gcc/testsuite/g++.dg/debug/dwarf2/nested-3.C
index 1c1be999c4d..ac121d6b906 100644
--- a/gcc/testsuite/g++.dg/debug/dwarf2/nested-3.C
+++ b/gcc/testsuite/g++.dg/debug/dwarf2/nested-3.C
@@ -37,6 +37,14 @@ main ()
// .uleb128 0x9 # (DIE (0x34) DW_TAG_class_type)
// .long .LASF0 # DW_AT_name: "Executor"
// # DW_AT_declaration
+// .byte 0xa0 # DW_AT_signature
+// .byte 0xfe
+// .byte 0xe6
+// .byte 0x7b
+// .byte 0x66
+// .byte 0xe9
+// .byte 0x38
+// .byte 0xf0
// .uleb128 0x5 # (DIE (0x39) DW_TAG_subprogram)
// # DW_AT_external
// .long .LASF1 # DW_AT_name: "CurrentExecutor"
@@ -51,4 +59,4 @@ main ()
//
// Hence the scary regexp:
//
-// { dg-final { scan-assembler "\[^\n\r\]*\\(DIE \\(0x(\[0-9a-f\]+)\\) DW_TAG_namespace\\)\[\n\r\]+\[^\n\r\]*\"thread\[\^\n\r]+\[\n\r\]+(\[^\n\r\]*\[\n\r\]+)+\[^\n\r\]*\\(DIE \\(0x(\[0-9a-f\]+)\\) DW_TAG_class_type\\)(\[\n\r\]+\[^\n\r\]*)+\"Executor\[^\n\r\]+\[\n\r\]+\[^\n\r\]*DW_AT_declaration\[\n\r\]+\[^\n\r\]*\\(DIE\[^\n\r\]*DW_TAG_subprogram\\)\[\n\r\]+(\[^\n\r\]*\[\n\r\]+)+\[^\n\r\]*\"CurrentExecutor\[^\n\r\]+\[\n\r\]+(\[^\n\r\]*\[\n\r\]+)+(\[^\n\r\]*\[\n\r\]+)+\[^\n\r\]*end of children of DIE 0x\\3\[\n\r]+\[^\n\r\]*end of children of DIE 0x\\1\[\n\r]+" } }
+// { dg-final { scan-assembler "\[^\n\r\]*\\(DIE \\(0x(\[0-9a-f\]+)\\) DW_TAG_namespace\\)\[\n\r\]+\[^\n\r\]*\"thread\[\^\n\r]+\[\n\r\]+(\[^\n\r\]*\[\n\r\]+)+\[^\n\r\]*\\(DIE \\(0x(\[0-9a-f\]+)\\) DW_TAG_class_type\\)(\[\n\r\]+\[^\n\r\]*)+\"Executor\[^\n\r\]+\[\n\r\]+\[^\n\r\]*DW_AT_declaration\[\n\r\]+\[^\n\r\]*DW_AT_signature\[^#/\]*\[#/\] \[^\n\r\]*\\(DIE\[^\n\r\]*DW_TAG_subprogram\\)\[\n\r\]+(\[^\n\r\]*\[\n\r\]+)+\[^\n\r\]*\"CurrentExecutor\[^\n\r\]+\[\n\r\]+(\[^\n\r\]*\[\n\r\]+)+(\[^\n\r\]*\[\n\r\]+)+\[^\n\r\]*end of children of DIE 0x\\3\[\n\r]+\[^\n\r\]*end of children of DIE 0x\\1\[\n\r]+" } }
diff --git a/gcc/testsuite/g++.dg/debug/dwarf2/thunk1.C b/gcc/testsuite/g++.dg/debug/dwarf2/thunk1.C
new file mode 100644
index 00000000000..c34373e2681
--- /dev/null
+++ b/gcc/testsuite/g++.dg/debug/dwarf2/thunk1.C
@@ -0,0 +1,11 @@
+// Test that we don't add the x86 PC thunk to .debug_ranges
+// { dg-do compile { target i?86-*-* } }
+// { dg-options "-g -fpic" }
+// { dg-final { scan-assembler-times "LFB3" 1 } }
+
+template <class T> void f(T t) { }
+
+int main()
+{
+ f(42);
+}
diff --git a/gcc/testsuite/g++.dg/lto/pr42987_0.C b/gcc/testsuite/g++.dg/lto/pr42987_0.C
new file mode 100644
index 00000000000..e422daa69e7
--- /dev/null
+++ b/gcc/testsuite/g++.dg/lto/pr42987_0.C
@@ -0,0 +1,22 @@
+// { dg-lto-do run }
+// { dg-lto-options { { -flto -g } { -flto -flto-partition=none -g } } }
+
+#include <typeinfo>
+
+struct B {
+ virtual void b() {}
+};
+
+static B* f() {
+ struct D : public B {
+ };
+
+ return new D;
+}
+
+extern B* g();
+
+int main () {
+ if (typeid (*f()) == typeid (*g()))
+ return 1;
+}
diff --git a/gcc/testsuite/g++.dg/lto/pr42987_1.C b/gcc/testsuite/g++.dg/lto/pr42987_1.C
new file mode 100644
index 00000000000..638479e7459
--- /dev/null
+++ b/gcc/testsuite/g++.dg/lto/pr42987_1.C
@@ -0,0 +1,14 @@
+struct B {
+ virtual void b() {}
+};
+
+static B* f() {
+ struct D : public B {
+ };
+
+ return new D;
+}
+
+B* g() {
+ return f();
+}
diff --git a/gcc/testsuite/g++.dg/lto/pr52605_0.C b/gcc/testsuite/g++.dg/lto/pr52605_0.C
new file mode 100644
index 00000000000..22540abf9e4
--- /dev/null
+++ b/gcc/testsuite/g++.dg/lto/pr52605_0.C
@@ -0,0 +1,39 @@
+// { dg-lto-do link }
+// { dg-lto-options {{-flto -g}} }
+
+extern "C" void abort (void);
+
+class A
+{
+public:
+ virtual int foo (int i);
+};
+
+int A::foo (int i)
+{
+ return i + 1;
+}
+
+int __attribute__ ((noinline,noclone)) get_input(void)
+{
+ return 1;
+}
+
+int main (int argc, char *argv[])
+{
+
+ class B : public A
+ {
+ public:
+ int bar (int i)
+ {
+ return foo (i) + 2;
+ }
+ };
+ class B b;
+
+ if (b.bar (get_input ()) != 4)
+ abort ();
+ return 0;
+}
+
diff --git a/gcc/testsuite/g++.dg/opt/vrp3-aux.cc b/gcc/testsuite/g++.dg/opt/vrp3-aux.cc
new file mode 100644
index 00000000000..fb68f6b5204
--- /dev/null
+++ b/gcc/testsuite/g++.dg/opt/vrp3-aux.cc
@@ -0,0 +1,21 @@
+// { dg-do compile }
+// { dg-options "" }
+
+#include "vrp3.h"
+
+R::R ()
+{
+ r1 = r2 = 1;
+}
+
+R::R (int n, int d)
+{
+ r1 = n;
+ r2 = d;
+}
+
+int
+R::compare (R const &r, R const &s)
+{
+ return (int) (r.r1 * s.r2 - s.r1 * r.r2);
+}
diff --git a/gcc/testsuite/g++.dg/opt/vrp3.C b/gcc/testsuite/g++.dg/opt/vrp3.C
new file mode 100644
index 00000000000..90162bfe1c0
--- /dev/null
+++ b/gcc/testsuite/g++.dg/opt/vrp3.C
@@ -0,0 +1,47 @@
+// PR tree-optimization/53239
+// { dg-do run }
+// { dg-options "-O2" }
+// { dg-additional-sources "vrp3-aux.cc" }
+
+#include "vrp3.h"
+
+struct M
+{
+ M (R m);
+ R val;
+ static int compare (M const &, M const &);
+};
+
+inline M const &
+min (M const & t1, M const & t2)
+{
+ return R::compare (t1.val, t2.val) < 0 ? t1 : t2;
+}
+
+M::M (R m)
+{
+ val = m;
+}
+
+M
+test (M *x)
+{
+ M n (R (0, 0));
+
+ for (int i = 0; i < 2; i++)
+ {
+ M p = x[i];
+ n = min (n, p);
+ }
+
+ if (n.val.r2 != 2 || n.val.r1 != 1)
+ __builtin_abort ();
+ return n;
+}
+
+int
+main ()
+{
+ M x[2] = { M (R (1, 2)), M (R (1, 1)) };
+ test (x);
+}
diff --git a/gcc/testsuite/g++.dg/opt/vrp3.h b/gcc/testsuite/g++.dg/opt/vrp3.h
new file mode 100644
index 00000000000..0e97d0c491c
--- /dev/null
+++ b/gcc/testsuite/g++.dg/opt/vrp3.h
@@ -0,0 +1,9 @@
+struct R
+{
+ long long r1, r2;
+ void copy (R const &r) { r1 = r.r1; r2 = r.r2; }
+ R ();
+ explicit R (int, int);
+ R (R const &r) { copy (r); }
+ static int compare (R const &, R const &);
+};
diff --git a/gcc/testsuite/g++.dg/other/final2.C b/gcc/testsuite/g++.dg/other/final2.C
new file mode 100644
index 00000000000..a07562299d6
--- /dev/null
+++ b/gcc/testsuite/g++.dg/other/final2.C
@@ -0,0 +1,27 @@
+// PR c++/53186
+// { dg-options "-fdump-tree-original -std=c++11" }
+
+struct F1
+{
+ virtual void operator()() final;
+ virtual operator int() final;
+ virtual int operator++() final;
+};
+
+struct F2 final
+{
+ virtual void operator()();
+ virtual operator int();
+ virtual int operator++();
+};
+
+void fooF1(F1& a) { a(); int m = a; ++a; }
+void fooF2(F2& a) { a(); int m = a; ++a; }
+
+// { dg-final { scan-tree-dump-times "F1::operator\\(\\)" 1 "original" } }
+// { dg-final { scan-tree-dump-times "F1::operator int" 1 "original" } }
+// { dg-final { scan-tree-dump-times "F1::operator\\+\\+" 1 "original" } }
+// { dg-final { scan-tree-dump-times "F2::operator\\(\\)" 1 "original" } }
+// { dg-final { scan-tree-dump-times "F2::operator int" 1 "original" } }
+// { dg-final { scan-tree-dump-times "F2::operator\\+\\+" 1 "original" } }
+// { dg-final { cleanup-tree-dump "original" } }
diff --git a/gcc/testsuite/g++.dg/parse/error26.C b/gcc/testsuite/g++.dg/parse/error26.C
index befaf3bb3b8..1084e76d34a 100644
--- a/gcc/testsuite/g++.dg/parse/error26.C
+++ b/gcc/testsuite/g++.dg/parse/error26.C
@@ -4,7 +4,7 @@
void foo()
{
if (({int c[2];})) ; // { dg-error "7:ISO C.. forbids" "7" }
- // { dg-error "20:could not convert" "20" { target *-*-* } 6 }
+ // { dg-error "17:could not convert" "17" { target *-*-* } 6 }
}
void bar()
diff --git a/gcc/testsuite/g++.dg/tree-ssa/pr19807.C b/gcc/testsuite/g++.dg/tree-ssa/pr19807.C
index bba79a9d150..0eeeb18abda 100644
--- a/gcc/testsuite/g++.dg/tree-ssa/pr19807.C
+++ b/gcc/testsuite/g++.dg/tree-ssa/pr19807.C
@@ -25,6 +25,6 @@ void bar(int i)
Simply test for the existence of +1 and -1 once, which also ensures
the above. If the addition/subtraction would be applied to the
pointer we would instead see +-4 (or 8, depending on sizeof(int)). */
-/* { dg-final { scan-tree-dump-times "\\\+ -1;" 1 "optimized" } } */
+/* { dg-final { scan-tree-dump "\\\+ (0x0f*|18446744073709551615|4294967295|-1);" "optimized" } } */
/* { dg-final { scan-tree-dump-times "\\\+ 1;" 1 "optimized" } } */
/* { dg-final { cleanup-tree-dump "optimized" } } */
diff --git a/gcc/testsuite/g++.dg/warn/Wzero-as-null-pointer-constant-6.C b/gcc/testsuite/g++.dg/warn/Wzero-as-null-pointer-constant-6.C
new file mode 100644
index 00000000000..4a76b5cf16f
--- /dev/null
+++ b/gcc/testsuite/g++.dg/warn/Wzero-as-null-pointer-constant-6.C
@@ -0,0 +1,6 @@
+// PR c++/53301
+// { dg-options "-Wzero-as-null-pointer-constant" }
+
+class x { public: x(int v) {} };
+
+void foo(const x& = 0);
diff --git a/gcc/testsuite/gcc.c-torture/compile/pr53163.c b/gcc/testsuite/gcc.c-torture/compile/pr53163.c
new file mode 100644
index 00000000000..990b1132444
--- /dev/null
+++ b/gcc/testsuite/gcc.c-torture/compile/pr53163.c
@@ -0,0 +1,34 @@
+/* PR tree-optimization/53163 */
+
+struct S { int s; } b, f;
+int a, c;
+
+void
+foo (void)
+{
+ int d, e;
+ for (d = 4; d < 19; ++d)
+ for (e = 2; e >= 0; e--)
+ {
+ a = 0;
+ a = 1;
+ }
+}
+
+void
+bar (void)
+{
+ int g, h, i;
+ for (i = 1; i >= 0; i--)
+ {
+ b = f;
+ for (g = 0; g <= 1; g++)
+ {
+ if (c)
+ break;
+ for (h = 0; h <= 1; h++)
+ foo ();
+ foo ();
+ }
+ }
+}
diff --git a/gcc/testsuite/gcc.c-torture/compile/pr53187.c b/gcc/testsuite/gcc.c-torture/compile/pr53187.c
new file mode 100644
index 00000000000..13455f42271
--- /dev/null
+++ b/gcc/testsuite/gcc.c-torture/compile/pr53187.c
@@ -0,0 +1,11 @@
+/* PR target/53187 */
+
+void bar (int);
+
+void
+foo (int x, double y, double z)
+{
+ _Bool t = z >= y;
+ if (!t || x)
+ bar (t ? 1 : 16);
+}
diff --git a/gcc/testsuite/gcc.c-torture/compile/pr53226.c b/gcc/testsuite/gcc.c-torture/compile/pr53226.c
new file mode 100644
index 00000000000..2d0284fb006
--- /dev/null
+++ b/gcc/testsuite/gcc.c-torture/compile/pr53226.c
@@ -0,0 +1,13 @@
+/* PR tree-optimization/53226 */
+
+void
+foo (unsigned long *x, char y, char z)
+{
+ int i;
+ for (i = y; i < z; ++i)
+ {
+ unsigned long a = ((unsigned char) i) & 63UL;
+ unsigned long b = 1ULL << a;
+ *x |= b;
+ }
+}
diff --git a/gcc/testsuite/gcc.dg/builtin-stringop-chk-1.c b/gcc/testsuite/gcc.dg/builtin-stringop-chk-1.c
index beecab652e3..5cec6b361e4 100644
--- a/gcc/testsuite/gcc.dg/builtin-stringop-chk-1.c
+++ b/gcc/testsuite/gcc.dg/builtin-stringop-chk-1.c
@@ -2,7 +2,7 @@
are emitted properly. */
/* { dg-do compile } */
/* { dg-options "-O2 -std=gnu99 -ftrack-macro-expansion=0" } */
-/* { dg-options "-mstructure-size-boundary=8 -O2 -std=gnu99" { target arm*-*-* } } */
+/* { dg-additional-options "-mstructure-size-boundary=8" { target arm*-*-* } } */
extern void abort (void);
diff --git a/gcc/testsuite/gcc.dg/debug/dwarf2/dups-types.c b/gcc/testsuite/gcc.dg/debug/dwarf2/dups-types.c
new file mode 100644
index 00000000000..d9c01d07da7
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/debug/dwarf2/dups-types.c
@@ -0,0 +1,8 @@
+/* Test that these two options can work together. */
+/* { dg-options "-gdwarf-4 -dA -feliminate-dwarf2-dups -fdebug-types-section" } */
+/* { dg-final { scan-assembler "DW.dups_types\.h\[^)\]*. DW_TAG_typedef" } } */
+/* { dg-final { scan-assembler "DW_TAG_type_unit" } } */
+
+#include "dups-types.h"
+
+A2 a;
diff --git a/gcc/testsuite/gcc.dg/debug/dwarf2/dups-types.h b/gcc/testsuite/gcc.dg/debug/dwarf2/dups-types.h
new file mode 100644
index 00000000000..99b7d907c0f
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/debug/dwarf2/dups-types.h
@@ -0,0 +1,10 @@
+struct A
+{
+ int i;
+ int j;
+};
+
+typedef struct A A2;
+extern A2 a;
+
+A2 f(A2);
diff --git a/gcc/testsuite/gcc.dg/fixed-point/composite-type.c b/gcc/testsuite/gcc.dg/fixed-point/composite-type.c
index 5ae11981e26..026bdaf5644 100644
--- a/gcc/testsuite/gcc.dg/fixed-point/composite-type.c
+++ b/gcc/testsuite/gcc.dg/fixed-point/composite-type.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-std=gnu99 -O -Wall -Wno-unused" } */
+/* { dg-options "-std=gnu99 -O -Wall -Wno-unused -ftrack-macro-expansion=0" } */
/* C99 6.2.7: Compatible type and composite type. */
diff --git a/gcc/testsuite/gcc.dg/fixed-point/operator-bitwise.c b/gcc/testsuite/gcc.dg/fixed-point/operator-bitwise.c
index 31aecf55858..6ba817dca64 100644
--- a/gcc/testsuite/gcc.dg/fixed-point/operator-bitwise.c
+++ b/gcc/testsuite/gcc.dg/fixed-point/operator-bitwise.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-std=gnu99" } */
+/* { dg-options "-std=gnu99 -ftrack-macro-expansion=0" } */
/* C99 6.5.10: Bitwise AND operator.
C99 6.5.11: Bitwise exclusive OR operator.
diff --git a/gcc/testsuite/gcc.dg/fold-bitand-4.c b/gcc/testsuite/gcc.dg/fold-bitand-4.c
new file mode 100644
index 00000000000..dba83615dfe
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/fold-bitand-4.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-O -fdump-tree-original" } */
+
+int foo (int i)
+{
+ return (i * 8) & 5;
+}
+
+unsigned bar (unsigned i)
+{
+ return (i * 6) & 5;
+}
+
+/* { dg-final { scan-tree-dump-times "\\\&" 1 "original" } } */
+/* { dg-final { scan-tree-dump-times "\\\& 4;" 1 "original" } } */
+/* { dg-final { cleanup-tree-dump "original" } } */
diff --git a/gcc/testsuite/gcc.dg/lto/pr53214_0.c b/gcc/testsuite/gcc.dg/lto/pr53214_0.c
new file mode 100644
index 00000000000..e76d4dac456
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/lto/pr53214_0.c
@@ -0,0 +1,8 @@
+/* { dg-lto-do run } */
+
+double a(double) __attribute__ ((optimize(1), used));
+double a(double r)
+{
+ return r;
+}
+int main () { return 0; }
diff --git a/gcc/testsuite/gcc.dg/pr53153.c b/gcc/testsuite/gcc.dg/pr53153.c
new file mode 100644
index 00000000000..8899e04e90f
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr53153.c
@@ -0,0 +1,61 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+extern void bar (void);
+
+/* Case 181 is not in the range for 'char'. */
+void
+foo1 (char *buf)
+{
+ int x = *buf;
+ switch (x)
+ {
+ case -76:
+ case 65:
+ case 181:
+ bar();
+ }
+}
+
+/* All cases are below the range of char. */
+void
+foo2 (char *buf)
+{
+ int x = *buf;
+ switch (x)
+ {
+ case -150:
+ case -140:
+ case -130:
+ bar();
+ }
+}
+
+/* All cases are above the range of char. */
+void
+foo3 (char *buf)
+{
+ int x = *buf;
+ switch (x)
+ {
+ case 130:
+ case 140:
+ case 150: /* This case is not in the range for 'char'. */
+ bar();
+ }
+}
+
+/* The bounding cases are partially out of range for char. */
+void
+foo4 (char *buf)
+{
+ int x = *buf;
+ switch (x)
+ {
+ case -130 ... -120:
+ case 100:
+ case 120 ... 130:
+ bar();
+ }
+}
+
diff --git a/gcc/testsuite/gcc.dg/pr53174.c b/gcc/testsuite/gcc.dg/pr53174.c
new file mode 100644
index 00000000000..37c9390e5f7
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr53174.c
@@ -0,0 +1,67 @@
+/* PR debug/53174 */
+/* { dg-do compile } */
+/* { dg-options "-Ofast -g" } */
+
+int w, h;
+
+void
+bar (float (*x)[4], int y, int z)
+{
+ int a, b, c, d, e, f, g;
+
+ a = 2;
+ b = 2;
+ c = 274;
+ d = 274;
+ if (!z)
+ a = 12;
+ if (!y)
+ b = 12;
+ if (z + 266 >= h - 2)
+ c = 8 + h - z;
+ if (y + 266 >= w - 2)
+ d = 8 + w - y;
+ for (e = a; e < c; e++)
+ for (f = b, g = e * 276 + f; f < d; f++, g++)
+ {
+ float (*h)[4] = x + (g - 277);
+ float k = (*h)[0];
+ float l = (*h)[1];
+ float m = (*h)[2];
+ h++;
+ k += (*h)[0];
+ l += (*h)[1];
+ m += (*h)[2];
+ h++;
+ k += (*h)[0];
+ l += (*h)[1];
+ m += (*h)[2];
+ h += 274;
+ k += (*h)[0];
+ l += (*h)[1];
+ m += (*h)[2];
+ h += 2;
+ k += (*h)[0];
+ l += (*h)[1];
+ m += (*h)[2];
+ h += 274;
+ k += (*h)[0];
+ l += (*h)[1];
+ m += (*h)[2];
+ h++;
+ k += (*h)[0];
+ l += (*h)[1];
+ m += (*h)[2];
+ h++;
+ k += (*h)[0];
+ l += (*h)[1];
+ m += (*h)[2];
+ k *= 0.125f;
+ l *= 0.125f;
+ m *= 0.125f;
+ k = k + (x[g][1] - l);
+ m = m + (x[g][1] - l);
+ x[g][0] = k;
+ x[g][2] = m;
+ }
+}
diff --git a/gcc/testsuite/gcc.dg/predict-1.c b/gcc/testsuite/gcc.dg/predict-1.c
new file mode 100644
index 00000000000..5c9a5a930a5
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/predict-1.c
@@ -0,0 +1,27 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-profile_estimate" } */
+
+extern int global;
+
+int bar(int);
+
+void foo (int bound)
+{
+ int i, ret = 0;
+ for (i = 0; i < bound; i++)
+ {
+ if (i > bound)
+ global += bar (i);
+ if (i >= bound + 2)
+ global += bar (i);
+ if (i > bound - 2)
+ global += bar (i);
+ if (i + 2 > bound)
+ global += bar (i);
+ if (i == 10)
+ global += bar (i);
+ }
+}
+
+/* { dg-final { scan-tree-dump-times "loop iv compare heuristics: 0.0%" 5 "profile_estimate"} } */
+/* { dg-final { cleanup-tree-dump "profile_estimate" } } */
diff --git a/gcc/testsuite/gcc.dg/predict-2.c b/gcc/testsuite/gcc.dg/predict-2.c
new file mode 100644
index 00000000000..55ca1d0bc7e
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/predict-2.c
@@ -0,0 +1,27 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-profile_estimate" } */
+
+extern int global;
+
+int bar(int);
+
+void foo (int base, int bound)
+{
+ int i, ret = 0;
+ for (i = base; i < bound; i++)
+ {
+ if (i > bound * bound)
+ global += bar (i);
+ if (i > bound + 10)
+ global += bar (i);
+ if (i <= bound + 10)
+ global += bar (i);
+ if (i > base + 10)
+ global += bar (i);
+ if (i < base - 10)
+ global += bar (i);
+ }
+}
+
+/* { dg-final { scan-tree-dump-not "loop iv compare heuristics" "profile_estimate"} } */
+/* { dg-final { cleanup-tree-dump "profile_estimate" } } */
diff --git a/gcc/testsuite/gcc.dg/predict-3.c b/gcc/testsuite/gcc.dg/predict-3.c
new file mode 100644
index 00000000000..8881bde30b6
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/predict-3.c
@@ -0,0 +1,25 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-profile_estimate" } */
+
+extern int global;
+
+int bar(int);
+
+void foo (int bound)
+{
+ int i, ret = 0;
+ for (i = 0; i <= bound; i++)
+ {
+ if (i < bound - 2)
+ global += bar (i);
+ if (i <= bound)
+ global += bar (i);
+ if (i + 1 < bound)
+ global += bar (i);
+ if (i != bound)
+ global += bar (i);
+ }
+}
+
+/* { dg-final { scan-tree-dump-times "loop iv compare heuristics: 100.0%" 4 "profile_estimate"} } */
+/* { dg-final { cleanup-tree-dump "profile_estimate" } } */
diff --git a/gcc/testsuite/gcc.dg/predict-4.c b/gcc/testsuite/gcc.dg/predict-4.c
new file mode 100644
index 00000000000..17a50b9ead5
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/predict-4.c
@@ -0,0 +1,19 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-profile_estimate" } */
+
+extern int global;
+
+int bar(int);
+
+void foo (int bound)
+{
+ int i, ret = 0;
+ for (i = 0; i < 10; i++)
+ {
+ if (i < 5)
+ global += bar (i);
+ }
+}
+
+/* { dg-final { scan-tree-dump "loop iv compare heuristics: 50.0%" "profile_estimate"} } */
+/* { dg-final { cleanup-tree-dump "profile_estimate" } } */
diff --git a/gcc/testsuite/gcc.dg/predict-5.c b/gcc/testsuite/gcc.dg/predict-5.c
new file mode 100644
index 00000000000..f2fe339b6cb
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/predict-5.c
@@ -0,0 +1,25 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-profile_estimate" } */
+
+extern int global;
+
+int bar (int);
+
+void foo (int base, int bound)
+{
+ int i, ret = 0;
+ for (i = base; i <= bound; i++)
+ {
+ if (i > base)
+ global += bar (i);
+ if (i > base + 1)
+ global += bar (i);
+ if (i >= base + 3)
+ global += bar (i);
+ if (i - 2 >= base)
+ global += bar (i);
+ }
+}
+
+/* { dg-final { scan-tree-dump-times "loop iv compare heuristics: 100.0%" 4 "profile_estimate"} } */
+/* { dg-final { cleanup-tree-dump "profile_estimate" } } */
diff --git a/gcc/testsuite/gcc.dg/predict-6.c b/gcc/testsuite/gcc.dg/predict-6.c
new file mode 100644
index 00000000000..bf769fd2cf8
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/predict-6.c
@@ -0,0 +1,25 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-profile_estimate" } */
+
+extern int global;
+
+int bar (int);
+
+void foo (int base, int bound)
+{
+ int i, ret = 0;
+ for (i = base; i <= bound; i++)
+ {
+ if (i < base)
+ global += bar (i);
+ if (i < base + 1)
+ global += bar (i);
+ if (i <= base + 3)
+ global += bar (i);
+ if (i - 1 < base)
+ global += bar (i);
+ }
+}
+
+/* { dg-final { scan-tree-dump-times "loop iv compare heuristics: 0.0%" 4 "profile_estimate"} } */
+/* { dg-final { cleanup-tree-dump "profile_estimate" } } */
diff --git a/gcc/testsuite/gcc.dg/torture/pr53144.c b/gcc/testsuite/gcc.dg/torture/pr53144.c
new file mode 100644
index 00000000000..ad94812ad7c
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/pr53144.c
@@ -0,0 +1,21 @@
+/* { dg-do compile } */
+
+typedef unsigned char __attribute__((vector_size(4))) uvec;
+
+int main (int argc, char *argv[]) {
+ int i;
+ int x = 0;
+ uvec uc0 = (uvec) {argc, 1, 2, 10};
+ unsigned char uc1[4] = {0, 3, 2, 200};
+ signed char ucg[4] = {1, 0, 0, 0 };
+ signed char ucl[4] = {0, 1, 0, 1 };
+
+#define uc0_ ((unsigned char *)&uc0)
+
+ for (i = 0; i < 4; i ++) {
+ x |= ucg[i] != (uc0_[i] > uc1[i]);
+ x |= ucl[i] != (uc0_[i] < uc1[i]);
+ }
+ return x;
+}
+
diff --git a/gcc/testsuite/gcc.dg/torture/pr53168.c b/gcc/testsuite/gcc.dg/torture/pr53168.c
new file mode 100644
index 00000000000..0b9a8dce609
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/pr53168.c
@@ -0,0 +1,28 @@
+/* { dg-do compile } */
+
+int a, b, c;
+unsigned *d;
+int e[1];
+void fn1 ();
+int fn2 ();
+int
+fn3 ()
+{
+ int *f = &a;
+ if (fn2 ())
+ {
+ for (; c; c++)
+ {
+ e[a] && (b = 0);
+ fn1 ();
+ if (e[a])
+ return 0;
+ }
+ for (; c <= 0; c++)
+ for (;;)
+ ;
+ }
+ else
+ e[0] = 0 != (d = f);
+ return *d;
+}
diff --git a/gcc/testsuite/gcc.dg/torture/pr53272-1.c b/gcc/testsuite/gcc.dg/torture/pr53272-1.c
new file mode 100644
index 00000000000..a8fc91cb5b3
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/pr53272-1.c
@@ -0,0 +1,39 @@
+/* { dg-do run } */
+/* { dg-additional-sources "pr53272-2.c" } */
+struct rtc_class_ops {
+ int (*f)(void *, unsigned int enabled);
+};
+
+struct rtc_device
+{
+ void *owner;
+ const struct rtc_class_ops *ops;
+ int ops_lock;
+};
+
+__attribute__ ((__noinline__, __noclone__))
+extern int foo(void *);
+__attribute__ ((__noinline__, __noclone__))
+extern void foobar(void *);
+
+__attribute__ ((__noinline__, __noclone__))
+int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
+{
+ int err;
+ asm volatile ("");
+
+ err = foo(&rtc->ops_lock);
+
+ if (err)
+ return err;
+
+ if (!rtc->ops)
+ err = -19;
+ else if (!rtc->ops->f)
+ err = -22;
+ else
+ err = rtc->ops->f(rtc->owner, enabled);
+
+ foobar(&rtc->ops_lock);
+ return err;
+}
diff --git a/gcc/testsuite/gcc.dg/torture/pr53272-2.c b/gcc/testsuite/gcc.dg/torture/pr53272-2.c
new file mode 100644
index 00000000000..f5065a785b7
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/pr53272-2.c
@@ -0,0 +1,39 @@
+__attribute__ ((__noinline__, __noclone__))
+int foo(void *x)
+{
+ asm ("");
+ return *(int *) x != 42;
+}
+
+__attribute__ ((__noinline__, __noclone__))
+void foobar(void *x)
+{
+ asm ("");
+ if (foo(x))
+ __builtin_abort();
+}
+
+struct rtc_class_ops {
+ int (*f)(void *, unsigned int enabled);
+};
+
+struct rtc_device
+{
+ void *owner;
+ struct rtc_class_ops *ops;
+ int ops_lock;
+};
+
+extern __attribute__ ((__noinline__, __noclone__))
+int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int);
+
+int main(void)
+{
+ struct rtc_class_ops ops = {(void *) 0};
+ struct rtc_device dev1 = {0, &ops, 42};
+
+ if (rtc_update_irq_enable (&dev1, 1) != -22)
+ __builtin_abort ();
+
+ __builtin_exit (0);
+}
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/ssa-pre-27.c b/gcc/testsuite/gcc.dg/tree-ssa/ssa-pre-27.c
index 1d60a301320..4149bbef6a5 100644
--- a/gcc/testsuite/gcc.dg/tree-ssa/ssa-pre-27.c
+++ b/gcc/testsuite/gcc.dg/tree-ssa/ssa-pre-27.c
@@ -17,13 +17,12 @@ int foo2 (int i, int j, int b)
int res = 0;
if (b)
res = i/j;
- /* But we fail so here because of the possibly not returning
- call in the same basic-block. */
+ /* And here, the possibly not returning call in the same basic-block
+ comes after the trapping i/j. */
res += i/j;
bar ();
return res;
}
-/* { dg-final { scan-tree-dump-times "# prephitmp" 1 "pre" } } */
-/* { dg-final { scan-tree-dump-times "# prephitmp" 2 "pre" { xfail *-*-* } } } */
+/* { dg-final { scan-tree-dump-times "# prephitmp" 2 "pre" } } */
/* { dg-final { cleanup-tree-dump "pre" } } */
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/ssa-pre-30.c b/gcc/testsuite/gcc.dg/tree-ssa/ssa-pre-30.c
new file mode 100644
index 00000000000..68a7a7f826e
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/ssa-pre-30.c
@@ -0,0 +1,27 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-pre-details" } */
+
+int f;
+int g;
+unsigned int
+foo (int b, int x)
+{
+ if (b)
+ x = *(int *)&f;
+ g = x;
+ return *(unsigned int*)&f;
+}
+float
+bar (int b, int x)
+{
+ if (b)
+ x = *(int *)&f;
+ g = x;
+ return *(float *)&f;
+}
+
+/* We should see the partial redundant loads of f even though they
+ are using different types (of the same size). */
+
+/* { dg-final { scan-tree-dump-times "Replaced MEM" 2 "pre" } } */
+/* { dg-final { cleanup-tree-dump "pre" } } */
diff --git a/gcc/testsuite/gcc.dg/vect/pr53185.c b/gcc/testsuite/gcc.dg/vect/pr53185.c
new file mode 100644
index 00000000000..af1efba8d66
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vect/pr53185.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-O3 -ftree-vectorize" } */
+unsigned short a, e;
+int *b, *d;
+int c;
+extern int fn2();
+void fn1 () {
+ void *f;
+ for (;;) {
+ fn2 ();
+ b = f;
+ e = 0;
+ for (; e < a; ++e)
+ b[e] = d[e * c];
+ }
+}
diff --git a/gcc/testsuite/gcc.dg/vect/vect-over-widen-1-big-array.c b/gcc/testsuite/gcc.dg/vect/vect-over-widen-1-big-array.c
index 2061594acc0..5df349a155e 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-over-widen-1-big-array.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-over-widen-1-big-array.c
@@ -58,7 +58,9 @@ int main (void)
return 0;
}
-/* { dg-final { scan-tree-dump-times "vect_recog_over_widening_pattern: detected" 4 "vect" } } */
+/* { dg-final { scan-tree-dump-times "vect_recog_widen_shift_pattern: detected" 2 "vect" { target vect_widen_shift } } } */
+/* { dg-final { scan-tree-dump-times "vect_recog_over_widening_pattern: detected" 2 "vect" { target vect_widen_shift } } } */
+/* { dg-final { scan-tree-dump-times "vect_recog_over_widening_pattern: detected" 4 "vect" { target { ! vect_widen_shift } } } } */
/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" } } */
/* { dg-final { cleanup-tree-dump "vect" } } */
diff --git a/gcc/testsuite/gcc.dg/vect/vect-over-widen-1.c b/gcc/testsuite/gcc.dg/vect/vect-over-widen-1.c
index 47db4a16030..e358209ab10 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-over-widen-1.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-over-widen-1.c
@@ -58,7 +58,9 @@ int main (void)
return 0;
}
-/* { dg-final { scan-tree-dump-times "vect_recog_over_widening_pattern: detected" 4 "vect" { target {! vect_sizes_32B_16B} } } } */
+/* { dg-final { scan-tree-dump-times "vect_recog_widen_shift_pattern: detected" 2 "vect" { target vect_widen_shift } } } */
+/* { dg-final { scan-tree-dump-times "vect_recog_over_widening_pattern: detected" 2 "vect" { target vect_widen_shift } } } */
+/* { dg-final { scan-tree-dump-times "vect_recog_over_widening_pattern: detected" 4 "vect" { target { { ! vect_sizes_32B_16B } && { ! vect_widen_shift } } } } } */
/* { dg-final { scan-tree-dump-times "vect_recog_over_widening_pattern: detected" 8 "vect" { target vect_sizes_32B_16B } } } */
/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" } } */
/* { dg-final { cleanup-tree-dump "vect" } } */
diff --git a/gcc/testsuite/gcc.dg/vect/vect-over-widen-4-big-array.c b/gcc/testsuite/gcc.dg/vect/vect-over-widen-4-big-array.c
index d296dc94486..900250f0471 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-over-widen-4-big-array.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-over-widen-4-big-array.c
@@ -62,7 +62,9 @@ int main (void)
return 0;
}
-/* { dg-final { scan-tree-dump-times "vect_recog_over_widening_pattern: detected" 4 "vect" } } */
+/* { dg-final { scan-tree-dump-times "vect_recog_widen_shift_pattern: detected" 2 "vect" { target vect_widen_shift } } } */
+/* { dg-final { scan-tree-dump-times "vect_recog_over_widening_pattern: detected" 2 "vect" { target vect_widen_shift } } } */
+/* { dg-final { scan-tree-dump-times "vect_recog_over_widening_pattern: detected" 4 "vect" { target { ! vect_widen_shift } } } } */
/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" } } */
/* { dg-final { cleanup-tree-dump "vect" } } */
diff --git a/gcc/testsuite/gcc.dg/vect/vect-over-widen-4.c b/gcc/testsuite/gcc.dg/vect/vect-over-widen-4.c
index d3d44430814..70f12fef7d3 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-over-widen-4.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-over-widen-4.c
@@ -62,7 +62,9 @@ int main (void)
return 0;
}
-/* { dg-final { scan-tree-dump-times "vect_recog_over_widening_pattern: detected" 4 "vect" { target {! vect_sizes_32B_16B } } } } */
+/* { dg-final { scan-tree-dump-times "vect_recog_widen_shift_pattern: detected" 2 "vect" { target vect_widen_shift } } } */
+/* { dg-final { scan-tree-dump-times "vect_recog_over_widening_pattern: detected" 2 "vect" { target vect_widen_shift } } } */
+/* { dg-final { scan-tree-dump-times "vect_recog_over_widening_pattern: detected" 4 "vect" { target { { ! vect_sizes_32B_16B } && { ! vect_widen_shift } } } } } */
/* { dg-final { scan-tree-dump-times "vect_recog_over_widening_pattern: detected" 8 "vect" { target vect_sizes_32B_16B } } } */
/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" } } */
/* { dg-final { cleanup-tree-dump "vect" } } */
diff --git a/gcc/testsuite/gcc.target/arm/pr52633.c b/gcc/testsuite/gcc.target/arm/pr52633.c
new file mode 100644
index 00000000000..b904d59d95c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/pr52633.c
@@ -0,0 +1,13 @@
+/* PR tree-optimization/52633 */
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-march=armv7-a -mfloat-abi=softfp -mfpu=neon -O -ftree-vectorize" } */
+
+void
+test (unsigned short *x, signed char *y)
+{
+ int i;
+ for (i = 0; i < 32; i++)
+ x[i] = (short) (y[i] << 5);
+}
+
diff --git a/gcc/testsuite/gcc.target/arm/pr53187.c b/gcc/testsuite/gcc.target/arm/pr53187.c
new file mode 100644
index 00000000000..648a06df5f6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/pr53187.c
@@ -0,0 +1,13 @@
+/* PR target/53187 */
+/* { dg-do compile } */
+/* { dg-options "-march=armv7-a -mfloat-abi=hard -O2" } */
+
+void bar (int);
+
+void
+foo (int x, double y, double z)
+{
+ _Bool t = z >= y;
+ if (!t || x)
+ bar (t ? 1 : 16);
+}
diff --git a/gcc/testsuite/gcc.target/i386/hle-add-acq-1.c b/gcc/testsuite/gcc.target/i386/hle-add-acq-1.c
new file mode 100644
index 00000000000..71230d52ce3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/hle-add-acq-1.c
@@ -0,0 +1,9 @@
+/* { dg-do compile } */
+/* { dg-options "-mhle" } */
+/* { dg-final { scan-assembler "lock;?\[ \n\t\]+\(xacquire\|\.byte\[ \t\]+0xf2\)\[ \t\n\]+add" } } */
+
+void
+hle_add (int *p, int v)
+{
+ __atomic_fetch_add (p, v, __ATOMIC_ACQUIRE | __ATOMIC_HLE_ACQUIRE);
+}
diff --git a/gcc/testsuite/gcc.target/i386/hle-add-rel-1.c b/gcc/testsuite/gcc.target/i386/hle-add-rel-1.c
new file mode 100644
index 00000000000..6b7cfc403e6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/hle-add-rel-1.c
@@ -0,0 +1,9 @@
+/* { dg-do compile } */
+/* { dg-options "-mhle" } */
+/* { dg-final { scan-assembler "lock;?\[ \n\t\]+\(xrelease\|\.byte\[ \t\]+0xf3\)\[ \t\n\]+add" } } */
+
+void
+hle_add (int *p, int v)
+{
+ __atomic_fetch_add (p, v, __ATOMIC_RELEASE | __ATOMIC_HLE_RELEASE);
+}
diff --git a/gcc/testsuite/gcc.target/i386/hle-and-acq-1.c b/gcc/testsuite/gcc.target/i386/hle-and-acq-1.c
new file mode 100644
index 00000000000..078f89610f2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/hle-and-acq-1.c
@@ -0,0 +1,9 @@
+/* { dg-do compile } */
+/* { dg-options "-mhle" } */
+/* { dg-final { scan-assembler "lock;?\[ \n\t\]+\(xacquire\|\.byte\[ \t\]+0xf2\)\[ \t\n\]+and" } } */
+
+void
+hle_and (int *p, int v)
+{
+ __atomic_fetch_and (p, v, __ATOMIC_ACQUIRE | __ATOMIC_HLE_ACQUIRE);
+}
diff --git a/gcc/testsuite/gcc.target/i386/hle-and-rel-1.c b/gcc/testsuite/gcc.target/i386/hle-and-rel-1.c
new file mode 100644
index 00000000000..c1025f36b81
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/hle-and-rel-1.c
@@ -0,0 +1,9 @@
+/* { dg-do compile } */
+/* { dg-options "-mhle" } */
+/* { dg-final { scan-assembler "lock;?\[ \n\t\]+\(xrelease\|\.byte\[ \t\]+0xf3\)\[ \t\n\]+and" } } */
+
+void
+hle_and (int *p, int v)
+{
+ __atomic_fetch_and (p, v, __ATOMIC_RELEASE | __ATOMIC_HLE_RELEASE);
+}
diff --git a/gcc/testsuite/gcc.target/i386/hle-cmpxchg-acq-1.c b/gcc/testsuite/gcc.target/i386/hle-cmpxchg-acq-1.c
new file mode 100644
index 00000000000..cea7c09ae43
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/hle-cmpxchg-acq-1.c
@@ -0,0 +1,9 @@
+/* { dg-do compile } */
+/* { dg-options "-march=x86-64 -mhle" } */
+/* { dg-final { scan-assembler "lock;?\[ \n\t\]+\(xacquire\|\.byte\[ \t\]+0xf2\)\[ \t\n\]+cmpxchg" } } */
+
+int
+hle_cmpxchg (int *p, int oldv, int newv)
+{
+ return __atomic_compare_exchange_n (p, &oldv, newv, 0, __ATOMIC_ACQUIRE | __ATOMIC_HLE_ACQUIRE, __ATOMIC_ACQUIRE);
+}
diff --git a/gcc/testsuite/gcc.target/i386/hle-cmpxchg-rel-1.c b/gcc/testsuite/gcc.target/i386/hle-cmpxchg-rel-1.c
new file mode 100644
index 00000000000..a2749e82ff8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/hle-cmpxchg-rel-1.c
@@ -0,0 +1,9 @@
+/* { dg-do compile } */
+/* { dg-options "-march=x86-64 -mhle" } */
+/* { dg-final { scan-assembler "lock;?\[ \n\t\]+\(xrelease\|\.byte\[ \t\]+0xf3\)\[ \t\n\]+cmpxchg" } } */
+
+int
+hle_cmpxchg (int *p, int oldv, int newv)
+{
+ return __atomic_compare_exchange_n (p, &oldv, newv, 0, __ATOMIC_RELEASE | __ATOMIC_HLE_RELEASE, __ATOMIC_ACQUIRE);
+}
diff --git a/gcc/testsuite/gcc.target/i386/hle-or-acq-1.c b/gcc/testsuite/gcc.target/i386/hle-or-acq-1.c
new file mode 100644
index 00000000000..8b28036bf1f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/hle-or-acq-1.c
@@ -0,0 +1,9 @@
+/* { dg-do compile } */
+/* { dg-options "-mhle" } */
+/* { dg-final { scan-assembler "lock;?\[ \n\t\]+\(xacquire\|\.byte\[ \t\]+0xf2\)\[ \t\n\]+or" } } */
+
+void
+hle_or (int *p, int v)
+{
+ __atomic_or_fetch (p, 1, __ATOMIC_ACQUIRE | __ATOMIC_HLE_ACQUIRE);
+}
diff --git a/gcc/testsuite/gcc.target/i386/hle-or-rel-1.c b/gcc/testsuite/gcc.target/i386/hle-or-rel-1.c
new file mode 100644
index 00000000000..939697a8562
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/hle-or-rel-1.c
@@ -0,0 +1,9 @@
+/* { dg-do compile } */
+/* { dg-options "-mhle" } */
+/* { dg-final { scan-assembler "lock;?\[ \n\t\]+\(xrelease\|\.byte\[ \t\]+0xf3\)\[ \t\n\]+or" } } */
+
+void
+hle_xor (int *p, int v)
+{
+ __atomic_fetch_or (p, v, __ATOMIC_RELEASE | __ATOMIC_HLE_RELEASE);
+}
diff --git a/gcc/testsuite/gcc.target/i386/hle-sub-acq-1.c b/gcc/testsuite/gcc.target/i386/hle-sub-acq-1.c
new file mode 100644
index 00000000000..02e94b361ed
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/hle-sub-acq-1.c
@@ -0,0 +1,9 @@
+/* { dg-do compile } */
+/* { dg-options "-mhle" } */
+/* { dg-final { scan-assembler "lock;?\[ \n\t\]+\(xacquire\|\.byte\[ \t\]+0xf2\)\[ \t\n\]+sub" } } */
+
+void
+hle_sub (int *p, int v)
+{
+ __atomic_fetch_sub (p, v, __ATOMIC_ACQUIRE | __ATOMIC_HLE_ACQUIRE);
+}
diff --git a/gcc/testsuite/gcc.target/i386/hle-sub-rel-1.c b/gcc/testsuite/gcc.target/i386/hle-sub-rel-1.c
new file mode 100644
index 00000000000..3a8c04e5d0a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/hle-sub-rel-1.c
@@ -0,0 +1,9 @@
+/* { dg-do compile } */
+/* { dg-options "-mhle" } */
+/* { dg-final { scan-assembler "lock;?\[ \n\t\]+\(xrelease\|\.byte\[ \t\]+0xf3\)\[ \t\n\]+sub" } } */
+
+void
+hle_sub (int *p, int v)
+{
+ __atomic_fetch_sub (p, v, __ATOMIC_RELEASE | __ATOMIC_HLE_RELEASE);
+}
diff --git a/gcc/testsuite/gcc.target/i386/hle-xadd-acq-1.c b/gcc/testsuite/gcc.target/i386/hle-xadd-acq-1.c
new file mode 100644
index 00000000000..4527fa9574b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/hle-xadd-acq-1.c
@@ -0,0 +1,9 @@
+/* { dg-do compile } */
+/* { dg-options "-march=x86-64 -mhle" } */
+/* { dg-final { scan-assembler "lock;?\[ \n\t\]+\(xacquire\|\.byte\[ \t\]+0xf2\)\[ \t\n\]+xadd" } } */
+
+int
+hle_xadd (int *p, int v)
+{
+ return __atomic_fetch_add (p, v, __ATOMIC_ACQUIRE | __ATOMIC_HLE_ACQUIRE);
+}
diff --git a/gcc/testsuite/gcc.target/i386/hle-xadd-rel-1.c b/gcc/testsuite/gcc.target/i386/hle-xadd-rel-1.c
new file mode 100644
index 00000000000..dd514143f0e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/hle-xadd-rel-1.c
@@ -0,0 +1,9 @@
+/* { dg-do compile } */
+/* { dg-options "-march=x86-64 -mhle" } */
+/* { dg-final { scan-assembler "lock;?\[ \n\t\]+\(xrelease\|\.byte\[ \t\]+0xf3\)\[ \t\n\]+xadd" } } */
+
+int
+hle_xadd (int *p, int v)
+{
+ return __atomic_fetch_add (p, v, __ATOMIC_RELEASE | __ATOMIC_HLE_RELEASE);
+}
diff --git a/gcc/testsuite/gcc.target/i386/hle-xchg-acq-1.c b/gcc/testsuite/gcc.target/i386/hle-xchg-acq-1.c
new file mode 100644
index 00000000000..441c454700d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/hle-xchg-acq-1.c
@@ -0,0 +1,9 @@
+/* { dg-do compile } */
+/* { dg-options "-mhle" } */
+/* { dg-final { scan-assembler "\[ \n\t\]+\(xacquire\|\.byte\[ \t\]+0xf2\)\[ \t\n\]+xchg" } } */
+
+int
+hle_xchg (int *p, int v)
+{
+ return __atomic_exchange_n (p, v, __ATOMIC_ACQUIRE | __ATOMIC_HLE_ACQUIRE);
+}
diff --git a/gcc/testsuite/gcc.target/i386/hle-xchg-rel-1.c b/gcc/testsuite/gcc.target/i386/hle-xchg-rel-1.c
new file mode 100644
index 00000000000..a6bad3335dc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/hle-xchg-rel-1.c
@@ -0,0 +1,9 @@
+/* { dg-do compile } */
+/* { dg-options "-mhle" } */
+/* { dg-final { scan-assembler "\[ \n\t\]+\(xrelease\|\.byte\[ \t\]+0xf3\)\[ \t\n\]+xchg" } } */
+
+int
+hle_xchg (int *p, int v)
+{
+ return __atomic_exchange_n (p, v, __ATOMIC_RELEASE | __ATOMIC_HLE_RELEASE);
+}
diff --git a/gcc/testsuite/gcc.target/i386/hle-xor-acq-1.c b/gcc/testsuite/gcc.target/i386/hle-xor-acq-1.c
new file mode 100644
index 00000000000..d381be92c1a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/hle-xor-acq-1.c
@@ -0,0 +1,9 @@
+/* { dg-do compile } */
+/* { dg-options "-mhle" } */
+/* { dg-final { scan-assembler "lock;?\[ \n\t\]+\(xacquire\|\.byte\[ \t\]+0xf2\)\[ \t\n\]+xor" } } */
+
+void
+hle_xor (int *p, int v)
+{
+ __atomic_fetch_xor (p, v, __ATOMIC_ACQUIRE | __ATOMIC_HLE_ACQUIRE);
+}
diff --git a/gcc/testsuite/gcc.target/i386/hle-xor-rel-1.c b/gcc/testsuite/gcc.target/i386/hle-xor-rel-1.c
new file mode 100644
index 00000000000..777bc0ac0ee
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/hle-xor-rel-1.c
@@ -0,0 +1,9 @@
+/* { dg-do compile } */
+/* { dg-options "-mhle" } */
+/* { dg-final { scan-assembler "lock;?\[ \n\t\]+\(xrelease\|\.byte\[ \t\]+0xf3\)\[ \t\n\]+xor" } } */
+
+void
+hle_xor (int *p, int v)
+{
+ __atomic_fetch_xor (p, v, __ATOMIC_RELEASE | __ATOMIC_HLE_RELEASE);
+}
diff --git a/gcc/testsuite/gcc.target/i386/pr53249.c b/gcc/testsuite/gcc.target/i386/pr53249.c
new file mode 100644
index 00000000000..9eab8bc135b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr53249.c
@@ -0,0 +1,25 @@
+/* { dg-do compile { target { ! { ia32 } } } } */
+/* { dg-options "-O2 -mx32 -ftls-model=initial-exec -maddress-mode=short" } */
+
+struct gomp_task
+{
+ struct gomp_task *parent;
+};
+
+struct gomp_thread
+{
+ int foo1;
+ struct gomp_task *task;
+};
+
+extern __thread struct gomp_thread gomp_tls_data;
+
+void
+__attribute__ ((noinline))
+gomp_end_task (void)
+{
+ struct gomp_thread *thr = &gomp_tls_data;
+ struct gomp_task *task = thr->task;
+
+ thr->task = task->parent;
+}
diff --git a/gcc/testsuite/gcc.target/i386/xop-imul32widen-vector.c b/gcc/testsuite/gcc.target/i386/xop-imul32widen-vector.c
index 0406d023df5..0730987e1a6 100644
--- a/gcc/testsuite/gcc.target/i386/xop-imul32widen-vector.c
+++ b/gcc/testsuite/gcc.target/i386/xop-imul32widen-vector.c
@@ -32,5 +32,5 @@ int main ()
exit (0);
}
-/* { dg-final { scan-assembler "vpmacsdql" } } */
+/* { dg-final { scan-assembler "vpmuldq" } } */
/* { dg-final { scan-assembler "vpmacsdqh" } } */
diff --git a/gcc/testsuite/gcc.target/ia64/pr48496.c b/gcc/testsuite/gcc.target/ia64/pr48496.c
new file mode 100644
index 00000000000..6e604336adb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/ia64/pr48496.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+typedef unsigned int UINT64 __attribute__((__mode__(__DI__)));
+
+typedef struct
+{
+ UINT64 x[2] __attribute__((aligned(16)));
+} fpreg;
+
+struct ia64_args
+{
+ fpreg fp_regs[8];
+ UINT64 gp_regs[8];
+};
+
+ffi_call(long i, long gpcount, long fpcount, void **avalue)
+{
+ struct ia64_args *stack;
+ stack = __builtin_alloca (64);
+ asm ("stf.spill %0 = %1%P0" : "=m" (*&stack->fp_regs[fpcount++])
+ : "f"(*(double *)avalue[i]));
+ stack->gp_regs[gpcount++] = *(UINT64 *)avalue[i];
+}
diff --git a/gcc/testsuite/gcc.target/ia64/pr52657.c b/gcc/testsuite/gcc.target/ia64/pr52657.c
new file mode 100644
index 00000000000..8db5881985e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/ia64/pr52657.c
@@ -0,0 +1,44 @@
+/* { dg-do compile } */
+/* { dg-options "-O" } */
+
+typedef unsigned long int mp_limb_t;
+
+typedef struct
+{
+ int _mp_alloc;
+ int _mp_size;
+ mp_limb_t *_mp_d;
+} __mpz_struct;
+
+typedef __mpz_struct mpz_t[1];
+typedef mp_limb_t * mp_ptr;
+typedef const mp_limb_t * mp_srcptr;
+typedef long int mp_size_t;
+
+extern mp_limb_t __gmpn_addmul_2 (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr);
+
+void
+__gmpn_redc_2 (mp_ptr rp, mp_ptr up, mp_srcptr mp, mp_size_t n, mp_srcptr mip)
+{
+ mp_limb_t q[2];
+ mp_size_t j;
+ mp_limb_t upn;
+
+ for (j = n - 2; j >= 0; j -= 2)
+ {
+ mp_limb_t _ph, _pl;
+ __asm__ ("xma.hu %0 = %3, %5, f0\n\t"
+ "xma.l %1 = %3, %5, f0\n\t"
+ ";;\n\t"
+ "xma.l %0 = %3, %4, %0\n\t"
+ ";;\n\t"
+ "xma.l %0 = %2, %5, %0"
+ : "=&f" (q[1]), "=&f" (q[0])
+ : "f" (mip[1]), "f" (mip[0]), "f" (up[1]), "f" (up[0]));
+ upn = up[n];
+ up[1] = __gmpn_addmul_2 (up, mp, n, q);
+ up[0] = up[n];
+ up[n] = upn;
+ up += 2;
+ }
+}
diff --git a/gcc/testsuite/gcc.target/powerpc/pr53199.c b/gcc/testsuite/gcc.target/powerpc/pr53199.c
new file mode 100644
index 00000000000..89a0cad06fe
--- /dev/null
+++ b/gcc/testsuite/gcc.target/powerpc/pr53199.c
@@ -0,0 +1,50 @@
+/* { dg-do compile { target { powerpc*-*-* } } } */
+/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
+/* { dg-options "-O2 -mcpu=power6 -mavoid-indexed-addresses" } */
+/* { dg-final { scan-assembler-times "lwbrx" 6 } } */
+/* { dg-final { scan-assembler-times "stwbrx" 6 } } */
+
+/* PR 51399: bswap gets an error if -mavoid-indexed-addresses was used in
+ creating the two lwbrx instructions. */
+
+long long
+load64_reverse_1 (long long *p)
+{
+ return __builtin_bswap64 (*p);
+}
+
+long long
+load64_reverse_2 (long long *p)
+{
+ return __builtin_bswap64 (p[1]);
+}
+
+long long
+load64_reverse_3 (long long *p, int i)
+{
+ return __builtin_bswap64 (p[i]);
+}
+
+void
+store64_reverse_1 (long long *p, long long x)
+{
+ *p = __builtin_bswap64 (x);
+}
+
+void
+store64_reverse_2 (long long *p, long long x)
+{
+ p[1] = __builtin_bswap64 (x);
+}
+
+void
+store64_reverse_3 (long long *p, long long x, int i)
+{
+ p[i] = __builtin_bswap64 (x);
+}
+
+long long
+reg_reverse (long long x)
+{
+ return __builtin_bswap64 (x);
+}
diff --git a/gcc/testsuite/gcc.target/s390/20030123-1.c b/gcc/testsuite/gcc.target/s390/20030123-1.c
index 96ac6f76ced..c426866fd5a 100644
--- a/gcc/testsuite/gcc.target/s390/20030123-1.c
+++ b/gcc/testsuite/gcc.target/s390/20030123-1.c
@@ -12,7 +12,7 @@ void test (void)
char *p = alloca (4096);
long idx;
- asm ("" : "=r" (idx) : : "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "12");
+ asm volatile ("" : "=r" (idx) : : "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "12");
func (p + idx + 1);
}
diff --git a/gcc/testsuite/gfortran.dg/class_array_13.f90 b/gcc/testsuite/gfortran.dg/class_array_13.f90
new file mode 100644
index 00000000000..567bbf81546
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/class_array_13.f90
@@ -0,0 +1,26 @@
+! { dg-do compile }
+! { dg-options "-fcoarray=single" }
+!
+! PR fortran/41587
+!
+
+type t0
+ integer :: j = 42
+end type t0
+
+type t
+ integer :: i
+ class(t0), allocatable :: foo(3) ! { dg-error "must have a deferred shape" }
+end type t
+
+type t2
+ integer :: i
+ class(t0), pointer :: foo(3) ! { dg-error "must have a deferred shape" }
+end type t2
+
+type t3
+ integer :: i
+ class(t0), allocatable :: foo[3] ! { dg-error "Upper bound of last coarray dimension must be '\\*'" }
+end type t3
+
+end
diff --git a/gcc/testsuite/gfortran.dg/constructor_7.f90 b/gcc/testsuite/gfortran.dg/constructor_7.f90
new file mode 100644
index 00000000000..f3d6605a34c
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/constructor_7.f90
@@ -0,0 +1,48 @@
+! { dg-do compile }
+! { dg-options "-std=f95" }
+!
+! PR fortran/53111
+!
+
+! ------------ INVALID ONE ------------------------
+
+module m
+type t
+ integer :: i
+end type t
+end
+
+module m2
+ interface t
+ module procedure sub
+ end interface t
+contains
+ integer function sub()
+ sub = 4
+ end function sub
+end module m2
+
+! Note: The following is formally valid as long as "t" is not used.
+! For simplicity, -std=f95 will give an error.
+! It is unlikely that a real-world program is rejected with -std=f95
+! because of that.
+
+use m ! { dg-error "Fortran 2003: Generic name 't' of function 'sub' at .1. being the same name as derived type at" }
+use m2 ! { dg-error "Fortran 2003: Generic name 't' of function 'sub' at .1. being the same name as derived type at" }
+! i = sub() ! << Truly invalid in F95, valid in F2003
+end
+
+! ------------ INVALID TWO ------------------------
+
+module m3
+type t2 ! { dg-error "Fortran 2003: Generic name 't2' of function 'sub2' at .1. being the same name as derived type at" }
+ integer :: i
+end type t2
+ interface t2
+ module procedure sub2
+ end interface t2
+contains
+ integer function sub2() ! { dg-error "Fortran 2003: Generic name 't2' of function 'sub2' at .1. being the same name as derived type at" }
+ sub2 = 4
+ end function sub2
+end module m3
diff --git a/gcc/testsuite/gfortran.dg/constructor_8.f90 b/gcc/testsuite/gfortran.dg/constructor_8.f90
new file mode 100644
index 00000000000..ff0dff7b868
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/constructor_8.f90
@@ -0,0 +1,22 @@
+! { dg-do compile }
+! { dg-options "-std=f95" }
+!
+! PR fortran/53111
+!
+! Contributed by Jacob Middag, reduced by Janus Weil.
+!
+
+module a
+ type :: my
+ real :: x
+ end type
+end module
+
+module b
+ use a
+end module
+
+program test
+ use a
+ use b
+end program
diff --git a/gcc/testsuite/gfortran.dg/mod_large_1.f90 b/gcc/testsuite/gfortran.dg/mod_large_1.f90
new file mode 100644
index 00000000000..1047ad62e98
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/mod_large_1.f90
@@ -0,0 +1,16 @@
+! { dg-do run }
+! PR fortran/24518
+! MOD/MODULO of large arguments.
+! The naive algorithm goes pear-shaped for large arguments, instead
+! use fmod.
+! Here we test only with constant arguments (evaluated with
+! mpfr_fmod), as we don't want to cause failures on targets with a
+! crappy libm.
+program mod_large_1
+ implicit none
+ real :: r1
+ r1 = mod (1e22, 1.7)
+ if (abs(r1 - 0.995928764) > 1e-5) call abort
+ r1 = modulo (1e22, -1.7)
+ if (abs(r1 + 0.704071283) > 1e-5) call abort
+end program mod_large_1
diff --git a/gcc/testsuite/gfortran.dg/mod_sign0_1.f90 b/gcc/testsuite/gfortran.dg/mod_sign0_1.f90
new file mode 100644
index 00000000000..61ef5fd046c
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/mod_sign0_1.f90
@@ -0,0 +1,54 @@
+! { dg-do run }
+! PR fortran/49010
+! MOD/MODULO sign of zero.
+
+! We wish to provide the following guarantees:
+
+! MOD(A, P): The result has the sign of A and a magnitude less than
+! that of P.
+
+! MODULO(A, P): The result has the sign of P and a magnitude less than
+! that of P.
+
+! Here we test only with constant arguments (evaluated with
+! mpfr_fmod), as we don't want to cause failures on targets with a
+! crappy libm. But, a target where fmod follows C99 Annex F is
+! fine. Also, targets where GCC inline expands fmod (such as x86(-64))
+! are also fine.
+program mod_sign0_1
+ implicit none
+ real :: r, t
+
+ r = mod (4., 2.)
+ t = sign (1., r)
+ if (t < 0.) call abort
+
+ r = modulo (4., 2.)
+ t = sign (1., r)
+ if (t < 0.) call abort
+
+ r = mod (-4., 2.)
+ t = sign (1., r)
+ if (t > 0.) call abort
+
+ r = modulo (-4., 2.)
+ t = sign (1., r)
+ if (t < 0.) call abort
+
+ r = mod (4., -2.)
+ t = sign (1., r)
+ if (t < 0.) call abort
+
+ r = modulo (4., -2.)
+ t = sign (1., r)
+ if (t > 0.) call abort
+
+ r = mod (-4., -2.)
+ t = sign (1., r)
+ if (t > 0.) call abort
+
+ r = modulo (-4., -2.)
+ t = sign (1., r)
+ if (t > 0.) call abort
+
+end program mod_sign0_1
diff --git a/gcc/testsuite/gfortran.dg/pointer_intent_7.f90 b/gcc/testsuite/gfortran.dg/pointer_intent_7.f90
new file mode 100644
index 00000000000..c09eb2b5ffa
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/pointer_intent_7.f90
@@ -0,0 +1,45 @@
+! { dg-do compile }
+!
+! PR fortran/
+!
+! Contributed by Neil Carlson
+!
+! Check whether passing an intent(in) pointer
+! to an intent(inout) nonpointer is allowed
+!
+module modA
+ type :: typeA
+ integer, pointer :: ptr
+ end type
+contains
+ subroutine foo (a,b,c)
+ type(typeA), intent(in) :: a
+ type(typeA), intent(in) , pointer :: b
+ class(typeA), intent(in) , pointer :: c
+
+ call bar (a%ptr)
+ call bar2 (b)
+ call bar3 (b)
+ call bar2 (c)
+ call bar3 (c)
+ call bar2p (b) ! { dg-error "INTENT\\(IN\\) in pointer association context \\(actual argument to INTENT = OUT/INOUT" }
+ call bar3p (b) ! { dg-error "INTENT\\(IN\\) in pointer association context \\(actual argument to INTENT = OUT/INOUT" }
+ call bar2p (c) ! { dg-error "INTENT\\(IN\\) in pointer association context \\(actual argument to INTENT = OUT/INOUT" }
+ call bar3p (c) ! { dg-error "INTENT\\(IN\\) in pointer association context \\(actual argument to INTENT = OUT/INOUT" }
+ end subroutine
+ subroutine bar (n)
+ integer, intent(inout) :: n
+ end subroutine
+ subroutine bar2 (n)
+ type(typeA), intent(inout) :: n
+ end subroutine
+ subroutine bar3 (n)
+ class(typeA), intent(inout) :: n
+ end subroutine
+ subroutine bar2p (n)
+ type(typeA), intent(inout), pointer :: n
+ end subroutine
+ subroutine bar3p (n)
+ class(typeA), intent(inout), pointer :: n
+ end subroutine
+end module
diff --git a/gcc/testsuite/gfortran.dg/pr52621.f90 b/gcc/testsuite/gfortran.dg/pr52621.f90
index d305e4db9b6..b45d3edc97d 100644
--- a/gcc/testsuite/gfortran.dg/pr52621.f90
+++ b/gcc/testsuite/gfortran.dg/pr52621.f90
@@ -1,5 +1,5 @@
! { dg-do compile }
-! { dg-options "-O2 -fprefetch-loop-arrays" }
+! { dg-options "-O2 -fprefetch-loop-arrays -w" }
SUBROUTINE GHDSYM(IZ,IS,LMMAX,S,LMS,Y,L2M,DRL,NLAY2,K0,DCUT)!,
!
diff --git a/gcc/testsuite/gfortran.dg/public_private_module_5.f90 b/gcc/testsuite/gfortran.dg/public_private_module_5.f90
new file mode 100644
index 00000000000..9c9d15dbd76
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/public_private_module_5.f90
@@ -0,0 +1,40 @@
+! { dg-do compile }
+! { dg-options "-O3" }
+!
+! PR fortran/53175
+!
+
+MODULE ENERGY_FUNCTION
+ IMPLICIT NONE
+
+ TYPE PARAM
+ PRIVATE
+ INTEGER :: WHICH_VECTOR
+ END TYPE PARAM
+
+ INTEGER, PRIVATE :: DIM2
+ INTEGER, PRIVATE :: DIM5
+
+ private :: specific
+ interface gen
+ module procedure specific
+ end interface gen
+
+ CONTAINS
+
+ FUNCTION ENERGY_FUNCTION_CURRENT_ARGS()
+ INTEGER, DIMENSION(DIM2) :: ENERGY_FUNCTION_CURRENT_ARGS
+ END FUNCTION ENERGY_FUNCTION_CURRENT_ARGS
+
+ FUNCTION ENERGY_FUNCTION_GET_PARAMS()
+ TYPE(PARAM), DIMENSION(DIM2) :: ENERGY_FUNCTION_GET_PARAMS
+ END FUNCTION ENERGY_FUNCTION_GET_PARAMS
+
+ function specific()
+ character(len=dim5) :: specific
+ end function specific
+END MODULE ENERGY_FUNCTION
+
+! { dg-final { scan-assembler "__energy_function_MOD_dim2" } }
+! { dg-final { scan-assembler "__energy_function_MOD_dim5" } }
+
diff --git a/gcc/testsuite/gfortran.dg/pure_formal_3.f90 b/gcc/testsuite/gfortran.dg/pure_formal_3.f90
new file mode 100644
index 00000000000..5d08057b372
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/pure_formal_3.f90
@@ -0,0 +1,28 @@
+! { dg-do compile }
+!
+! Clean up, made when working on PR fortran/52864
+!
+! Test some PURE and intent checks - related to pointers.
+module m
+ type t
+ end type t
+ integer, pointer :: x
+ class(t), pointer :: y
+end module m
+
+pure subroutine foo()
+ use m
+ call bar(x) ! { dg-error "can not appear in a variable definition context" }
+ call bar2(x) ! { dg-error "is local to a PURE procedure and has the POINTER attribute" }
+ call bb(y) ! { dg-error "is local to a PURE procedure and has the POINTER attribute" }
+contains
+ pure subroutine bar(x)
+ integer, pointer, intent(inout) :: x
+ end subroutine
+ pure subroutine bar2(x)
+ integer, pointer :: x
+ end subroutine
+ pure subroutine bb(x)
+ class(t), pointer, intent(in) :: x
+ end subroutine
+end subroutine
diff --git a/gcc/testsuite/gfortran.dg/select_type_26.f03 b/gcc/testsuite/gfortran.dg/select_type_26.f03
new file mode 100644
index 00000000000..7d9c43739fe
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/select_type_26.f03
@@ -0,0 +1,110 @@
+! { dg-do run }
+! Tests fix for PR41600 and further SELECT TYPE functionality.
+!
+! Reported by Tobias Burnus <burnus@gcc.gnu.org>
+!
+ implicit none
+ type t0
+ integer :: j = 42
+ end type t0
+
+ type, extends(t0) :: t1
+ integer :: k = 99
+ end type t1
+
+ type t
+ integer :: i
+ class(t0), allocatable :: foo(:)
+ end type t
+
+ type t_scalar
+ integer :: i
+ class(t0), allocatable :: foo
+ end type t_scalar
+
+ type(t) :: m
+ type(t_scalar) :: m1(4)
+ integer :: n
+
+! Test the fix for PR41600 itself - first with m%foo of declared type.
+ allocate(m%foo(3), source = [(t0(n), n = 1,3)])
+ select type(bar => m%foo)
+ type is(t0)
+ if (any (bar%j .ne. [1,2,3])) call abort
+ type is(t1)
+ call abort
+ end select
+
+ deallocate(m%foo)
+ allocate(m%foo(3), source = [(t1(n, n*10), n = 4,6)])
+
+! Then with m%foo of another dynamic type.
+ select type(bar => m%foo)
+ type is(t0)
+ call abort
+ type is(t1)
+ if (any (bar%k .ne. [40,50,60])) call abort
+ end select
+
+! Try it with a selector array section.
+ select type(bar => m%foo(2:3))
+ type is(t0)
+ call abort
+ type is(t1)
+ if (any (bar%k .ne. [50,60])) call abort
+ end select
+
+! Try it with a selector array element.
+ select type(bar => m%foo(2))
+ type is(t0)
+ call abort
+ type is(t1)
+ if (bar%k .ne. 50) call abort
+ end select
+
+! Now try class is and a selector which is an array section of an associate name.
+ select type(bar => m%foo)
+ type is(t0)
+ call abort
+ class is (t1)
+ if (any (bar%j .ne. [4,5,6])) call abort
+ select type (foobar => bar(3:2:-1))
+ type is (t1)
+ if (any (foobar%k .ne. [60,50])) call abort
+ end select
+ end select
+
+! Now try class is and a selector which is an array element of an associate name.
+ select type(bar => m%foo)
+ type is(t0)
+ call abort
+ class is (t1)
+ if (any (bar%j .ne. [4,5,6])) call abort
+ select type (foobar => bar(2))
+ type is (t1)
+ if (foobar%k .ne. 50) call abort
+ end select
+ end select
+
+! Check class a component of an element of an array. Note that an array of such
+! objects cannot be allowed since the elements could have different dynamic types.
+! (F2003 C614)
+ do n = 1, 2
+ allocate(m1(n)%foo, source = t1(n*99, n*999))
+ end do
+ do n = 3, 4
+ allocate(m1(n)%foo, source = t0(n*99))
+ end do
+ select type(bar => m1(3)%foo)
+ type is(t0)
+ if (bar%j .ne. 297) call abort
+ type is(t1)
+ call abort
+ end select
+ select type(bar => m1(1)%foo)
+ type is(t0)
+ call abort
+ type is(t1)
+ if (bar%k .ne. 999) call abort
+ end select
+end
diff --git a/gcc/testsuite/gfortran.dg/select_type_27.f03 b/gcc/testsuite/gfortran.dg/select_type_27.f03
new file mode 100644
index 00000000000..5bd3c1a357b
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/select_type_27.f03
@@ -0,0 +1,115 @@
+! { dg-do run }
+! Tests fix for PR41600 and further SELECT TYPE functionality.
+! This differs from the original and select_type_26.f03 by 'm'
+! being a class object rather than a derived type.
+!
+! Reported by Tobias Burnus <burnus@gcc.gnu.org>
+!
+ implicit none
+ type t0
+ integer :: j = 42
+ end type t0
+
+ type, extends(t0) :: t1
+ integer :: k = 99
+ end type t1
+
+ type t
+ integer :: i
+ class(t0), allocatable :: foo(:)
+ end type t
+
+ type t_scalar
+ integer :: i
+ class(t0), allocatable :: foo
+ end type t_scalar
+
+ class(t), allocatable :: m
+ class(t_scalar), allocatable :: m1(:)
+ integer :: n
+
+ allocate (m)
+ allocate (m1(4))
+
+! Test the fix for PR41600 itself - first with m%foo of declared type.
+ allocate(m%foo(3), source = [(t0(n), n = 1,3)])
+ select type(bar => m%foo)
+ type is(t0)
+ if (any (bar%j .ne. [1,2,3])) call abort
+ type is(t1)
+ call abort
+ end select
+
+ deallocate(m%foo)
+ allocate(m%foo(3), source = [(t1(n, n*10), n = 4,6)])
+
+! Then with m%foo of another dynamic type.
+ select type(bar => m%foo)
+ type is(t0)
+ call abort
+ type is(t1)
+ if (any (bar%k .ne. [40,50,60])) call abort
+ end select
+
+! Try it with a selector array section.
+ select type(bar => m%foo(2:3))
+ type is(t0)
+ call abort
+ type is(t1)
+ if (any (bar%k .ne. [50,60])) call abort
+ end select
+
+! Try it with a selector array element.
+ select type(bar => m%foo(2))
+ type is(t0)
+ call abort
+ type is(t1)
+ if (bar%k .ne. 50) call abort
+ end select
+
+! Now try class is and a selector which is an array section of an associate name.
+ select type(bar => m%foo)
+ type is(t0)
+ call abort
+ class is (t1)
+ if (any (bar%j .ne. [4,5,6])) call abort
+ select type (foobar => bar(3:2:-1))
+ type is (t1)
+ if (any (foobar%k .ne. [60,50])) call abort
+ end select
+ end select
+
+! Now try class is and a selector which is an array element of an associate name.
+ select type(bar => m%foo)
+ type is(t0)
+ call abort
+ class is (t1)
+ if (any (bar%j .ne. [4,5,6])) call abort
+ select type (foobar => bar(2))
+ type is (t1)
+ if (foobar%k .ne. 50) call abort
+ end select
+ end select
+
+! Check class a component of an element of an array. Note that an array of such
+! objects cannot be allowed since the elements could have different dynamic types.
+! (F2003 C614)
+ do n = 1, 2
+ allocate(m1(n)%foo, source = t1(n*99, n*999))
+ end do
+ do n = 3, 4
+ allocate(m1(n)%foo, source = t0(n*99))
+ end do
+ select type(bar => m1(3)%foo)
+ type is(t0)
+ if (bar%j .ne. 297) call abort
+ type is(t1)
+ call abort
+ end select
+ select type(bar => m1(1)%foo)
+ type is(t0)
+ call abort
+ type is(t1)
+ if (bar%k .ne. 999) call abort
+ end select
+end
diff --git a/gcc/testsuite/gfortran.dg/select_type_28.f03 b/gcc/testsuite/gfortran.dg/select_type_28.f03
new file mode 100644
index 00000000000..9cab7214491
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/select_type_28.f03
@@ -0,0 +1,36 @@
+! { dg-do compile }
+!
+! Fix for PR53191
+!
+ implicit none
+ type t0
+ integer :: j = 42
+ end type t0
+ type, extends(t0) :: t1
+ integer :: k = 99
+ end type t1
+ type t
+ integer :: i
+ class(t0), allocatable :: foo
+ end type t
+ type(t) :: m(4)
+ integer :: n
+
+ do n = 1, 2
+ allocate(m(n)%foo, source = t0(n*99))
+ end do
+ do n = 3, 4
+ allocate(m(n)%foo, source = t1(n*99, n*999))
+ end do
+
+! An array of objects with ultimate class components cannot be a selector
+! since each element could have a different dynamic type. (F2003 C614)
+
+ select type(bar => m%foo) ! { dg-error "part reference with nonzero rank" }
+ type is(t0)
+ if (any (bar%j .ne. [99, 198, 297, 396])) call abort
+ type is(t1)
+ call abort
+ end select
+
+end
diff --git a/gcc/testsuite/gfortran.dg/typebound_operator_15.f90 b/gcc/testsuite/gfortran.dg/typebound_operator_15.f90
new file mode 100644
index 00000000000..ca4d45c7017
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/typebound_operator_15.f90
@@ -0,0 +1,78 @@
+! { dg-do run }
+!
+! PR fortran/53255
+!
+! Contributed by Reinhold Bader.
+!
+! Before TYPE(ext)'s .tr. wrongly called the base type's trace
+! instead of ext's trace_ext.
+!
+module mod_base
+ implicit none
+ private
+ integer, public :: base_cnt = 0
+ type, public :: base
+ private
+ real :: r(2,2) = reshape( (/ 1.0, 2.0, 3.0, 4.0 /), (/ 2, 2 /))
+ contains
+ procedure, private :: trace
+ generic :: operator(.tr.) => trace
+ end type base
+contains
+ complex function trace(this)
+ class(base), intent(in) :: this
+ base_cnt = base_cnt + 1
+! write(*,*) 'executing base'
+ trace = this%r(1,1) + this%r(2,2)
+ end function trace
+end module mod_base
+
+module mod_ext
+ use mod_base
+ implicit none
+ private
+ integer, public :: ext_cnt = 0
+ public :: base, base_cnt
+ type, public, extends(base) :: ext
+ private
+ real :: i(2,2) = reshape( (/ 1.0, 1.0, 1.0, 1.5 /), (/ 2, 2 /))
+ contains
+ procedure, private :: trace => trace_ext
+ end type ext
+contains
+ complex function trace_ext(this)
+ class(ext), intent(in) :: this
+
+! the following should be executed through invoking .tr. p below
+! write(*,*) 'executing override'
+ ext_cnt = ext_cnt + 1
+ trace_ext = .tr. this%base + (0.0, 1.0) * ( this%i(1,1) + this%i(2,2) )
+ end function trace_ext
+
+end module mod_ext
+program test_override
+ use mod_ext
+ implicit none
+ type(base) :: o
+ type(ext) :: p
+ real :: r
+
+ ! Note: ext's ".tr." (trace_ext) calls also base's "trace"
+
+! write(*,*) .tr. o
+! write(*,*) .tr. p
+ if (base_cnt /= 0 .or. ext_cnt /= 0) call abort ()
+ r = .tr. o
+ if (base_cnt /= 1 .or. ext_cnt /= 0) call abort ()
+ r = .tr. p
+ if (base_cnt /= 2 .or. ext_cnt /= 1) call abort ()
+
+ if (abs(.tr. o - 5.0 ) < 1.0e-6 .and. abs( .tr. p - (5.0,2.5)) < 1.0e-6) &
+ then
+ if (base_cnt /= 4 .or. ext_cnt /= 2) call abort ()
+! write(*,*) 'OK'
+ else
+ call abort()
+! write(*,*) 'FAIL'
+ end if
+end program test_override
diff --git a/gcc/testsuite/gfortran.dg/vect/rnflow-trs2a2.f90 b/gcc/testsuite/gfortran.dg/vect/fast-math-rnflow-trs2a2.f90
index 1d13cea80e0..1d13cea80e0 100644
--- a/gcc/testsuite/gfortran.dg/vect/rnflow-trs2a2.f90
+++ b/gcc/testsuite/gfortran.dg/vect/fast-math-rnflow-trs2a2.f90
diff --git a/gcc/testsuite/gnat.dg/discr36.adb b/gcc/testsuite/gnat.dg/discr36.adb
new file mode 100644
index 00000000000..64d95558e88
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/discr36.adb
@@ -0,0 +1,19 @@
+-- { dg-do compile }
+
+with Discr36_Pkg;
+
+package body Discr36 is
+
+ function N return Natural is begin return 0; end;
+
+ type Arr is array (1 .. N) of R;
+
+ function My_Func is new Discr36_Pkg.Func (Arr);
+
+ procedure Proc is
+ A : constant Arr := My_Func;
+ begin
+ null;
+ end;
+
+end Discr36;
diff --git a/gcc/testsuite/gnat.dg/discr36.ads b/gcc/testsuite/gnat.dg/discr36.ads
new file mode 100644
index 00000000000..9e39eb1c7c9
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/discr36.ads
@@ -0,0 +1,12 @@
+package Discr36 is
+
+ type R (D : Boolean := True) is record
+ case D is
+ when True => I : Integer;
+ when False => null;
+ end case;
+ end record;
+
+ function N return Natural;
+
+end Discr36;
diff --git a/gcc/testsuite/gnat.dg/discr36_pkg.adb b/gcc/testsuite/gnat.dg/discr36_pkg.adb
new file mode 100644
index 00000000000..5398a22e39d
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/discr36_pkg.adb
@@ -0,0 +1,10 @@
+package body Discr36_Pkg is
+
+ function Func return T is
+ Ret : T;
+ pragma Warnings (Off, Ret);
+ begin
+ return Ret;
+ end;
+
+end Discr36_Pkg;
diff --git a/gcc/testsuite/gnat.dg/discr36_pkg.ads b/gcc/testsuite/gnat.dg/discr36_pkg.ads
new file mode 100644
index 00000000000..49792d46183
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/discr36_pkg.ads
@@ -0,0 +1,7 @@
+package Discr36_Pkg is
+
+ generic
+ type T is private;
+ function Func return T;
+
+end Discr36_Pkg;
diff --git a/gcc/testsuite/gnat.dg/lto11.adb b/gcc/testsuite/gnat.dg/lto11.adb
new file mode 100644
index 00000000000..ad0b8db30a3
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/lto11.adb
@@ -0,0 +1,20 @@
+-- { dg-do compile }
+-- { dg-options "-flto" { target lto } }
+
+with Ada.Streams; use Ada.Streams;
+
+package body Lto11 is
+
+ procedure Write
+ (S : not null access Root_Stream_Type'Class;
+ V : Vector)
+ is
+ subtype M_SEA is Stream_Element_Array (1 .. V'Size / Stream_Element'Size);
+ Bytes : M_SEA;
+ for Bytes'Address use V'Address;
+ pragma Import (Ada, Bytes);
+ begin
+ Ada.Streams.Write (S.all, Bytes);
+ end;
+
+end Lto11;
diff --git a/gcc/testsuite/gnat.dg/lto11.ads b/gcc/testsuite/gnat.dg/lto11.ads
new file mode 100644
index 00000000000..386d5ac4620
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/lto11.ads
@@ -0,0 +1,9 @@
+with Ada.Streams; use Ada.Streams;
+
+package Lto11 is
+
+ type Vector is array (Positive range <>) of Float;
+
+ procedure Write (S : not null access Root_Stream_Type'Class; V : Vector);
+
+end Lto11;
diff --git a/gcc/testsuite/gnat.dg/specs/renamings.ads b/gcc/testsuite/gnat.dg/specs/renaming1.ads
index 74579529980..b97605aa7d2 100644
--- a/gcc/testsuite/gnat.dg/specs/renamings.ads
+++ b/gcc/testsuite/gnat.dg/specs/renaming1.ads
@@ -1,4 +1,6 @@
-package Renamings is
+-- { dg-do compile }
+
+package Renaming1 is
package Inner is
procedure PI (X : Integer);
@@ -11,4 +13,4 @@ package Renamings is
procedure Q (X : Float);
procedure Q (X : Integer) renames Inner.PI;
pragma Convention (C, Q); -- { dg-error "non-local entity" }
-end Renamings;
+end Renaming1;
diff --git a/gcc/testsuite/gnat.dg/specs/renaming2.ads b/gcc/testsuite/gnat.dg/specs/renaming2.ads
new file mode 100644
index 00000000000..5f199c61345
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/specs/renaming2.ads
@@ -0,0 +1,11 @@
+-- { dg-do compile }
+
+with Renaming2_Pkg1;
+
+package Renaming2 is
+
+ type T is null record;
+
+ package Iter is new Renaming2_Pkg1.GP.Inner (T);
+
+end Renaming2;
diff --git a/gcc/testsuite/gnat.dg/specs/renaming2_pkg1.ads b/gcc/testsuite/gnat.dg/specs/renaming2_pkg1.ads
new file mode 100644
index 00000000000..45d5436ffc8
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/specs/renaming2_pkg1.ads
@@ -0,0 +1,17 @@
+-- { dg-excess-errors "no code generated" }
+
+with Renaming2_Pkg2;
+with Renaming2_Pkg3;
+with Renaming2_Pkg4;
+
+package Renaming2_Pkg1 is
+
+ package Impl is new
+ Renaming2_Pkg3 (Base_Index_T => Positive, Value_T => Renaming2_Pkg2.Root);
+
+ use Impl;
+
+ package GP is new
+ Renaming2_Pkg4 (Length_T => Impl.Length_T, Value_T => Renaming2_Pkg2.Root);
+
+end Renaming2_Pkg1;
diff --git a/gcc/testsuite/gnat.dg/specs/renaming2_pkg2.ads b/gcc/testsuite/gnat.dg/specs/renaming2_pkg2.ads
new file mode 100644
index 00000000000..38e01898d51
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/specs/renaming2_pkg2.ads
@@ -0,0 +1,14 @@
+package Renaming2_Pkg2 is
+
+ type Root is private;
+
+private
+
+ type Root (D : Boolean := False) is record
+ case D is
+ when True => N : Natural;
+ when False => null;
+ end case;
+ end record;
+
+end Renaming2_Pkg2;
diff --git a/gcc/testsuite/gnat.dg/specs/renaming2_pkg3.ads b/gcc/testsuite/gnat.dg/specs/renaming2_pkg3.ads
new file mode 100644
index 00000000000..93ec0dfcf37
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/specs/renaming2_pkg3.ads
@@ -0,0 +1,25 @@
+-- { dg-excess-errors "no code generated" }
+
+generic
+
+ type Base_Index_T is range <>;
+
+ type Value_T is private;
+
+package Renaming2_Pkg3 is
+
+ type T is private;
+
+ subtype Length_T is Base_Index_T range 0 .. Base_Index_T'Last;
+
+ function Value (L : Length_T) return Value_T;
+
+ function Next return Length_T;
+
+private
+
+ type Obj_T is null record;
+
+ type T is access Obj_T;
+
+end Renaming2_Pkg3;
diff --git a/gcc/testsuite/gnat.dg/specs/renaming2_pkg4.adb b/gcc/testsuite/gnat.dg/specs/renaming2_pkg4.adb
new file mode 100644
index 00000000000..50dd5364772
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/specs/renaming2_pkg4.adb
@@ -0,0 +1,12 @@
+package body Renaming2_Pkg4 is
+
+ package body Inner is
+
+ function Next_Value return Value_T is
+ Next_Value : Value_T renames Value (Next);
+ begin
+ return Next_Value;
+ end Next_Value;
+
+ end Inner;
+end Renaming2_Pkg4;
diff --git a/gcc/testsuite/gnat.dg/specs/renaming2_pkg4.ads b/gcc/testsuite/gnat.dg/specs/renaming2_pkg4.ads
new file mode 100644
index 00000000000..abeffcc7da8
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/specs/renaming2_pkg4.ads
@@ -0,0 +1,25 @@
+-- { dg-excess-errors "no code generated" }
+
+generic
+
+ type Length_T is range <>;
+
+ with function Next return Length_T is <>;
+
+ type Value_T is private;
+
+ with function Value (L : Length_T) return Value_T is <>;
+
+package Renaming2_Pkg4 is
+
+ generic
+ type T is private;
+ package Inner is
+
+ type Slave_T is tagged null record;
+
+ function Next_Value return Value_T;
+
+ end Inner;
+
+end Renaming2_Pkg4;
diff --git a/gcc/testsuite/gnat.dg/warn7.adb b/gcc/testsuite/gnat.dg/warn7.adb
new file mode 100644
index 00000000000..93c14f4f347
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/warn7.adb
@@ -0,0 +1,18 @@
+-- { dg-do compile }
+
+procedure Warn7 is
+
+ procedure Nested;
+ pragma No_Return (Nested);
+
+ procedure Nested is
+ begin
+ raise Constraint_Error;
+ exception
+ when Constraint_Error =>
+ raise;
+ end;
+
+begin
+ Nested;
+end;
diff --git a/gcc/tlink.c b/gcc/tlink.c
index 67c7086ceb1..c4c6afc0148 100644
--- a/gcc/tlink.c
+++ b/gcc/tlink.c
@@ -859,4 +859,10 @@ do_tlink (char **ld_argv, char **object_lst ATTRIBUTE_UNUSED)
error ("ld returned %d exit status", exit);
collect_exit (exit);
}
+ else
+ {
+ /* We have just successfully produced an output file, so assume that we
+ may unlink it if need be for now on. */
+ may_unlink_output_file = true;
+ }
}
diff --git a/gcc/toplev.c b/gcc/toplev.c
index af163035a9c..a53de531d3e 100644
--- a/gcc/toplev.c
+++ b/gcc/toplev.c
@@ -151,10 +151,6 @@ HOST_WIDE_INT random_seed;
/* -f flags. */
-/* Nonzero means make permerror produce warnings instead of errors. */
-
-int flag_permissive = 0;
-
/* When non-NULL, indicates that whenever space is allocated on the
stack, the resulting stack pointer must not pass this
address---that is, for stacks that grow downward, the stack pointer
@@ -380,7 +376,8 @@ wrapup_global_declaration_1 (tree decl)
bool
wrapup_global_declaration_2 (tree decl)
{
- if (TREE_ASM_WRITTEN (decl) || DECL_EXTERNAL (decl))
+ if (TREE_ASM_WRITTEN (decl) || DECL_EXTERNAL (decl)
+ || (TREE_CODE (decl) == VAR_DECL && DECL_HAS_VALUE_EXPR_P (decl)))
return false;
/* Don't write out static consts, unless we still need them.
@@ -592,7 +589,6 @@ compile_file (void)
basically finished. */
if (in_lto_p || !flag_lto || flag_fat_lto_objects)
{
- varpool_output_variables ();
finish_aliases_2 ();
/* Likewise for mudflap static object registrations. */
diff --git a/gcc/trans-mem.c b/gcc/trans-mem.c
index 7d0e3172c3e..0a021b421e3 100644
--- a/gcc/trans-mem.c
+++ b/gcc/trans-mem.c
@@ -1570,8 +1570,8 @@ lower_transaction (gimple_stmt_iterator *gsi, struct walk_stmt_info *wi)
us some idea of what we're dealing with. */
memset (&this_wi, 0, sizeof (this_wi));
this_wi.info = (void *) &this_state;
- walk_gimple_seq (gimple_transaction_body (stmt),
- lower_sequence_tm, NULL, &this_wi);
+ walk_gimple_seq_mod (gimple_transaction_body_ptr (stmt),
+ lower_sequence_tm, NULL, &this_wi);
/* If there was absolutely nothing transaction related inside the
transaction, we may elide it. Likewise if this is a nested
@@ -1600,7 +1600,7 @@ lower_transaction (gimple_stmt_iterator *gsi, struct walk_stmt_info *wi)
gimple_seq n_seq, e_seq;
n_seq = gimple_seq_alloc_with_stmt (g);
- e_seq = gimple_seq_alloc ();
+ e_seq = NULL;
g = gimple_build_call (builtin_decl_explicit (BUILT_IN_EH_POINTER),
1, integer_zero_node);
@@ -1704,13 +1704,15 @@ static unsigned int
execute_lower_tm (void)
{
struct walk_stmt_info wi;
+ gimple_seq body;
/* Transactional clones aren't created until a later pass. */
gcc_assert (!decl_is_tm_clone (current_function_decl));
+ body = gimple_body (current_function_decl);
memset (&wi, 0, sizeof (wi));
- walk_gimple_seq (gimple_body (current_function_decl),
- lower_sequence_no_tm, NULL, &wi);
+ walk_gimple_seq_mod (&body, lower_sequence_no_tm, NULL, &wi);
+ gimple_set_body (current_function_decl, body);
return 0;
}
diff --git a/gcc/tree-cfg.c b/gcc/tree-cfg.c
index e32daa75e0f..f8e1fb5d325 100644
--- a/gcc/tree-cfg.c
+++ b/gcc/tree-cfg.c
@@ -377,7 +377,7 @@ make_blocks (gimple_seq seq)
if (start_new_block || stmt_starts_bb_p (stmt, prev_stmt))
{
if (!first_stmt_of_seq)
- seq = gsi_split_seq_before (&i);
+ gsi_split_seq_before (&i, &seq);
bb = create_basic_block (seq, NULL, bb);
start_new_block = false;
}
@@ -438,8 +438,7 @@ create_bb (void *h, void *e, basic_block after)
bb->index = last_basic_block;
bb->flags = BB_NEW;
- bb->il.gimple = ggc_alloc_cleared_gimple_bb_info ();
- set_bb_seq (bb, h ? (gimple_seq) h : gimple_seq_alloc ());
+ set_bb_seq (bb, h ? (gimple_seq) h : NULL);
/* Add the new block to the linked list of blocks. */
link_block (bb, after);
@@ -1655,7 +1654,6 @@ static void
gimple_merge_blocks (basic_block a, basic_block b)
{
gimple_stmt_iterator last, gsi, psi;
- gimple_seq phis = phi_nodes (b);
if (dump_file)
fprintf (dump_file, "Merging blocks %d and %d\n", a->index, b->index);
@@ -1663,7 +1661,7 @@ gimple_merge_blocks (basic_block a, basic_block b)
/* Remove all single-valued PHI nodes from block B of the form
V_i = PHI <V_j> by propagating V_j to all the uses of V_i. */
gsi = gsi_last_bb (a);
- for (psi = gsi_start (phis); !gsi_end_p (psi); )
+ for (psi = gsi_start_phis (b); !gsi_end_p (psi); )
{
gimple phi = gsi_stmt (psi);
tree def = gimple_phi_result (phi), use = gimple_phi_arg_def (phi, 0);
@@ -1919,7 +1917,8 @@ remove_bb (basic_block bb)
}
remove_phi_nodes_and_edges_for_unreachable_block (bb);
- bb->il.gimple = NULL;
+ bb->il.gimple.seq = NULL;
+ bb->il.gimple.phi_nodes = NULL;
}
@@ -4123,6 +4122,10 @@ verify_gimple_goto (gimple stmt)
static bool
verify_gimple_switch (gimple stmt)
{
+ unsigned int i, n;
+ tree elt, prev_upper_bound = NULL_TREE;
+ tree index_type, elt_type = NULL_TREE;
+
if (!is_gimple_val (gimple_switch_index (stmt)))
{
error ("invalid operand to switch statement");
@@ -4130,6 +4133,75 @@ verify_gimple_switch (gimple stmt)
return true;
}
+ index_type = TREE_TYPE (gimple_switch_index (stmt));
+ if (! INTEGRAL_TYPE_P (index_type))
+ {
+ error ("non-integral type switch statement");
+ debug_generic_expr (index_type);
+ return true;
+ }
+
+ elt = gimple_switch_default_label (stmt);
+ if (CASE_LOW (elt) != NULL_TREE || CASE_HIGH (elt) != NULL_TREE)
+ {
+ error ("invalid default case label in switch statement");
+ debug_generic_expr (elt);
+ return true;
+ }
+
+ n = gimple_switch_num_labels (stmt);
+ for (i = 1; i < n; i++)
+ {
+ elt = gimple_switch_label (stmt, i);
+
+ if (! CASE_LOW (elt))
+ {
+ error ("invalid case label in switch statement");
+ debug_generic_expr (elt);
+ return true;
+ }
+ if (CASE_HIGH (elt)
+ && ! tree_int_cst_lt (CASE_LOW (elt), CASE_HIGH (elt)))
+ {
+ error ("invalid case range in switch statement");
+ debug_generic_expr (elt);
+ return true;
+ }
+
+ if (elt_type)
+ {
+ if (TREE_TYPE (CASE_LOW (elt)) != elt_type
+ || (CASE_HIGH (elt) && TREE_TYPE (CASE_HIGH (elt)) != elt_type))
+ {
+ error ("type mismatch for case label in switch statement");
+ debug_generic_expr (elt);
+ return true;
+ }
+ }
+ else
+ {
+ elt_type = TREE_TYPE (CASE_LOW (elt));
+ if (TYPE_PRECISION (index_type) < TYPE_PRECISION (elt_type))
+ {
+ error ("type precision mismatch in switch statement");
+ return true;
+ }
+ }
+
+ if (prev_upper_bound)
+ {
+ if (! tree_int_cst_lt (prev_upper_bound, CASE_LOW (elt)))
+ {
+ error ("case labels not sorted in switch statement");
+ return true;
+ }
+ }
+
+ prev_upper_bound = CASE_HIGH (elt);
+ if (! prev_upper_bound)
+ prev_upper_bound = CASE_LOW (elt);
+ }
+
return false;
}
@@ -4615,13 +4687,13 @@ gimple_verify_flow_info (void)
edge e;
edge_iterator ei;
- if (ENTRY_BLOCK_PTR->il.gimple)
+ if (ENTRY_BLOCK_PTR->il.gimple.seq || ENTRY_BLOCK_PTR->il.gimple.phi_nodes)
{
error ("ENTRY_BLOCK has IL associated with it");
err = 1;
}
- if (EXIT_BLOCK_PTR->il.gimple)
+ if (EXIT_BLOCK_PTR->il.gimple.seq || EXIT_BLOCK_PTR->il.gimple.phi_nodes)
{
error ("EXIT_BLOCK has IL associated with it");
err = 1;
@@ -5249,7 +5321,7 @@ gimple_split_block (basic_block bb, void *stmt)
brings ugly quadratic memory consumption in the inliner.
(We are still quadratic since we need to update stmt BB pointers,
sadly.) */
- list = gsi_split_seq_before (&gsi);
+ gsi_split_seq_before (&gsi, &list);
set_bb_seq (new_bb, list);
for (gsi_tgt = gsi_start (list);
!gsi_end_p (gsi_tgt); gsi_next (&gsi_tgt))
@@ -6085,8 +6157,8 @@ move_stmt_r (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
p->remap_decls_p = false;
*handled_ops_p = true;
- walk_gimple_seq (gimple_omp_body (stmt), move_stmt_r,
- move_stmt_op, wi);
+ walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), move_stmt_r,
+ move_stmt_op, wi);
p->remap_decls_p = save_remap_decls_p;
}
diff --git a/gcc/tree-complex.c b/gcc/tree-complex.c
index 0547fcf147f..928a3f39274 100644
--- a/gcc/tree-complex.c
+++ b/gcc/tree-complex.c
@@ -661,17 +661,16 @@ update_complex_components_on_edge (edge e, tree lhs, tree r, tree i)
static void
update_complex_assignment (gimple_stmt_iterator *gsi, tree r, tree i)
{
- gimple_stmt_iterator orig_si = *gsi;
gimple stmt;
- if (gimple_in_ssa_p (cfun))
- update_complex_components (gsi, gsi_stmt (*gsi), r, i);
-
- gimple_assign_set_rhs_with_ops (&orig_si, COMPLEX_EXPR, r, i);
- stmt = gsi_stmt (orig_si);
+ gimple_assign_set_rhs_with_ops (gsi, COMPLEX_EXPR, r, i);
+ stmt = gsi_stmt (*gsi);
update_stmt (stmt);
if (maybe_clean_eh_stmt (stmt))
gimple_purge_dead_eh_edges (gimple_bb (stmt));
+
+ if (gimple_in_ssa_p (cfun))
+ update_complex_components (gsi, gsi_stmt (*gsi), r, i);
}
diff --git a/gcc/tree-dfa.c b/gcc/tree-dfa.c
index 0ecec816b6a..3494fc9e962 100644
--- a/gcc/tree-dfa.c
+++ b/gcc/tree-dfa.c
@@ -814,21 +814,24 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset,
{
tree index = TREE_OPERAND (exp, 1);
tree low_bound, unit_size;
+ double_int doffset;
/* If the resulting bit-offset is constant, track it. */
if (TREE_CODE (index) == INTEGER_CST
- && host_integerp (index, 0)
&& (low_bound = array_ref_low_bound (exp),
- host_integerp (low_bound, 0))
+ TREE_CODE (low_bound) == INTEGER_CST)
&& (unit_size = array_ref_element_size (exp),
- host_integerp (unit_size, 1)))
+ host_integerp (unit_size, 1))
+ && (doffset = double_int_sext
+ (double_int_sub (TREE_INT_CST (index),
+ TREE_INT_CST (low_bound)),
+ TYPE_PRECISION (TREE_TYPE (index))),
+ double_int_fits_in_shwi_p (doffset)))
{
- HOST_WIDE_INT hindex = TREE_INT_CST_LOW (index);
-
- hindex -= TREE_INT_CST_LOW (low_bound);
- hindex *= TREE_INT_CST_LOW (unit_size);
- hindex *= BITS_PER_UNIT;
- bit_offset += hindex;
+ HOST_WIDE_INT hoffset = double_int_to_shwi (doffset);
+ hoffset *= TREE_INT_CST_LOW (unit_size);
+ hoffset *= BITS_PER_UNIT;
+ bit_offset += hoffset;
/* An array ref with a constant index up in the structure
hierarchy will constrain the size of any variable array ref
diff --git a/gcc/tree-diagnostic.c b/gcc/tree-diagnostic.c
index 0a55925fb31..cbdbb778259 100644
--- a/gcc/tree-diagnostic.c
+++ b/gcc/tree-diagnostic.c
@@ -187,30 +187,30 @@ maybe_unwind_expanded_macro_loc (diagnostic_context *context,
LRK_MACRO_DEFINITION_LOCATION, NULL);
saved_kind = diagnostic->kind;
- saved_prefix = context->printer->prefix;
+ saved_prefix = pp_get_prefix (context->printer);
saved_location = diagnostic->location;
diagnostic->kind = DK_NOTE;
diagnostic->location = resolved_def_loc;
- pp_base_set_prefix (context->printer,
- diagnostic_build_prefix (context,
- diagnostic));
+ pp_set_prefix (context->printer,
+ diagnostic_build_prefix (context, diagnostic));
pp_newline (context->printer);
pp_printf (context->printer, "in expansion of macro '%s'",
linemap_map_get_macro_name (iter->map));
pp_destroy_prefix (context->printer);
+ diagnostic_show_locus (context, diagnostic);
diagnostic->location = resolved_exp_loc;
- pp_base_set_prefix (context->printer,
- diagnostic_build_prefix (context,
- diagnostic));
+ pp_set_prefix (context->printer,
+ diagnostic_build_prefix (context, diagnostic));
pp_newline (context->printer);
- pp_printf (context->printer, "expanded from here");
+ pp_string (context->printer, "expanded from here");
pp_destroy_prefix (context->printer);
+ diagnostic_show_locus (context, diagnostic);
diagnostic->kind = saved_kind;
diagnostic->location = saved_location;
- context->printer->prefix = saved_prefix;
+ pp_set_prefix (context->printer, saved_prefix);
}
VEC_free (loc_map_pair, heap, loc_vec);
diff --git a/gcc/tree-eh.c b/gcc/tree-eh.c
index 0241a5f1bc4..ef2b5848569 100644
--- a/gcc/tree-eh.c
+++ b/gcc/tree-eh.c
@@ -406,7 +406,7 @@ static gimple_seq lower_eh_must_not_throw (struct leh_state *, gimple);
#define LARGE_GOTO_QUEUE 20
-static void lower_eh_constructs_1 (struct leh_state *state, gimple_seq seq);
+static void lower_eh_constructs_1 (struct leh_state *state, gimple_seq *seq);
static gimple_seq
find_goto_replacement (struct leh_tf_state *tf, treemple stmt)
@@ -481,7 +481,7 @@ replace_goto_queue_cond_clause (tree *tp, struct leh_tf_state *tf,
/* The real work of replace_goto_queue. Returns with TSI updated to
point to the next statement. */
-static void replace_goto_queue_stmt_list (gimple_seq, struct leh_tf_state *);
+static void replace_goto_queue_stmt_list (gimple_seq *, struct leh_tf_state *);
static void
replace_goto_queue_1 (gimple stmt, struct leh_tf_state *tf,
@@ -511,18 +511,18 @@ replace_goto_queue_1 (gimple stmt, struct leh_tf_state *tf,
break;
case GIMPLE_TRY:
- replace_goto_queue_stmt_list (gimple_try_eval (stmt), tf);
- replace_goto_queue_stmt_list (gimple_try_cleanup (stmt), tf);
+ replace_goto_queue_stmt_list (gimple_try_eval_ptr (stmt), tf);
+ replace_goto_queue_stmt_list (gimple_try_cleanup_ptr (stmt), tf);
break;
case GIMPLE_CATCH:
- replace_goto_queue_stmt_list (gimple_catch_handler (stmt), tf);
+ replace_goto_queue_stmt_list (gimple_catch_handler_ptr (stmt), tf);
break;
case GIMPLE_EH_FILTER:
- replace_goto_queue_stmt_list (gimple_eh_filter_failure (stmt), tf);
+ replace_goto_queue_stmt_list (gimple_eh_filter_failure_ptr (stmt), tf);
break;
case GIMPLE_EH_ELSE:
- replace_goto_queue_stmt_list (gimple_eh_else_n_body (stmt), tf);
- replace_goto_queue_stmt_list (gimple_eh_else_e_body (stmt), tf);
+ replace_goto_queue_stmt_list (gimple_eh_else_n_body_ptr (stmt), tf);
+ replace_goto_queue_stmt_list (gimple_eh_else_e_body_ptr (stmt), tf);
break;
default:
@@ -536,9 +536,9 @@ replace_goto_queue_1 (gimple stmt, struct leh_tf_state *tf,
/* A subroutine of replace_goto_queue. Handles GIMPLE_SEQ. */
static void
-replace_goto_queue_stmt_list (gimple_seq seq, struct leh_tf_state *tf)
+replace_goto_queue_stmt_list (gimple_seq *seq, struct leh_tf_state *tf)
{
- gimple_stmt_iterator gsi = gsi_start (seq);
+ gimple_stmt_iterator gsi = gsi_start (*seq);
while (!gsi_end_p (gsi))
replace_goto_queue_1 (gsi_stmt (gsi), tf, &gsi);
@@ -551,8 +551,8 @@ replace_goto_queue (struct leh_tf_state *tf)
{
if (tf->goto_queue_active == 0)
return;
- replace_goto_queue_stmt_list (tf->top_p_seq, tf);
- replace_goto_queue_stmt_list (eh_seq, tf);
+ replace_goto_queue_stmt_list (&tf->top_p_seq, tf);
+ replace_goto_queue_stmt_list (&eh_seq, tf);
}
/* Add a new record to the goto queue contained in TF. NEW_STMT is the
@@ -731,9 +731,6 @@ do_return_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod)
q->cont_stmt = q->stmt.g;
- if (!q->repl_stmt)
- q->repl_stmt = gimple_seq_alloc ();
-
if (mod)
gimple_seq_add_seq (&q->repl_stmt, mod);
@@ -750,8 +747,6 @@ do_goto_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod,
gimple x;
gcc_assert (q->is_label);
- if (!q->repl_stmt)
- q->repl_stmt = gimple_seq_alloc ();
q->cont_stmt = gimple_build_goto (VEC_index (tree, tf->dest_array, q->index));
@@ -1050,13 +1045,13 @@ lower_try_finally_nofallthru (struct leh_state *state,
if (eh_else)
{
finally = gimple_eh_else_n_body (eh_else);
- lower_eh_constructs_1 (state, finally);
+ lower_eh_constructs_1 (state, &finally);
gimple_seq_add_seq (&tf->top_p_seq, finally);
if (tf->may_throw)
{
finally = gimple_eh_else_e_body (eh_else);
- lower_eh_constructs_1 (state, finally);
+ lower_eh_constructs_1 (state, &finally);
emit_post_landing_pad (&eh_seq, tf->region);
gimple_seq_add_seq (&eh_seq, finally);
@@ -1064,7 +1059,7 @@ lower_try_finally_nofallthru (struct leh_state *state,
}
else
{
- lower_eh_constructs_1 (state, finally);
+ lower_eh_constructs_1 (state, &finally);
gimple_seq_add_seq (&tf->top_p_seq, finally);
if (tf->may_throw)
@@ -1105,7 +1100,7 @@ lower_try_finally_onedest (struct leh_state *state, struct leh_tf_state *tf)
finally = gimple_eh_else_n_body (x);
}
- lower_eh_constructs_1 (state, finally);
+ lower_eh_constructs_1 (state, &finally);
if (tf->may_throw)
{
@@ -1193,7 +1188,7 @@ lower_try_finally_copy (struct leh_state *state, struct leh_tf_state *tf)
if (tf->may_fallthru)
{
seq = lower_try_finally_dup_block (finally, state);
- lower_eh_constructs_1 (state, seq);
+ lower_eh_constructs_1 (state, &seq);
gimple_seq_add_seq (&new_stmt, seq);
tmp = lower_try_finally_fallthru_label (tf);
@@ -1209,7 +1204,7 @@ lower_try_finally_copy (struct leh_state *state, struct leh_tf_state *tf)
seq = gimple_eh_else_e_body (eh_else);
else
seq = lower_try_finally_dup_block (finally, state);
- lower_eh_constructs_1 (state, seq);
+ lower_eh_constructs_1 (state, &seq);
emit_post_landing_pad (&eh_seq, tf->region);
gimple_seq_add_seq (&eh_seq, seq);
@@ -1259,7 +1254,7 @@ lower_try_finally_copy (struct leh_state *state, struct leh_tf_state *tf)
gimple_seq_add_stmt (&new_stmt, x);
seq = lower_try_finally_dup_block (finally, state);
- lower_eh_constructs_1 (state, seq);
+ lower_eh_constructs_1 (state, &seq);
gimple_seq_add_seq (&new_stmt, seq);
gimple_seq_add_stmt (&new_stmt, q->cont_stmt);
@@ -1306,7 +1301,7 @@ lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf)
int nlabels, ndests, j, last_case_index;
tree last_case;
VEC (tree,heap) *case_label_vec;
- gimple_seq switch_body;
+ gimple_seq switch_body = NULL;
gimple x, eh_else;
tree tmp;
gimple switch_stmt;
@@ -1317,7 +1312,6 @@ lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf)
/* The location of the finally block. */
location_t finally_loc;
- switch_body = gimple_seq_alloc ();
finally = gimple_try_cleanup (tf->top_p);
eh_else = get_eh_else (finally);
@@ -1331,7 +1325,7 @@ lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf)
: tf_loc;
/* Lower the finally block itself. */
- lower_eh_constructs_1 (state, finally);
+ lower_eh_constructs_1 (state, &finally);
/* Prepare for switch statement generation. */
nlabels = VEC_length (tree, tf->dest_array);
@@ -1382,7 +1376,7 @@ lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf)
if (tf->may_throw)
{
finally = gimple_eh_else_e_body (eh_else);
- lower_eh_constructs_1 (state, finally);
+ lower_eh_constructs_1 (state, &finally);
emit_post_landing_pad (&eh_seq, tf->region);
gimple_seq_add_seq (&eh_seq, finally);
@@ -1426,12 +1420,10 @@ lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf)
entrance through a particular edge. */
for (; q < qe; ++q)
{
- gimple_seq mod;
+ gimple_seq mod = NULL;
int switch_id;
unsigned int case_index;
- mod = gimple_seq_alloc ();
-
if (q->index < 0)
{
x = gimple_build_assign (finally_tmp,
@@ -1623,7 +1615,7 @@ lower_try_finally (struct leh_state *state, gimple tp)
old_eh_seq = eh_seq;
eh_seq = NULL;
- lower_eh_constructs_1 (&this_state, gimple_try_eval(tp));
+ lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
/* Determine if the try block is escaped through the bottom. */
this_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
@@ -1706,7 +1698,7 @@ lower_catch (struct leh_state *state, gimple tp)
struct leh_state this_state = *state;
gimple_stmt_iterator gsi;
tree out_label;
- gimple_seq new_seq;
+ gimple_seq new_seq, cleanup;
gimple x;
location_t try_catch_loc = gimple_location (tp);
@@ -1716,7 +1708,7 @@ lower_catch (struct leh_state *state, gimple tp)
this_state.cur_region = try_region;
}
- lower_eh_constructs_1 (&this_state, gimple_try_eval (tp));
+ lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
if (!eh_region_may_contain_throw (try_region))
return gimple_try_eval (tp);
@@ -1729,7 +1721,8 @@ lower_catch (struct leh_state *state, gimple tp)
this_state.ehp_region = try_region;
out_label = NULL;
- for (gsi = gsi_start (gimple_try_cleanup (tp));
+ cleanup = gimple_try_cleanup (tp);
+ for (gsi = gsi_start (cleanup);
!gsi_end_p (gsi);
gsi_next (&gsi))
{
@@ -1741,7 +1734,7 @@ lower_catch (struct leh_state *state, gimple tp)
c = gen_eh_region_catch (try_region, gimple_catch_types (gcatch));
handler = gimple_catch_handler (gcatch);
- lower_eh_constructs_1 (&this_state, handler);
+ lower_eh_constructs_1 (&this_state, &handler);
c->label = create_artificial_label (UNKNOWN_LOCATION);
x = gimple_build_label (c->label);
@@ -1787,7 +1780,7 @@ lower_eh_filter (struct leh_state *state, gimple tp)
this_state.cur_region = this_region;
}
- lower_eh_constructs_1 (&this_state, gimple_try_eval (tp));
+ lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
if (!eh_region_may_contain_throw (this_region))
return gimple_try_eval (tp);
@@ -1803,7 +1796,7 @@ lower_eh_filter (struct leh_state *state, gimple tp)
x = gimple_build_label (this_region->u.allowed.label);
gimple_seq_add_stmt (&new_seq, x);
- lower_eh_constructs_1 (&this_state, gimple_eh_filter_failure (inner));
+ lower_eh_constructs_1 (&this_state, gimple_eh_filter_failure_ptr (inner));
gimple_seq_add_seq (&new_seq, gimple_eh_filter_failure (inner));
gimple_try_set_cleanup (tp, new_seq);
@@ -1838,7 +1831,7 @@ lower_eh_must_not_throw (struct leh_state *state, gimple tp)
this_state.cur_region = this_region;
}
- lower_eh_constructs_1 (&this_state, gimple_try_eval (tp));
+ lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
return gimple_try_eval (tp);
}
@@ -1861,7 +1854,7 @@ lower_cleanup (struct leh_state *state, gimple tp)
this_state.cur_region = this_region;
}
- lower_eh_constructs_1 (&this_state, gimple_try_eval (tp));
+ lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
if (cleanup_dead || !eh_region_may_contain_throw (this_region))
return gimple_try_eval (tp);
@@ -1881,7 +1874,7 @@ lower_cleanup (struct leh_state *state, gimple tp)
{
/* In this case honor_protect_cleanup_actions had nothing to do,
and we should process this normally. */
- lower_eh_constructs_1 (state, gimple_try_cleanup (tp));
+ lower_eh_constructs_1 (state, gimple_try_cleanup_ptr (tp));
result = frob_into_branch_around (tp, this_region,
fake_tf.fallthru_label);
}
@@ -2010,7 +2003,7 @@ lower_eh_constructs_2 (struct leh_state *state, gimple_stmt_iterator *gsi)
if (!x)
{
replace = gimple_try_eval (stmt);
- lower_eh_constructs_1 (state, replace);
+ lower_eh_constructs_1 (state, &replace);
}
else
switch (gimple_code (x))
@@ -2057,10 +2050,10 @@ lower_eh_constructs_2 (struct leh_state *state, gimple_stmt_iterator *gsi)
/* A helper to unwrap a gimple_seq and feed stmts to lower_eh_constructs_2. */
static void
-lower_eh_constructs_1 (struct leh_state *state, gimple_seq seq)
+lower_eh_constructs_1 (struct leh_state *state, gimple_seq *pseq)
{
gimple_stmt_iterator gsi;
- for (gsi = gsi_start (seq); !gsi_end_p (gsi);)
+ for (gsi = gsi_start (*pseq); !gsi_end_p (gsi);)
lower_eh_constructs_2 (state, &gsi);
}
@@ -2079,7 +2072,8 @@ lower_eh_constructs (void)
memset (&null_state, 0, sizeof (null_state));
collect_finally_tree_1 (bodyp, NULL);
- lower_eh_constructs_1 (&null_state, bodyp);
+ lower_eh_constructs_1 (&null_state, &bodyp);
+ gimple_set_body (current_function_decl, bodyp);
/* We assume there's a return statement, or something, at the end of
the function, and thus ploping the EH sequence afterward won't
@@ -2874,8 +2868,10 @@ optimize_double_finally (gimple one, gimple two)
{
gimple oneh;
gimple_stmt_iterator gsi;
+ gimple_seq cleanup;
- gsi = gsi_start (gimple_try_cleanup (one));
+ cleanup = gimple_try_cleanup (one);
+ gsi = gsi_start (cleanup);
if (!gsi_one_before_end_p (gsi))
return;
diff --git a/gcc/tree-emutls.c b/gcc/tree-emutls.c
index 63e4a756e86..bae41ac62e6 100644
--- a/gcc/tree-emutls.c
+++ b/gcc/tree-emutls.c
@@ -338,7 +338,7 @@ new_emutls_decl (tree decl, tree alias_of)
else
varpool_create_variable_alias (to,
varpool_node_for_asm
- (DECL_ASSEMBLER_NAME (alias_of))->symbol.decl);
+ (DECL_ASSEMBLER_NAME (DECL_VALUE_EXPR (alias_of)))->symbol.decl);
return to;
}
diff --git a/gcc/tree-flow-inline.h b/gcc/tree-flow-inline.h
index 56edae9fef3..8627fc37250 100644
--- a/gcc/tree-flow-inline.h
+++ b/gcc/tree-flow-inline.h
@@ -506,9 +506,14 @@ static inline gimple_seq
phi_nodes (const_basic_block bb)
{
gcc_checking_assert (!(bb->flags & BB_RTL));
- if (!bb->il.gimple)
- return NULL;
- return bb->il.gimple->phi_nodes;
+ return bb->il.gimple.phi_nodes;
+}
+
+static inline gimple_seq *
+phi_nodes_ptr (basic_block bb)
+{
+ gcc_checking_assert (!(bb->flags & BB_RTL));
+ return &bb->il.gimple.phi_nodes;
}
/* Set PHI nodes of a basic block BB to SEQ. */
@@ -519,7 +524,7 @@ set_phi_nodes (basic_block bb, gimple_seq seq)
gimple_stmt_iterator i;
gcc_checking_assert (!(bb->flags & BB_RTL));
- bb->il.gimple->phi_nodes = seq;
+ bb->il.gimple.phi_nodes = seq;
if (seq)
for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
gimple_set_bb (gsi_stmt (i), bb);
diff --git a/gcc/tree-flow.h b/gcc/tree-flow.h
index 794047bc7f0..315d9558d14 100644
--- a/gcc/tree-flow.h
+++ b/gcc/tree-flow.h
@@ -136,12 +136,17 @@ struct GTY(()) ptr_info_def
align and misalign specify low known bits of the pointer.
ptr & (align - 1) == misalign. */
- /* The power-of-two byte alignment of the object this pointer
- points into. This is usually DECL_ALIGN_UNIT for decls and
- MALLOC_ABI_ALIGNMENT for allocated storage. */
+ /* When known, this is the power-of-two byte alignment of the object this
+ pointer points into. This is usually DECL_ALIGN_UNIT for decls and
+ MALLOC_ABI_ALIGNMENT for allocated storage. When the alignment is not
+ known, it is zero. Do not access directly but use functions
+ get_ptr_info_alignment, set_ptr_info_alignment,
+ mark_ptr_info_alignment_unknown and similar. */
unsigned int align;
- /* The byte offset this pointer differs from the above alignment. */
+ /* When alignment is known, the byte offset this pointer differs from the
+ above alignment. Access only through the same helper functions as align
+ above. */
unsigned int misalign;
};
@@ -593,6 +598,13 @@ extern void duplicate_ssa_name_ptr_info (tree, struct ptr_info_def *);
extern void release_ssa_name (tree);
extern void release_defs (gimple);
extern void replace_ssa_name_symbol (tree, tree);
+extern bool get_ptr_info_alignment (struct ptr_info_def *, unsigned int *,
+ unsigned int *);
+extern void mark_ptr_info_alignment_unknown (struct ptr_info_def *);
+extern void set_ptr_info_alignment (struct ptr_info_def *, unsigned int,
+ unsigned int);
+extern void adjust_ptr_info_misalignment (struct ptr_info_def *,
+ unsigned int);
#ifdef GATHER_STATISTICS
extern void ssanames_print_statistics (void);
diff --git a/gcc/tree-inline.c b/gcc/tree-inline.c
index 2ba95f509fe..057087e62a6 100644
--- a/gcc/tree-inline.c
+++ b/gcc/tree-inline.c
@@ -1996,7 +1996,7 @@ copy_phis_for_bb (basic_block bb, copy_body_data *id)
edge new_edge;
bool inserted = false;
- for (si = gsi_start (phi_nodes (bb)); !gsi_end_p (si); gsi_next (&si))
+ for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
{
tree res, new_res;
gimple new_phi;
@@ -2608,6 +2608,17 @@ setup_one_parameter (copy_body_data *id, tree p, tree value, tree fn,
/* Make gimplifier happy about this variable. */
DECL_SEEN_IN_BIND_EXPR_P (var) = 1;
+ /* We are eventually using the value - make sure all variables
+ referenced therein are properly recorded. */
+ if (value
+ && gimple_in_ssa_p (cfun)
+ && TREE_CODE (value) == ADDR_EXPR)
+ {
+ tree base = get_base_address (TREE_OPERAND (value, 0));
+ if (base && TREE_CODE (base) == VAR_DECL)
+ add_referenced_var (base);
+ }
+
/* If the parameter is never assigned to, has no SSA_NAMEs created,
we would not need to create a new variable here at all, if it
weren't for debug info. Still, we can just use the argument
diff --git a/gcc/tree-mudflap.c b/gcc/tree-mudflap.c
index cc8b98d1ea4..edd7755a701 100644
--- a/gcc/tree-mudflap.c
+++ b/gcc/tree-mudflap.c
@@ -472,7 +472,7 @@ static void
mf_decl_cache_locals (void)
{
gimple g;
- gimple_seq seq = gimple_seq_alloc ();
+ gimple_seq seq = NULL;
/* Build the cache vars. */
mf_cache_shift_decl_l
@@ -572,7 +572,7 @@ mf_build_check_statement_for (tree base, tree limit,
mf_limit = make_rename_temp (mf_uintptr_type, "__mf_limit");
/* Build: __mf_base = (uintptr_t) <base address expression>. */
- seq = gimple_seq_alloc ();
+ seq = NULL;
t = fold_convert_loc (location, mf_uintptr_type,
unshare_expr (base));
t = force_gimple_operand (t, &stmts, false, NULL_TREE);
@@ -683,7 +683,7 @@ mf_build_check_statement_for (tree base, tree limit,
This is the body of the conditional. */
- seq = gimple_seq_alloc ();
+ seq = NULL;
/* u is a string, so it is already a gimple value. */
u = mf_file_function_line_tree (location);
/* NB: we pass the overall [base..limit] range to mf_check. */
@@ -704,7 +704,7 @@ mf_build_check_statement_for (tree base, tree limit,
gsi_insert_seq_after (&gsi, seq, GSI_CONTINUE_LINKING);
e = split_block (then_bb, g);
then_bb = e->dest;
- seq = gimple_seq_alloc ();
+ seq = NULL;
}
g = gimple_build_assign (mf_cache_shift_decl_l, mf_cache_shift_decl);
@@ -1114,7 +1114,7 @@ mx_register_decls (tree decl, gimple_seq seq, location_t location)
if (finally_stmts != NULL)
{
gimple stmt = gimple_build_try (seq, finally_stmts, GIMPLE_TRY_FINALLY);
- gimple_seq new_seq = gimple_seq_alloc ();
+ gimple_seq new_seq = NULL;
gimple_seq_add_stmt (&new_seq, stmt);
return new_seq;
diff --git a/gcc/tree-nested.c b/gcc/tree-nested.c
index 042137f09ad..b5d37e98e35 100644
--- a/gcc/tree-nested.c
+++ b/gcc/tree-nested.c
@@ -577,18 +577,18 @@ get_nl_goto_field (struct nesting_info *info)
return field;
}
-/* Invoke CALLBACK on all statements of GIMPLE sequence SEQ. */
+/* Invoke CALLBACK on all statements of GIMPLE sequence *PSEQ. */
static void
walk_body (walk_stmt_fn callback_stmt, walk_tree_fn callback_op,
- struct nesting_info *info, gimple_seq seq)
+ struct nesting_info *info, gimple_seq *pseq)
{
struct walk_stmt_info wi;
memset (&wi, 0, sizeof (wi));
wi.info = info;
wi.val_only = true;
- walk_gimple_seq (seq, callback_stmt, callback_op, &wi);
+ walk_gimple_seq_mod (pseq, callback_stmt, callback_op, &wi);
}
@@ -598,7 +598,9 @@ static inline void
walk_function (walk_stmt_fn callback_stmt, walk_tree_fn callback_op,
struct nesting_info *info)
{
- walk_body (callback_stmt, callback_op, info, gimple_body (info->context));
+ gimple_seq body = gimple_body (info->context);
+ walk_body (callback_stmt, callback_op, info, &body);
+ gimple_set_body (info->context, body);
}
/* Invoke CALLBACK on a GIMPLE_OMP_FOR's init, cond, incr and pre-body. */
@@ -613,9 +615,9 @@ walk_gimple_omp_for (gimple for_stmt,
tree t;
size_t i;
- walk_body (callback_stmt, callback_op, info, gimple_omp_for_pre_body (for_stmt));
+ walk_body (callback_stmt, callback_op, info, gimple_omp_for_pre_body_ptr (for_stmt));
- seq = gimple_seq_alloc ();
+ seq = NULL;
memset (&wi, 0, sizeof (wi));
wi.info = info;
wi.gsi = gsi_last (seq);
@@ -644,9 +646,8 @@ walk_gimple_omp_for (gimple for_stmt,
walk_tree (&TREE_OPERAND (t, 1), callback_op, &wi, NULL);
}
- if (gimple_seq_empty_p (seq))
- gimple_seq_free (seq);
- else
+ seq = gsi_seq (wi.gsi);
+ if (!gimple_seq_empty_p (seq))
{
gimple_seq pre_body = gimple_omp_for_pre_body (for_stmt);
annotate_all_with_location (seq, gimple_location (for_stmt));
@@ -1136,10 +1137,10 @@ convert_nonlocal_omp_clauses (tree *pclauses, struct walk_stmt_info *wi)
= info->context;
walk_body (convert_nonlocal_reference_stmt,
convert_nonlocal_reference_op, info,
- OMP_CLAUSE_REDUCTION_GIMPLE_INIT (clause));
+ &OMP_CLAUSE_REDUCTION_GIMPLE_INIT (clause));
walk_body (convert_nonlocal_reference_stmt,
convert_nonlocal_reference_op, info,
- OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (clause));
+ &OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (clause));
DECL_CONTEXT (OMP_CLAUSE_REDUCTION_PLACEHOLDER (clause))
= old_context;
}
@@ -1148,7 +1149,7 @@ convert_nonlocal_omp_clauses (tree *pclauses, struct walk_stmt_info *wi)
case OMP_CLAUSE_LASTPRIVATE:
walk_body (convert_nonlocal_reference_stmt,
convert_nonlocal_reference_op, info,
- OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (clause));
+ &OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (clause));
break;
default:
@@ -1261,7 +1262,7 @@ convert_nonlocal_reference_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
info->new_local_var_chain = NULL;
walk_body (convert_nonlocal_reference_stmt, convert_nonlocal_reference_op,
- info, gimple_omp_body (stmt));
+ info, gimple_omp_body_ptr (stmt));
if (info->new_local_var_chain)
declare_vars (info->new_local_var_chain,
@@ -1277,7 +1278,7 @@ convert_nonlocal_reference_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
walk_gimple_omp_for (stmt, convert_nonlocal_reference_stmt,
convert_nonlocal_reference_op, info);
walk_body (convert_nonlocal_reference_stmt,
- convert_nonlocal_reference_op, info, gimple_omp_body (stmt));
+ convert_nonlocal_reference_op, info, gimple_omp_body_ptr (stmt));
info->suppress_expansion = save_suppress;
break;
@@ -1285,7 +1286,7 @@ convert_nonlocal_reference_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
save_suppress = info->suppress_expansion;
convert_nonlocal_omp_clauses (gimple_omp_sections_clauses_ptr (stmt), wi);
walk_body (convert_nonlocal_reference_stmt, convert_nonlocal_reference_op,
- info, gimple_omp_body (stmt));
+ info, gimple_omp_body_ptr (stmt));
info->suppress_expansion = save_suppress;
break;
@@ -1293,7 +1294,7 @@ convert_nonlocal_reference_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
save_suppress = info->suppress_expansion;
convert_nonlocal_omp_clauses (gimple_omp_single_clauses_ptr (stmt), wi);
walk_body (convert_nonlocal_reference_stmt, convert_nonlocal_reference_op,
- info, gimple_omp_body (stmt));
+ info, gimple_omp_body_ptr (stmt));
info->suppress_expansion = save_suppress;
break;
@@ -1301,7 +1302,7 @@ convert_nonlocal_reference_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
case GIMPLE_OMP_MASTER:
case GIMPLE_OMP_ORDERED:
walk_body (convert_nonlocal_reference_stmt, convert_nonlocal_reference_op,
- info, gimple_omp_body (stmt));
+ info, gimple_omp_body_ptr (stmt));
break;
case GIMPLE_BIND:
@@ -1635,10 +1636,10 @@ convert_local_omp_clauses (tree *pclauses, struct walk_stmt_info *wi)
= info->context;
walk_body (convert_local_reference_stmt,
convert_local_reference_op, info,
- OMP_CLAUSE_REDUCTION_GIMPLE_INIT (clause));
+ &OMP_CLAUSE_REDUCTION_GIMPLE_INIT (clause));
walk_body (convert_local_reference_stmt,
convert_local_reference_op, info,
- OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (clause));
+ &OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (clause));
DECL_CONTEXT (OMP_CLAUSE_REDUCTION_PLACEHOLDER (clause))
= old_context;
}
@@ -1647,7 +1648,7 @@ convert_local_omp_clauses (tree *pclauses, struct walk_stmt_info *wi)
case OMP_CLAUSE_LASTPRIVATE:
walk_body (convert_local_reference_stmt,
convert_local_reference_op, info,
- OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (clause));
+ &OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (clause));
break;
default:
@@ -1692,7 +1693,7 @@ convert_local_reference_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
info->new_local_var_chain = NULL;
walk_body (convert_local_reference_stmt, convert_local_reference_op, info,
- gimple_omp_body (stmt));
+ gimple_omp_body_ptr (stmt));
if (info->new_local_var_chain)
declare_vars (info->new_local_var_chain,
@@ -1707,7 +1708,7 @@ convert_local_reference_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
walk_gimple_omp_for (stmt, convert_local_reference_stmt,
convert_local_reference_op, info);
walk_body (convert_local_reference_stmt, convert_local_reference_op,
- info, gimple_omp_body (stmt));
+ info, gimple_omp_body_ptr (stmt));
info->suppress_expansion = save_suppress;
break;
@@ -1715,7 +1716,7 @@ convert_local_reference_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
save_suppress = info->suppress_expansion;
convert_local_omp_clauses (gimple_omp_sections_clauses_ptr (stmt), wi);
walk_body (convert_local_reference_stmt, convert_local_reference_op,
- info, gimple_omp_body (stmt));
+ info, gimple_omp_body_ptr (stmt));
info->suppress_expansion = save_suppress;
break;
@@ -1723,7 +1724,7 @@ convert_local_reference_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
save_suppress = info->suppress_expansion;
convert_local_omp_clauses (gimple_omp_single_clauses_ptr (stmt), wi);
walk_body (convert_local_reference_stmt, convert_local_reference_op,
- info, gimple_omp_body (stmt));
+ info, gimple_omp_body_ptr (stmt));
info->suppress_expansion = save_suppress;
break;
@@ -1731,7 +1732,7 @@ convert_local_reference_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
case GIMPLE_OMP_MASTER:
case GIMPLE_OMP_ORDERED:
walk_body (convert_local_reference_stmt, convert_local_reference_op,
- info, gimple_omp_body (stmt));
+ info, gimple_omp_body_ptr (stmt));
break;
case GIMPLE_COND:
@@ -1809,12 +1810,12 @@ convert_nl_goto_reference (gimple_stmt_iterator *gsi, bool *handled_ops_p,
/* Build: __builtin_nl_goto(new_label, &chain->nl_goto_field). */
field = get_nl_goto_field (i);
- x = get_frame_field (info, target_context, field, &wi->gsi);
+ x = get_frame_field (info, target_context, field, gsi);
x = build_addr (x, target_context);
- x = gsi_gimplify_val (info, x, &wi->gsi);
+ x = gsi_gimplify_val (info, x, gsi);
call = gimple_build_call (builtin_decl_implicit (BUILT_IN_NONLOCAL_GOTO),
2, build_addr (new_label, target_context), x);
- gsi_replace (&wi->gsi, call, false);
+ gsi_replace (gsi, call, false);
/* We have handled all of STMT's operands, no need to keep going. */
*handled_ops_p = true;
@@ -1979,7 +1980,7 @@ convert_tramp_reference_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
save_local_var_chain = info->new_local_var_chain;
info->new_local_var_chain = NULL;
walk_body (convert_tramp_reference_stmt, convert_tramp_reference_op,
- info, gimple_omp_body (stmt));
+ info, gimple_omp_body_ptr (stmt));
if (info->new_local_var_chain)
declare_vars (info->new_local_var_chain,
gimple_seq_first_stmt (gimple_omp_body (stmt)),
@@ -2035,7 +2036,7 @@ convert_gimple_call (gimple_stmt_iterator *gsi, bool *handled_ops_p,
case GIMPLE_OMP_TASK:
save_static_chain_added = info->static_chain_added;
info->static_chain_added = 0;
- walk_body (convert_gimple_call, NULL, info, gimple_omp_body (stmt));
+ walk_body (convert_gimple_call, NULL, info, gimple_omp_body_ptr (stmt));
for (i = 0; i < 2; i++)
{
tree c, decl;
@@ -2065,7 +2066,7 @@ convert_gimple_call (gimple_stmt_iterator *gsi, bool *handled_ops_p,
case GIMPLE_OMP_FOR:
walk_body (convert_gimple_call, NULL, info,
- gimple_omp_for_pre_body (stmt));
+ gimple_omp_for_pre_body_ptr (stmt));
/* FALLTHRU */
case GIMPLE_OMP_SECTIONS:
case GIMPLE_OMP_SECTION:
@@ -2073,7 +2074,7 @@ convert_gimple_call (gimple_stmt_iterator *gsi, bool *handled_ops_p,
case GIMPLE_OMP_MASTER:
case GIMPLE_OMP_ORDERED:
case GIMPLE_OMP_CRITICAL:
- walk_body (convert_gimple_call, NULL, info, gimple_omp_body (stmt));
+ walk_body (convert_gimple_call, NULL, info, gimple_omp_body_ptr (stmt));
break;
default:
diff --git a/gcc/tree-phinodes.c b/gcc/tree-phinodes.c
index 218a5515a38..bac9b52abcd 100644
--- a/gcc/tree-phinodes.c
+++ b/gcc/tree-phinodes.c
@@ -221,6 +221,7 @@ make_phi_node (tree var, int len)
- sizeof (struct phi_arg_d)
+ sizeof (struct phi_arg_d) * len));
phi->gsbase.code = GIMPLE_PHI;
+ gimple_init_singleton (phi);
phi->gimple_phi.nargs = len;
phi->gimple_phi.capacity = capacity;
if (TREE_CODE (var) == SSA_NAME)
@@ -269,29 +270,29 @@ release_phi_node (gimple phi)
/* Resize an existing PHI node. The only way is up. Return the
possibly relocated phi. */
-static void
-resize_phi_node (gimple *phi, size_t len)
+static gimple
+resize_phi_node (gimple phi, size_t len)
{
size_t old_size, i;
gimple new_phi;
- gcc_assert (len > gimple_phi_capacity (*phi));
+ gcc_assert (len > gimple_phi_capacity (phi));
/* The garbage collector will not look at the PHI node beyond the
first PHI_NUM_ARGS elements. Therefore, all we have to copy is a
portion of the PHI node currently in use. */
old_size = sizeof (struct gimple_statement_phi)
- + (gimple_phi_num_args (*phi) - 1) * sizeof (struct phi_arg_d);
+ + (gimple_phi_num_args (phi) - 1) * sizeof (struct phi_arg_d);
new_phi = allocate_phi_node (len);
- memcpy (new_phi, *phi, old_size);
+ memcpy (new_phi, phi, old_size);
for (i = 0; i < gimple_phi_num_args (new_phi); i++)
{
use_operand_p imm, old_imm;
imm = gimple_phi_arg_imm_use_ptr (new_phi, i);
- old_imm = gimple_phi_arg_imm_use_ptr (*phi, i);
+ old_imm = gimple_phi_arg_imm_use_ptr (phi, i);
imm->use = gimple_phi_arg_def_ptr (new_phi, i);
relink_imm_use_stmt (imm, old_imm, new_phi);
}
@@ -310,7 +311,7 @@ resize_phi_node (gimple *phi, size_t len)
imm->loc.stmt = new_phi;
}
- *phi = new_phi;
+ return new_phi;
}
/* Reserve PHI arguments for a new edge to basic block BB. */
@@ -324,18 +325,18 @@ reserve_phi_args_for_new_edge (basic_block bb)
for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
- gimple *loc = gsi_stmt_ptr (&gsi);
+ gimple stmt = gsi_stmt (gsi);
- if (len > gimple_phi_capacity (*loc))
+ if (len > gimple_phi_capacity (stmt))
{
- gimple old_phi = *loc;
-
- resize_phi_node (loc, cap);
+ gimple new_phi = resize_phi_node (stmt, cap);
/* The result of the PHI is defined by this PHI node. */
- SSA_NAME_DEF_STMT (gimple_phi_result (*loc)) = *loc;
+ SSA_NAME_DEF_STMT (gimple_phi_result (new_phi)) = new_phi;
+ gsi_set_stmt (&gsi, new_phi);
- release_phi_node (old_phi);
+ release_phi_node (stmt);
+ stmt = new_phi;
}
/* We represent a "missing PHI argument" by placing NULL_TREE in
@@ -345,9 +346,9 @@ reserve_phi_args_for_new_edge (basic_block bb)
example, the loop optimizer duplicates several basic blocks,
redirects edges, and then fixes up PHI arguments later in
batch. */
- SET_PHI_ARG_DEF (*loc, len - 1, NULL_TREE);
+ SET_PHI_ARG_DEF (stmt, len - 1, NULL_TREE);
- (*loc)->gimple_phi.nargs++;
+ stmt->gimple_phi.nargs++;
}
}
@@ -356,13 +357,15 @@ reserve_phi_args_for_new_edge (basic_block bb)
void
add_phi_node_to_bb (gimple phi, basic_block bb)
{
- gimple_stmt_iterator gsi;
+ gimple_seq seq = phi_nodes (bb);
/* Add the new PHI node to the list of PHI nodes for block BB. */
- if (phi_nodes (bb) == NULL)
- set_phi_nodes (bb, gimple_seq_alloc ());
-
- gsi = gsi_last (phi_nodes (bb));
- gsi_insert_after (&gsi, phi, GSI_NEW_STMT);
+ if (seq == NULL)
+ set_phi_nodes (bb, gimple_seq_alloc_with_stmt (phi));
+ else
+ {
+ gimple_seq_add_stmt (&seq, phi);
+ gcc_assert (seq == phi_nodes (bb));
+ }
/* Associate BB to the PHI node. */
gimple_set_bb (phi, bb);
diff --git a/gcc/tree-predcom.c b/gcc/tree-predcom.c
index bef5252923e..1566902e3d7 100644
--- a/gcc/tree-predcom.c
+++ b/gcc/tree-predcom.c
@@ -1707,6 +1707,7 @@ remove_stmt (gimple stmt)
{
name = PHI_RESULT (stmt);
next = single_nonlooparound_use (name);
+ reset_debug_uses (stmt);
psi = gsi_for_stmt (stmt);
remove_phi_node (&psi, true);
@@ -1728,6 +1729,7 @@ remove_stmt (gimple stmt)
gcc_assert (TREE_CODE (name) == SSA_NAME);
next = single_nonlooparound_use (name);
+ reset_debug_uses (stmt);
mark_virtual_ops_for_renaming (stmt);
gsi_remove (&bsi, true);
diff --git a/gcc/tree-sra.c b/gcc/tree-sra.c
index 110990a211a..3f84f6b22fb 100644
--- a/gcc/tree-sra.c
+++ b/gcc/tree-sra.c
@@ -1472,11 +1472,13 @@ build_ref_for_offset (location_t loc, tree base, HOST_WIDE_INT offset,
by looking at the access mode. That would constrain the
alignment of base + base_offset which we would need to
adjust according to offset. */
- align = get_pointer_alignment_1 (base, &misalign);
- if (misalign == 0
- && (TREE_CODE (prev_base) == MEM_REF
- || TREE_CODE (prev_base) == TARGET_MEM_REF))
- align = MAX (align, TYPE_ALIGN (TREE_TYPE (prev_base)));
+ if (!get_pointer_alignment_1 (base, &align, &misalign))
+ {
+ gcc_assert (misalign == 0);
+ if (TREE_CODE (prev_base) == MEM_REF
+ || TREE_CODE (prev_base) == TARGET_MEM_REF)
+ align = TYPE_ALIGN (TREE_TYPE (prev_base));
+ }
misalign += (double_int_sext (tree_to_double_int (off),
TYPE_PRECISION (TREE_TYPE (off))).low
* BITS_PER_UNIT);
@@ -3192,6 +3194,7 @@ initialize_parameter_reductions (void)
gimple_seq seq = NULL;
tree parm;
+ gsi = gsi_start (seq);
for (parm = DECL_ARGUMENTS (current_function_decl);
parm;
parm = DECL_CHAIN (parm))
@@ -3205,12 +3208,6 @@ initialize_parameter_reductions (void)
if (!access_vec)
continue;
- if (!seq)
- {
- seq = gimple_seq_alloc ();
- gsi = gsi_start (seq);
- }
-
for (access = VEC_index (access_p, access_vec, 0);
access;
access = access->next_grp)
@@ -3218,6 +3215,7 @@ initialize_parameter_reductions (void)
EXPR_LOCATION (parm));
}
+ seq = gsi_seq (gsi);
if (seq)
gsi_insert_seq_on_edge_immediate (single_succ_edge (ENTRY_BLOCK_PTR), seq);
}
diff --git a/gcc/tree-ssa-address.c b/gcc/tree-ssa-address.c
index cf131578d2d..e11da3eb869 100644
--- a/gcc/tree-ssa-address.c
+++ b/gcc/tree-ssa-address.c
@@ -863,26 +863,26 @@ copy_ref_info (tree new_ref, tree old_ref)
&& SSA_NAME_PTR_INFO (TREE_OPERAND (base, 0)))
{
struct ptr_info_def *new_pi;
+ unsigned int align, misalign;
+
duplicate_ssa_name_ptr_info
(new_ptr_base, SSA_NAME_PTR_INFO (TREE_OPERAND (base, 0)));
new_pi = SSA_NAME_PTR_INFO (new_ptr_base);
/* We have to be careful about transfering alignment information. */
- if (TREE_CODE (old_ref) == MEM_REF
+ if (get_ptr_info_alignment (new_pi, &align, &misalign)
+ && TREE_CODE (old_ref) == MEM_REF
&& !(TREE_CODE (new_ref) == TARGET_MEM_REF
&& (TMR_INDEX2 (new_ref)
|| (TMR_STEP (new_ref)
&& (TREE_INT_CST_LOW (TMR_STEP (new_ref))
- < new_pi->align)))))
+ < align)))))
{
- new_pi->misalign += double_int_sub (mem_ref_offset (old_ref),
- mem_ref_offset (new_ref)).low;
- new_pi->misalign &= (new_pi->align - 1);
+ unsigned int inc = double_int_sub (mem_ref_offset (old_ref),
+ mem_ref_offset (new_ref)).low;
+ adjust_ptr_info_misalignment (new_pi, inc);
}
else
- {
- new_pi->align = 1;
- new_pi->misalign = 0;
- }
+ mark_ptr_info_alignment_unknown (new_pi);
}
else if (TREE_CODE (base) == VAR_DECL
|| TREE_CODE (base) == PARM_DECL
diff --git a/gcc/tree-ssa-ccp.c b/gcc/tree-ssa-ccp.c
index 4e86b8db0c3..feded5bfab8 100644
--- a/gcc/tree-ssa-ccp.c
+++ b/gcc/tree-ssa-ccp.c
@@ -513,7 +513,7 @@ get_value_from_alignment (tree expr)
gcc_assert (TREE_CODE (expr) == ADDR_EXPR);
- align = get_object_alignment_1 (TREE_OPERAND (expr, 0), &bitpos);
+ get_object_alignment_1 (TREE_OPERAND (expr, 0), &align, &bitpos);
val.mask
= double_int_and_not (POINTER_TYPE_P (type) || TYPE_UNSIGNED (type)
? double_int_mask (TYPE_PRECISION (type))
@@ -807,7 +807,6 @@ ccp_finalize (void)
{
tree name = ssa_name (i);
prop_value_t *val;
- struct ptr_info_def *pi;
unsigned int tem, align;
if (!name
@@ -823,12 +822,9 @@ ccp_finalize (void)
bits the misalignment. */
tem = val->mask.low;
align = (tem & -tem);
- if (align == 1)
- continue;
-
- pi = get_ptr_info (name);
- pi->align = align;
- pi->misalign = TREE_INT_CST_LOW (val->value) & (align - 1);
+ if (align > 1)
+ set_ptr_info_alignment (get_ptr_info (name), align,
+ TREE_INT_CST_LOW (val->value) & (align - 1));
}
/* Perform substitutions based on the known constant values. */
@@ -1101,14 +1097,12 @@ bit_value_unop_1 (enum tree_code code, tree type,
bool uns;
/* First extend mask and value according to the original type. */
- uns = (TREE_CODE (rtype) == INTEGER_TYPE && TYPE_IS_SIZETYPE (rtype)
- ? 0 : TYPE_UNSIGNED (rtype));
+ uns = TYPE_UNSIGNED (rtype);
*mask = double_int_ext (rmask, TYPE_PRECISION (rtype), uns);
*val = double_int_ext (rval, TYPE_PRECISION (rtype), uns);
/* Then extend mask and value according to the target type. */
- uns = (TREE_CODE (type) == INTEGER_TYPE && TYPE_IS_SIZETYPE (type)
- ? 0 : TYPE_UNSIGNED (type));
+ uns = TYPE_UNSIGNED (type);
*mask = double_int_ext (*mask, TYPE_PRECISION (type), uns);
*val = double_int_ext (*val, TYPE_PRECISION (type), uns);
break;
@@ -1130,8 +1124,7 @@ bit_value_binop_1 (enum tree_code code, tree type,
tree r1type, double_int r1val, double_int r1mask,
tree r2type, double_int r2val, double_int r2mask)
{
- bool uns = (TREE_CODE (type) == INTEGER_TYPE
- && TYPE_IS_SIZETYPE (type) ? 0 : TYPE_UNSIGNED (type));
+ bool uns = TYPE_UNSIGNED (type);
/* Assume we'll get a constant result. Use an initial varying value,
we fall back to varying in the end if necessary. */
*mask = double_int_minus_one;
@@ -1198,13 +1191,6 @@ bit_value_binop_1 (enum tree_code code, tree type,
}
else if (shift < 0)
{
- /* ??? We can have sizetype related inconsistencies in
- the IL. */
- if ((TREE_CODE (r1type) == INTEGER_TYPE
- && (TYPE_IS_SIZETYPE (r1type)
- ? 0 : TYPE_UNSIGNED (r1type))) != uns)
- break;
-
shift = -shift;
*mask = double_int_rshift (r1mask, shift,
TYPE_PRECISION (type), !uns);
@@ -1316,12 +1302,7 @@ bit_value_binop_1 (enum tree_code code, tree type,
break;
/* For comparisons the signedness is in the comparison operands. */
- uns = (TREE_CODE (r1type) == INTEGER_TYPE
- && TYPE_IS_SIZETYPE (r1type) ? 0 : TYPE_UNSIGNED (r1type));
- /* ??? We can have sizetype related inconsistencies in the IL. */
- if ((TREE_CODE (r2type) == INTEGER_TYPE
- && TYPE_IS_SIZETYPE (r2type) ? 0 : TYPE_UNSIGNED (r2type)) != uns)
- break;
+ uns = TYPE_UNSIGNED (r1type);
/* If we know the most significant bits we know the values
value ranges by means of treating varying bits as zero
diff --git a/gcc/tree-ssa-dce.c b/gcc/tree-ssa-dce.c
index d954f3b43e3..1d72e06fe76 100644
--- a/gcc/tree-ssa-dce.c
+++ b/gcc/tree-ssa-dce.c
@@ -1042,12 +1042,10 @@ static bool
remove_dead_phis (basic_block bb)
{
bool something_changed = false;
- gimple_seq phis;
gimple phi;
gimple_stmt_iterator gsi;
- phis = phi_nodes (bb);
- for (gsi = gsi_start (phis); !gsi_end_p (gsi);)
+ for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi);)
{
stats.total_phis++;
phi = gsi_stmt (gsi);
diff --git a/gcc/tree-ssa-dse.c b/gcc/tree-ssa-dse.c
index a8599420763..5fdba8c543b 100644
--- a/gcc/tree-ssa-dse.c
+++ b/gcc/tree-ssa-dse.c
@@ -199,9 +199,9 @@ dse_possible_dead_store_p (gimple stmt, gimple *use_stmt)
post dominates the first store, then the first store is dead. */
static void
-dse_optimize_stmt (gimple_stmt_iterator gsi)
+dse_optimize_stmt (gimple_stmt_iterator *gsi)
{
- gimple stmt = gsi_stmt (gsi);
+ gimple stmt = gsi_stmt (*gsi);
/* If this statement has no virtual defs, then there is nothing
to do. */
@@ -252,7 +252,7 @@ dse_optimize_stmt (gimple_stmt_iterator gsi)
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, " Deleted dead store '");
- print_gimple_stmt (dump_file, gsi_stmt (gsi), dump_flags, 0);
+ print_gimple_stmt (dump_file, gsi_stmt (*gsi), dump_flags, 0);
fprintf (dump_file, "'\n");
}
@@ -261,7 +261,7 @@ dse_optimize_stmt (gimple_stmt_iterator gsi)
/* Remove the dead store. */
bb = gimple_bb (stmt);
- if (gsi_remove (&gsi, true))
+ if (gsi_remove (gsi, true))
bitmap_set_bit (need_eh_cleanup, bb->index);
/* And release any SSA_NAMEs set in this statement back to the
@@ -277,8 +277,14 @@ dse_enter_block (struct dom_walk_data *walk_data ATTRIBUTE_UNUSED,
{
gimple_stmt_iterator gsi;
- for (gsi = gsi_last (bb_seq (bb)); !gsi_end_p (gsi); gsi_prev (&gsi))
- dse_optimize_stmt (gsi);
+ for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi);)
+ {
+ dse_optimize_stmt (&gsi);
+ if (gsi_end_p (gsi))
+ gsi = gsi_last_bb (bb);
+ else
+ gsi_prev (&gsi);
+ }
}
/* Main entry point. */
diff --git a/gcc/tree-ssa-forwprop.c b/gcc/tree-ssa-forwprop.c
index 4739de12501..3c01623130c 100644
--- a/gcc/tree-ssa-forwprop.c
+++ b/gcc/tree-ssa-forwprop.c
@@ -163,7 +163,7 @@ along with GCC; see the file COPYING3. If not see
static bool forward_propagate_addr_expr (tree name, tree rhs);
-/* Set to true if we delete EH edges during the optimization. */
+/* Set to true if we delete dead edges during the optimization. */
static bool cfg_changed;
static tree rhs_to_tree (tree type, gimple stmt);
@@ -1202,16 +1202,18 @@ forward_propagate_addr_expr (tree name, tree rhs)
}
-/* Forward propagate the comparison defined in STMT like
+/* Forward propagate the comparison defined in *DEFGSI like
cond_1 = x CMP y to uses of the form
a_1 = (T')cond_1
a_1 = !cond_1
a_1 = cond_1 != 0
- Returns true if stmt is now unused. */
+ Returns true if stmt is now unused. Advance DEFGSI to the next
+ statement. */
static bool
-forward_propagate_comparison (gimple stmt)
+forward_propagate_comparison (gimple_stmt_iterator *defgsi)
{
+ gimple stmt = gsi_stmt (*defgsi);
tree name = gimple_assign_lhs (stmt);
gimple use_stmt;
tree tmp = NULL_TREE;
@@ -1224,18 +1226,18 @@ forward_propagate_comparison (gimple stmt)
&& SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_assign_rhs1 (stmt)))
|| (TREE_CODE (gimple_assign_rhs2 (stmt)) == SSA_NAME
&& SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_assign_rhs2 (stmt))))
- return false;
+ goto bailout;
/* Do not un-cse comparisons. But propagate through copies. */
use_stmt = get_prop_dest_stmt (name, &name);
if (!use_stmt
|| !is_gimple_assign (use_stmt))
- return false;
+ goto bailout;
code = gimple_assign_rhs_code (use_stmt);
lhs = gimple_assign_lhs (use_stmt);
if (!INTEGRAL_TYPE_P (TREE_TYPE (lhs)))
- return false;
+ goto bailout;
/* We can propagate the condition into a statement that
computes the logical negation of the comparison result. */
@@ -1249,13 +1251,13 @@ forward_propagate_comparison (gimple stmt)
enum tree_code inv_code;
inv_code = invert_tree_comparison (gimple_assign_rhs_code (stmt), nans);
if (inv_code == ERROR_MARK)
- return false;
+ goto bailout;
tmp = build2 (inv_code, TREE_TYPE (lhs), gimple_assign_rhs1 (stmt),
gimple_assign_rhs2 (stmt));
}
else
- return false;
+ goto bailout;
gsi = gsi_for_stmt (use_stmt);
gimple_assign_set_rhs_from_tree (&gsi, unshare_expr (tmp));
@@ -1271,8 +1273,16 @@ forward_propagate_comparison (gimple stmt)
fprintf (dump_file, "'\n");
}
+ /* When we remove stmt now the iterator defgsi goes off it's current
+ sequence, hence advance it now. */
+ gsi_next (defgsi);
+
/* Remove defining statements. */
return remove_prop_source_from_use (name);
+
+bailout:
+ gsi_next (defgsi);
+ return false;
}
@@ -1319,6 +1329,78 @@ simplify_not_neg_expr (gimple_stmt_iterator *gsi_p)
return false;
}
+/* Helper function for simplify_gimple_switch. Remove case labels that
+ have values outside the range of the new type. */
+
+static void
+simplify_gimple_switch_label_vec (gimple stmt, tree index_type)
+{
+ unsigned int branch_num = gimple_switch_num_labels (stmt);
+ VEC(tree, heap) *labels = VEC_alloc (tree, heap, branch_num);
+ unsigned int i, len;
+
+ /* Collect the existing case labels in a VEC, and preprocess it as if
+ we are gimplifying a GENERIC SWITCH_EXPR. */
+ for (i = 1; i < branch_num; i++)
+ VEC_quick_push (tree, labels, gimple_switch_label (stmt, i));
+ preprocess_case_label_vec_for_gimple (labels, index_type, NULL);
+
+ /* If any labels were removed, replace the existing case labels
+ in the GIMPLE_SWITCH statement with the correct ones.
+ Note that the type updates were done in-place on the case labels,
+ so we only have to replace the case labels in the GIMPLE_SWITCH
+ if the number of labels changed. */
+ len = VEC_length (tree, labels);
+ if (len < branch_num - 1)
+ {
+ bitmap target_blocks;
+ edge_iterator ei;
+ edge e;
+
+ /* Corner case: *all* case labels have been removed as being
+ out-of-range for INDEX_TYPE. Push one label and let the
+ CFG cleanups deal with this further. */
+ if (len == 0)
+ {
+ tree label, elt;
+
+ label = CASE_LABEL (gimple_switch_default_label (stmt));
+ elt = build_case_label (build_int_cst (index_type, 0), NULL, label);
+ VEC_quick_push (tree, labels, elt);
+ len = 1;
+ }
+
+ for (i = 0; i < VEC_length (tree, labels); i++)
+ gimple_switch_set_label (stmt, i + 1, VEC_index (tree, labels, i));
+ for (i++ ; i < branch_num; i++)
+ gimple_switch_set_label (stmt, i, NULL_TREE);
+ gimple_switch_set_num_labels (stmt, len + 1);
+
+ /* Cleanup any edges that are now dead. */
+ target_blocks = BITMAP_ALLOC (NULL);
+ for (i = 0; i < gimple_switch_num_labels (stmt); i++)
+ {
+ tree elt = gimple_switch_label (stmt, i);
+ basic_block target = label_to_block (CASE_LABEL (elt));
+ bitmap_set_bit (target_blocks, target->index);
+ }
+ for (ei = ei_start (gimple_bb (stmt)->succs); (e = ei_safe_edge (ei)); )
+ {
+ if (! bitmap_bit_p (target_blocks, e->dest->index))
+ {
+ remove_edge (e);
+ cfg_changed = true;
+ free_dominance_info (CDI_DOMINATORS);
+ }
+ else
+ ei_next (&ei);
+ }
+ BITMAP_FREE (target_blocks);
+ }
+
+ VEC_free (tree, heap, labels);
+}
+
/* STMT is a SWITCH_EXPR for which we attempt to find equivalent forms of
the condition which we may be able to optimize better. */
@@ -1344,9 +1426,6 @@ simplify_gimple_switch (gimple stmt)
def = gimple_assign_rhs1 (def_stmt);
- /* ??? Why was Jeff testing this? We are gimple... */
- gcc_checking_assert (is_gimple_val (def));
-
to = TREE_TYPE (cond);
ti = TREE_TYPE (def);
@@ -1367,6 +1446,7 @@ simplify_gimple_switch (gimple stmt)
if (!fail)
{
gimple_switch_set_index (stmt, def);
+ simplify_gimple_switch_label_vec (stmt, ti);
update_stmt (stmt);
return true;
}
@@ -2597,8 +2677,7 @@ ssa_forward_propagate_and_combine (void)
FOR_EACH_BB (bb)
{
- gimple_stmt_iterator gsi, prev;
- bool prev_initialized;
+ gimple_stmt_iterator gsi;
/* Apply forward propagation to all stmts in the basic-block.
Note we update GSI within the loop as necessary. */
@@ -2682,9 +2761,8 @@ ssa_forward_propagate_and_combine (void)
}
else if (TREE_CODE_CLASS (code) == tcc_comparison)
{
- if (forward_propagate_comparison (stmt))
+ if (forward_propagate_comparison (&gsi))
cfg_changed = true;
- gsi_next (&gsi);
}
else
gsi_next (&gsi);
@@ -2692,12 +2770,14 @@ ssa_forward_propagate_and_combine (void)
/* Combine stmts with the stmts defining their operands.
Note we update GSI within the loop as necessary. */
- prev_initialized = false;
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);)
{
gimple stmt = gsi_stmt (gsi);
bool changed = false;
+ /* Mark stmt as potentially needing revisiting. */
+ gimple_set_plf (stmt, GF_PLF_1, false);
+
switch (gimple_code (stmt))
{
case GIMPLE_ASSIGN:
@@ -2777,18 +2857,18 @@ ssa_forward_propagate_and_combine (void)
{
/* If the stmt changed then re-visit it and the statements
inserted before it. */
- if (!prev_initialized)
+ for (; !gsi_end_p (gsi); gsi_prev (&gsi))
+ if (gimple_plf (gsi_stmt (gsi), GF_PLF_1))
+ break;
+ if (gsi_end_p (gsi))
gsi = gsi_start_bb (bb);
else
- {
- gsi = prev;
- gsi_next (&gsi);
- }
+ gsi_next (&gsi);
}
else
{
- prev = gsi;
- prev_initialized = true;
+ /* Stmt no longer needs to be revisited. */
+ gimple_set_plf (stmt, GF_PLF_1, true);
gsi_next (&gsi);
}
}
diff --git a/gcc/tree-ssa-loop-im.c b/gcc/tree-ssa-loop-im.c
index ce5eb208850..5a01e618da5 100644
--- a/gcc/tree-ssa-loop-im.c
+++ b/gcc/tree-ssa-loop-im.c
@@ -1328,8 +1328,8 @@ move_computations_stmt (struct dom_walk_data *dw_data,
}
mark_virtual_ops_for_renaming (stmt);
- gsi_insert_on_edge (loop_preheader_edge (level), stmt);
gsi_remove (&bsi, false);
+ gsi_insert_on_edge (loop_preheader_edge (level), stmt);
}
}
diff --git a/gcc/tree-ssa-loop-ivopts.c b/gcc/tree-ssa-loop-ivopts.c
index d346e0034e0..3016f08e7e3 100644
--- a/gcc/tree-ssa-loop-ivopts.c
+++ b/gcc/tree-ssa-loop-ivopts.c
@@ -2362,8 +2362,12 @@ add_autoinc_candidates (struct ivopts_data *data, tree base, tree step,
cstepi = int_cst_value (step);
mem_mode = TYPE_MODE (TREE_TYPE (*use->op_p));
- if ((HAVE_PRE_INCREMENT && GET_MODE_SIZE (mem_mode) == cstepi)
- || (HAVE_PRE_DECREMENT && GET_MODE_SIZE (mem_mode) == -cstepi))
+ if (((USE_LOAD_PRE_INCREMENT (mem_mode)
+ || USE_STORE_PRE_INCREMENT (mem_mode))
+ && GET_MODE_SIZE (mem_mode) == cstepi)
+ || ((USE_LOAD_PRE_DECREMENT (mem_mode)
+ || USE_STORE_PRE_DECREMENT (mem_mode))
+ && GET_MODE_SIZE (mem_mode) == -cstepi))
{
enum tree_code code = MINUS_EXPR;
tree new_base;
@@ -2380,8 +2384,12 @@ add_autoinc_candidates (struct ivopts_data *data, tree base, tree step,
add_candidate_1 (data, new_base, step, important, IP_BEFORE_USE, use,
use->stmt);
}
- if ((HAVE_POST_INCREMENT && GET_MODE_SIZE (mem_mode) == cstepi)
- || (HAVE_POST_DECREMENT && GET_MODE_SIZE (mem_mode) == -cstepi))
+ if (((USE_LOAD_POST_INCREMENT (mem_mode)
+ || USE_STORE_POST_INCREMENT (mem_mode))
+ && GET_MODE_SIZE (mem_mode) == cstepi)
+ || ((USE_LOAD_POST_DECREMENT (mem_mode)
+ || USE_STORE_POST_DECREMENT (mem_mode))
+ && GET_MODE_SIZE (mem_mode) == -cstepi))
{
add_candidate_1 (data, base, step, important, IP_AFTER_USE, use,
use->stmt);
@@ -3315,25 +3323,29 @@ get_address_cost (bool symbol_present, bool var_present,
reg0 = gen_raw_REG (address_mode, LAST_VIRTUAL_REGISTER + 1);
reg1 = gen_raw_REG (address_mode, LAST_VIRTUAL_REGISTER + 2);
- if (HAVE_PRE_DECREMENT)
+ if (USE_LOAD_PRE_DECREMENT (mem_mode)
+ || USE_STORE_PRE_DECREMENT (mem_mode))
{
addr = gen_rtx_PRE_DEC (address_mode, reg0);
has_predec[mem_mode]
= memory_address_addr_space_p (mem_mode, addr, as);
}
- if (HAVE_POST_DECREMENT)
+ if (USE_LOAD_POST_DECREMENT (mem_mode)
+ || USE_STORE_POST_DECREMENT (mem_mode))
{
addr = gen_rtx_POST_DEC (address_mode, reg0);
has_postdec[mem_mode]
= memory_address_addr_space_p (mem_mode, addr, as);
}
- if (HAVE_PRE_INCREMENT)
+ if (USE_LOAD_PRE_INCREMENT (mem_mode)
+ || USE_STORE_PRE_DECREMENT (mem_mode))
{
addr = gen_rtx_PRE_INC (address_mode, reg0);
has_preinc[mem_mode]
= memory_address_addr_space_p (mem_mode, addr, as);
}
- if (HAVE_POST_INCREMENT)
+ if (USE_LOAD_POST_INCREMENT (mem_mode)
+ || USE_STORE_POST_INCREMENT (mem_mode))
{
addr = gen_rtx_POST_INC (address_mode, reg0);
has_postinc[mem_mode]
@@ -6259,10 +6271,7 @@ rewrite_use_nonlinear_expr (struct ivopts_data *data,
/* As this isn't a plain copy we have to reset alignment
information. */
if (SSA_NAME_PTR_INFO (comp))
- {
- SSA_NAME_PTR_INFO (comp)->align = 1;
- SSA_NAME_PTR_INFO (comp)->misalign = 0;
- }
+ mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (comp));
}
}
diff --git a/gcc/tree-ssa-loop-prefetch.c b/gcc/tree-ssa-loop-prefetch.c
index 19a6a22d959..00f30a1f31b 100644
--- a/gcc/tree-ssa-loop-prefetch.c
+++ b/gcc/tree-ssa-loop-prefetch.c
@@ -1495,9 +1495,9 @@ self_reuse_distance (data_reference_p dr, unsigned *loop_sizes, unsigned n,
/* Determines the distance till the first reuse of each reference in REFS
in the loop nest of LOOP. NO_OTHER_REFS is true if there are no other
- memory references in the loop. */
+ memory references in the loop. Return false if the analysis fails. */
-static void
+static bool
determine_loop_nest_reuse (struct loop *loop, struct mem_ref_group *refs,
bool no_other_refs)
{
@@ -1515,7 +1515,7 @@ determine_loop_nest_reuse (struct loop *loop, struct mem_ref_group *refs,
ddr_p dep;
if (loop->inner)
- return;
+ return true;
/* Find the outermost loop of the loop nest of loop (we require that
there are no sibling loops inside the nest). */
@@ -1585,7 +1585,8 @@ determine_loop_nest_reuse (struct loop *loop, struct mem_ref_group *refs,
ref->independent_p = true;
}
- compute_all_dependences (datarefs, &dependences, vloops, true);
+ if (!compute_all_dependences (datarefs, &dependences, vloops, true))
+ return false;
FOR_EACH_VEC_ELT (ddr_p, dependences, i, dep)
{
@@ -1664,6 +1665,8 @@ determine_loop_nest_reuse (struct loop *loop, struct mem_ref_group *refs,
fprintf (dump_file, " ref %p distance %u\n",
(void *) ref, ref->reuse_distance);
}
+
+ return true;
}
/* Determine whether or not the trip count to ahead ratio is too small based
@@ -1826,7 +1829,8 @@ loop_prefetch_arrays (struct loop *loop)
if (nothing_to_prefetch_p (refs))
goto fail;
- determine_loop_nest_reuse (loop, refs, no_other_refs);
+ if (!determine_loop_nest_reuse (loop, refs, no_other_refs))
+ goto fail;
/* Step 3: determine unroll factor. */
unroll_factor = determine_unroll_factor (loop, refs, ninsns, &desc,
diff --git a/gcc/tree-ssa-phiopt.c b/gcc/tree-ssa-phiopt.c
index 88c16e68373..1cfa0f512bc 100644
--- a/gcc/tree-ssa-phiopt.c
+++ b/gcc/tree-ssa-phiopt.c
@@ -1624,8 +1624,17 @@ cond_if_else_store_replacement (basic_block then_bb, basic_block else_bb,
/* Compute and check data dependencies in both basic blocks. */
then_ddrs = VEC_alloc (ddr_p, heap, 1);
else_ddrs = VEC_alloc (ddr_p, heap, 1);
- compute_all_dependences (then_datarefs, &then_ddrs, NULL, false);
- compute_all_dependences (else_datarefs, &else_ddrs, NULL, false);
+ if (!compute_all_dependences (then_datarefs, &then_ddrs, NULL, false)
+ || !compute_all_dependences (else_datarefs, &else_ddrs, NULL, false))
+ {
+ free_dependence_relations (then_ddrs);
+ free_dependence_relations (else_ddrs);
+ free_data_refs (then_datarefs);
+ free_data_refs (else_datarefs);
+ VEC_free (gimple, heap, then_stores);
+ VEC_free (gimple, heap, else_stores);
+ return false;
+ }
blocks[0] = then_bb;
blocks[1] = else_bb;
blocks[2] = join_bb;
diff --git a/gcc/tree-ssa-pre.c b/gcc/tree-ssa-pre.c
index a89856aa959..fcd7feeea1e 100644
--- a/gcc/tree-ssa-pre.c
+++ b/gcc/tree-ssa-pre.c
@@ -1029,6 +1029,24 @@ debug_bitmap_set (bitmap_set_t set)
print_bitmap_set (stderr, set, "debug", 0);
}
+void debug_bitmap_sets_for (basic_block);
+
+DEBUG_FUNCTION void
+debug_bitmap_sets_for (basic_block bb)
+{
+ print_bitmap_set (stderr, AVAIL_OUT (bb), "avail_out", bb->index);
+ if (!in_fre)
+ {
+ print_bitmap_set (stderr, EXP_GEN (bb), "exp_gen", bb->index);
+ print_bitmap_set (stderr, PHI_GEN (bb), "phi_gen", bb->index);
+ print_bitmap_set (stderr, TMP_GEN (bb), "tmp_gen", bb->index);
+ print_bitmap_set (stderr, ANTIC_IN (bb), "antic_in", bb->index);
+ if (do_partial_partial)
+ print_bitmap_set (stderr, PA_IN (bb), "pa_in", bb->index);
+ print_bitmap_set (stderr, NEW_SETS (bb), "new_sets", bb->index);
+ }
+}
+
/* Print out the expressions that have VAL to OUTFILE. */
static void
@@ -1641,7 +1659,6 @@ phi_translate_1 (pre_expr expr, bitmap_set_t set1, bitmap_set_t set2,
{
unsigned int new_val_id;
pre_expr constant;
- bool converted = false;
tree result = vn_reference_lookup_pieces (newvuse, ref->set,
ref->type,
@@ -1650,12 +1667,29 @@ phi_translate_1 (pre_expr expr, bitmap_set_t set1, bitmap_set_t set2,
if (result)
VEC_free (vn_reference_op_s, heap, newoperands);
- if (result
- && !useless_type_conversion_p (ref->type, TREE_TYPE (result)))
+ /* We can always insert constants, so if we have a partial
+ redundant constant load of another type try to translate it
+ to a constant of appropriate type. */
+ if (result && is_gimple_min_invariant (result))
{
- result = fold_build1 (VIEW_CONVERT_EXPR, ref->type, result);
- converted = true;
+ tree tem = result;
+ if (!useless_type_conversion_p (ref->type, TREE_TYPE (result)))
+ {
+ tem = fold_unary (VIEW_CONVERT_EXPR, ref->type, result);
+ if (tem && !is_gimple_min_invariant (tem))
+ tem = NULL_TREE;
+ }
+ if (tem)
+ return get_or_alloc_expr_for_constant (tem);
}
+
+ /* If we'd have to convert things we would need to validate
+ if we can insert the translated expression. So fail
+ here for now - we cannot insert an alias with a different
+ type in the VN tables either, as that would assert. */
+ if (result
+ && !useless_type_conversion_p (ref->type, TREE_TYPE (result)))
+ return NULL;
else if (!result && newref
&& !useless_type_conversion_p (ref->type, newref->type))
{
@@ -1663,61 +1697,11 @@ phi_translate_1 (pre_expr expr, bitmap_set_t set1, bitmap_set_t set2,
return NULL;
}
- if (result && is_gimple_min_invariant (result))
- {
- gcc_assert (!newoperands);
- return get_or_alloc_expr_for_constant (result);
- }
-
expr = (pre_expr) pool_alloc (pre_expr_pool);
expr->kind = REFERENCE;
expr->id = 0;
- if (converted)
- {
- vn_nary_op_t nary;
- tree nresult;
-
- gcc_assert (CONVERT_EXPR_P (result)
- || TREE_CODE (result) == VIEW_CONVERT_EXPR);
-
- nresult = vn_nary_op_lookup_pieces (1, TREE_CODE (result),
- TREE_TYPE (result),
- &TREE_OPERAND (result, 0),
- &nary);
- if (nresult && is_gimple_min_invariant (nresult))
- return get_or_alloc_expr_for_constant (nresult);
-
- expr->kind = NARY;
- if (nary)
- {
- PRE_EXPR_NARY (expr) = nary;
- constant = fully_constant_expression (expr);
- if (constant != expr)
- return constant;
-
- new_val_id = nary->value_id;
- get_or_alloc_expression_id (expr);
- }
- else
- {
- new_val_id = get_next_value_id ();
- VEC_safe_grow_cleared (bitmap_set_t, heap,
- value_expressions,
- get_max_value_id() + 1);
- nary = vn_nary_op_insert_pieces (1, TREE_CODE (result),
- TREE_TYPE (result),
- &TREE_OPERAND (result, 0),
- NULL_TREE,
- new_val_id);
- PRE_EXPR_NARY (expr) = nary;
- constant = fully_constant_expression (expr);
- if (constant != expr)
- return constant;
- get_or_alloc_expression_id (expr);
- }
- }
- else if (newref)
+ if (newref)
{
PRE_EXPR_REFERENCE (expr) = newref;
constant = fully_constant_expression (expr);
@@ -2014,57 +1998,19 @@ value_dies_in_block_x (pre_expr expr, basic_block block)
}
-#define union_contains_value(SET1, SET2, VAL) \
- (bitmap_set_contains_value ((SET1), (VAL)) \
- || ((SET2) && bitmap_set_contains_value ((SET2), (VAL))))
+/* Determine if OP is valid in SET1 U SET2, which it is when the union
+ contains its value-id. */
-/* Determine if vn_reference_op_t VRO is legal in SET1 U SET2.
- */
static bool
-vro_valid_in_sets (bitmap_set_t set1, bitmap_set_t set2,
- vn_reference_op_t vro)
+op_valid_in_sets (bitmap_set_t set1, bitmap_set_t set2, tree op)
{
- if (vro->op0 && TREE_CODE (vro->op0) == SSA_NAME)
- {
- struct pre_expr_d temp;
- temp.kind = NAME;
- temp.id = 0;
- PRE_EXPR_NAME (&temp) = vro->op0;
- temp.id = lookup_expression_id (&temp);
- if (temp.id == 0)
- return false;
- if (!union_contains_value (set1, set2,
- get_expr_value_id (&temp)))
- return false;
- }
- if (vro->op1 && TREE_CODE (vro->op1) == SSA_NAME)
- {
- struct pre_expr_d temp;
- temp.kind = NAME;
- temp.id = 0;
- PRE_EXPR_NAME (&temp) = vro->op1;
- temp.id = lookup_expression_id (&temp);
- if (temp.id == 0)
- return false;
- if (!union_contains_value (set1, set2,
- get_expr_value_id (&temp)))
- return false;
- }
-
- if (vro->op2 && TREE_CODE (vro->op2) == SSA_NAME)
+ if (op && TREE_CODE (op) == SSA_NAME)
{
- struct pre_expr_d temp;
- temp.kind = NAME;
- temp.id = 0;
- PRE_EXPR_NAME (&temp) = vro->op2;
- temp.id = lookup_expression_id (&temp);
- if (temp.id == 0)
- return false;
- if (!union_contains_value (set1, set2,
- get_expr_value_id (&temp)))
+ unsigned int value_id = VN_INFO (op)->value_id;
+ if (!bitmap_set_contains_value (set1, value_id)
+ || (set2 && !bitmap_set_contains_value (set2, value_id)))
return false;
}
-
return true;
}
@@ -2087,28 +2033,8 @@ valid_in_sets (bitmap_set_t set1, bitmap_set_t set2, pre_expr expr,
unsigned int i;
vn_nary_op_t nary = PRE_EXPR_NARY (expr);
for (i = 0; i < nary->length; i++)
- {
- if (TREE_CODE (nary->op[i]) == SSA_NAME)
- {
- struct pre_expr_d temp;
- temp.kind = NAME;
- temp.id = 0;
- PRE_EXPR_NAME (&temp) = nary->op[i];
- temp.id = lookup_expression_id (&temp);
- if (temp.id == 0)
- return false;
- if (!union_contains_value (set1, set2,
- get_expr_value_id (&temp)))
- return false;
- }
- }
- /* If the NARY may trap make sure the block does not contain
- a possible exit point.
- ??? This is overly conservative if we translate AVAIL_OUT
- as the available expression might be after the exit point. */
- if (BB_MAY_NOTRETURN (block)
- && vn_nary_may_trap (nary))
- return false;
+ if (!op_valid_in_sets (set1, set2, nary->op[i]))
+ return false;
return true;
}
break;
@@ -2120,7 +2046,9 @@ valid_in_sets (bitmap_set_t set1, bitmap_set_t set2, pre_expr expr,
FOR_EACH_VEC_ELT (vn_reference_op_s, ref->operands, i, vro)
{
- if (!vro_valid_in_sets (set1, set2, vro))
+ if (!op_valid_in_sets (set1, set2, vro->op0)
+ || !op_valid_in_sets (set1, set2, vro->op1)
+ || !op_valid_in_sets (set1, set2, vro->op2))
return false;
}
return true;
@@ -2171,35 +2099,44 @@ clean (bitmap_set_t set, basic_block block)
}
/* Clean the set of expressions that are no longer valid in SET because
- they are clobbered in BLOCK. */
+ they are clobbered in BLOCK or because they trap and may not be executed. */
static void
prune_clobbered_mems (bitmap_set_t set, basic_block block)
{
- VEC (pre_expr, heap) *exprs = sorted_array_from_bitmap_set (set);
- pre_expr expr;
- int i;
+ bitmap_iterator bi;
+ unsigned i;
- FOR_EACH_VEC_ELT (pre_expr, exprs, i, expr)
+ FOR_EACH_EXPR_ID_IN_SET (set, i, bi)
{
- vn_reference_t ref;
- if (expr->kind != REFERENCE)
- continue;
-
- ref = PRE_EXPR_REFERENCE (expr);
- if (ref->vuse)
+ pre_expr expr = expression_for_id (i);
+ if (expr->kind == REFERENCE)
{
- gimple def_stmt = SSA_NAME_DEF_STMT (ref->vuse);
- if (!gimple_nop_p (def_stmt)
- && ((gimple_bb (def_stmt) != block
- && !dominated_by_p (CDI_DOMINATORS,
- block, gimple_bb (def_stmt)))
- || (gimple_bb (def_stmt) == block
- && value_dies_in_block_x (expr, block))))
+ vn_reference_t ref = PRE_EXPR_REFERENCE (expr);
+ if (ref->vuse)
+ {
+ gimple def_stmt = SSA_NAME_DEF_STMT (ref->vuse);
+ if (!gimple_nop_p (def_stmt)
+ && ((gimple_bb (def_stmt) != block
+ && !dominated_by_p (CDI_DOMINATORS,
+ block, gimple_bb (def_stmt)))
+ || (gimple_bb (def_stmt) == block
+ && value_dies_in_block_x (expr, block))))
+ bitmap_remove_from_set (set, expr);
+ }
+ }
+ else if (expr->kind == NARY)
+ {
+ vn_nary_op_t nary = PRE_EXPR_NARY (expr);
+ /* If the NARY may trap make sure the block does not contain
+ a possible exit point.
+ ??? This is overly conservative if we translate AVAIL_OUT
+ as the available expression might be after the exit point. */
+ if (BB_MAY_NOTRETURN (block)
+ && vn_nary_may_trap (nary))
bitmap_remove_from_set (set, expr);
}
}
- VEC_free (pre_expr, heap, exprs);
}
static sbitmap has_abnormal_preds;
@@ -3330,13 +3267,6 @@ insert_into_preds_of_block (basic_block block, unsigned int exprnum,
tree temp;
gimple phi;
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "Found partial redundancy for expression ");
- print_pre_expr (dump_file, expr);
- fprintf (dump_file, " (%04d)\n", val);
- }
-
/* Make sure we aren't creating an induction variable. */
if (block->loop_depth > 0 && EDGE_COUNT (block->preds) == 2)
{
@@ -3651,11 +3581,21 @@ do_regular_insertion (basic_block block, basic_block dom)
"optimized for speed edge\n", val);
}
}
- else if (dbg_cnt (treepre_insert)
- && insert_into_preds_of_block (block,
- get_expression_id (expr),
- avail))
- new_stuff = true;
+ else if (dbg_cnt (treepre_insert))
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Found partial redundancy for "
+ "expression ");
+ print_pre_expr (dump_file, expr);
+ fprintf (dump_file, " (%04d)\n",
+ get_expr_value_id (expr));
+ }
+ if (insert_into_preds_of_block (block,
+ get_expression_id (expr),
+ avail))
+ new_stuff = true;
+ }
}
/* If all edges produce the same value and that value is
an invariant, then the PHI has the same value on all
@@ -3813,6 +3753,14 @@ do_partial_partial_insertion (basic_block block, basic_block dom)
else if (dbg_cnt (treepre_insert))
{
pre_stats.pa_insert++;
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Found partial partial redundancy "
+ "for expression ");
+ print_pre_expr (dump_file, expr);
+ fprintf (dump_file, " (%04d)\n",
+ get_expr_value_id (expr));
+ }
if (insert_into_preds_of_block (block,
get_expression_id (expr),
avail))
@@ -3888,6 +3836,8 @@ insert (void)
while (new_stuff)
{
num_iterations++;
+ if (dump_file && dump_flags & TDF_DETAILS)
+ fprintf (dump_file, "Starting insert iteration %d\n", num_iterations);
new_stuff = insert_aux (ENTRY_BLOCK_PTR);
}
statistics_histogram_event (cfun, "insert iterations", num_iterations);
@@ -4137,6 +4087,13 @@ compute_avail (void)
if (TREE_CODE (nary->op[i]) == SSA_NAME)
add_to_exp_gen (block, nary->op[i]);
+ /* If the NARY traps and there was a preceding
+ point in the block that might not return avoid
+ adding the nary to EXP_GEN. */
+ if (BB_MAY_NOTRETURN (block)
+ && vn_nary_may_trap (nary))
+ continue;
+
result = (pre_expr) pool_alloc (pre_expr_pool);
result->kind = NARY;
result->id = 0;
diff --git a/gcc/tree-ssa-sccvn.c b/gcc/tree-ssa-sccvn.c
index e9e6bfa7c70..ad9460b9411 100644
--- a/gcc/tree-ssa-sccvn.c
+++ b/gcc/tree-ssa-sccvn.c
@@ -1348,18 +1348,19 @@ vn_reference_lookup_2 (ao_ref *op ATTRIBUTE_UNUSED, tree vuse, void *vr_)
/* Lookup an existing or insert a new vn_reference entry into the
value table for the VUSE, SET, TYPE, OPERANDS reference which
- has the constant value CST. */
+ has the value VALUE which is either a constant or an SSA name. */
static vn_reference_t
-vn_reference_lookup_or_insert_constant_for_pieces (tree vuse,
- alias_set_type set,
- tree type,
- VEC (vn_reference_op_s,
- heap) *operands,
- tree cst)
+vn_reference_lookup_or_insert_for_pieces (tree vuse,
+ alias_set_type set,
+ tree type,
+ VEC (vn_reference_op_s,
+ heap) *operands,
+ tree value)
{
struct vn_reference_s vr1;
vn_reference_t result;
+ unsigned value_id;
vr1.vuse = vuse;
vr1.operands = operands;
vr1.type = type;
@@ -1367,10 +1368,13 @@ vn_reference_lookup_or_insert_constant_for_pieces (tree vuse,
vr1.hashcode = vn_reference_compute_hash (&vr1);
if (vn_reference_lookup_1 (&vr1, &result))
return result;
+ if (TREE_CODE (value) == SSA_NAME)
+ value_id = VN_INFO (value)->value_id;
+ else
+ value_id = get_or_alloc_constant_value_id (value);
return vn_reference_insert_pieces (vuse, set, type,
VEC_copy (vn_reference_op_s, heap,
- operands), cst,
- get_or_alloc_constant_value_id (cst));
+ operands), value, value_id);
}
/* Callback for walk_non_aliased_vuses. Tries to perform a lookup
@@ -1452,7 +1456,7 @@ vn_reference_lookup_3 (ao_ref *ref, tree vuse, void *vr_)
&& offset2 + size2 >= offset + maxsize)
{
tree val = build_zero_cst (vr->type);
- return vn_reference_lookup_or_insert_constant_for_pieces
+ return vn_reference_lookup_or_insert_for_pieces
(vuse, vr->set, vr->type, vr->operands, val);
}
}
@@ -1473,7 +1477,7 @@ vn_reference_lookup_3 (ao_ref *ref, tree vuse, void *vr_)
&& offset2 + size2 >= offset + maxsize)
{
tree val = build_zero_cst (vr->type);
- return vn_reference_lookup_or_insert_constant_for_pieces
+ return vn_reference_lookup_or_insert_for_pieces
(vuse, vr->set, vr->type, vr->operands, val);
}
}
@@ -1514,7 +1518,7 @@ vn_reference_lookup_3 (ao_ref *ref, tree vuse, void *vr_)
/ BITS_PER_UNIT),
ref->size / BITS_PER_UNIT);
if (val)
- return vn_reference_lookup_or_insert_constant_for_pieces
+ return vn_reference_lookup_or_insert_for_pieces
(vuse, vr->set, vr->type, vr->operands, val);
}
}
@@ -1568,7 +1572,7 @@ vn_reference_lookup_3 (ao_ref *ref, tree vuse, void *vr_)
}
}
if (val)
- return vn_reference_lookup_or_insert_constant_for_pieces
+ return vn_reference_lookup_or_insert_for_pieces
(vuse, vr->set, vr->type, vr->operands, val);
}
}
diff --git a/gcc/tree-ssanames.c b/gcc/tree-ssanames.c
index 7c315509bd1..64455af9604 100644
--- a/gcc/tree-ssanames.c
+++ b/gcc/tree-ssanames.c
@@ -238,6 +238,62 @@ release_ssa_name (tree var)
}
}
+/* If the alignment of the pointer described by PI is known, return true and
+ store the alignment and the deviation from it into *ALIGNP and *MISALIGNP
+ respectively. Otherwise return false. */
+
+bool
+get_ptr_info_alignment (struct ptr_info_def *pi, unsigned int *alignp,
+ unsigned int *misalignp)
+{
+ if (pi->align)
+ {
+ *alignp = pi->align;
+ *misalignp = pi->misalign;
+ return true;
+ }
+ else
+ return false;
+}
+
+/* State that the pointer described by PI has unknown alignment. */
+
+void
+mark_ptr_info_alignment_unknown (struct ptr_info_def *pi)
+{
+ pi->align = 0;
+ pi->misalign = 0;
+}
+
+/* Store the power-of-two byte alignment and the deviation from that
+ alignment of pointer described by PI to ALIGN and MISALIGN
+ respectively. */
+
+void
+set_ptr_info_alignment (struct ptr_info_def *pi, unsigned int align,
+ unsigned int misalign)
+{
+ gcc_checking_assert (align != 0);
+ gcc_assert ((align & (align - 1)) == 0);
+ gcc_assert ((misalign & ~(align - 1)) == 0);
+
+ pi->align = align;
+ pi->misalign = misalign;
+}
+
+/* If pointer described by PI has known alignment, increase its known
+ misalignment by INCREMENT modulo its current alignment. */
+
+void
+adjust_ptr_info_misalignment (struct ptr_info_def *pi,
+ unsigned int increment)
+{
+ if (pi->align != 0)
+ {
+ pi->misalign += increment;
+ pi->misalign &= (pi->align - 1);
+ }
+}
/* Return the alias information associated with pointer T. It creates a
new instance if none existed. */
@@ -254,8 +310,7 @@ get_ptr_info (tree t)
{
pi = ggc_alloc_cleared_ptr_info_def ();
pt_solution_reset (&pi->pt);
- pi->align = 1;
- pi->misalign = 0;
+ mark_ptr_info_alignment_unknown (pi);
SSA_NAME_PTR_INFO (t) = pi;
}
diff --git a/gcc/tree-switch-conversion.c b/gcc/tree-switch-conversion.c
index 3d10750e4dc..4f3d1d30f20 100644
--- a/gcc/tree-switch-conversion.c
+++ b/gcc/tree-switch-conversion.c
@@ -800,6 +800,14 @@ gen_inbound_check (gimple swtch, struct switch_conv_info *info)
location_t loc = gimple_location (swtch);
gcc_assert (info->default_values);
+
+ /* Make no effort to update the post-dominator tree. It is actually not
+ that hard for the transformations we have performed, but it is not
+ supported by iterate_fix_dominators.
Freeing post-dominance info is done early to avoid pointless work in
+ create_basic_block, which is called when we split SWITCH_BB. */
+ free_dominance_info (CDI_POST_DOMINATORS);
+
bb0 = gimple_bb (swtch);
tidx = gimple_assign_lhs (info->arr_ref_first);
@@ -866,13 +874,32 @@ gen_inbound_check (gimple swtch, struct switch_conv_info *info)
bb2->frequency = EDGE_FREQUENCY (e02);
bbf->frequency = EDGE_FREQUENCY (e1f) + EDGE_FREQUENCY (e2f);
- prune_bbs (bbd, info->final_bb); /* To keep calc_dfs_tree() in dominance.c
- happy. */
+ /* Tidy blocks that have become unreachable. */
+ prune_bbs (bbd, info->final_bb);
+ /* Fixup the PHI nodes in bbF. */
fix_phi_nodes (e1f, e2f, bbf, info);
- free_dominance_info (CDI_DOMINATORS);
- free_dominance_info (CDI_POST_DOMINATORS);
+ /* Fix the dominator tree, if it is available. */
+ if (dom_info_available_p (CDI_DOMINATORS))
+ {
+ VEC (basic_block, heap) *bbs_to_fix_dom;
+
+ set_immediate_dominator (CDI_DOMINATORS, bb1, bb0);
+ set_immediate_dominator (CDI_DOMINATORS, bb2, bb0);
+ if (! get_immediate_dominator(CDI_DOMINATORS, bbf))
+ /* If bbD was the immediate dominator ... */
+ set_immediate_dominator (CDI_DOMINATORS, bbf, bb0);
+
+ bbs_to_fix_dom = VEC_alloc (basic_block, heap, 4);
+ VEC_quick_push (basic_block, bbs_to_fix_dom, bb0);
+ VEC_quick_push (basic_block, bbs_to_fix_dom, bb1);
+ VEC_quick_push (basic_block, bbs_to_fix_dom, bb2);
+ VEC_quick_push (basic_block, bbs_to_fix_dom, bbf);
+
+ iterate_fix_dominators (CDI_DOMINATORS, bbs_to_fix_dom, true);
+ VEC_free (basic_block, heap, bbs_to_fix_dom);
+ }
}
/* The following function is invoked on every switch statement (the current one
diff --git a/gcc/tree-vect-data-refs.c b/gcc/tree-vect-data-refs.c
index 37df7ab32e9..715e3ffde61 100644
--- a/gcc/tree-vect-data-refs.c
+++ b/gcc/tree-vect-data-refs.c
@@ -1507,6 +1507,17 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
&& GROUP_FIRST_ELEMENT (stmt_info) != stmt)
continue;
+ /* FORNOW: Any strided load prevents peeling. The induction
+ variable analysis will fail when the prologue loop is generated,
+ and so we can't generate the new base for the pointer. */
+ if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
+ {
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump, "strided load prevents peeling");
+ do_peeling = false;
+ break;
+ }
+
/* For invariant accesses there is nothing to enhance. */
if (integer_zerop (DR_STEP (dr)))
continue;
@@ -3397,10 +3408,7 @@ vect_create_addr_base_for_vector_ref (gimple stmt,
{
duplicate_ssa_name_ptr_info (vec_stmt, DR_PTR_INFO (dr));
if (offset)
- {
- SSA_NAME_PTR_INFO (vec_stmt)->align = 1;
- SSA_NAME_PTR_INFO (vec_stmt)->misalign = 0;
- }
+ mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (vec_stmt));
}
if (vect_print_dump_info (REPORT_DETAILS))
@@ -3799,8 +3807,7 @@ bump_vector_ptr (tree dataref_ptr, gimple ptr_incr, gimple_stmt_iterator *gsi,
if (DR_PTR_INFO (dr))
{
duplicate_ssa_name_ptr_info (new_dataref_ptr, DR_PTR_INFO (dr));
- SSA_NAME_PTR_INFO (new_dataref_ptr)->align = 1;
- SSA_NAME_PTR_INFO (new_dataref_ptr)->misalign = 0;
+ mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (new_dataref_ptr));
}
if (!ptr_incr)
diff --git a/gcc/tree-vect-loop-manip.c b/gcc/tree-vect-loop-manip.c
index 63885f9578a..5327e98a240 100644
--- a/gcc/tree-vect-loop-manip.c
+++ b/gcc/tree-vect-loop-manip.c
@@ -1853,34 +1853,6 @@ vect_update_ivs_after_vectorizer (loop_vec_info loop_vinfo, tree niters,
}
}
-/* Return the more conservative threshold between the
- min_profitable_iters returned by the cost model and the user
- specified threshold, if provided. */
-
-static unsigned int
-conservative_cost_threshold (loop_vec_info loop_vinfo,
- int min_profitable_iters)
-{
- unsigned int th;
- int min_scalar_loop_bound;
-
- min_scalar_loop_bound = ((PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
- * LOOP_VINFO_VECT_FACTOR (loop_vinfo)) - 1);
-
- /* Use the cost model only if it is more conservative than user specified
- threshold. */
- th = (unsigned) min_scalar_loop_bound;
- if (min_profitable_iters
- && (!min_scalar_loop_bound
- || min_profitable_iters > min_scalar_loop_bound))
- th = (unsigned) min_profitable_iters;
-
- if (th && vect_print_dump_info (REPORT_COST))
- fprintf (vect_dump, "Profitability threshold is %u loop iterations.", th);
-
- return th;
-}
-
/* Function vect_do_peeling_for_loop_bound
Peel the last iterations of the loop represented by LOOP_VINFO.
@@ -1896,7 +1868,7 @@ conservative_cost_threshold (loop_vec_info loop_vinfo,
void
vect_do_peeling_for_loop_bound (loop_vec_info loop_vinfo, tree *ratio,
- tree cond_expr, gimple_seq cond_expr_stmt_list)
+ unsigned int th, bool check_profitability)
{
tree ni_name, ratio_mult_vf_name;
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
@@ -1904,10 +1876,9 @@ vect_do_peeling_for_loop_bound (loop_vec_info loop_vinfo, tree *ratio,
edge update_e;
basic_block preheader;
int loop_num;
- bool check_profitability = false;
- unsigned int th = 0;
- int min_profitable_iters;
int max_iter;
+ tree cond_expr = NULL_TREE;
+ gimple_seq cond_expr_stmt_list = NULL;
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "=== vect_do_peeling_for_loop_bound ===");
@@ -1925,22 +1896,6 @@ vect_do_peeling_for_loop_bound (loop_vec_info loop_vinfo, tree *ratio,
loop_num = loop->num;
- /* If cost model check not done during versioning and
- peeling for alignment. */
- if (!LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
- && !LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo)
- && !LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo)
- && !cond_expr)
- {
- check_profitability = true;
-
- /* Get profitability threshold for vectorized loop. */
- min_profitable_iters = LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo);
-
- th = conservative_cost_threshold (loop_vinfo,
- min_profitable_iters);
- }
-
new_loop = slpeel_tree_peel_loop_to_edge (loop, single_exit (loop),
&ratio_mult_vf_name, ni_name, false,
th, check_profitability,
@@ -1967,7 +1922,9 @@ vect_do_peeling_for_loop_bound (loop_vec_info loop_vinfo, tree *ratio,
by ratio_mult_vf_name steps. */
vect_update_ivs_after_vectorizer (loop_vinfo, ratio_mult_vf_name, update_e);
- max_iter = MAX (LOOP_VINFO_VECT_FACTOR (loop_vinfo) - 1, (int) th);
+ max_iter = LOOP_VINFO_VECT_FACTOR (loop_vinfo) - 1;
+ if (check_profitability)
+ max_iter = MAX (max_iter, (int) th);
record_niter_bound (new_loop, shwi_to_double_int (max_iter), false, true);
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Setting upper bound of nb iterations for epilogue "
@@ -2158,15 +2115,14 @@ vect_update_inits_of_drs (loop_vec_info loop_vinfo, tree niters)
peeling is recorded in LOOP_VINFO_UNALIGNED_DR. */
void
-vect_do_peeling_for_alignment (loop_vec_info loop_vinfo)
+vect_do_peeling_for_alignment (loop_vec_info loop_vinfo,
+ unsigned int th, bool check_profitability)
{
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
tree niters_of_prolog_loop, ni_name;
tree n_iters;
tree wide_prolog_niters;
struct loop *new_loop;
- unsigned int th = 0;
- int min_profitable_iters;
int max_iter;
if (vect_print_dump_info (REPORT_DETAILS))
@@ -2178,22 +2134,19 @@ vect_do_peeling_for_alignment (loop_vec_info loop_vinfo)
niters_of_prolog_loop = vect_gen_niters_for_prolog_loop (loop_vinfo,
ni_name);
- /* Get profitability threshold for vectorized loop. */
- min_profitable_iters = LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo);
- th = conservative_cost_threshold (loop_vinfo,
- min_profitable_iters);
-
/* Peel the prolog loop and iterate it niters_of_prolog_loop. */
new_loop =
slpeel_tree_peel_loop_to_edge (loop, loop_preheader_edge (loop),
&niters_of_prolog_loop, ni_name, true,
- th, true, NULL_TREE, NULL);
+ th, check_profitability, NULL_TREE, NULL);
gcc_assert (new_loop);
#ifdef ENABLE_CHECKING
slpeel_verify_cfg_after_peeling (new_loop, loop);
#endif
- max_iter = MAX (LOOP_VINFO_VECT_FACTOR (loop_vinfo) - 1, (int) th);
+ max_iter = LOOP_VINFO_VECT_FACTOR (loop_vinfo) - 1;
+ if (check_profitability)
+ max_iter = MAX (max_iter, (int) th);
record_niter_bound (new_loop, shwi_to_double_int (max_iter), false, true);
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Setting upper bound of nb iterations for prologue "
@@ -2547,7 +2500,8 @@ vect_create_cond_for_alias_checks (loop_vec_info loop_vinfo,
*COND_EXPR_STMT_LIST. */
void
-vect_loop_versioning (loop_vec_info loop_vinfo)
+vect_loop_versioning (loop_vec_info loop_vinfo,
+ unsigned int th, bool check_profitability)
{
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
basic_block condition_bb;
@@ -2556,25 +2510,20 @@ vect_loop_versioning (loop_vec_info loop_vinfo)
basic_block new_exit_bb;
edge new_exit_e, e;
gimple orig_phi, new_phi;
- tree cond_expr;
+ tree cond_expr = NULL_TREE;
gimple_seq cond_expr_stmt_list = NULL;
tree arg;
unsigned prob = 4 * REG_BR_PROB_BASE / 5;
gimple_seq gimplify_stmt_list = NULL;
tree scalar_loop_iters = LOOP_VINFO_NITERS (loop_vinfo);
- int min_profitable_iters = 0;
- unsigned int th;
- /* Get profitability threshold for vectorized loop. */
- min_profitable_iters = LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo);
-
- th = conservative_cost_threshold (loop_vinfo,
- min_profitable_iters);
-
- cond_expr = fold_build2 (GT_EXPR, boolean_type_node, scalar_loop_iters,
- build_int_cst (TREE_TYPE (scalar_loop_iters), th));
- cond_expr = force_gimple_operand_1 (cond_expr, &cond_expr_stmt_list,
- is_gimple_condexpr, NULL_TREE);
+ if (check_profitability)
+ {
+ cond_expr = fold_build2 (GT_EXPR, boolean_type_node, scalar_loop_iters,
+ build_int_cst (TREE_TYPE (scalar_loop_iters), th));
+ cond_expr = force_gimple_operand_1 (cond_expr, &cond_expr_stmt_list,
+ is_gimple_condexpr, NULL_TREE);
+ }
if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
vect_create_cond_for_align_checks (loop_vinfo, &cond_expr,
diff --git a/gcc/tree-vect-loop.c b/gcc/tree-vect-loop.c
index fa38c524f09..b2ee97a1c22 100644
--- a/gcc/tree-vect-loop.c
+++ b/gcc/tree-vect-loop.c
@@ -183,7 +183,7 @@ vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
HOST_WIDE_INT dummy;
gimple stmt, pattern_stmt = NULL;
gimple_seq pattern_def_seq = NULL;
- gimple_stmt_iterator pattern_def_si = gsi_start (NULL);
+ gimple_stmt_iterator pattern_def_si = gsi_none ();
bool analyze_pattern_stmt = false;
if (vect_print_dump_info (REPORT_DETAILS))
@@ -336,7 +336,7 @@ vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
}
else
{
- pattern_def_si = gsi_start (NULL);
+ pattern_def_si = gsi_none ();
analyze_pattern_stmt = false;
}
}
@@ -5227,25 +5227,48 @@ vect_transform_loop (loop_vec_info loop_vinfo)
bool grouped_store;
bool slp_scheduled = false;
unsigned int nunits;
- tree cond_expr = NULL_TREE;
- gimple_seq cond_expr_stmt_list = NULL;
gimple stmt, pattern_stmt;
gimple_seq pattern_def_seq = NULL;
- gimple_stmt_iterator pattern_def_si = gsi_start (NULL);
+ gimple_stmt_iterator pattern_def_si = gsi_none ();
bool transform_pattern_stmt = false;
+ bool check_profitability = false;
+ int th;
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "=== vec_transform_loop ===");
+ /* Use the more conservative vectorization threshold. If the number
+ of iterations is constant assume the cost check has been performed
+ by our caller. If the threshold makes all loops profitable that
+ run at least the vectorization factor number of times checking
+ is pointless, too. */
+ th = ((PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
+ * LOOP_VINFO_VECT_FACTOR (loop_vinfo)) - 1);
+ th = MAX (th, LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo));
+ if (th >= LOOP_VINFO_VECT_FACTOR (loop_vinfo) - 1
+ && !LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
+ {
+ if (vect_print_dump_info (REPORT_COST))
+ fprintf (vect_dump,
+ "Profitability threshold is %d loop iterations.", th);
+ check_profitability = true;
+ }
+
/* Peel the loop if there are data refs with unknown alignment.
Only one data ref with unknown store is allowed. */
if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo))
- vect_do_peeling_for_alignment (loop_vinfo);
+ {
+ vect_do_peeling_for_alignment (loop_vinfo, th, check_profitability);
+ check_profitability = false;
+ }
if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
|| LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
- vect_loop_versioning (loop_vinfo);
+ {
+ vect_loop_versioning (loop_vinfo, th, check_profitability);
+ check_profitability = false;
+ }
/* If the loop has a symbolic number of iterations 'n' (i.e. it's not a
compile time constant), or it is a constant that doesn't divide by the
@@ -5260,7 +5283,7 @@ vect_transform_loop (loop_vec_info loop_vinfo)
&& LOOP_VINFO_INT_NITERS (loop_vinfo) % vectorization_factor != 0)
|| LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
vect_do_peeling_for_loop_bound (loop_vinfo, &ratio,
- cond_expr, cond_expr_stmt_list);
+ th, check_profitability);
else
ratio = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)),
LOOP_VINFO_INT_NITERS (loop_vinfo) / vectorization_factor);
@@ -5409,7 +5432,7 @@ vect_transform_loop (loop_vec_info loop_vinfo)
}
else
{
- pattern_def_si = gsi_start (NULL);
+ pattern_def_si = gsi_none ();
transform_pattern_stmt = false;
}
}
diff --git a/gcc/tree-vect-patterns.c b/gcc/tree-vect-patterns.c
index 79357f51f5f..b4fadf8b69e 100644
--- a/gcc/tree-vect-patterns.c
+++ b/gcc/tree-vect-patterns.c
@@ -63,8 +63,8 @@ static vect_recog_func_ptr vect_vect_recog_func_ptrs[NUM_PATTERNS] = {
vect_recog_widen_sum_pattern,
vect_recog_dot_prod_pattern,
vect_recog_pow_pattern,
- vect_recog_over_widening_pattern,
vect_recog_widen_shift_pattern,
+ vect_recog_over_widening_pattern,
vect_recog_vector_vector_shift_pattern,
vect_recog_sdivmod_pow2_pattern,
vect_recog_mixed_size_cond_pattern,
@@ -84,6 +84,60 @@ new_pattern_def_seq (stmt_vec_info stmt_info, gimple stmt)
append_pattern_def_seq (stmt_info, stmt);
}
+/* Check whether STMT2 is in the same loop or basic block as STMT1.
+ Which of the two applies depends on whether we're currently doing
+ loop-based or basic-block-based vectorization, as determined by
+ the vinfo_for_stmt for STMT1 (which must be defined).
+
+ If this returns true, vinfo_for_stmt for STMT2 is guaranteed
+ to be defined as well. */
+
+static bool
+vect_same_loop_or_bb_p (gimple stmt1, gimple stmt2)
+{
+ stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt1);
+ loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
+ bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
+
+ if (!gimple_bb (stmt2))
+ return false;
+
+ if (loop_vinfo)
+ {
+ struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ if (!flow_bb_inside_loop_p (loop, gimple_bb (stmt2)))
+ return false;
+ }
+ else
+ {
+ if (gimple_bb (stmt2) != BB_VINFO_BB (bb_vinfo)
+ || gimple_code (stmt2) == GIMPLE_PHI)
+ return false;
+ }
+
+ gcc_assert (vinfo_for_stmt (stmt2));
+ return true;
+}
+
+/* If the LHS of DEF_STMT has a single use, and that statement is
+ in the same loop or basic block, return it. */
+
+static gimple
+vect_single_imm_use (gimple def_stmt)
+{
+ tree lhs = gimple_assign_lhs (def_stmt);
+ use_operand_p use_p;
+ gimple use_stmt;
+
+ if (!single_imm_use (lhs, &use_p, &use_stmt))
+ return NULL;
+
+ if (!vect_same_loop_or_bb_p (def_stmt, use_stmt))
+ return NULL;
+
+ return use_stmt;
+}
+
/* Check whether NAME, an ssa-name used in USE_STMT,
is a result of a type promotion or demotion, such that:
DEF_STMT: NAME = NOP (name0)
@@ -400,16 +454,6 @@ vect_handle_widen_op_by_const (gimple stmt, enum tree_code code,
{
tree new_type, new_oprnd, tmp;
gimple new_stmt;
- loop_vec_info loop_vinfo;
- struct loop *loop = NULL;
- bb_vec_info bb_vinfo;
- stmt_vec_info stmt_vinfo;
-
- stmt_vinfo = vinfo_for_stmt (stmt);
- loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
- bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
- if (loop_vinfo)
- loop = LOOP_VINFO_LOOP (loop_vinfo);
if (code != MULT_EXPR && code != LSHIFT_EXPR)
return false;
@@ -425,12 +469,10 @@ vect_handle_widen_op_by_const (gimple stmt, enum tree_code code,
return true;
}
- if (TYPE_PRECISION (type) < (TYPE_PRECISION (*half_type) * 4)
- || !gimple_bb (def_stmt)
- || (loop && !flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
- || (!loop && gimple_bb (def_stmt) != BB_VINFO_BB (bb_vinfo)
- && gimple_code (def_stmt) != GIMPLE_PHI)
- || !vinfo_for_stmt (def_stmt))
+ if (TYPE_PRECISION (type) < (TYPE_PRECISION (*half_type) * 4))
+ return false;
+
+ if (!vect_same_loop_or_bb_p (stmt, def_stmt))
return false;
/* TYPE is 4 times bigger than HALF_TYPE, try widening operation for
@@ -564,16 +606,6 @@ vect_recog_widen_mult_pattern (VEC (gimple, heap) **stmts,
VEC (tree, heap) *dummy_vec;
bool op1_ok;
bool promotion;
- loop_vec_info loop_vinfo;
- struct loop *loop = NULL;
- bb_vec_info bb_vinfo;
- stmt_vec_info stmt_vinfo;
-
- stmt_vinfo = vinfo_for_stmt (last_stmt);
- loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
- bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
- if (loop_vinfo)
- loop = LOOP_VINFO_LOOP (loop_vinfo);
if (!is_gimple_assign (last_stmt))
return NULL;
@@ -623,33 +655,18 @@ vect_recog_widen_mult_pattern (VEC (gimple, heap) **stmts,
Use unsigned TYPE as the type for WIDEN_MULT_EXPR. */
if (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (half_type0))
{
- tree lhs = gimple_assign_lhs (last_stmt), use_lhs;
- imm_use_iterator imm_iter;
- use_operand_p use_p;
- int nuses = 0;
- gimple use_stmt = NULL;
+ gimple use_stmt;
+ tree use_lhs;
tree use_type;
if (TYPE_UNSIGNED (type) == TYPE_UNSIGNED (half_type1))
return NULL;
- FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
- {
- if (is_gimple_debug (USE_STMT (use_p)))
- continue;
- use_stmt = USE_STMT (use_p);
- nuses++;
- }
-
- if (nuses != 1 || !is_gimple_assign (use_stmt)
- || gimple_assign_rhs_code (use_stmt) != NOP_EXPR)
+ use_stmt = vect_single_imm_use (last_stmt);
+ if (!use_stmt || !is_gimple_assign (use_stmt)
+ || gimple_assign_rhs_code (use_stmt) != NOP_EXPR)
return NULL;
- if (!gimple_bb (use_stmt)
- || (loop && !flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
- || (!loop && gimple_bb (use_stmt) != BB_VINFO_BB (bb_vinfo)))
- return NULL;
-
use_lhs = gimple_assign_lhs (use_stmt);
use_type = TREE_TYPE (use_lhs);
if (!INTEGRAL_TYPE_P (use_type)
@@ -952,14 +969,8 @@ vect_operation_fits_smaller_type (gimple stmt, tree def, tree *new_type,
tree interm_type = NULL_TREE, half_type, tmp, new_oprnd, type;
gimple def_stmt, new_stmt;
bool first = false;
- loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (vinfo_for_stmt (stmt));
- bb_vec_info bb_info = STMT_VINFO_BB_VINFO (vinfo_for_stmt (stmt));
- struct loop *loop = NULL;
bool promotion;
- if (loop_info)
- loop = LOOP_VINFO_LOOP (loop_info);
-
*op0 = NULL_TREE;
*op1 = NULL_TREE;
*new_def_stmt = NULL;
@@ -991,13 +1002,9 @@ vect_operation_fits_smaller_type (gimple stmt, tree def, tree *new_type,
{
first = true;
if (!type_conversion_p (oprnd, stmt, false, &half_type, &def_stmt,
- &promotion)
- || !promotion
- || !gimple_bb (def_stmt)
- || (loop && !flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
- || (!loop && gimple_bb (def_stmt) != BB_VINFO_BB (bb_info)
- && gimple_code (def_stmt) != GIMPLE_PHI)
- || !vinfo_for_stmt (def_stmt))
+ &promotion)
+ || !promotion
+ || !vect_same_loop_or_bb_p (stmt, def_stmt))
return false;
}
@@ -1164,23 +1171,10 @@ vect_recog_over_widening_pattern (VEC (gimple, heap) **stmts,
{
gimple stmt = VEC_pop (gimple, *stmts);
gimple pattern_stmt = NULL, new_def_stmt, prev_stmt = NULL, use_stmt = NULL;
- tree op0, op1, vectype = NULL_TREE, lhs, use_lhs, use_type;
- imm_use_iterator imm_iter;
- use_operand_p use_p;
- int nuses = 0;
+ tree op0, op1, vectype = NULL_TREE, use_lhs, use_type;
tree var = NULL_TREE, new_type = NULL_TREE, tmp, new_oprnd;
bool first;
tree type = NULL;
- loop_vec_info loop_vinfo;
- struct loop *loop = NULL;
- bb_vec_info bb_vinfo;
- stmt_vec_info stmt_vinfo;
-
- stmt_vinfo = vinfo_for_stmt (stmt);
- loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
- bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
- if (loop_vinfo)
- loop = LOOP_VINFO_LOOP (loop_vinfo);
first = true;
while (1)
@@ -1201,20 +1195,8 @@ vect_recog_over_widening_pattern (VEC (gimple, heap) **stmts,
}
/* STMT can be performed on a smaller type. Check its uses. */
- lhs = gimple_assign_lhs (stmt);
- nuses = 0;
- FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
- {
- if (is_gimple_debug (USE_STMT (use_p)))
- continue;
- use_stmt = USE_STMT (use_p);
- nuses++;
- }
-
- if (nuses != 1 || !is_gimple_assign (use_stmt)
- || !gimple_bb (use_stmt)
- || (loop && !flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
- || (!loop && gimple_bb (use_stmt) != BB_VINFO_BB (bb_vinfo)))
+ use_stmt = vect_single_imm_use (stmt);
+ if (!use_stmt || !is_gimple_assign (use_stmt))
return NULL;
/* Create pattern statement for STMT. */
@@ -1327,16 +1309,20 @@ vect_recog_over_widening_pattern (VEC (gimple, heap) **stmts,
where type 'TYPE' is at least double the size of type 'type'.
- Also detect unsigned cases:
+ Also detect cases where the shift result is immediately converted
+ to another type 'result_type' that is no larger in size than 'TYPE'.
+ In those cases we perform a widen-shift that directly results in
+ 'result_type', to avoid a possible over-widening situation:
- unsigned type a_t;
- unsigned TYPE u_res_T;
+ type a_t;
TYPE a_T, res_T;
+ result_type res_result;
S1 a_t = ;
S2 a_T = (TYPE) a_t;
S3 res_T = a_T << CONST;
- S4 u_res_T = (unsigned TYPE) res_T;
+ S4 res_result = (result_type) res_T;
+ '--> res_result' = a_t w<< CONST;
And a case when 'TYPE' is 4 times bigger than 'type'. In that case we
create an additional pattern stmt for S2 to create a variable of an
@@ -1377,60 +1363,21 @@ vect_recog_widen_shift_pattern (VEC (gimple, heap) **stmts,
gimple def_stmt0;
tree oprnd0, oprnd1;
tree type, half_type0;
- gimple pattern_stmt, orig_stmt = NULL;
+ gimple pattern_stmt;
tree vectype, vectype_out = NULL_TREE;
tree dummy;
tree var;
enum tree_code dummy_code;
int dummy_int;
VEC (tree, heap) * dummy_vec;
- gimple use_stmt = NULL;
- bool over_widen = false;
+ gimple use_stmt;
bool promotion;
if (!is_gimple_assign (last_stmt) || !vinfo_for_stmt (last_stmt))
return NULL;
- orig_stmt = last_stmt;
if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (last_stmt)))
- {
- /* This statement was also detected as over-widening operation (it can't
- be any other pattern, because only over-widening detects shifts).
- LAST_STMT is the final type demotion statement, but its related
- statement is shift. We analyze the related statement to catch cases:
-
- orig code:
- type a_t;
- itype res;
- TYPE a_T, res_T;
-
- S1 a_T = (TYPE) a_t;
- S2 res_T = a_T << CONST;
- S3 res = (itype)res_T;
-
- (size of type * 2 <= size of itype
- and size of itype * 2 <= size of TYPE)
-
- code after over-widening pattern detection:
-
- S1 a_T = (TYPE) a_t;
- --> a_it = (itype) a_t;
- S2 res_T = a_T << CONST;
- S3 res = (itype)res_T; <--- LAST_STMT
- --> res = a_it << CONST;
-
- after widen_shift:
-
- S1 a_T = (TYPE) a_t;
- --> a_it = (itype) a_t; - redundant
- S2 res_T = a_T << CONST;
- S3 res = (itype)res_T;
- --> res = a_t w<< CONST;
-
- i.e., we replace the three statements with res = a_t w<< CONST. */
- last_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (last_stmt));
- over_widen = true;
- }
+ return NULL;
if (gimple_assign_rhs_code (last_stmt) != LSHIFT_EXPR)
return NULL;
@@ -1454,59 +1401,29 @@ vect_recog_widen_shift_pattern (VEC (gimple, heap) **stmts,
oprnd0 = gimple_assign_rhs1 (def_stmt0);
type = gimple_expr_type (last_stmt);
+ /* Check for subsequent conversion to another type. */
+ use_stmt = vect_single_imm_use (last_stmt);
+ if (use_stmt && is_gimple_assign (use_stmt)
+ && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (use_stmt))
+ && !STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (use_stmt)))
+ {
+ tree use_lhs = gimple_assign_lhs (use_stmt);
+ tree use_type = TREE_TYPE (use_lhs);
+
+ if (INTEGRAL_TYPE_P (use_type)
+ && TYPE_PRECISION (use_type) <= TYPE_PRECISION (type))
+ {
+ last_stmt = use_stmt;
+ type = use_type;
+ }
+ }
+
/* Check if this a widening operation. */
if (!vect_handle_widen_op_by_const (last_stmt, LSHIFT_EXPR, oprnd1,
&oprnd0, stmts,
type, &half_type0, def_stmt0))
return NULL;
- /* Handle unsigned case. Look for
- S4 u_res_T = (unsigned TYPE) res_T;
- Use unsigned TYPE as the type for WIDEN_LSHIFT_EXPR. */
- if (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (half_type0))
- {
- tree lhs = gimple_assign_lhs (last_stmt), use_lhs;
- imm_use_iterator imm_iter;
- use_operand_p use_p;
- int nuses = 0;
- tree use_type;
-
- if (over_widen)
- {
- /* In case of over-widening pattern, S4 should be ORIG_STMT itself.
- We check here that TYPE is the correct type for the operation,
- i.e., it's the type of the original result. */
- tree orig_type = gimple_expr_type (orig_stmt);
- if ((TYPE_UNSIGNED (type) != TYPE_UNSIGNED (orig_type))
- || (TYPE_PRECISION (type) != TYPE_PRECISION (orig_type)))
- return NULL;
- }
- else
- {
- FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
- {
- if (is_gimple_debug (USE_STMT (use_p)))
- continue;
- use_stmt = USE_STMT (use_p);
- nuses++;
- }
-
- if (nuses != 1 || !is_gimple_assign (use_stmt)
- || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (use_stmt)))
- return NULL;
-
- use_lhs = gimple_assign_lhs (use_stmt);
- use_type = TREE_TYPE (use_lhs);
-
- if (!INTEGRAL_TYPE_P (use_type)
- || (TYPE_UNSIGNED (type) == TYPE_UNSIGNED (use_type))
- || (TYPE_PRECISION (type) != TYPE_PRECISION (use_type)))
- return NULL;
-
- type = use_type;
- }
- }
-
/* Pattern detected. */
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "vect_recog_widen_shift_pattern: detected: ");
@@ -1535,11 +1452,6 @@ vect_recog_widen_shift_pattern (VEC (gimple, heap) **stmts,
if (vect_print_dump_info (REPORT_DETAILS))
print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);
- if (use_stmt)
- last_stmt = use_stmt;
- else
- last_stmt = orig_stmt;
-
VEC_safe_push (gimple, heap, *stmts, last_stmt);
return pattern_stmt;
}
diff --git a/gcc/tree-vect-stmts.c b/gcc/tree-vect-stmts.c
index 5e6f71a19bf..a0368d83e94 100644
--- a/gcc/tree-vect-stmts.c
+++ b/gcc/tree-vect-stmts.c
@@ -106,15 +106,12 @@ write_vector_array (gimple stmt, gimple_stmt_iterator *gsi, tree vect,
static tree
create_array_ref (tree type, tree ptr, struct data_reference *first_dr)
{
- struct ptr_info_def *pi;
tree mem_ref, alias_ptr_type;
alias_ptr_type = reference_alias_ptr_type (DR_REF (first_dr));
mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
/* Arrays have the same alignment as their type. */
- pi = get_ptr_info (ptr);
- pi->align = TYPE_ALIGN_UNIT (type);
- pi->misalign = 0;
+ set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
return mem_ref;
}
@@ -4029,7 +4026,7 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
next_stmt = first_stmt;
for (i = 0; i < vec_num; i++)
{
- struct ptr_info_def *pi;
+ unsigned align, misalign;
if (i > 0)
/* Bump the vector pointer. */
@@ -4046,25 +4043,26 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
data_ref = build2 (MEM_REF, TREE_TYPE (vec_oprnd), dataref_ptr,
build_int_cst (reference_alias_ptr_type
(DR_REF (first_dr)), 0));
- pi = get_ptr_info (dataref_ptr);
- pi->align = TYPE_ALIGN_UNIT (vectype);
+ align = TYPE_ALIGN_UNIT (vectype);
if (aligned_access_p (first_dr))
- pi->misalign = 0;
+ misalign = 0;
else if (DR_MISALIGNMENT (first_dr) == -1)
{
TREE_TYPE (data_ref)
= build_aligned_type (TREE_TYPE (data_ref),
TYPE_ALIGN (elem_type));
- pi->align = TYPE_ALIGN_UNIT (elem_type);
- pi->misalign = 0;
+ align = TYPE_ALIGN_UNIT (elem_type);
+ misalign = 0;
}
else
{
TREE_TYPE (data_ref)
= build_aligned_type (TREE_TYPE (data_ref),
TYPE_ALIGN (elem_type));
- pi->misalign = DR_MISALIGNMENT (first_dr);
+ misalign = DR_MISALIGNMENT (first_dr);
}
+ set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
+ misalign);
/* Arguments are ready. Create the new vector stmt. */
new_stmt = gimple_build_assign (data_ref, vec_oprnd);
@@ -4860,33 +4858,35 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
case dr_aligned:
case dr_unaligned_supported:
{
- struct ptr_info_def *pi;
+ unsigned int align, misalign;
+
data_ref
= build2 (MEM_REF, vectype, dataref_ptr,
build_int_cst (reference_alias_ptr_type
(DR_REF (first_dr)), 0));
- pi = get_ptr_info (dataref_ptr);
- pi->align = TYPE_ALIGN_UNIT (vectype);
+ align = TYPE_ALIGN_UNIT (vectype);
if (alignment_support_scheme == dr_aligned)
{
gcc_assert (aligned_access_p (first_dr));
- pi->misalign = 0;
+ misalign = 0;
}
else if (DR_MISALIGNMENT (first_dr) == -1)
{
TREE_TYPE (data_ref)
= build_aligned_type (TREE_TYPE (data_ref),
TYPE_ALIGN (elem_type));
- pi->align = TYPE_ALIGN_UNIT (elem_type);
- pi->misalign = 0;
+ align = TYPE_ALIGN_UNIT (elem_type);
+ misalign = 0;
}
else
{
TREE_TYPE (data_ref)
= build_aligned_type (TREE_TYPE (data_ref),
TYPE_ALIGN (elem_type));
- pi->misalign = DR_MISALIGNMENT (first_dr);
+ misalign = DR_MISALIGNMENT (first_dr);
}
+ set_ptr_info_alignment (get_ptr_info (dataref_ptr),
+ align, misalign);
break;
}
case dr_explicit_realign:
diff --git a/gcc/tree-vectorizer.h b/gcc/tree-vectorizer.h
index 095af937358..e8ca7cb7fcb 100644
--- a/gcc/tree-vectorizer.h
+++ b/gcc/tree-vectorizer.h
@@ -807,10 +807,10 @@ extern LOC vect_loop_location;
in tree-vect-loop-manip.c. */
extern void slpeel_make_loop_iterate_ntimes (struct loop *, tree);
extern bool slpeel_can_duplicate_loop_p (const struct loop *, const_edge);
-extern void vect_loop_versioning (loop_vec_info);
+extern void vect_loop_versioning (loop_vec_info, unsigned int, bool);
extern void vect_do_peeling_for_loop_bound (loop_vec_info, tree *,
- tree, gimple_seq);
-extern void vect_do_peeling_for_alignment (loop_vec_info);
+ unsigned int, bool);
+extern void vect_do_peeling_for_alignment (loop_vec_info, unsigned int, bool);
extern LOC find_loop_location (struct loop *);
extern bool vect_can_advance_ivs_p (loop_vec_info);
diff --git a/gcc/tree-vrp.c b/gcc/tree-vrp.c
index 90660967c27..72c647fca4a 100644
--- a/gcc/tree-vrp.c
+++ b/gcc/tree-vrp.c
@@ -695,17 +695,22 @@ get_value_range (const_tree var)
/* If VAR is a default definition of a parameter, the variable can
take any value in VAR's type. */
sym = SSA_NAME_VAR (var);
- if (SSA_NAME_IS_DEFAULT_DEF (var)
- && TREE_CODE (sym) == PARM_DECL)
- {
- /* Try to use the "nonnull" attribute to create ~[0, 0]
- anti-ranges for pointers. Note that this is only valid with
- default definitions of PARM_DECLs. */
- if (POINTER_TYPE_P (TREE_TYPE (sym))
- && nonnull_arg_p (sym))
+ if (SSA_NAME_IS_DEFAULT_DEF (var))
+ {
+ if (TREE_CODE (sym) == PARM_DECL)
+ {
+ /* Try to use the "nonnull" attribute to create ~[0, 0]
+ anti-ranges for pointers. Note that this is only valid with
+ default definitions of PARM_DECLs. */
+ if (POINTER_TYPE_P (TREE_TYPE (sym))
+ && nonnull_arg_p (sym))
+ set_value_range_to_nonnull (vr, TREE_TYPE (sym));
+ else
+ set_value_range_to_varying (vr);
+ }
+ else if (TREE_CODE (sym) == RESULT_DECL
+ && DECL_BY_REFERENCE (sym))
set_value_range_to_nonnull (vr, TREE_TYPE (sym));
- else
- set_value_range_to_varying (vr);
}
return vr;
diff --git a/gcc/tree.c b/gcc/tree.c
index 4623d3765e4..1ee3f1db51e 100644
--- a/gcc/tree.c
+++ b/gcc/tree.c
@@ -1062,10 +1062,7 @@ build_int_cst_type (tree type, HOST_WIDE_INT low)
tree
double_int_to_tree (tree type, double_int cst)
{
- /* Size types *are* sign extended. */
- bool sign_extended_type = (!TYPE_UNSIGNED (type)
- || (TREE_CODE (type) == INTEGER_TYPE
- && TYPE_IS_SIZETYPE (type)));
+ bool sign_extended_type = !TYPE_UNSIGNED (type);
cst = double_int_ext (cst, TYPE_PRECISION (type), !sign_extended_type);
@@ -1079,9 +1076,7 @@ bool
double_int_fits_to_tree_p (const_tree type, double_int cst)
{
/* Size types *are* sign extended. */
- bool sign_extended_type = (!TYPE_UNSIGNED (type)
- || (TREE_CODE (type) == INTEGER_TYPE
- && TYPE_IS_SIZETYPE (type)));
+ bool sign_extended_type = !TYPE_UNSIGNED (type);
double_int ext
= double_int_ext (cst, TYPE_PRECISION (type), !sign_extended_type);
@@ -1111,9 +1106,7 @@ force_fit_type_double (tree type, double_int cst, int overflowable,
bool sign_extended_type;
/* Size types *are* sign extended. */
- sign_extended_type = (!TYPE_UNSIGNED (type)
- || (TREE_CODE (type) == INTEGER_TYPE
- && TYPE_IS_SIZETYPE (type)));
+ sign_extended_type = !TYPE_UNSIGNED (type);
/* If we need to set overflow flags, return a new unshared node. */
if (overflowed || !double_int_fits_to_tree_p(type, cst))
@@ -6553,9 +6546,7 @@ host_integerp (const_tree t, int pos)
&& (HOST_WIDE_INT) TREE_INT_CST_LOW (t) >= 0)
|| (! pos && TREE_INT_CST_HIGH (t) == -1
&& (HOST_WIDE_INT) TREE_INT_CST_LOW (t) < 0
- && (!TYPE_UNSIGNED (TREE_TYPE (t))
- || (TREE_CODE (TREE_TYPE (t)) == INTEGER_TYPE
- && TYPE_IS_SIZETYPE (TREE_TYPE (t)))))
+ && !TYPE_UNSIGNED (TREE_TYPE (t)))
|| (pos && TREE_INT_CST_HIGH (t) == 0)));
}
@@ -6850,6 +6841,20 @@ compare_tree_int (const_tree t, unsigned HOST_WIDE_INT u)
return 1;
}
+/* Return true if SIZE represents a constant size that is in bounds of
+ what the middle-end and the backend accepts (covering not more than
+ half of the address-space). */
+
+bool
+valid_constant_size_p (const_tree size)
+{
+ if (! host_integerp (size, 1)
+ || TREE_OVERFLOW (size)
+ || tree_int_cst_sign_bit (size) != 0)
+ return false;
+ return true;
+}
+
/* Return true if CODE represents an associative tree code. Otherwise
return false. */
bool
@@ -8276,18 +8281,6 @@ int_fits_type_p (const_tree c, const_tree type)
dc = tree_to_double_int (c);
unsc = TYPE_UNSIGNED (TREE_TYPE (c));
- if (TREE_CODE (TREE_TYPE (c)) == INTEGER_TYPE
- && TYPE_IS_SIZETYPE (TREE_TYPE (c))
- && unsc)
- /* So c is an unsigned integer whose type is sizetype and type is not.
- sizetype'd integers are sign extended even though they are
- unsigned. If the integer value fits in the lower end word of c,
- and if the higher end word has all its bits set to 1, that
- means the higher end bits are set to 1 only for sign extension.
- So let's convert c into an equivalent zero extended unsigned
- integer. */
- dc = double_int_zext (dc, TYPE_PRECISION (TREE_TYPE (c)));
-
retry:
type_low_bound = TYPE_MIN_VALUE (type);
type_high_bound = TYPE_MAX_VALUE (type);
@@ -8306,10 +8299,6 @@ retry:
if (type_low_bound && TREE_CODE (type_low_bound) == INTEGER_CST)
{
dd = tree_to_double_int (type_low_bound);
- if (TREE_CODE (type) == INTEGER_TYPE
- && TYPE_IS_SIZETYPE (type)
- && TYPE_UNSIGNED (type))
- dd = double_int_zext (dd, TYPE_PRECISION (type));
if (unsc != TYPE_UNSIGNED (TREE_TYPE (type_low_bound)))
{
int c_neg = (!unsc && double_int_negative_p (dc));
@@ -8331,10 +8320,6 @@ retry:
if (type_high_bound && TREE_CODE (type_high_bound) == INTEGER_CST)
{
dd = tree_to_double_int (type_high_bound);
- if (TREE_CODE (type) == INTEGER_TYPE
- && TYPE_IS_SIZETYPE (type)
- && TYPE_UNSIGNED (type))
- dd = double_int_zext (dd, TYPE_PRECISION (type));
if (unsc != TYPE_UNSIGNED (TREE_TYPE (type_high_bound)))
{
int c_neg = (!unsc && double_int_negative_p (dc));
diff --git a/gcc/tree.h b/gcc/tree.h
index e719be2ed40..419d8f4e638 100644
--- a/gcc/tree.h
+++ b/gcc/tree.h
@@ -2251,17 +2251,6 @@ extern enum machine_mode vector_type_mode (const_tree);
#define TYPE_NO_FORCE_BLK(NODE) \
(TYPE_CHECK (NODE)->type_common.no_force_blk_flag)
-/* In an INTEGER_TYPE, it means the type represents a size. We use
- this both for validity checking and to permit optimizations that
- are unsafe for other types. Note that the C `size_t' type should
- *not* have this flag set. The `size_t' type is simply a typedef
- for an ordinary integer type that happens to be the type of an
- expression returned by `sizeof'; `size_t' has no special
- properties. Expressions whose type have TYPE_IS_SIZETYPE set are
- always actual sizes. */
-#define TYPE_IS_SIZETYPE(NODE) \
- (INTEGER_TYPE_CHECK (NODE)->type_common.no_force_blk_flag)
-
/* Nonzero in a type considered volatile as a whole. */
#define TYPE_VOLATILE(NODE) (TYPE_CHECK (NODE)->base.volatile_flag)
@@ -4451,6 +4440,7 @@ extern bool tree_expr_nonnegative_warnv_p (tree, bool *);
extern bool may_negate_without_overflow_p (const_tree);
extern tree strip_array_types (tree);
extern tree excess_precision_type (tree);
+extern bool valid_constant_size_p (const_tree);
/* Construct various nodes representing fract or accum data types. */
@@ -5464,10 +5454,12 @@ extern tree build_string_literal (int, const char *);
extern bool validate_arglist (const_tree, ...);
extern rtx builtin_memset_read_str (void *, HOST_WIDE_INT, enum machine_mode);
extern bool is_builtin_fn (tree);
-extern unsigned int get_object_alignment_1 (tree, unsigned HOST_WIDE_INT *);
+extern bool get_object_alignment_1 (tree, unsigned int *,
+ unsigned HOST_WIDE_INT *);
extern unsigned int get_object_alignment (tree);
extern unsigned int get_object_or_type_alignment (tree);
-extern unsigned int get_pointer_alignment_1 (tree, unsigned HOST_WIDE_INT *);
+extern bool get_pointer_alignment_1 (tree, unsigned int *,
+ unsigned HOST_WIDE_INT *);
extern unsigned int get_pointer_alignment (tree);
extern tree fold_call_stmt (gimple, bool);
extern tree gimple_fold_builtin_snprintf_chk (gimple, tree, enum built_in_function);
diff --git a/gcc/var-tracking.c b/gcc/var-tracking.c
index c3fe428ab90..69e6847d022 100644
--- a/gcc/var-tracking.c
+++ b/gcc/var-tracking.c
@@ -811,7 +811,7 @@ static HOST_WIDE_INT cfa_base_offset;
static inline rtx
compute_cfa_pointer (HOST_WIDE_INT adjustment)
{
- return plus_constant (cfa_base_rtx, adjustment + cfa_base_offset);
+ return plus_constant (Pmode, cfa_base_rtx, adjustment + cfa_base_offset);
}
/* Adjustment for hard_frame_pointer_rtx to cfa base reg,
@@ -4909,17 +4909,6 @@ find_use_val (rtx x, enum machine_mode mode, struct count_use_info *cui)
return NULL;
}
-/* Helper function to get mode of MEM's address. */
-
-static inline enum machine_mode
-get_address_mode (rtx mem)
-{
- enum machine_mode mode = GET_MODE (XEXP (mem, 0));
- if (mode != VOIDmode)
- return mode;
- return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
-}
-
/* Replace all registers and addresses in an expression with VALUE
expressions that map back to them, unless the expression is a
register. If no mapping is or can be performed, returns NULL. */
@@ -5982,7 +5971,8 @@ prepare_call_arguments (basic_block bb, rtx insn)
HOST_WIDE_INT token
= tree_low_cst (OBJ_TYPE_REF_TOKEN (obj_type_ref), 0);
if (token)
- clobbered = plus_constant (clobbered, token * GET_MODE_SIZE (mode));
+ clobbered = plus_constant (mode, clobbered,
+ token * GET_MODE_SIZE (mode));
clobbered = gen_rtx_MEM (mode, clobbered);
x = gen_rtx_CONCAT (mode, gen_rtx_CLOBBER (VOIDmode, pc_rtx), clobbered);
call_arguments
@@ -9021,7 +9011,8 @@ vt_add_function_parameter (tree parm)
off += INTVAL (XEXP (XEXP (incoming, 0), 1));
incoming
= replace_equiv_address_nv (incoming,
- plus_constant (arg_pointer_rtx, off));
+ plus_constant (Pmode,
+ arg_pointer_rtx, off));
}
#ifdef HAVE_window_save
diff --git a/gcc/varasm.c b/gcc/varasm.c
index 03ac49b4677..ce9e3280364 100644
--- a/gcc/varasm.c
+++ b/gcc/varasm.c
@@ -1992,7 +1992,7 @@ assemble_variable (tree decl, int top_level ATTRIBUTE_UNUSED,
return;
if (! dont_output_data
- && ! host_integerp (DECL_SIZE_UNIT (decl), 1))
+ && ! valid_constant_size_p (DECL_SIZE_UNIT (decl)))
{
error ("size of variable %q+D is too large", decl);
return;
@@ -4773,9 +4773,13 @@ output_constructor_regular_field (oc_local_state *local)
if (local->index != NULL_TREE)
{
+ /* Perform the index calculation in modulo arithmetic but
+ sign-extend the result because Ada has negative DECL_FIELD_OFFSETs
+ but we are using an unsigned sizetype. */
+ unsigned prec = TYPE_PRECISION (sizetype);
double_int idx = double_int_sub (tree_to_double_int (local->index),
tree_to_double_int (local->min_index));
- gcc_assert (double_int_fits_in_shwi_p (idx));
+ idx = double_int_sext (idx, prec);
fieldpos = (tree_low_cst (TYPE_SIZE_UNIT (TREE_TYPE (local->val)), 1)
* idx.low);
}
diff --git a/gcc/varpool.c b/gcc/varpool.c
index 103b5b5f810..ef025670125 100644
--- a/gcc/varpool.c
+++ b/gcc/varpool.c
@@ -269,24 +269,42 @@ assemble_aliases (struct varpool_node *node)
}
/* Output one variable, if necessary. Return whether we output it. */
+
bool
varpool_assemble_decl (struct varpool_node *node)
{
tree decl = node->symbol.decl;
- if (!TREE_ASM_WRITTEN (decl)
- && !node->alias
- && !node->symbol.in_other_partition
- && !DECL_EXTERNAL (decl)
- && (TREE_CODE (decl) != VAR_DECL || !DECL_HAS_VALUE_EXPR_P (decl)))
+ /* Aliases are outout when their target is produced or by
+ output_weakrefs. */
+ if (node->alias)
+ return false;
+
+ /* Constant pool is output from RTL land when the reference
+ survive till this level. */
+ if (DECL_IN_CONSTANT_POOL (decl) && TREE_ASM_WRITTEN (decl))
+ return false;
+
+ /* Decls with VALUE_EXPR should not be in the varpool at all. They
+ are not real variables, but just info for debugging and codegen.
+ Unfortunately at the moment emutls is not updating varpool correctly
+ after turning real vars into value_expr vars. */
+ if (DECL_HAS_VALUE_EXPR_P (decl)
+ && !targetm.have_tls)
+ return false;
+
+ gcc_checking_assert (!TREE_ASM_WRITTEN (decl)
+ && TREE_CODE (decl) == VAR_DECL
+ && !DECL_HAS_VALUE_EXPR_P (decl));
+
+ if (!node->symbol.in_other_partition
+ && !DECL_EXTERNAL (decl))
{
assemble_variable (decl, 0, 1, 0);
- if (TREE_ASM_WRITTEN (decl))
- {
- node->finalized = 1;
- assemble_aliases (node);
- return true;
- }
+ gcc_assert (TREE_ASM_WRITTEN (decl));
+ node->finalized = 1;
+ assemble_aliases (node);
+ return true;
}
return false;