summaryrefslogtreecommitdiff
path: root/gcc
diff options
context:
space:
mode:
authormrs <mrs@138bc75d-0d04-0410-961f-82ee72b054a4>2013-12-13 17:31:30 +0000
committermrs <mrs@138bc75d-0d04-0410-961f-82ee72b054a4>2013-12-13 17:31:30 +0000
commit3dd775fb895cffb77ac74098a74e9fca28edaf79 (patch)
treef68062e9cfe09046337dc976767a5f7938462868 /gcc
parent84014c53e113ab540befd1eceb8598d28a323ab3 (diff)
parent34a5d2a56d4b0a0ea74339c985c919aabfc530a4 (diff)
downloadgcc-3dd775fb895cffb77ac74098a74e9fca28edaf79.tar.gz
Merge in trunk.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/branches/wide-int@205966 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'gcc')
-rw-r--r--gcc/ChangeLog1719
-rw-r--r--gcc/DATESTAMP2
-rw-r--r--gcc/ada/ChangeLog29
-rw-r--r--gcc/ada/gcc-interface/Make-lang.in2
-rw-r--r--gcc/ada/gcc-interface/Makefile.in10
-rw-r--r--gcc/ada/gcc-interface/decl.c23
-rw-r--r--gcc/ada/gcc-interface/trans.c17
-rw-r--r--gcc/ada/indepsw-darwin.adb67
-rw-r--r--gcc/alias.c2
-rw-r--r--gcc/asan.c16
-rw-r--r--gcc/auto-inc-dec.c2
-rw-r--r--gcc/basic-block.h44
-rw-r--r--gcc/bb-reorder.c29
-rw-r--r--gcc/bitmap.c10
-rw-r--r--gcc/bitmap.h30
-rw-r--r--gcc/bt-load.c45
-rw-r--r--gcc/c-family/ChangeLog41
-rw-r--r--gcc/c-family/c-common.c35
-rw-r--r--gcc/c-family/c-common.h14
-rw-r--r--gcc/c-family/c-gimplify.c26
-rw-r--r--gcc/c-family/cilk.c50
-rw-r--r--gcc/c/ChangeLog21
-rw-r--r--gcc/c/c-decl.c3
-rw-r--r--gcc/c/c-objc-common.h10
-rw-r--r--gcc/c/c-parser.c12
-rw-r--r--gcc/c/c-typeck.c29
-rw-r--r--gcc/caller-save.c8
-rw-r--r--gcc/cfg.c32
-rw-r--r--gcc/cfganal.c35
-rw-r--r--gcc/cfgbuild.c12
-rw-r--r--gcc/cfgcleanup.c6
-rw-r--r--gcc/cfgexpand.c14
-rw-r--r--gcc/cfghooks.c16
-rw-r--r--gcc/cfgloop.c20
-rw-r--r--gcc/cfgloop.h3
-rw-r--r--gcc/cfgloopanal.c8
-rw-r--r--gcc/cfgloopmanip.c6
-rw-r--r--gcc/cfgrtl.c61
-rw-r--r--gcc/cgraph.h99
-rw-r--r--gcc/cgraphbuild.c22
-rw-r--r--gcc/cgraphunit.c16
-rw-r--r--gcc/cilk.h2
-rw-r--r--gcc/combine-stack-adj.c2
-rw-r--r--gcc/combine.c35
-rw-r--r--gcc/common.opt17
-rw-r--r--gcc/common/config/sh/sh-common.c1
-rw-r--r--gcc/conditions.h2
-rw-r--r--gcc/config.gcc63
-rw-r--r--gcc/config.in6
-rw-r--r--gcc/config/aarch64/aarch64-elf.h10
-rw-r--r--gcc/config/aarch64/aarch64.c7
-rw-r--r--gcc/config/aarch64/aarch64.md6
-rw-r--r--gcc/config/aarch64/t-aarch642
-rw-r--r--gcc/config/alpha/linux.h2
-rw-r--r--gcc/config/arc/arc.h3
-rw-r--r--gcc/config/arm/arm-cores.def1
-rw-r--r--gcc/config/arm/arm-tables.opt3
-rw-r--r--gcc/config/arm/arm-tune.md2
-rw-r--r--gcc/config/arm/arm.c155
-rw-r--r--gcc/config/arm/arm.md21
-rw-r--r--gcc/config/arm/arm.opt2
-rw-r--r--gcc/config/arm/bpabi.h2
-rw-r--r--gcc/config/arm/types.md2
-rw-r--r--gcc/config/bfin/bfin.c4
-rw-r--r--gcc/config/bfin/bfin.h3
-rw-r--r--gcc/config/bfin/uclinux.h3
-rw-r--r--gcc/config/c6x/c6x.c6
-rw-r--r--gcc/config/c6x/uclinux-elf.h3
-rw-r--r--gcc/config/epiphany/epiphany.md24
-rw-r--r--gcc/config/epiphany/resolve-sw-modes.c6
-rw-r--r--gcc/config/frv/frv-protos.h12
-rw-r--r--gcc/config/frv/frv.c18
-rw-r--r--gcc/config/h8300/h8300.md18
-rw-r--r--gcc/config/i386/i386-modes.def5
-rw-r--r--gcc/config/i386/i386.c278
-rw-r--r--gcc/config/i386/i386.h1
-rw-r--r--gcc/config/i386/i386.md184
-rw-r--r--gcc/config/i386/ia32intrin.h33
-rw-r--r--gcc/config/i386/sse.md54
-rw-r--r--gcc/config/ia64/ia64.c8
-rw-r--r--gcc/config/ia64/ia64.md41
-rw-r--r--gcc/config/linux-android.h3
-rw-r--r--gcc/config/linux-protos.h4
-rw-r--r--gcc/config/linux.c (renamed from gcc/config/linux-android.c)6
-rw-r--r--gcc/config/linux.h28
-rw-r--r--gcc/config/lm32/lm32.h1
-rw-r--r--gcc/config/lm32/uclinux-elf.h3
-rw-r--r--gcc/config/m32c/m32c.h1
-rw-r--r--gcc/config/m68k/uclinux.h3
-rw-r--r--gcc/config/mcore/mcore.md6
-rw-r--r--gcc/config/microblaze/microblaze.h1
-rw-r--r--gcc/config/mips/mips.c8
-rw-r--r--gcc/config/moxie/moxie.md6
-rw-r--r--gcc/config/moxie/uclinux.h8
-rw-r--r--gcc/config/msp430/msp430.c25
-rw-r--r--gcc/config/msp430/msp430.md10
-rw-r--r--gcc/config/pdp11/predicates.md4
-rw-r--r--gcc/config/picochip/picochip.c2
-rw-r--r--gcc/config/picochip/picochip.h2
-rw-r--r--gcc/config/rs6000/linux.h2
-rw-r--r--gcc/config/rs6000/linux64.h2
-rw-r--r--gcc/config/rs6000/rs6000.c2
-rw-r--r--gcc/config/rs6000/rs6000.md4
-rw-r--r--gcc/config/s390/s390.c4
-rw-r--r--gcc/config/score/score.c24
-rw-r--r--gcc/config/score/score.h16
-rw-r--r--gcc/config/sh/sh.c7
-rw-r--r--gcc/config/sh/sh.opt4
-rw-r--r--gcc/config/sh/sh_optimize_sett_clrt.cc2
-rw-r--r--gcc/config/sh/sh_treg_combine.cc2
-rw-r--r--gcc/config/sparc/sol2.h3
-rw-r--r--gcc/config/sparc/sparc.c701
-rw-r--r--gcc/config/sparc/sparc.h3
-rw-r--r--gcc/config/sparc/sparc.md42
-rw-r--r--gcc/config/spu/spu.c6
-rw-r--r--gcc/config/spu/spu.h2
-rw-r--r--gcc/config/t-linux (renamed from gcc/config/t-linux-android)2
-rw-r--r--gcc/config/tilegx/tilegx.c4
-rw-r--r--gcc/config/tilegx/tilegx.md138
-rw-r--r--gcc/config/tilepro/tilepro.c4
-rw-r--r--gcc/config/v850/v850.md10
-rwxr-xr-xgcc/configure71
-rw-r--r--gcc/configure.ac42
-rw-r--r--gcc/coretypes.h9
-rw-r--r--gcc/coverage.c2
-rw-r--r--gcc/cp/ChangeLog72
-rw-r--r--gcc/cp/class.c2
-rw-r--r--gcc/cp/cp-cilkplus.c70
-rw-r--r--gcc/cp/cp-gimplify.c25
-rw-r--r--gcc/cp/cp-objcp-common.h1
-rw-r--r--gcc/cp/cp-tree.h4
-rw-r--r--gcc/cp/decl.c101
-rw-r--r--gcc/cp/decl2.c8
-rw-r--r--gcc/cp/except.c18
-rw-r--r--gcc/cp/parser.c63
-rw-r--r--gcc/cp/parser.h1
-rw-r--r--gcc/cp/pt.c62
-rw-r--r--gcc/cp/semantics.c2
-rw-r--r--gcc/cp/typeck.c20
-rw-r--r--gcc/cp/vtable-class-hierarchy.c2
-rw-r--r--gcc/cprop.c23
-rw-r--r--gcc/cse.c8
-rw-r--r--gcc/cselib.h6
-rw-r--r--gcc/dbxout.c2
-rw-r--r--gcc/dce.c10
-rw-r--r--gcc/ddg.h5
-rw-r--r--gcc/defaults.h4
-rw-r--r--gcc/df-core.c68
-rw-r--r--gcc/df-problems.c54
-rw-r--r--gcc/df-scan.c42
-rw-r--r--gcc/df.h2
-rw-r--r--gcc/diagnostic.h8
-rw-r--r--gcc/doc/extend.texi7
-rw-r--r--gcc/doc/install.texi25
-rw-r--r--gcc/doc/invoke.texi78
-rw-r--r--gcc/doc/rtl.texi17
-rw-r--r--gcc/doc/tm.texi7
-rw-r--r--gcc/doc/tm.texi.in5
-rw-r--r--gcc/dominance.c37
-rw-r--r--gcc/domwalk.c2
-rw-r--r--gcc/dse.c14
-rw-r--r--gcc/dwarf2cfi.c6
-rw-r--r--gcc/dwarf2out.c65
-rw-r--r--gcc/dwarf2out.h52
-rw-r--r--gcc/except.c2
-rw-r--r--gcc/expmed.c245
-rw-r--r--gcc/expr.c58
-rw-r--r--gcc/final.c6
-rw-r--r--gcc/flag-types.h2
-rw-r--r--gcc/fold-const.c6
-rw-r--r--gcc/fortran/ChangeLog61
-rw-r--r--gcc/fortran/gfortran.texi26
-rw-r--r--gcc/fortran/interface.c33
-rw-r--r--gcc/fortran/invoke.texi70
-rw-r--r--gcc/fortran/resolve.c32
-rw-r--r--gcc/fortran/trans-decl.c73
-rw-r--r--gcc/function.c16
-rw-r--r--gcc/function.h8
-rw-r--r--gcc/gcse.c54
-rw-r--r--gcc/gdbasan.in3
-rw-r--r--gcc/genmodes.c45
-rw-r--r--gcc/genrecog.c30
-rw-r--r--gcc/ggc-internal.h4
-rw-r--r--gcc/gimple-fold.c35
-rw-r--r--gcc/gimple-iterator.c2
-rw-r--r--gcc/gimple-iterator.h4
-rw-r--r--gcc/gimple-ssa-isolate-paths.c33
-rw-r--r--gcc/gimple-ssa-strength-reduction.c9
-rw-r--r--gcc/gimple-streamer-in.c4
-rw-r--r--gcc/gimple.c29
-rw-r--r--gcc/gimple.h10
-rw-r--r--gcc/gimplify.c60
-rw-r--r--gcc/go/ChangeLog19
-rw-r--r--gcc/go/Make-lang.in3
-rw-r--r--gcc/go/go-lang.c8
-rw-r--r--gcc/go/gofrontend/expressions.cc19
-rw-r--r--gcc/go/gofrontend/gogo.cc28
-rw-r--r--gcc/go/gofrontend/parse.cc25
-rw-r--r--gcc/go/gofrontend/types.cc52
-rw-r--r--gcc/go/gofrontend/types.h6
-rw-r--r--gcc/graph.c6
-rw-r--r--gcc/graphite-clast-to-gimple.h8
-rw-r--r--gcc/graphite-scop-detection.c6
-rw-r--r--gcc/graphite-sese-to-poly.c6
-rw-r--r--gcc/graphite-sese-to-poly.h1
-rw-r--r--gcc/graphite.c6
-rw-r--r--gcc/haifa-sched.c4
-rw-r--r--gcc/hard-reg-set.h4
-rw-r--r--gcc/hw-doloop.c6
-rw-r--r--gcc/ifcvt.c14
-rw-r--r--gcc/init-regs.c2
-rw-r--r--gcc/internal-fn.c362
-rw-r--r--gcc/internal-fn.def6
-rw-r--r--gcc/ipa-cp.c38
-rw-r--r--gcc/ipa-devirt.c4
-rw-r--r--gcc/ipa-inline-analysis.c8
-rw-r--r--gcc/ipa-inline.h46
-rw-r--r--gcc/ipa-prop.c26
-rw-r--r--gcc/ipa-prop.h31
-rw-r--r--gcc/ipa-pure-const.c2
-rw-r--r--gcc/ipa-ref-inline.h4
-rw-r--r--gcc/ipa-ref.h2
-rw-r--r--gcc/ipa-reference.c2
-rw-r--r--gcc/ipa-split.c13
-rw-r--r--gcc/ipa-utils.c18
-rw-r--r--gcc/ipa.c10
-rw-r--r--gcc/ira-build.c15
-rw-r--r--gcc/ira-costs.c2
-rw-r--r--gcc/ira-emit.c24
-rw-r--r--gcc/ira-int.h28
-rw-r--r--gcc/ira.c42
-rw-r--r--gcc/jump.c2
-rw-r--r--gcc/langhooks-def.h13
-rw-r--r--gcc/langhooks.c15
-rw-r--r--gcc/langhooks.h19
-rw-r--r--gcc/lcm.c115
-rw-r--r--gcc/loop-init.c6
-rw-r--r--gcc/loop-invariant.c2
-rw-r--r--gcc/loop-unroll.c16
-rw-r--r--gcc/lower-subreg.c8
-rw-r--r--gcc/lra-assigns.c2
-rw-r--r--gcc/lra-coalesce.c35
-rw-r--r--gcc/lra-constraints.c10
-rw-r--r--gcc/lra-eliminations.c2
-rw-r--r--gcc/lra-lives.c4
-rw-r--r--gcc/lra-spills.c6
-rw-r--r--gcc/lra.c10
-rw-r--r--gcc/lto-cgraph.c24
-rw-r--r--gcc/lto-streamer-in.c54
-rw-r--r--gcc/lto-streamer-out.c27
-rw-r--r--gcc/lto-streamer.h20
-rw-r--r--gcc/lto/ChangeLog6
-rw-r--r--gcc/lto/lto-partition.c12
-rw-r--r--gcc/lto/lto-symtab.c4
-rw-r--r--gcc/lto/lto.c2
-rw-r--r--gcc/mcf.c4
-rw-r--r--gcc/mkconfig.sh3
-rw-r--r--gcc/mode-switching.c27
-rw-r--r--gcc/modulo-sched.c2
-rw-r--r--gcc/objc/ChangeLog6
-rw-r--r--gcc/objc/objc-act.c2
-rw-r--r--gcc/omp-low.c6
-rw-r--r--gcc/optabs.c44
-rw-r--r--gcc/optabs.def6
-rw-r--r--gcc/optabs.h5
-rw-r--r--gcc/opts.c4
-rw-r--r--gcc/opts.h4
-rw-r--r--gcc/params.h8
-rw-r--r--gcc/pass_manager.h6
-rw-r--r--gcc/passes.c107
-rw-r--r--gcc/passes.def2
-rw-r--r--gcc/postreload-gcse.c4
-rw-r--r--gcc/postreload.c4
-rw-r--r--gcc/predict.c62
-rw-r--r--gcc/predict.h10
-rw-r--r--gcc/profile.c12
-rw-r--r--gcc/recog.c6
-rw-r--r--gcc/ree.c2
-rw-r--r--gcc/reg-stack.c6
-rw-r--r--gcc/regcprop.c8
-rw-r--r--gcc/reginfo.c2
-rw-r--r--gcc/regrename.c12
-rw-r--r--gcc/regrename.h8
-rw-r--r--gcc/regstat.c8
-rw-r--r--gcc/reload.h4
-rw-r--r--gcc/reload1.c10
-rw-r--r--gcc/resource.c13
-rw-r--r--gcc/rtl.def2
-rw-r--r--gcc/rtl.h25
-rw-r--r--gcc/rtlanal.c21
-rw-r--r--gcc/sanitizer.def16
-rw-r--r--gcc/sbitmap.h4
-rw-r--r--gcc/sched-ebb.c4
-rw-r--r--gcc/sched-int.h10
-rw-r--r--gcc/sched-rgn.c103
-rw-r--r--gcc/sched-vis.c2
-rw-r--r--gcc/sel-sched-dump.c2
-rw-r--r--gcc/sel-sched-ir.c35
-rw-r--r--gcc/sel-sched-ir.h31
-rw-r--r--gcc/sel-sched.c22
-rw-r--r--gcc/sese.c6
-rw-r--r--gcc/simplify-rtx.c15
-rw-r--r--gcc/sreal.h4
-rw-r--r--gcc/ssa-iterators.h10
-rw-r--r--gcc/stack-ptr-mod.c2
-rw-r--r--gcc/store-motion.c38
-rw-r--r--gcc/target.def2
-rw-r--r--gcc/target.h15
-rw-r--r--gcc/targhooks.h2
-rw-r--r--gcc/testsuite/ChangeLog443
-rw-r--r--gcc/testsuite/c-c++-common/asan/null-deref-1.c1
-rw-r--r--gcc/testsuite/c-c++-common/asan/pr59063-1.c2
-rw-r--r--gcc/testsuite/c-c++-common/asan/pr59063-2.c2
-rw-r--r--gcc/testsuite/c-c++-common/gomp/pr59467.c68
-rw-r--r--gcc/testsuite/c-c++-common/tsan/atomic_stack.c32
-rw-r--r--gcc/testsuite/c-c++-common/tsan/fd_pipe_race.c37
-rw-r--r--gcc/testsuite/c-c++-common/tsan/free_race.c28
-rw-r--r--gcc/testsuite/c-c++-common/tsan/free_race2.c29
-rw-r--r--gcc/testsuite/c-c++-common/tsan/mutexset1.c41
-rw-r--r--gcc/testsuite/c-c++-common/tsan/race_on_barrier.c33
-rw-r--r--gcc/testsuite/c-c++-common/tsan/race_on_barrier2.c33
-rw-r--r--gcc/testsuite/c-c++-common/tsan/race_on_mutex.c44
-rw-r--r--gcc/testsuite/c-c++-common/tsan/race_on_mutex2.c26
-rw-r--r--gcc/testsuite/c-c++-common/tsan/simple_race.c28
-rw-r--r--gcc/testsuite/c-c++-common/tsan/simple_stack.c66
-rw-r--r--gcc/testsuite/c-c++-common/tsan/sleep_sync.c31
-rw-r--r--gcc/testsuite/c-c++-common/tsan/thread_leak.c18
-rw-r--r--gcc/testsuite/c-c++-common/tsan/thread_leak1.c19
-rw-r--r--gcc/testsuite/c-c++-common/tsan/thread_leak2.c22
-rw-r--r--gcc/testsuite/c-c++-common/tsan/tiny_race.c23
-rw-r--r--gcc/testsuite/c-c++-common/tsan/tls_race.c21
-rw-r--r--gcc/testsuite/c-c++-common/tsan/write_in_reader_lock.c37
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/overflow-add-1.c61
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/overflow-add-2.c61
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/overflow-mul-1.c47
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/overflow-mul-2.c27
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/overflow-negate-1.c14
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/overflow-sub-1.c63
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/overflow-sub-2.c55
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/pr59333.c19
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/pr59397.c10
-rw-r--r--gcc/testsuite/g++.dg/cilk-plus/CK/catch_exc.cc67
-rw-r--r--gcc/testsuite/g++.dg/cilk-plus/CK/const_spawn.cc78
-rw-r--r--gcc/testsuite/g++.dg/cilk-plus/CK/fib-opr-overload.cc94
-rw-r--r--gcc/testsuite/g++.dg/cilk-plus/CK/fib-tplt.cc53
-rw-r--r--gcc/testsuite/g++.dg/cilk-plus/CK/lambda_spawns.cc236
-rw-r--r--gcc/testsuite/g++.dg/cilk-plus/CK/lambda_spawns_tplt.cc173
-rw-r--r--gcc/testsuite/g++.dg/cilk-plus/cilk-plus.exp27
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/access02.C39
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/constexpr-46336.C2
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/constexpr-template6.C20
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/defaulted2.C2
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/deleted2.C9
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/variadic-sizeof3.C15
-rw-r--r--gcc/testsuite/g++.dg/cpp1y/auto-fn8.C2
-rw-r--r--gcc/testsuite/g++.dg/dg.exp1
-rw-r--r--gcc/testsuite/g++.dg/gomp/udr-3.C18
-rw-r--r--gcc/testsuite/g++.dg/lookup/extern-c-redecl5.C4
-rw-r--r--gcc/testsuite/g++.dg/lookup/linkage1.C2
-rw-r--r--gcc/testsuite/g++.dg/opt/pr59470.C188
-rw-r--r--gcc/testsuite/g++.dg/overload/new1.C2
-rw-r--r--gcc/testsuite/g++.dg/parse/friend5.C2
-rw-r--r--gcc/testsuite/g++.dg/parse/namespace-alias-1.C2
-rw-r--r--gcc/testsuite/g++.dg/parse/namespace10.C2
-rw-r--r--gcc/testsuite/g++.dg/parse/redef2.C2
-rw-r--r--gcc/testsuite/g++.dg/plugin/selfassign.c2
-rw-r--r--gcc/testsuite/g++.dg/pr59445.C81
-rw-r--r--gcc/testsuite/g++.dg/pubtypes.C2
-rw-r--r--gcc/testsuite/g++.dg/template/friend44.C4
-rw-r--r--gcc/testsuite/g++.dg/template/partial14.C16
-rw-r--r--gcc/testsuite/g++.dg/torture/pr59163.C30
-rw-r--r--gcc/testsuite/g++.dg/tsan/aligned_vs_unaligned_race.C31
-rw-r--r--gcc/testsuite/g++.dg/tsan/atomic_free.C21
-rw-r--r--gcc/testsuite/g++.dg/tsan/atomic_free2.C21
-rw-r--r--gcc/testsuite/g++.dg/tsan/benign_race.C40
-rw-r--r--gcc/testsuite/g++.dg/tsan/cond_race.C37
-rw-r--r--gcc/testsuite/g++.dg/tsan/default_options.C34
-rw-r--r--gcc/testsuite/g++.dg/tsan/fd_close_norace.C32
-rw-r--r--gcc/testsuite/g++.dg/tsan/fd_close_norace2.C31
-rw-r--r--gcc/testsuite/g++.dg/tsan/tsan.exp47
-rw-r--r--gcc/testsuite/g++.dg/ubsan/pr59415.C8
-rw-r--r--gcc/testsuite/g++.dg/ubsan/pr59437.C24
-rw-r--r--gcc/testsuite/g++.dg/warn/pr15774-1.C2
-rw-r--r--gcc/testsuite/g++.old-deja/g++.brendan/crash42.C2
-rw-r--r--gcc/testsuite/g++.old-deja/g++.brendan/crash52.C4
-rw-r--r--gcc/testsuite/g++.old-deja/g++.brendan/crash55.C2
-rw-r--r--gcc/testsuite/g++.old-deja/g++.jason/overload21.C2
-rw-r--r--gcc/testsuite/g++.old-deja/g++.jason/overload5.C2
-rw-r--r--gcc/testsuite/g++.old-deja/g++.jason/redecl1.C2
-rw-r--r--gcc/testsuite/g++.old-deja/g++.law/arm8.C2
-rw-r--r--gcc/testsuite/g++.old-deja/g++.other/main1.C4
-rw-r--r--gcc/testsuite/gcc.c-torture/compile/pr39834.c2
-rw-r--r--gcc/testsuite/gcc.c-torture/compile/pr48929.c2
-rw-r--r--gcc/testsuite/gcc.c-torture/compile/pr55569.c2
-rw-r--r--gcc/testsuite/gcc.c-torture/compile/pr59134.c16
-rw-r--r--gcc/testsuite/gcc.c-torture/compile/pr59386.c24
-rw-r--r--gcc/testsuite/gcc.c-torture/compile/pr59417.c39
-rw-r--r--gcc/testsuite/gcc.c-torture/compile/sra-1.c2
-rw-r--r--gcc/testsuite/gcc.c-torture/execute/pr58726.c26
-rw-r--r--gcc/testsuite/gcc.c-torture/execute/pr59388.c11
-rw-r--r--gcc/testsuite/gcc.dg/c11-align-6.c40
-rw-r--r--gcc/testsuite/gcc.dg/cpp/expr-overflow-1.c44
-rw-r--r--gcc/testsuite/gcc.dg/ipa/ipa-pta-14.c5
-rw-r--r--gcc/testsuite/gcc.dg/macro-fusion-1.c4
-rw-r--r--gcc/testsuite/gcc.dg/macro-fusion-2.c4
-rw-r--r--gcc/testsuite/gcc.dg/plugin/selfassign.c2
-rw-r--r--gcc/testsuite/gcc.dg/pr23623.c48
-rw-r--r--gcc/testsuite/gcc.dg/pr38984.c2
-rw-r--r--gcc/testsuite/gcc.dg/pr41488.c18
-rw-r--r--gcc/testsuite/gcc.dg/pr48784-1.c18
-rw-r--r--gcc/testsuite/gcc.dg/pr48784-2.c18
-rw-r--r--gcc/testsuite/gcc.dg/pr54113.c5
-rw-r--r--gcc/testsuite/gcc.dg/pr56341-1.c40
-rw-r--r--gcc/testsuite/gcc.dg/pr56341-2.c40
-rw-r--r--gcc/testsuite/gcc.dg/pr56997-1.c44
-rw-r--r--gcc/testsuite/gcc.dg/pr56997-2.c44
-rw-r--r--gcc/testsuite/gcc.dg/pr56997-3.c44
-rw-r--r--gcc/testsuite/gcc.dg/pubtypes-1.c2
-rw-r--r--gcc/testsuite/gcc.dg/pubtypes-2.c2
-rw-r--r--gcc/testsuite/gcc.dg/pubtypes-3.c2
-rw-r--r--gcc/testsuite/gcc.dg/pubtypes-4.c2
-rw-r--r--gcc/testsuite/gcc.dg/torture/pr59058.c19
-rw-r--r--gcc/testsuite/gcc.dg/torture/pr59374-1.c24
-rw-r--r--gcc/testsuite/gcc.dg/torture/pr59374-2.c26
-rw-r--r--gcc/testsuite/gcc.dg/tree-ssa/isolate-2.c2
-rw-r--r--gcc/testsuite/gcc.dg/tree-ssa/isolate-4.c2
-rw-r--r--gcc/testsuite/gcc.dg/tree-ssa/loop-31.c6
-rw-r--r--gcc/testsuite/gcc.dg/tree-ssa/pr45085.c (renamed from gcc/testsuite/gcc.c-torture/compile/pr45085.c)1
-rw-r--r--gcc/testsuite/gcc.dg/tree-ssa/pr45685.c41
-rw-r--r--gcc/testsuite/gcc.dg/tree-ssa/scev-7.c18
-rw-r--r--gcc/testsuite/gcc.dg/tsan/tsan.exp47
-rw-r--r--gcc/testsuite/gcc.dg/vect/pr56787.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/pr58508.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/vect-nop-move.c65
-rw-r--r--gcc/testsuite/gcc.dg/vect/vect-reduc-pattern-3.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/vect-simd-clone-10a.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/vect-simd-clone-12a.c2
-rw-r--r--gcc/testsuite/gcc.target/arm/builtin-trap.c10
-rw-r--r--gcc/testsuite/gcc.target/arm/ldrd-strd-offset.c17
-rw-r--r--gcc/testsuite/gcc.target/arm/thumb-builtin-trap.c11
-rw-r--r--gcc/testsuite/gcc.target/i386/avx-vmovapd-256-1.c2
-rw-r--r--gcc/testsuite/gcc.target/i386/avx-vmovapd-256-2.c2
-rw-r--r--gcc/testsuite/gcc.target/i386/pr59390.c16
-rw-r--r--gcc/testsuite/gcc.target/i386/pr59390_1.c17
-rw-r--r--gcc/testsuite/gcc.target/i386/pr59390_2.c16
-rw-r--r--gcc/testsuite/gcc.target/i386/pr59405.c24
-rw-r--r--gcc/testsuite/gcc.target/i386/readeflags-1.c40
-rw-r--r--gcc/testsuite/gcc.target/i386/sse2-movapd-1.c2
-rw-r--r--gcc/testsuite/gcc.target/i386/sse2-movapd-2.c2
-rw-r--r--gcc/testsuite/gcc.target/i386/writeeflags-1.c30
-rw-r--r--gcc/testsuite/gcc.target/ia64/pr52731.c19
-rw-r--r--gcc/testsuite/gcc.target/mips/pr59317.c83
-rw-r--r--gcc/testsuite/gcc.target/sh/pr51697.c21
-rw-r--r--gcc/testsuite/gcc.target/sparc/pdistn-2.c16
-rw-r--r--gcc/testsuite/gcc.target/sparc/pdistn.c10
-rw-r--r--gcc/testsuite/gfortran.dg/allocate_with_source_4.f9012
-rw-r--r--gcc/testsuite/gfortran.dg/c_by_val_5.f902
-rw-r--r--gcc/testsuite/gfortran.dg/class_result_2.f9021
-rw-r--r--gcc/testsuite/gfortran.dg/dummy_procedure_10.f9056
-rw-r--r--gcc/testsuite/gfortran.dg/elemental_subroutine_8.f9050
-rw-r--r--gcc/testsuite/gfortran.dg/gomp/pr59467.f9024
-rw-r--r--gcc/testsuite/gfortran.dg/namelist_83.f9022
-rw-r--r--gcc/testsuite/gfortran.dg/namelist_83_2.f9022
-rw-r--r--gcc/testsuite/gfortran.dg/proc_decl_9.f904
-rw-r--r--gcc/testsuite/gfortran.dg/proc_ptr_11.f9017
-rw-r--r--gcc/testsuite/gfortran.dg/proc_ptr_32.f904
-rw-r--r--gcc/testsuite/gfortran.dg/proc_ptr_33.f902
-rw-r--r--gcc/testsuite/gfortran.dg/proc_ptr_result_1.f908
-rw-r--r--gcc/testsuite/gfortran.dg/proc_ptr_result_4.f908
-rw-r--r--gcc/testsuite/gfortran.dg/proc_ptr_result_7.f9011
-rw-r--r--gcc/testsuite/gfortran.dg/proc_ptr_result_8.f9013
-rw-r--r--gcc/testsuite/gnat.dg/misaligned_volatile.adb28
-rw-r--r--gcc/testsuite/gnat.dg/pack19.adb56
-rw-r--r--gcc/testsuite/go.test/test/bench/shootout/timing.log91
-rw-r--r--gcc/testsuite/go.test/test/blank.go14
-rw-r--r--gcc/testsuite/go.test/test/blank1.go7
-rw-r--r--gcc/testsuite/go.test/test/chan/doubleselect.go2
-rw-r--r--gcc/testsuite/go.test/test/chan/select2.go3
-rw-r--r--gcc/testsuite/go.test/test/cmp.go6
-rw-r--r--gcc/testsuite/go.test/test/cmp6.go2
-rw-r--r--gcc/testsuite/go.test/test/deferfin.go63
-rw-r--r--gcc/testsuite/go.test/test/divmod.go460
-rwxr-xr-xgcc/testsuite/go.test/test/errchk2
-rw-r--r--gcc/testsuite/go.test/test/escape2.go32
-rw-r--r--gcc/testsuite/go.test/test/escape5.go7
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/bug191.dir/a.go4
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/bug191.dir/b.go4
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/bug191.dir/main.go3
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/bug295.go4
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/bug385_64.go217
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/bug435.go2
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/bug460.dir/a.go4
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/bug460.dir/b.go7
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/bug475.go22
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/bug476.go23
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/bug477.go34
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/bug478.dir/a.go9
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/bug478.dir/b.go13
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/bug478.go10
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/bug479.dir/a.go15
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/bug479.dir/b.go16
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/bug479.go10
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/bug480.dir/a.go17
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/bug480.dir/b.go13
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/bug480.go9
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/bug481.go18
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/bug482.go20
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/issue4085a.go10
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/issue4251.go6
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/issue4517d.go9
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/issue4776.go10
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/issue4847.go24
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/issue5172.go19
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/issue5358.go17
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/issue5493.go2
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/issue5581.go34
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/issue5609.go13
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/issue5698.go18
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/issue5704.go46
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/issue5856.go38
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/issue5910.dir/a.go22
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/issue5910.dir/main.go12
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/issue5910.go10
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/issue5957.dir/a.go3
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/issue5957.dir/b.go2
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/issue5957.dir/c.go12
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/issue5957.go7
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/issue5963.go50
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/issue6004.go15
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/issue6036.go44
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/issue6055.go35
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/issue6131.go20
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/issue6140.go31
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/issue6247.go17
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/issue6269.go39
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/issue6298.go15
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/issue6399.go27
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/issue6513.dir/a.go7
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/issue6513.dir/b.go9
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/issue6513.dir/main.go16
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/issue6513.go10
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/issue6789.dir/a.go14
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/issue6789.dir/b.go12
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/issue6789.go10
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/issue6899.go13
-rw-r--r--gcc/testsuite/go.test/test/fixedbugs/issue6899.out1
-rw-r--r--gcc/testsuite/go.test/test/import1.go3
-rw-r--r--gcc/testsuite/go.test/test/interface/explicit.go19
-rw-r--r--gcc/testsuite/go.test/test/interface/fail.go19
-rw-r--r--gcc/testsuite/go.test/test/mapnan.go12
-rw-r--r--gcc/testsuite/go.test/test/method2.go5
-rw-r--r--gcc/testsuite/go.test/test/nilcheck.go184
-rw-r--r--gcc/testsuite/go.test/test/nilptr.go28
-rw-r--r--gcc/testsuite/go.test/test/nilptr2.go128
-rw-r--r--gcc/testsuite/go.test/test/nilptr3.go191
-rw-r--r--gcc/testsuite/go.test/test/recover.go240
-rw-r--r--gcc/testsuite/go.test/test/recover3.go3
-rw-r--r--gcc/testsuite/go.test/test/run.go134
-rw-r--r--gcc/testsuite/go.test/test/shift2.go1
-rw-r--r--gcc/testsuite/go.test/test/sizeof.go54
-rw-r--r--gcc/testsuite/go.test/test/slice3.go156
-rw-r--r--gcc/testsuite/go.test/test/slice3err.go121
-rw-r--r--gcc/testsuite/go.test/test/stress/runstress.go11
-rw-r--r--gcc/testsuite/go.test/test/string_lit.go5
-rw-r--r--gcc/testsuite/go.test/test/syntax/chan1.go4
-rw-r--r--gcc/testsuite/go.test/test/syntax/semi1.go2
-rw-r--r--gcc/testsuite/go.test/test/syntax/semi2.go2
-rw-r--r--gcc/testsuite/go.test/test/syntax/semi3.go2
-rw-r--r--gcc/testsuite/go.test/test/syntax/semi4.go2
-rw-r--r--gcc/testsuite/go.test/test/testlib51
-rw-r--r--gcc/testsuite/lib/asan-dg.exp5
-rw-r--r--gcc/testsuite/lib/tsan-dg.exp114
-rw-r--r--gcc/toplev.c2
-rw-r--r--gcc/tracer.c8
-rw-r--r--gcc/trans-mem.c22
-rw-r--r--gcc/tree-affine.h17
-rw-r--r--gcc/tree-call-cdce.c2
-rw-r--r--gcc/tree-cfg.c108
-rw-r--r--gcc/tree-cfgcleanup.c16
-rw-r--r--gcc/tree-complex.c6
-rw-r--r--gcc/tree-core.h22
-rw-r--r--gcc/tree-data-ref.c87
-rw-r--r--gcc/tree-data-ref.h4
-rw-r--r--gcc/tree-dfa.c6
-rw-r--r--gcc/tree-eh.c8
-rw-r--r--gcc/tree-emutls.c12
-rw-r--r--gcc/tree-if-conv.c418
-rw-r--r--gcc/tree-inline.c36
-rw-r--r--gcc/tree-inline.h4
-rw-r--r--gcc/tree-into-ssa.c45
-rw-r--r--gcc/tree-iterator.h4
-rw-r--r--gcc/tree-loop-distribution.c4
-rw-r--r--gcc/tree-nrv.c6
-rw-r--r--gcc/tree-object-size.c2
-rw-r--r--gcc/tree-outof-ssa.c6
-rw-r--r--gcc/tree-pass.h28
-rw-r--r--gcc/tree-predcom.c3
-rw-r--r--gcc/tree-profile.c2
-rw-r--r--gcc/tree-scalar-evolution.c77
-rw-r--r--gcc/tree-scalar-evolution.h2
-rw-r--r--gcc/tree-sra.c14
-rw-r--r--gcc/tree-ssa-address.h2
-rw-r--r--gcc/tree-ssa-alias.h4
-rw-r--r--gcc/tree-ssa-ccp.c6
-rw-r--r--gcc/tree-ssa-coalesce.c6
-rw-r--r--gcc/tree-ssa-copy.c22
-rw-r--r--gcc/tree-ssa-copyrename.c4
-rw-r--r--gcc/tree-ssa-dce.c13
-rw-r--r--gcc/tree-ssa-dom.c8
-rw-r--r--gcc/tree-ssa-forwprop.c2
-rw-r--r--gcc/tree-ssa-live.c32
-rw-r--r--gcc/tree-ssa-loop-im.c8
-rw-r--r--gcc/tree-ssa-loop-ivopts.c31
-rw-r--r--gcc/tree-ssa-loop-manip.c24
-rw-r--r--gcc/tree-ssa-loop-niter.c10
-rw-r--r--gcc/tree-ssa-loop.h4
-rw-r--r--gcc/tree-ssa-math-opts.c10
-rw-r--r--gcc/tree-ssa-operands.h2
-rw-r--r--gcc/tree-ssa-phiopt.c165
-rw-r--r--gcc/tree-ssa-pre.c17
-rw-r--r--gcc/tree-ssa-propagate.c8
-rw-r--r--gcc/tree-ssa-reassoc.c24
-rw-r--r--gcc/tree-ssa-sccvn.c2
-rw-r--r--gcc/tree-ssa-sink.c4
-rw-r--r--gcc/tree-ssa-structalias.c121
-rw-r--r--gcc/tree-ssa-tail-merge.c32
-rw-r--r--gcc/tree-ssa-ter.c2
-rw-r--r--gcc/tree-ssa-threadupdate.c66
-rw-r--r--gcc/tree-ssa-uncprop.c9
-rw-r--r--gcc/tree-ssa-uninit.c4
-rw-r--r--gcc/tree-ssa.c6
-rw-r--r--gcc/tree-stdarg.c8
-rw-r--r--gcc/tree-switch-conversion.c2
-rw-r--r--gcc/tree-vect-data-refs.c109
-rw-r--r--gcc/tree-vect-generic.c2
-rw-r--r--gcc/tree-vect-loop-manip.c271
-rw-r--r--gcc/tree-vect-loop.c55
-rw-r--r--gcc/tree-vect-stmts.c445
-rw-r--r--gcc/tree-vectorizer.c129
-rw-r--r--gcc/tree-vectorizer.h17
-rw-r--r--gcc/tree-vrp.c63
-rw-r--r--gcc/tree.c6
-rw-r--r--gcc/tree.def10
-rw-r--r--gcc/tree.h5
-rw-r--r--gcc/tsan.c2
-rw-r--r--gcc/ubsan.c179
-rw-r--r--gcc/ubsan.h3
-rw-r--r--gcc/value-prof.c6
-rw-r--r--gcc/var-tracking.c28
-rw-r--r--gcc/varasm.c6
-rw-r--r--gcc/varpool.c66
-rw-r--r--gcc/vtable-verify.c49
-rw-r--r--gcc/web.c6
653 files changed, 16473 insertions, 3424 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 92a8651c562..a238cac62a6 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,8 +1,1625 @@
+2013-12-13 Kenneth Zadeck <zadeck@naturalbridge.com>
+
+ * config/arc/arc.h (BITS_PER_UNIT): Removed.
+ * config/bfin/bfin.h (BITS_PER_UNIT): Removed.
+ * config/lm32/lm32.h (BITS_PER_UNIT): Removed.
+ * config/m32c/m32c.h (BITS_PER_UNIT): Removed.
+ * config/microblaze/microblaze.h (BITS_PER_UNIT): Removed.
+ * config/picochip/picochip.h (BITS_PER_UNIT): Removed.
+ * config/spu/spu.h (BITS_PER_UNIT): Removed.
+ * defaults.h (BITS_PER_UNIT): Removed.
+ * config/i386/i386-modes.def (MAX_BITSIZE_MODE_ANY_INT): New.
+	* doc/rtl.texi (BITS_PER_UNIT): Moved from tm.texi.
+ (MAX_BITSIZE_MODE_ANY_INT): Updated.
+ * doc/tm.texi (BITS_PER_UNIT): Removed.
+ * doc/tm.texi.in (BITS_PER_UNIT): Removed.
+ * genmodes.c (bits_per_unit, max_bitsize_mode_any_int): New.
+ (create_modes): Added code to set bits_per_unit and
+ max_bitsize_mode_any_int.
+ (emit_max_int): Changed code generation.
+ * mkconfig.sh: Added insn-modes.h.
+
+2013-12-13 Jeff Law <law@redhat.com>
+
+ PR tree-optimization/45685
+ * tree-ssa-phiopt.c (neg_replacement): New function.
+ (tree_ssa_phiopt_worker): Call it.
+
+2013-12-13 Yuri Rumyantsev <ysrumyan@gmail.com>
+
+ * config/i386/i386.c (slm_cost): Fix imul cost for HI.
+
+2013-12-13 Bin Cheng <bin.cheng@arm.com>
+
+ PR tree-optimization/58296
+ PR tree-optimization/41488
+ * tree-scalar-evolution.c: Include necessary header files.
+ (simplify_peeled_chrec): New function.
+ (analyze_evolution_in_loop): New static variable.
+ Call simplify_peeled_chrec.
+ * tree-ssa-loop-ivopts.c (mark_bivs): Don't mark peeled IV as biv.
+ (add_old_iv_candidates): Don't add candidate for peeled IV.
+ * tree-affine.h (aff_combination_zero_p): New function.
+
+2013-12-13 Nick Clifton <nickc@redhat.com>
+
+ * config/msp430/msp430.c (is_wakeup_func): New function. Returns
+ true if the current function has the wakeup attribute.
+ (msp430_start_function): Note if the function has the wakeup
+ attribute.
+ (msp430_attribute_table): Add wakeup attribute.
+ (msp430_expand_epilogue): Add support for wakeup functions.
+ * config/msp430/msp430.md (disable_interrupts): Emit a NOP after
+ the DINT instruction.
+ * doc/extend.texi: Document the wakeup attribute.
+
+2013-12-10  Kai Tietz  <ktietz@redhat.com>
+
+ PR c++/57897
+ * config/i386/i386.c (ix86_option_override_internal): Set for
+ x64 target flag_unwind_tables, if flag_asynchronous_unwind_tables
+ was explicit set.
+
+2013-12-12 Jeff Law <law@redhat.com>
+
+ * i386.md (simple LEA peephole2): Add missing mode to zero_extend
+ for zero-extended MULT simple LEA pattern.
+
+2013-12-12 Vladimir Makarov <vmakarov@redhat.com>
+
+ PR middle-end/59470
+ * lra-coalesce.c (lra_coalesce): Invalidate inheritance pseudo
+ values if necessary.
+
+2013-12-12 Jakub Jelinek <jakub@redhat.com>
+
+ PR libgomp/59467
+ * gimplify.c (omp_check_private): Add copyprivate argument, if it
+ is true, don't check omp_privatize_by_reference.
+ (gimplify_scan_omp_clauses): For OMP_CLAUSE_COPYPRIVATE verify
+ decl is private in outer context. Adjust omp_check_private caller.
+
+2013-12-11 Jeff Law <law@redhat.com>
+
+ PR rtl-optimization/59446
+ * tree-ssa-threadupdate.c (mark_threaded_blocks): Properly
+ test for crossing a loop header.
+
+2013-12-11 Sriraman Tallam <tmsriram@google.com>
+
+ PR target/59390
+ * config/i386/i386.c (get_builtin): New function.
+ (ix86_builtin_vectorized_function): Replace all instances of
+ ix86_builtins[...] with get_builtin(...).
+ (ix86_builtin_reciprocal): Ditto.
+
+2013-12-11 Balaji V. Iyer <balaji.v.iyer@intel.com>
+
+ * langhooks.h (lang_hooks_for_decls): Remove lang_hooks_for_cilkplus.
+ (lang_hooks_for_cilkplus): Remove.
+ * langhooks.c (lhd_cilk_detect_spawn): Likewise.
+ (lhd_install_body_with_frame_cleanup): Likewise.
+ * langhooks-def.h (LANG_HOOKS_CILKPLUS_FRAME_CLEANUP): Likewise.
+ (LANG_HOOKS_CILKPLUS_DETECT_SPAWN_AND_UNWRAP): Likewise.
+ (LANG_HOOKS_CILKPLUS_CILKPLUS_GIMPLIFY_SPAWN): Likewise.
+ (LANG_HOOKS_CILKPLUS): Likewise.
+ (LANG_HOOKS_DECLS): Remove LANG_HOOKS_CILKPLUS.
+ * gimplify.c (gimplify_expr): Removed CILK_SPAWN_STMT case.
+ (gimplify_modify_expr): Removed handling of _Cilk_spawn in expr.
+ (gimplify_call_expr): Likewise.
+
+2013-12-11 Bernd Edlinger <bernd.edlinger@hotmail.de>
+
+ * expr.c (expand_assignment): Remove dependency on
+ flag_strict_volatile_bitfields. Always set the memory
+ access mode.
+ (expand_expr_real_1): Likewise.
+
+2013-12-11 Bernd Edlinger <bernd.edlinger@hotmail.de>
+
+ PR middle-end/59134
+ * expmed.c (store_bit_field): Use narrow_bit_field_mem and
+ store_fixed_bit_field_1 for -fstrict-volatile-bitfields.
+ (store_fixed_bit_field): Split up. Call store_fixed_bit_field_1
+ to do the real work.
+ (store_fixed_bit_field_1): New function.
+ (store_split_bit_field): Limit the unit size to the memory mode size,
+ to prevent recursion.
+
+2013-12-11 Bernd Edlinger <bernd.edlinger@hotmail.de>
+ Sandra Loosemore <sandra@codesourcery.com>
+
+ PR middle-end/23623
+ PR middle-end/48784
+ PR middle-end/56341
+ PR middle-end/56997
+ * expmed.c (strict_volatile_bitfield_p): Add bitregion_start
+ and bitregion_end parameters. Test for compliance with C++
+ memory model.
+ (store_bit_field): Adjust call to strict_volatile_bitfield_p.
+ Add fallback logic for cases where -fstrict-volatile-bitfields
+ is supposed to apply, but cannot.
+ (extract_bit_field): Likewise. Use narrow_bit_field_mem and
+ extract_fixed_bit_field_1 to do the extraction.
+ (extract_fixed_bit_field): Revert to previous mode selection algorithm.
+ Call extract_fixed_bit_field_1 to do the real work.
+ (extract_fixed_bit_field_1): New function.
+
+2013-12-11 Sandra Loosemore <sandra@codesourcery.com>
+
+ PR middle-end/23623
+ PR middle-end/48784
+ PR middle-end/56341
+ PR middle-end/56997
+ * expmed.c (strict_volatile_bitfield_p): New function.
+ (store_bit_field_1): Don't special-case strict volatile
+ bitfields here.
+ (store_bit_field): Handle strict volatile bitfields here instead.
+ (store_fixed_bit_field): Don't special-case strict volatile
+ bitfields here.
+ (extract_bit_field_1): Don't special-case strict volatile
+ bitfields here.
+ (extract_bit_field): Handle strict volatile bitfields here instead.
+ (extract_fixed_bit_field): Don't special-case strict volatile
+ bitfields here. Simplify surrounding code to resemble that in
+ store_fixed_bit_field.
+ * doc/invoke.texi (Code Gen Options): Update
+ -fstrict-volatile-bitfields description.
+
+2013-12-11 Kugan Vivekanandarajah <kuganv@linaro.org>
+
+ * configure.ac: Add check for aarch64 assembler -mabi support.
+ * configure: Regenerate.
+ * config.in: Regenerate.
+ * config/aarch64/aarch64-elf.h (ASM_MABI_SPEC): New define.
+ (ASM_SPEC): Update to substitute -mabi with ASM_MABI_SPEC.
+ * config/aarch64/aarch64.h (aarch64_override_options): Issue error
+ if assembler does not support -mabi and option ilp32 is selected.
+ * doc/install.texi: Added note that building gcc 4.9 and after
+ with pre 2.24 binutils will not support -mabi=ilp32.
+
+2013-12-11 Marek Polacek <polacek@redhat.com>
+
+ PR sanitizer/59399
+ * expr.c (expand_expr_real_1): Remove assert dealing with
+ internal calls and turn that into a condition instead.
+
+2013-12-11 Yvan Roux <yvan.roux@linaro.org>
+
+ * config/arm/arm.opt (mlra): Enable LRA by default.
+
+2013-12-11 Jakub Jelinek <jakub@redhat.com>
+
+ PR tree-optimization/59417
+ * tree-ssa-copy.c (fini_copy_prop): If copy_of[i].value is defined
+	in a different bb than var, only duplicate points-to info and
+ not alignment info and don't duplicate range info.
+ * tree-ssa-loop-niter.c (determine_value_range): Instead of
+ assertion failure handle inconsistencies in range info by only
+ using var's range info and not PHI result range infos.
+
+ PR tree-optimization/59386
+ * tree-inline.c (remap_gimple_stmt): If not id->do_not_unshare,
+ unshare_expr (id->retval) before passing it to gimple_build_assign.
+
+2013-12-11 Bin Cheng <bin.cheng@arm.com>
+
+ Reverted:
+ 2013-12-10 Bin Cheng <bin.cheng@arm.com>
+ PR tree-optimization/41488
+ * tree-ssa-loop-ivopts.c (add_old_iv_candidates): Don't add cand
+ for PEELED_CHREC kind IV.
+ * tree-scalar-evolution.c: Include necessary header files.
+ (peeled_chrec_map, simplify_peeled_chrec): New.
+ (analyze_evolution_in_loop): New static variable.
+ Call simplify_peeled_chrec.
+ (scev_initialize): Initialize peeled_chrec_map.
+ (scev_reset, scev_finalize): Reset and release peeled_chrec_map.
+
+2013-12-10 H.J. Lu <hongjiu.lu@intel.com>
+
+ PR target/59458
+ * config/i386/i386.md (*movsf_internal): Set mode to SI for
+ alternative 13.
+
+2013-12-10 Eric Botcazou <ebotcazou@adacore.com>
+
+ PR rtl-optimization/58295
+ * simplify-rtx.c (simplify_truncation): Restrict the distribution for
+ WORD_REGISTER_OPERATIONS targets.
+
+2013-12-10 Richard Sandiford <rdsandiford@googlemail.com>
+
+ * genrecog.c (validate_pattern): Treat all messages except missing
+ modes as errors.
+ * config/epiphany/epiphany.md: Remove constraints from
+ define_peephole2s.
+ * config/h8300/h8300.md: Remove constraints from define_splits.
+ * config/msp430/msp430.md: Likewise.
+ * config/mcore/mcore.md (movdi_i, movsf_i, movdf_k): Use
+ nonimmediate_operand rather than general_operand for operand 0.
+ * config/moxie/moxie.md (*movsi, *movqi, *movhi): Likewise.
+ * config/pdp11/predicates.md (float_operand, float_nonimm_operand):
+ Use match_operator rather than match_test to invoke general_operand.
+ * config/v850/v850.md (*movqi_internal, *movhi_internal)
+ (*movsi_internal_v850e, *movsi_internal, *movsf_internal): Likewise.
+
+2013-12-10 Richard Sandiford <rdsandiford@googlemail.com>
+
+ * config/tilegx/tilegx.md (insn_ld_add<bitsuffix>): Use
+ register_operand rather than pointer_operand. Add modes to the
+ operands.
+ (insn_ldna_add<bitsuffix>): Likewise.
+ (insn_ld<I124MODE:n><s>_add<I48MODE:bitsuffix>): Likewise.
+ (insn_ldnt_add<bitsuffix>): Likewise.
+ (insn_ldnt<I124MODE:n><s>_add<I48MODE:bitsuffix>): Likewise.
+ (insn_ld_add_L2<bitsuffix>): Likewise.
+ (insn_ldna_add_L2<bitsuffix>): Likewise.
+ (insn_ld<I124MODE:n><s>_add_L2<I48MODE:bitsuffix>): Likewise.
+ (insn_ldnt_add_L2<bitsuffix>): Likewise.
+ (insn_ldnt<I124MODE:n><s>_add_L2<I48MODE:bitsuffix>): Likewise.
+ (insn_ld_add_miss<bitsuffix>): Likewise.
+ (insn_ldna_add_miss<bitsuffix>): Likewise.
+ (insn_ld<I124MODE:n><s>_add_miss<I48MODE:bitsuffix>): Likewise.
+ (insn_ldnt_add_miss<bitsuffix>): Likewise.
+ (insn_ldnt<I124MODE:n><s>_add_miss<I48MODE:bitsuffix>): Likewise.
+ (insn_st_add<bitsuffix>): Likewise.
+ (insn_st<I124MODE:n>_add<I48MODE:bitsuffix>): Likewise.
+ (*insn_st<I124MODE:n>_add<I48MODE:bitsuffix>): Likewise.
+ (insn_stnt_add<bitsuffix>): Likewise.
+ (insn_stnt<I124MODE:n>_add<I48MODE:bitsuffix>): Likewise.
+ (*insn_stnt<I124MODE:n>_add<I48MODE:bitsuffix>): Likewise.
+ (vec_pack_<pack_optab>_v4hi): Use register_operand rather than
+ reg_or_0_operand for operand 0.
+ (insn_v2<pack_insn>): Likewise.
+ (vec_pack_hipart_v4hi): Likewise.
+ (insn_v2packh): Likewise.
+ (vec_pack_ssat_v2si): Likewise.
+ (insn_v4packsc): Likewise.
+
+2013-12-10 H.J. Lu <hongjiu.lu@intel.com>
+
+ * basic-block.h (gcov_working_set_t): Put back typedef.
+ * gcov-io.h (gcov_bucket_type): Likewise.
+ (gcov_working_set_info, gcov_working_set_t): Likewise.
+
+2013-12-10 Oleg Endo <olegendo@gcc.gnu.org>
+
+ * cgraph.h (cgraph_node_set_iterator, varpool_node_set_iterator):
+ Remove typedef.
+ (cgraph_inline_failed_enum, cgraph_inline_failed_t): Remove typedef and
+ rename to cgraph_inline_failed_t.
+ * tree-ssa-alias.h (ao_ref_s, ao_ref): Remove typedef and rename
+ to ao_ref.
+ * reload.h (reg_equivs_s, reg_equivs_t): Remove typedef and rename
+ to reg_equivs_t.
+ * conditions.h (CC_STATUS): Remove typedef.
+ * bitmap.h (bitmap_obstack): Remove typedef.
+ (bitmap_element_def, bitmap_element): Remove typedef and rename to
+ bitmap_element.
+ (bitmap_head_def, bitmap_head): Remove typedef and rename to
+ bitmap_head.
+ (bitmap_iterator): Remove typedef.
+ * target.h (cumulative_args_t, print_switch_type,
+ secondary_reload_info): Remove typedef.
+ * dwarf2out.h (dw_cfi_oprnd_struct, dw_cfi_oprnd): Remove
+ dw_cfi_oprnd_struct alias.
+ (dw_cfi_struct, dw_cfi_node): Remove typedef and rename to dw_cfi_node.
+ (dw_fde_struct, dw_fde_node): Remove typedef and rename to dw_fde_node.
+ (cfa_loc, dw_cfa_location): Remove typedef and rename to
+ dw_cfa_location.
+ (dw_vec_struct, dw_vec_const): Remove typedef and rename to
+ dw_vec_const.
+ (dw_val_struct, dw_val_node): Remove typedef and rename to dw_val_node.
+ (dw_loc_descr_struct, dw_loc_descr_node): Remove typedef and rename to
+ dw_loc_descr_node.
+ * params.h (param_info, compiler_param): Remove typedef.
+ * opts.h (cl_deferred_param): Remove typedef.
+ * sreal.h (sreal): Remove typedef.
+ * ddg.h (dep_type, dep_data_type): Remove typedef.
+ * graphite-clast-to-gimple.h (cloog_prog_clast, bb_pbb_def): Remove
+ typedef.
+ * lto-streamer.h (lto_decl_stream_e_t, lto_encoder_entry,
+ lto_symtab_encoder_iterator, res_pair): Remove typedef.
+ * tree-affine.h (affine_tree_combination, aff_tree): Remove typedef
+ and rename to aff_tree.
+ * sched-int.h (region): Remove typedef.
+ * diagnostic.h (diagnostic_info,
+ diagnostic_classification_change_t): Remove typedef.
+ * tree-ssa-loop.h (affine_iv_d): Remove typedef and rename to
+ affine_iv.
+ * sbitmap.h (sbitmap_iterator): Remove typedef.
+ * ssa-iterators.h (immediate_use_iterator_d, imm_use_iterator):
+ Remove typedef and rename to imm_use_iterator.
+ (ssa_operand_iterator_d, ssa_op_iter): Remove typedef and rename to
+ ssa_op_iter.
+ * ggc-internal.h (ggc_statistics): Remove typedef.
+ * cselib.h (cselib_val_struct, cselib_val): Remove typedef and
+ rename to cselib_val.
+ * tree-core.h (alias_pair): Remove typedef.
+ (constructor_elt_d, constructor_elt): Remove typedef and rename to
+ constructor_elt.
+ (ssa_use_operand_d, ssa_use_operand_t): Remove typedef and rename to
+ ssa_use_operand_t.
+ * graphite-sese-to-poly.h (base_alias_pair): Remove typedef.
+ * tree-data-ref.h (conflict_function): Remove typedef.
+ * tree-inline.h (copy_body_data): Remove typedef.
+ * ipa-inline.h (condition, size_time_entry, inline_param_summary_t,
+ edge_growth_cache_entry): Remove typedef.
+ * regrename.h (operand_rr_info, insn_rr_info): Remove typedef.
+ * gimple-iterator.h (gimple_stmt_iterator_d, gimple_stmt_iterator):
+ Remove typedef and rename to gimple_stmt_iterator.
+ * basic-block.h (ce_if_block, ce_if_block_t): Remove typedef and
+ rename to ce_if_block.
+ (edge_iterator): Remove typedef.
+ * ipa-prop.h (ipa_agg_jf_item, ipa_agg_jf_item_t): Remove typedef
+ and rename to ipa_agg_jf_item.
+ (ipa_agg_jump_function_t, ipa_param_descriptor_t, ipa_node_params_t,
+ ipa_parm_adjustment_t): Remove typedef.
+ (ipa_jump_func, ipa_jump_func_t): Remove typedef and rename to
+ ipa_jump_func.
+ (ipa_edge_args, ipa_edge_args_t): Remove typedef and rename to
+ ipa_edge_args.
+ * gcov-io.h (gcov_bucket_type): Remove typedef.
+ (gcov_working_set_info, gcov_working_set_t): Remove typedef and rename
+ to gcov_working_set_t.
+ * ira-int.h (minmax_set_iterator, ira_allocno_iterator,
+ ira_object_iterator, ira_allocno_object_iterator, ira_pref_iterator,
+ ira_copy_iterator, ira_object_conflict_iterator): Remove typedef.
+ * tree-iterator.h (tree_stmt_iterator): Remove typedef.
+ * rtl.h (addr_diff_vec_flags, mem_attrs, reg_attrs,
+ replace_label_data): Remove typedef.
+ (rtunion_def, rtunion): Remove typedef and rename to rtunion.
+ * hard-reg-set.h (hard_reg_set_iterator): Remove typedef.
+ * sel-sched-ir.h (_list_iterator, sel_global_bb_info_def,
+ sel_region_bb_info_def, succ_iterator): Remove typedef.
+ (deps_where_def, deps_where_t): Remove typedef and rename to
+ deps_where_t.
+ * coretypes.h: Adapt forward declarations.
+ * tree-scalar-evolution.h: Likewise.
+ * tree-ssa-address.h: Likewise.
+ * tree-ssa-operands.h: Likewise.
+ * function.h: Likewise.
+ * config/frv/frv-protos.h: Likewise.
+ * targhooks.h: Likewise.
+	* basic-block.h: Likewise.
+ * rtl.def: Adapt documentation.
+ * doc/tm.texi: Likewise.
+ * ipa-cp.c: Adapt uses.
+ * bitmap.c: Likewise.
+ * dwarf2out.c: Likewise.
+ * target.def: Likewise.
+ * ipa-inline-analysis.c: Likewise.
+ * dwarf2cfi.c: Likewise.
+ * tree-ssa-loop-ivopts.c: Likewise.
+ * lto-cgraph.c: Likewise.
+ * config/frv/frv.c: Likewise.
+ * ifcvt.c: Likewise.
+ * ipa-prop.c: Likewise.
+
+2013-12-10 Kai Tietz <ktietz@redhat.com>
+
+ PR target/56807
+ * config/i386/i386.c (ix86_expand_prologue): Address saved
+ registers stack-relative, not via frame-pointer.
+
+2013-12-10 Richard Biener <rguenther@suse.de>
+
+ PR middle-end/38474
+ * tree-ssa-structalias.c (solution_set_expand): Expand into
+ a different possibly cached bitmap and return the result.
+ (set_union_with_increment): Pass in a shared expanded bitmap
+ and adjust.
+ (do_sd_constraint): Likewise.
+ (do_ds_constraint): Likewise.
+ (do_complex_constraint): Likewise.
+ (solve_graph): Manage the shared expanded bitmap.
+
+2013-12-10 Jakub Jelinek <jakub@redhat.com>
+
+ * tree-vectorizer.h (struct _loop_vec_info): Add scalar_loop field.
+ (LOOP_VINFO_SCALAR_LOOP): Define.
+ (slpeel_tree_duplicate_loop_to_edge_cfg): Add scalar_loop argument.
+ * config/i386/sse.md (maskload<mode>, maskstore<mode>): New expanders.
+ * tree-data-ref.c (get_references_in_stmt): Handle MASK_LOAD and
+ MASK_STORE.
+ * internal-fn.def (LOOP_VECTORIZED, MASK_LOAD, MASK_STORE): New
+ internal fns.
+ * tree-if-conv.c: Include expr.h, optabs.h, tree-ssa-loop-ivopts.h and
+ tree-ssa-address.h.
+ (release_bb_predicate): New function.
+ (free_bb_predicate): Use it.
+ (reset_bb_predicate): Likewise. Don't unallocate bb->aux
+ just to immediately allocate it again.
+ (add_to_predicate_list): Add loop argument. If basic blocks that
+ dominate loop->latch don't insert any predicate.
+ (add_to_dst_predicate_list): Adjust caller.
+ (if_convertible_phi_p): Add any_mask_load_store argument, if true,
+ handle it like flag_tree_loop_if_convert_stores.
+ (insert_gimplified_predicates): Likewise.
+ (ifcvt_can_use_mask_load_store): New function.
+ (if_convertible_gimple_assign_stmt_p): Add any_mask_load_store
+ argument, check if some conditional loads or stores can't be
+ converted into MASK_LOAD or MASK_STORE.
+ (if_convertible_stmt_p): Add any_mask_load_store argument,
+ pass it down to if_convertible_gimple_assign_stmt_p.
+ (predicate_bbs): Don't return bool, only check if the last stmt
+ of a basic block is GIMPLE_COND and handle that. Adjust
+ add_to_predicate_list caller.
+ (if_convertible_loop_p_1): Only call predicate_bbs if
+ flag_tree_loop_if_convert_stores and free_bb_predicate in that case
+ afterwards, check gimple_code of stmts here. Replace is_predicated
+ check with dominance check. Add any_mask_load_store argument,
+ pass it down to if_convertible_stmt_p and if_convertible_phi_p,
+ call if_convertible_phi_p only after all if_convertible_stmt_p
+ calls.
+ (if_convertible_loop_p): Add any_mask_load_store argument,
+ pass it down to if_convertible_loop_p_1.
+ (predicate_mem_writes): Emit MASK_LOAD and/or MASK_STORE calls.
+ (combine_blocks): Add any_mask_load_store argument, pass
+ it down to insert_gimplified_predicates and call predicate_mem_writes
+ if it is set. Call predicate_bbs.
+ (version_loop_for_if_conversion): New function.
+ (tree_if_conversion): Adjust if_convertible_loop_p and combine_blocks
+ calls. Return todo flags instead of bool, call
+ version_loop_for_if_conversion if if-conversion should be just
+ for the vectorized loops and nothing else.
+ (main_tree_if_conversion): Adjust caller. Don't call
+ tree_if_conversion for dont_vectorize loops if if-conversion
+ isn't explicitly enabled.
+ * tree-vect-data-refs.c (vect_check_gather): Handle
+ MASK_LOAD/MASK_STORE.
+ (vect_analyze_data_refs, vect_supportable_dr_alignment): Likewise.
+ * gimple.h (gimple_expr_type): Handle MASK_STORE.
+ * internal-fn.c (expand_LOOP_VECTORIZED, expand_MASK_LOAD,
+ expand_MASK_STORE): New functions.
+ * tree-vectorizer.c: Include tree-cfg.h and gimple-fold.h.
+ (vect_loop_vectorized_call, fold_loop_vectorized_call): New functions.
+ (vectorize_loops): Don't try to vectorize loops with
+ loop->dont_vectorize set. Set LOOP_VINFO_SCALAR_LOOP for if-converted
+ loops, fold LOOP_VECTORIZED internal call depending on if loop
+ has been vectorized or not.
+ * tree-vect-loop-manip.c (slpeel_duplicate_current_defs_from_edges):
+ New function.
+ (slpeel_tree_duplicate_loop_to_edge_cfg): Add scalar_loop argument.
+ If non-NULL, copy basic blocks from scalar_loop instead of loop, but
+ still to loop's entry or exit edge.
+ (slpeel_tree_peel_loop_to_edge): Add scalar_loop argument, pass it
+ down to slpeel_tree_duplicate_loop_to_edge_cfg.
+ (vect_do_peeling_for_loop_bound, vect_do_peeling_for_loop_alignment):
+ Adjust callers.
+ (vect_loop_versioning): If LOOP_VINFO_SCALAR_LOOP, perform loop
+ versioning from that loop instead of LOOP_VINFO_LOOP, move it to the
+ right place in the CFG afterwards.
+ * tree-vect-loop.c (vect_determine_vectorization_factor): Handle
+ MASK_STORE.
+ * cfgloop.h (struct loop): Add dont_vectorize field.
+ * tree-loop-distribution.c (copy_loop_before): Adjust
+ slpeel_tree_duplicate_loop_to_edge_cfg caller.
+ * optabs.def (maskload_optab, maskstore_optab): New optabs.
+ * passes.def: Add a note that pass_vectorize must immediately follow
+ pass_if_conversion.
+ * tree-predcom.c (split_data_refs_to_components): Give up if
+ DR_STMT is a call.
+ * tree-vect-stmts.c (vect_mark_relevant): Don't crash if lhs
+ is NULL.
+ (exist_non_indexing_operands_for_use_p): Handle MASK_LOAD
+ and MASK_STORE.
+ (vectorizable_mask_load_store): New function.
+ (vectorizable_call): Call it for MASK_LOAD or MASK_STORE.
+ (vect_transform_stmt): Handle MASK_STORE.
+ * tree-ssa-phiopt.c (cond_if_else_store_replacement): Ignore
+ DR_STMT where lhs is NULL.
+ * optabs.h (can_vec_perm_p): Fix up comment typo.
+ (can_vec_mask_load_store_p): New prototype.
+ * optabs.c (can_vec_mask_load_store_p): New function.
+
+2013-12-10 Eric Botcazou <ebotcazou@adacore.com>
+
+ * expr.c (expand_expr_real_1) <normal_inner_ref>: Always return 0 for
+ the extraction of a bit-field of null size.
+
+2013-12-10 Marek Polacek <polacek@redhat.com>
+
+ PR sanitizer/59437
+ * vtable-verify.c (var_is_used_for_virtual_call_p): Check the
+ return value of gimple_call_fn. Use is_gimple_call/is_gimple_assign
+ instead of gimple_code.
+
+2013-12-10 Maxim Kuvyrkov <maxim@kugelworks.com>
+
+ * config.gcc (mips*-mti-linux*, mips64*-*-linux*):
+ Add android definitions.
+ (s390x-*-linux*): Use linux-protos.h.
+
+2013-12-10 Bin Cheng <bin.cheng@arm.com>
+
+ PR tree-optimization/41488
+ * tree-ssa-loop-ivopts.c (add_old_iv_candidates): Don't add cand
+ for PEELED_CHREC kind IV.
+ * tree-scalar-evolution.c: Include necessary header files.
+ (peeled_chrec_map, simplify_peeled_chrec): New.
+ (analyze_evolution_in_loop): New static variable.
+ Call simplify_peeled_chrec.
+ (scev_initialize): Initialize peeled_chrec_map.
+ (scev_reset, scev_finalize): Reset and release peeled_chrec_map.
+
+2013-12-09 Andrew Pinski <apinski@cavium.com>
+
+ * config/aarch64/t-aarch64 (MULTILIB_OPTIONS): Fix definition so
+ that options are conflicting ones.
+
+2013-12-09 Eric Botcazou <ebotcazou@adacore.com>
+
+ * optabs.c (gen_int_libfunc): Do not compare modes directly.
+
+2013-12-09 David Malcolm <dmalcolm@redhat.com>
+
+ * basic-block.h (FOR_ALL_BB): Eliminate macro.
+
+ * cfg.c (alloc_aux_for_blocks, clear_aux_for_blocks): Replace
+ uses of FOR_ALL_BB with FOR_ALL_BB_FN, making uses of cfun explicit.
+
+ * cfganal.c (inverted_post_order_compute): Likewise.
+ * cfgcleanup.c (try_optimize_cfg): Likewise.
+ * cfgexpand.c (add_scope_conflicts): Likewise.
+ * cfghooks.c (dump_flow_info, account_profile_record): Likewise.
+ * cfgrtl.c (relink_block_chain): Likewise.
+ * dce.c (mark_artificial_uses): Likewise.
+ * df-core.c (df_set_blocks, df_compute_cfg_image, df_dump): Likewise.
+ * df-problems.c (df_lr_verify_solution_start,
+ df_lr_verify_solution_end, df_lr_verify_transfer_functions,
+ df_live_verify_solution_start, df_live_verify_solution_end,
+ df_live_set_all_dirty, df_live_verify_transfer_functions,
+	df_md_local_compute): Likewise.
+ * df-scan.c (df_scan_free_internal, df_scan_alloc)
+ df_reorganize_refs_by_insn, df_scan_verify): Likewise.
+ * dominance.c (compute_dom_fast_query, calculate_dominance_info,
+ free_dominance_info): Likewise.
+ * dse.c (dse_step1, dse_step3, dse_step4, dse_step6): Likewise.
+ * graph.c (draw_cfg_edges): Likewise.
+ * graphite-scop-detection.c (print_graphite_scop_statistics,
+ dot_all_scops_1): Likewise.
+ * graphite.c (print_global_statistics,
+ print_graphite_scop_statistics): Likewise.
+ * ira.c (do_reload): Likewise.
+ * loop-init.c (loop_optimizer_finalize): Likewise.
+ * lto-streamer-in.c (input_function): Likewise.
+ * lto-streamer-out.c (output_function): Likewise.
+ * mcf.c (adjust_cfg_counts): Likewise.
+ * predict.c (estimate_loops): Likewise.
+ * sched-rgn.c (haifa_find_rgns): Likewise.
+ * tree-cfg.c (split_critical_edges): Likewise.
+ * tree-dfa.c (renumber_gimple_stmt_uids): Likewise.
+ * tree-loop-distribution.c (tree_loop_distribution): Likewise.
+ * tree-ssa-pre.c (compute_antic, insert, init_pre): Likewise.
+ * tree-ssa-propagate.c (ssa_prop_init): Likewise.
+ * var-tracking.c (vt_initialize, vt_finalize): Likewise.
+ * vtable-verify.c (vtable_verify_main): Likewise.
+ * web.c (web_main): Likewise.
+
+2013-12-09 David Malcolm <dmalcolm@redhat.com>
+
+ * basic-block.h (FOR_EACH_BB_REVERSE): Eliminate macro.
+
+ * cfghooks.c (verify_flow_info): Replace uses of FOR_EACH_BB_REVERSE
+ with FOR_EACH_BB_REVERSE_FN, making uses of cfun explicit.
+ * cfgrtl.c (print_rtl_with_bb, rtl_verify_edges,
+ rtl_verify_bb_insns, rtl_verify_bb_pointers,
+ rtl_verify_bb_insn_chain, rtl_verify_fallthru): Likewise.
+ * config/ia64/ia64.c (emit_predicate_relation_info): Likewise.
+ * config/sh/sh.c (sh_md_init_global): Likewise.
+ * config/sh/sh_optimize_sett_clrt.cc
+ (sh_optimize_sett_clrt::execute): Likewise.
+ * dce.c (reset_unmarked_insns_debug_uses, delete_unmarked_insns):
+ Likewise.
+ * dominance.c (calc_dfs_tree): Likewise.
+ * final.c (final): Likewise.
+ * function.c (thread_prologue_and_epilogue_insns): Likewise.
+ * gcse.c (compute_code_hoist_vbeinout): Likewise.
+ * ira.c (update_equiv_regs, build_insn_chain): Likewise.
+ * lcm.c (compute_antinout_edge): Likewise.
+ * mode-switching.c (optimize_mode_switching): Likewise.
+ * postreload.c (reload_combine): Likewise.
+ * recog.c (split_all_insns, peephole2_optimize): Likewise.
+ * tree-ssa-live.c (live_worklist): Likewise.
+
+2013-12-09 David Malcolm <dmalcolm@redhat.com>
+
+ * basic-block.h (FOR_EACH_BB): Eliminate macro.
+
+ * asan.c (transform_statements, execute_sanopt): Eliminate
+ use of FOR_EACH_BB in favor of FOR_EACH_BB_FN, to make use of cfun
+ explicit.
+ * auto-inc-dec.c (rest_of_handle_auto_inc_dec): Likewise.
+ * bb-reorder.c (find_rarely_executed_basic_blocks_and_crossing_edges,
+ set_edge_can_fallthru_flag, fix_up_fall_thru_edges,
+ fix_crossing_unconditional_branches, add_reg_crossing_jump_notes,
+ insert_section_boundary_note, rest_of_handle_reorder_blocks,
+ duplicate_computed_gotos): Likewise.
+ * cfg.c (clear_edges, compact_blocks, brief_dump_cfg): Likewise.
+ * cfganal.c (find_unreachable_blocks, add_noreturn_fake_exit_edges,
+ compute_dominance_frontiers_1, single_pred_before_succ_order): Likewise.
+ * cfgbuild.c (find_many_sub_basic_blocks): Likewise.
+ * cfgcleanup.c (try_optimize_cfg, delete_dead_jumptables): Likewise.
+ * cfgexpand.c (add_scope_conflicts, discover_nonconstant_array_refs):
+ Likewise.
+ * cfgloop.c (flow_loops_cfg_dump, get_loop_body, record_loop_exits,
+ verify_loop_structure): Likewise.
+ * cfgloopanal.c (mark_loop_exit_edges): Likewise.
+ * cfgrtl.c (compute_bb_for_insn, find_partition_fixes,
+ verify_hot_cold_block_grouping, purge_all_dead_edges,
+ fixup_abnormal_edges, record_effective_endpoints,
+ outof_cfg_layout_mode, fixup_reorder_chain, force_one_exit_fallthru,
+ break_superblocks): Likewise.
+ * cgraphbuild.c (build_cgraph_edges, rebuild_cgraph_edges,
+ cgraph_rebuild_references): Likewise.
+ * combine-stack-adj.c (combine_stack_adjustments): Likewise.
+ * combine.c (delete_noop_moves, create_log_links,
+ combine_instructions): Likewise.
+ * config/arm/arm.c (thumb1_reorg, thumb2_reorg): Likewise.
+ * config/bfin/bfin.c (bfin_gen_bundles, reorder_var_tracking_notes):
+ Likewise.
+ * config/c6x/c6x.c (c6x_gen_bundles, conditionalize_after_sched,
+ c6x_reorg): Likewise.
+ * config/epiphany/resolve-sw-modes.c (resolve_sw_modes): Likewise.
+ * config/frv/frv.c (frv_optimize_membar): Likewise.
+ * config/i386/i386.c (ix86_finalize_stack_realign_flags): Likewise.
+ * config/ia64/ia64.c (ia64_reorg): Likewise.
+ * config/mips/mips.c (mips_annotate_pic_calls): Likewise.
+ * config/picochip/picochip.c (reorder_var_tracking_notes): Likewise.
+ * config/rs6000/rs6000.c (rs6000_alloc_sdmode_stack_slot): Likewise.
+ * config/s390/s390.c (s390_regs_ever_clobbered): Likewise.
+ * config/sh/sh_treg_combine.cc (sh_treg_combine::execute): Likewise.
+ * config/spu/spu.c (spu_machine_dependent_reorg): Likewise.
+ * config/tilegx/tilegx.c (tilegx_gen_bundles,
+ reorder_var_tracking_notes): Likewise.
+ * config/tilepro/tilepro.c (tilepro_gen_bundles,
+ reorder_var_tracking_notes): Likewise.
+ * coverage.c (coverage_compute_cfg_checksum): Likewise.
+ * cprop.c (compute_hash_table_work, compute_cprop_data,
+ local_cprop_pass, find_implicit_sets): Likewise.
+ * cse.c (cse_condition_code_reg): Likewise.
+ * dce.c (prescan_insns_for_dce): Likewise.
+ * df-core.c (df_compact_blocks): Likewise.
+ * df-problems.c (df_word_lr_alloc): Likewise.
+ * df-scan.c (df_scan_start_dump, df_scan_blocks, df_insn_rescan_all,
+ df_update_entry_exit_and_calls): Likewise.
+ * dominance.c (calculate_dominance_info, verify_dominators,
+ debug_dominance_info): Likewise.
+ * dse.c (dse_step5_nospill): Likewise.
+ * except.c (finish_eh_generation): Likewise.
+ * final.c (compute_alignments): Likewise.
+ * function.c (thread_prologue_and_epilogue_insns,
+ rest_of_match_asm_constraints): Likewise.
+ * gcse.c (compute_hash_table_work, prune_expressions,
+ compute_pre_data, compute_code_hoist_vbeinout, hoist_code,
+ calculate_bb_reg_pressure, compute_ld_motion_mems): Likewise.
+ * gimple-iterator.c (gsi_commit_edge_inserts): Likewise.
+ * gimple-ssa-isolate-paths.c (find_implicit_erroneous_behaviour,
+ find_explicit_erroneous_behaviour): Likewise.
+ * graphite-sese-to-poly.c (rewrite_reductions_out_of_ssa,
+ rewrite_cross_bb_scalar_deps_out_of_ssa): Likewise.
+ * haifa-sched.c (haifa_sched_init): Likewise.
+ * hw-doloop.c (discover_loops, set_bb_indices, reorder_loops):
+ Likewise.
+ * ifcvt.c (if_convert): Likewise.
+ * init-regs.c (initialize_uninitialized_regs): Likewise.
+ * ipa-prop.c (ipcp_transform_function): Likewise.
+ * ipa-pure-const.c (analyze_function): Likewise.
+ * ipa-split.c (find_split_points, execute_split_functions): Likewise.
+ * ira-build.c (form_loop_tree): Likewise.
+ * ira-costs.c (find_costs_and_classes): Likewise.
+ * ira-emit.c (emit_moves, add_ranges_and_copies, ira_emit): Likewise.
+ * ira.c (decrease_live_ranges_number, compute_regs_asm_clobbered,
+ mark_elimination, update_equiv_regs, find_moveable_pseudos,
+ split_live_ranges_for_shrink_wrap, allocate_initial_values): Likewise.
+ * jump.c (mark_all_labels): Likewise.
+ * lcm.c (compute_laterin, compute_insert_delete, compute_available,
+ compute_nearerout, compute_rev_insert_delete): Likewise.
+ * loop-init.c (fix_loop_structure): Likewise.
+ * loop-invariant.c (calculate_loop_reg_pressure): Likewise.
+ * lower-subreg.c (decompose_multiword_subregs,
+ decompose_multiword_subregs): Likewise.
+ * lra-assigns.c (assign_by_spills): Likewise.
+ * lra-coalesce.c (lra_coalesce): Likewise.
+ * lra-constraints.c (lra_inheritance, remove_inheritance_pseudos):
+ Likewise.
+ * lra-eliminations.c (lra_init_elimination): Likewise.
+ * lra-spills.c (assign_spill_hard_regs, spill_pseudos,
+ lra_final_code_change): Likewise.
+ * lra.c (remove_scratches, check_rtl, has_nonexceptional_receiver,
+ update_inc_notes): Likewise.
+ * mcf.c (adjust_cfg_counts): Likewise.
+ * mode-switching.c (optimize_mode_switching): Likewise.
+ * modulo-sched.c (rest_of_handle_sms): Likewise.
+ * omp-low.c (optimize_omp_library_calls, expand_omp_taskreg,
+ expand_omp_target): Likewise.
+ * postreload-gcse.c (alloc_mem, compute_hash_table): Likewise.
+ * postreload.c (reload_cse_regs_1): Likewise.
+ * predict.c (strip_predict_hints, tree_bb_level_predictions,
+ tree_estimate_probability, expensive_function_p,
+ estimate_bb_frequencies, compute_function_frequency): Likewise.
+ * profile.c (is_inconsistent, compute_branch_probabilities,
+ branch_prob): Likewise.
+ * ree.c (find_removable_extensions): Likewise.
+ * reg-stack.c (compensate_edges, convert_regs, reg_to_stack): Likewise.
+ * regcprop.c (copyprop_hardreg_forward): Likewise.
+ * reginfo.c (init_subregs_of_mode): Likewise.
+ * regrename.c (regrename_analyze): Likewise.
+ * regstat.c (regstat_compute_ri, regstat_compute_calls_crossed):
+ Likewise.
+ * reload1.c (has_nonexceptional_receiver, reload,
+ calculate_elim_costs_all_insns): Likewise.
+ * resource.c (init_resource_info, free_resource_info): Likewise.
+ * sched-ebb.c (schedule_ebbs): Likewise.
+ * sched-rgn.c (is_cfg_nonregular, find_single_block_region,
+ haifa_find_rgns, sched_rgn_local_init): Likewise.
+ * sel-sched-dump.c (sel_dump_cfg_2): Likewise.
+ * sel-sched-ir.c (init_lv_sets, free_lv_sets,
+ make_regions_from_the_rest): Likewise.
+ * sese.c (build_sese_loop_nests, sese_build_liveouts): Likewise.
+ * stack-ptr-mod.c (notice_stack_pointer_modification): Likewise.
+ * store-motion.c (compute_store_table, build_store_vectors,
+ one_store_motion_pass): Likewise.
+ * tracer.c (tail_duplicate): Likewise.
+ * trans-mem.c (compute_transaction_bits): Likewise.
+ * tree-call-cdce.c (tree_call_cdce): Likewise.
+ * tree-cfg.c (replace_loop_annotate, factor_computed_gotos,
+ fold_cond_expr_cond, make_edges, assign_discriminators,
+ make_abnormal_goto_edges, cleanup_dead_labels, group_case_labels,
+ dump_cfg_stats, gimple_verify_flow_info, print_loop,
+ execute_fixup_cfg): Likewise.
+ * tree-cfgcleanup.c (cleanup_tree_cfg_1, merge_phi_nodes): Likewise.
+ * tree-complex.c (init_dont_simulate_again, tree_lower_complex):
+ Likewise.
+ * tree-dfa.c (collect_dfa_stats, dump_enumerated_decls): Likewise.
+ * tree-eh.c (execute_lower_resx, execute_lower_eh_dispatch,
+ mark_reachable_handlers): Likewise.
+ * tree-emutls.c (lower_emutls_function_body): Likewise.
+ * tree-if-conv.c (main_tree_if_conversion): Likewise.
+ * tree-inline.c (optimize_inline_calls): Likewise.
+ * tree-into-ssa.c (rewrite_into_ssa, update_ssa): Likewise.
+ * tree-nrv.c (tree_nrv, execute_return_slot_opt): Likewise.
+ * tree-object-size.c (compute_object_sizes): Likewise.
+ * tree-outof-ssa.c (eliminate_useless_phis, rewrite_trees,
+ insert_backedge_copies, tree_profiling): Likewise.
+ * tree-scalar-evolution.c (scev_const_prop): Likewise.
+ * tree-sra.c (scan_function, sra_modify_function_body,
+ propagate_dereference_distances, ipa_sra_modify_function_body,
+ convert_callers): Likewise.
+ * tree-ssa-ccp.c (ccp_initialize, execute_fold_all_builtins): Likewise.
+	* tree-ssa-coalesce.c (build_ssa_conflict_graph,
+	create_outofssa_var_map, coalesce_partitions): Likewise.
+ * tree-ssa-copy.c (init_copy_prop): Likewise.
+ * tree-ssa-copyrename.c (rename_ssa_copies): Likewise.
+ * tree-ssa-dce.c (find_obviously_necessary_stmts,
+ eliminate_unnecessary_stmts): Likewise.
+ * tree-ssa-dom.c (free_all_edge_infos, tree_ssa_dominator_optimize):
+ Likewise.
+ * tree-ssa-forwprop.c (ssa_forward_propagate_and_combine): Likewise.
+ * tree-ssa-live.c (clear_unused_block_pointer, remove_unused_locals,
+ new_tree_live_info, calculate_live_on_exit, dump_live_info,
+ analyze_memory_references, fill_always_executed_in,
+ tree_ssa_lim_finalize): Likewise.
+ * tree-ssa-loop-manip.c (find_uses_to_rename, verify_loop_closed_ssa):
+ Likewise.
+ * tree-ssa-math-opts.c (execute_cse_reciprocals, execute_cse_sincos,
+ execute_optimize_bswap, execute_optimize_widening_mul): Likewise.
+ * tree-ssa-propagate.c (substitute_and_fold): Likewise.
+ * tree-ssa-structalias.c (compute_points_to_sets): Likewise.
+ * tree-ssa-tail-merge.c (find_same_succ, reset_cluster_vectors):
+ Likewise.
+ * tree-ssa-ter.c (find_replaceable_exprs): Likewise.
+ * tree-ssa-threadupdate.c (thread_through_all_blocks): Likewise.
+ * tree-ssa-uncprop.c (associate_equivalences_with_edges,
+ tree_ssa_uncprop): Likewise.
+ * tree-ssa-uninit.c (warn_uninitialized_vars,
+ execute_late_warn_uninitialized): Likewise.
+ * tree-ssa.c (verify_ssa, execute_update_addresses_taken): Likewise.
+ * tree-stdarg.c (check_all_va_list_escapes, execute_optimize_stdarg):
+ Likewise.
+ * tree-switch-conversion.c (do_switchconv): Likewise.
+ * tree-vect-generic.c (expand_vector_operations): Likewise.
+ * tree-vectorizer.c (adjust_simduid_builtins, note_simd_array_uses,
+ execute_vect_slp): Likewise.
+ * tree-vrp.c (check_all_array_refs, remove_range_assertions,
+ vrp_initialize, identify_jump_threads, instrument_memory_accesses):
+ Likewise.
+ * ubsan.c (ubsan_pass): Likewise.
+ * value-prof.c (verify_histograms, gimple_value_profile_transformations,
+ gimple_find_values_to_profile): Likewise.
+ * var-tracking.c (vt_find_locations, dump_dataflow_sets, vt_emit_notes,
+ vt_initialize, delete_debug_insns, vt_finalize): Likewise.
+
+2013-12-09 David Malcolm <dmalcolm@redhat.com>
+
+ * basic-block.h (last_basic_block): Eliminate macro.
+
+ * asan.c (transform_statements): Eliminate use of last_basic_block
+ in favor of last_basic_block_for_fn, in order to make use of cfun
+ explicit.
+ * bb-reorder.c (copy_bb, reorder_basic_blocks): Likewise.
+ * bt-load.c (compute_defs_uses_and_gen, compute_kill, compute_out,
+ link_btr_uses, build_btr_def_use_webs, migrate_btr_defs): Likewise.
+ * cfg.c (compact_blocks): Likewise.
+ * cfganal.c (mark_dfs_back_edges,
+ control_dependences::control_dependences, post_order_compute,
+ pre_and_rev_post_order_compute_fn, dfs_enumerate_from, compute_idf,
+ single_pred_before_succ_order): Likewise.
+ * cfgbuild.c (make_edges): Likewise.
+ * cfgexpand.c (add_scope_conflicts, gimple_expand_cfg): Likewise.
+ * cfghooks.c (verify_flow_info): Likewise.
+ * cfgloop.c (verify_loop_structure): Likewise.
+ * cfgloopanal.c (just_once_each_iteration_p,
+ mark_irreducible_loops): Likewise.
+ * cfgloopmanip.c (fix_bb_placements, remove_path,
+ update_dominators_in_loop): Likewise.
+ * cfgrtl.c (create_basic_block_structure, rtl_create_basic_block,
+ break_superblocks, rtl_flow_call_edges_add): Likewise.
+ * config/epiphany/resolve-sw-modes.c (resolve_sw_modes): Likewise.
+ * config/frv/frv.c (frv_optimize_membar): Likewise.
+ * config/mips/mips.c (r10k_insert_cache_barriers): Likewise.
+ * config/spu/spu.c (spu_machine_dependent_reorg): Likewise.
+ * cprop.c (compute_local_properties, find_implicit_sets,
+ bypass_conditional_jumps, one_cprop_pass): Likewise.
+ * cse.c (cse_main): Likewise.
+ * df-core.c (rest_of_handle_df_initialize, df_worklist_dataflow,
+ df_analyze, df_grow_bb_info, df_compact_blocks): Likewise.
+ * df-problems.c (df_lr_verify_solution_start,
+ df_live_verify_solution_start, df_md_local_compute): Likewise.
+ * dominance.c (init_dom_info, calc_dfs_tree_nonrec, calc_dfs_tree,
+ calc_idoms): Likewise.
+ * domwalk.c (dom_walker::walk): Likewise.
+ * dse.c (dse_step0, dse_step3): Likewise.
+ * function.c (epilogue_done): Likewise.
+ * gcse.c (alloc_gcse_mem, compute_local_properties,
+ prune_insertions_deletions, compute_pre_data,
+ pre_expr_reaches_here_p, one_pre_gcse_pass,
+ compute_code_hoist_vbeinout, should_hoist_expr_to_dom, hoist_code,
+ one_code_hoisting_pass): Likewise.
+ * graph.c (draw_cfg_nodes_no_loops): Likewise.
+ * graphite-sese-to-poly.c (build_scop_bbs): Likewise.
+ * haifa-sched.c (unlink_bb_notes): Likewise.
+ * ipa-split.c (execute_split_functions): Likewise.
+ * ira-build.c (create_loop_tree_nodes,
+ remove_unnecessary_regions): Likewise.
+ * ira-emit.c (ira_emit): Likewise.
+ * ira.c (find_moveable_pseudos, ira): Likewise.
+ * lcm.c (compute_antinout_edge, compute_laterin,
+ compute_insert_delete, pre_edge_lcm, compute_available,
+ compute_nearerout, compute_rev_insert_delete,
+ pre_edge_rev_lcm): Likewise.
+ * loop-unroll.c (opt_info_start_duplication,
+ apply_opt_in_copies): Likewise.
+ * lower-subreg.c (decompose_multiword_subregs): Likewise.
+ * lra-lives.c (lra_create_live_ranges): Likewise.
+ * lra.c (lra): Likewise.
+ * mode-switching.c (optimize_mode_switching): Likewise.
+ * recog.c (split_all_insns): Likewise.
+ * regcprop.c (copyprop_hardreg_forward): Likewise.
+ * regrename.c (regrename_analyze): Likewise.
+ * reload1.c (reload): Likewise.
+ * resource.c (init_resource_info): Likewise.
+ * sched-rgn.c (haifa_find_rgns, extend_rgns, compute_trg_info,
+ realloc_bb_state_array, schedule_region, extend_regions): Likewise.
+ * sel-sched-ir.c (sel_extend_global_bb_info, extend_region_bb_info,
+ recompute_rev_top_order, sel_init_pipelining,
+ make_regions_from_the_rest): Likewise.
+	* store-motion.c (remove_reachable_equiv_notes, build_store_vectors):
+	Likewise.
+ * tracer.c (tail_duplicate): Likewise.
+ * trans-mem.c (tm_region_init, get_bb_regions_instrumented): Likewise.
+ * tree-cfg.c (create_bb, cleanup_dead_labels, gimple_dump_cfg,
+ gimple_flow_call_edges_add): Likewise.
+ * tree-cfgcleanup.c (split_bbs_on_noreturn_calls,
+ cleanup_tree_cfg_1): Likewise.
+ * tree-complex.c (tree_lower_complex): Likewise.
+ * tree-inline.c (copy_cfg_body): Likewise.
+ * tree-into-ssa.c (mark_phi_for_rewrite, rewrite_into_ssa,
+ prepare_def_site_for, update_ssa): Likewise.
+ * tree-ssa-dce.c (tree_dce_init, perform_tree_ssa_dce): Likewise.
+ * tree-ssa-dom.c (record_edge_info): Likewise.
+ * tree-ssa-live.c (new_tree_live_info, live_worklist): Likewise.
+ * tree-ssa-loop-im.c (fill_always_executed_in_1): Likewise.
+	* tree-ssa-loop-manip.c (copy_phi_node_args,
+	gimple_duplicate_loop_to_header_edge): Likewise.
+ * tree-ssa-pre.c (compute_antic): Likewise.
+ * tree-ssa-propagate.c (ssa_prop_init): Likewise.
+ * tree-ssa-reassoc.c (init_reassoc): Likewise.
+ * tree-ssa-sccvn.c (init_scc_vn): Likewise.
+ * tree-ssa-tail-merge.c (init_worklist): Likewise.
+ * tree-ssa-uncprop.c (associate_equivalences_with_edges): Likewise.
+ * tree-stdarg.c (reachable_at_most_once): Likewise.
+ * tree-vrp.c (find_assert_locations): Likewise.
+ * var-tracking.c (vt_find_locations): Likewise.
+
+2013-12-09 David Malcolm <dmalcolm@redhat.com>
+
+ * basic-block.h (profile_status): Eliminate macro.
+
+ * cfgbuild.c (find_many_sub_basic_blocks): Eliminate use of
+ profile_status macro in favor of profile_status_for_fn, making
+ use of cfun explicit.
+ * cfghooks.c (account_profile_record): Likewise.
+	* cfgloopanal.c (single_likely_exit): Likewise.
+ * cfgrtl.c (rtl_verify_edges, rtl_account_profile_record): Likewise.
+	* graphite.c (graphite_finalize): Likewise.
+ * internal-fn.c (ubsan_expand_si_overflow_addsub_check,
+ ubsan_expand_si_overflow_neg_check,
+ ubsan_expand_si_overflow_mul_check): Likewise.
+	* ipa-split.c (consider_split, execute_split_functions): Likewise.
+	* loop-unroll.c (decide_peel_simple): Likewise.
+	* optabs.c (emit_cmp_and_jump_insn_1): Likewise.
+ * predict.c (maybe_hot_edge_p, probably_never_executed,
+ predictable_edge_p, probability_reliable_p, gimple_predict_edge,
+ tree_estimate_probability_driver, estimate_bb_frequencies,
+ compute_function_frequency, rebuild_frequencies): Likewise.
+ * profile.c (compute_branch_probabilities): Likewise.
+ * tree-cfg.c (gimple_account_profile_record): Likewise.
+ * tree-inline.c (optimize_inline_calls): Likewise.
+
+2013-12-09 David Malcolm <dmalcolm@redhat.com>
+
+ * basic-block.h (label_to_block_map): Eliminate macro.
+
+ * gimple.c (gimple_set_bb): Replace uses of label_to_block_map
+ with uses of label_to_block_map_for_fn, making uses of cfun be
+ explicit.
+ * tree-cfg.c (delete_tree_cfg_annotations): Likewise.
+ (verify_gimple_label): Likewise.
+
+2013-12-09 David Malcolm <dmalcolm@redhat.com>
+
+ * basic-block.h (basic_block_info): Eliminate macro.
+
+ * cfgrtl.c (rtl_create_basic_block): Replace uses of
+ basic_block_info with basic_block_info_for_fn, making uses
+ of cfun be explicit.
+ * tree-cfg.c (build_gimple_cfg, create_bb): Likewise.
+
+2013-12-09 David Malcolm <dmalcolm@redhat.com>
+
+ * basic-block.h (BASIC_BLOCK): Eliminate macro.
+
+ * alias.c (init_alias_analysis): Eliminate BASIC_BLOCK macro in
+ favor of uses of BASIC_BLOCK_FOR_FN, making uses of cfun explicit.
+ * bt-load.c (compute_defs_uses_and_gen, compute_out, link_btr_uses,
+ block_at_edge_of_live_range_p, migrate_btr_defs): Likewise.
+ * caller-save.c (insert_one_insn): Likewise.
+ * cfg.c (debug_bb, get_bb_original, get_bb_copy): Likewise.
+ * cfgexpand.c (add_scope_conflicts): Likewise.
+ * cfghooks.c (verify_flow_info): Likewise.
+ * cfgloop.c (flow_loops_find): Likewise.
+ * cfgrtl.c (rtl_flow_call_edges_add): Likewise.
+ * config/mips/mips.c (r10k_insert_cache_barriers): Likewise.
+ * config/s390/s390.c (s390_optimize_nonescaping_tx): Likewise.
+ * config/spu/spu.c (spu_machine_dependent_reorg): Likewise.
+ * cse.c (cse_main): Likewise.
+ * dce.c (fast_dce): Likewise.
+ * df-core.c (df_set_blocks, df_worklist_propagate_forward,
+ df_worklist_propagate_backward, df_worklist_dataflow_doublequeue,
+ df_bb_replace, df_dump_region): Likewise.
+ * df-problems.c (df_rd_bb_local_compute, df_lr_bb_local_compute,
+	df_live_bb_local_compute, df_chain_remove_problem,
+	df_chain_create_bb, df_word_lr_bb_local_compute, df_note_bb_compute,
+ df_md_bb_local_compute, df_md_local_compute,
+ df_md_transfer_function): Likewise.
+ * df-scan.c (df_scan_blocks, df_reorganize_refs_by_reg_by_insn,
+ df_reorganize_refs_by_insn, df_bb_refs_collect,
+ df_record_entry_block_defs, df_update_entry_block_defs,
+ df_record_exit_block_uses): Likewise.
+ * dominance.c (nearest_common_dominator_for_set): Likewise.
+ * gcse.c (hoist_code): Likewise.
+ * graph.c (draw_cfg_nodes_no_loops): Likewise.
+ * ipa-inline-analysis.c (param_change_prob,
+ estimate_function_body_sizes): Likewise.
+ * ipa-split.c (dominated_by_forbidden): Likewise.
+ * loop-unroll.c (apply_opt_in_copies): Likewise.
+ * lower-subreg.c (decompose_multiword_subregs): Likewise.
+ * lra-lives.c (lra_create_live_ranges): Likewise.
+ * predict.c (propagate_freq): Likewise.
+ * regrename.c (regrename_analyze): Likewise.
+ * regstat.c (regstat_bb_compute_ri,
+ regstat_bb_compute_calls_crossed): Likewise.
+ * resource.c (mark_target_live_regs): Likewise.
+ * sched-ebb.c (ebb_fix_recovery_cfg): Likewise.
+ * sched-int.h (EBB_FIRST_BB, EBB_LAST_BB): Likewise.
+ * sched-rgn.c (debug_region, dump_region_dot, too_large,
+ haifa_find_rgns, extend_rgns, compute_dom_prob_ps, update_live,
+ propagate_deps, sched_is_disabled_for_current_region_p): Likewise.
+ * sched-vis.c (debug_bb_n_slim): Likewise.
+ * sel-sched-ir.c (sel_finish_global_and_expr, verify_backedges,
+ purge_empty_blocks, sel_remove_loop_preheader): Likewise.
+ * sel-sched.c (remove_insns_that_need_bookkeeping)
+ (current_region_empty_p, sel_region_init,
+ simplify_changed_insns): Likewise.
+ * trans-mem.c (execute_tm_mark, execute_tm_edges,
+ tm_memopt_compute_antic, ipa_tm_scan_irr_function): Likewise.
+ * tree-cfg.c (make_edges, end_recording_case_labels,
+ label_to_block_fn, gimple_debug_bb, gimple_flow_call_edges_add,
+ remove_edge_and_dominated_blocks, remove_edge_and_dominated_blocks,
+ gimple_purge_all_dead_eh_edges,
+ gimple_purge_all_dead_abnormal_call_edges): Likewise.
+ * tree-cfgcleanup.c (fixup_noreturn_call,
+ split_bbs_on_noreturn_calls, cleanup_tree_cfg_1): Likewise.
+ * tree-inline.c (copy_cfg_body, fold_marked_statements): Likewise.
+ * tree-into-ssa.c (set_livein_block, prune_unused_phi_nodes,
+ insert_phi_nodes_for, insert_updated_phi_nodes_for): Likewise.
+ * tree-ssa-dom.c (tree_ssa_dominator_optimize): Likewise.
+ * tree-ssa-live.c (live_worklist): Likewise.
+ * tree-ssa-loop-manip.c (compute_live_loop_exits,
+ add_exit_phis_var, find_uses_to_rename, copy_phi_node_args): Likewise.
+ * tree-ssa-pre.c (compute_antic): Likewise.
+ * tree-ssa-reassoc.c (update_range_test, optimize_range_tests): Likewise.
+ * tree-ssa-sink.c (nearest_common_dominator_of_uses): Likewise.
+ * tree-ssa-tail-merge.c (same_succ_hash, same_succ_def::equal,
+ same_succ_flush_bbs, update_worklist, set_cluster,
+ same_phi_alternatives, find_clusters_1, apply_clusters,
+ update_debug_stmts): Likewise.
+ * tree-ssa-threadupdate.c (mark_threaded_blocks,
+ thread_through_all_blocks): Likewise.
+ * tree-ssa-uncprop.c (associate_equivalences_with_edges): Likewise.
+ * tree-vrp.c (find_assert_locations): Likewise.
+
+2013-12-09 David Malcolm <dmalcolm@redhat.com>
+
+ * basic-block.h (SET_BASIC_BLOCK): Eliminate macro.
+
+ * cfg.c (compact_blocks): Replace uses of SET_BASIC_BLOCK
+ with SET_BASIC_BLOCK_FOR_FN, making use of cfun explicit.
+ (expunge_block): Likewise.
+ * cfgrtl.c (create_basic_block_structure): Likewise.
+ * df-core.c (df_compact_blocks, df_bb_replace): Likewise.
+ * sel-sched.c (create_block_for_bookkeeping): Likewise.
+ * tree-cfg.c (create_bb): Likewise.
+
+2013-12-09 David Malcolm <dmalcolm@redhat.com>
+
+ * basic-block.h (profile_status_for_function): Rename to...
+ (profile_status_for_fn): ...this.
+
+ * cfg.c (check_bb_profile): Update for renaming.
+ * cgraphbuild.c (compute_call_stmt_bb_frequency): Likewise.
+ * lto-streamer-in.c (input_cfg): Likewise.
+ * lto-streamer-out.c (output_cfg): Likewise.
+ * predict.c (maybe_hot_frequency_p, maybe_hot_count_p,
+ maybe_hot_bb_p, probably_never_executed)
+ (handle_missing_profiles): Likewise.
+ * tree-cfg.c (init_empty_tree_cfg_for_function): Likewise.
+ * tree-inline.c (copy_bb, initialize_cfun): Likewise.
+
+2013-12-09 David Malcolm <dmalcolm@redhat.com>
+
+ * basic-block.h (label_to_block_map_for_function): Rename to...
+ (label_to_block_map_for_fn): ...this.
+
+ * lto-streamer-in.c (input_cfg): Update for renaming.
+ * tree-cfg.c (init_empty_tree_cfg_for_function): Likewise.
+
+2013-12-09 David Malcolm <dmalcolm@redhat.com>
+
+ * basic-block.h (last_basic_block_for_function): Rename to...
+ (last_basic_block_for_fn): ...this.
+
+ * ipa-utils.c (ipa_merge_profiles): Update for renaming of
+ last_basic_block_for_function to last_basic_block_for_fn.
+ * lto-streamer-in.c (input_cfg): Likewise.
+ * lto-streamer-out.c (output_cfg): Likewise.
+ * tree-cfg.c (init_empty_tree_cfg_for_function): Likewise.
+ * tree-sra.c (propagate_dereference_distances, ipa_early_sra):
+ Likewise.
+
+2013-12-09 David Malcolm <dmalcolm@redhat.com>
+
+ * basic-block.h (basic_block_info_for_function): Rename to...
+ (basic_block_info_for_fn): ...this.
+ (BASIC_BLOCK_FOR_FUNCTION): Rename to...
+ (BASIC_BLOCK_FOR_FN): ...this.
+ (SET_BASIC_BLOCK_FOR_FUNCTION): Rename to...
+ (SET_BASIC_BLOCK_FOR_FN): ...this.
+
+ * gimple-streamer-in.c (input_phi, input_bb): Update for renaming
+ of BASIC_BLOCK_FOR_FUNCTION to BASIC_BLOCK_FOR_FN.
+ * ipa-utils.c (ipa_merge_profiles): Likewise.
+ * lto-streamer-in.c (make_new_block): Update for renaming of
+ SET_BASIC_BLOCK_FOR_FUNCTION to SET_BASIC_BLOCK_FOR_FN.
+ (input_cfg): Update for renamings.
+ * tree-cfg.c (init_empty_tree_cfg_for_function): Likewise.
+ (dump_function_to_file): Update for renaming of
+ basic_block_info_for_function to basic_block_info_for_fn.
+
+2013-12-09 Richard Biener <rguenther@suse.de>
+
+ PR middle-end/38474
+ * tree-ssa-structalias.c (set_union_with_increment): Remove
+ unreachable code.
+ (do_complex_constraint): Call set_union_with_increment with
+ the solution delta, not the full solution.
+ (make_transitive_closure_constraints): Merge the two
+ constraints.
+
+2013-12-09 Richard Earnshaw <rearnsha@arm.com>
+
+ * arm.c (mem_ok_for_ldrd_strd): Rename first argument as MEM. Do
+ more address validation checks.
+
+2013-12-09 Marek Polacek <polacek@redhat.com>
+
+ PR sanitizer/59415
+ * vtable-verify.c (verify_bb_vtables): Check the return value
+ of gimple_call_fn. Use is_gimple_call instead of gimple_code.
+
+2013-12-09 Kyrylo Tkachov <kyrylo.tkachov@arm.com>
+
+ * config/arm/arm.md (generic_sched): Add cortexa12.
+ (generic_vfp): Likewise.
+ * config/arm/arm.c (cortexa12_extra_costs): New cost table.
+ (arm_cortex_a12_tune): New tuning struct.
+ * config/arm/arm-cores.def: Add cortex-a12.
+ * config/arm/arm-tables.opt: Regenerate.
+ * config/arm/arm-tune.md: Likewise.
+ * config/arm/bpabi.h: Add cortex-a12.
+ * doc/invoke.texi: Document -mcpu=cortex-a12.
+
+2013-12-09 Francois-Xavier Coudert <fxcoudert@gcc.gnu.org>
+
+ * doc/install.texi (Prerequisites): Explicitly mention C library
+ and its headers for multilib builds.
+
+2013-12-08 Oleg Endo <olegendo@gcc.gnu.org>
+
+ PR target/52898
+ PR target/51697
+ * common/config/sh/sh-common.c (sh_option_optimization_table): Remove
+ OPT_mcbranchdi entry.
+ * config/sh/sh.opt (mcbranchdi, mcmpeqdi): Mark as undocumented and
+ emit a warning.
+ * config/sh/sh.c (sh_option_override): Initialize TARGET_CBRANCHDI4
+ and TARGET_CMPEQDI_T variables.
+ * doc/invoke.texi (SH options): Undocument -mcbranchdi and -mcmpeqdi.
+
+2013-12-07 Maxim Kuvyrkov <maxim@kugelworks.com>
+
+ * config/linux.h: Fix typo in a comment.
+
+2013-12-07 Maxim Kuvyrkov <maxim@kugelworks.com>
+
+ * config.gcc (*linux*): Split libc selection from Android support.
+ Add libc selection to all *linux* targets. Add Android support to
+ architectures that support it.
+ (arm*-*-linux-*, i[34567]86-*-linux*, x86_64-*-linux*,)
+ (mips*-*-linux*): Add Android support.
+
+2013-12-07 Alexander Ivchenko <alexander.ivchenko@intel.com>
+ Maxim Kuvyrkov <maxim@kugelworks.com>
+
+ * config/bfin/uclinux.h, config/c6x/uclinux-elf.h,
+ * config/lm32/uclinux-elf.h, config/m68k/uclinux.h,
+ * config/moxie/uclinux.h (TARGET_LIBC_HAS_FUNCTION): Move definitions
+ to linux.h.
+ * config/linux-android.h (TARGET_HAS_IFUNC_P): Move definition
+ to linux.h.
+ * config/linux.h (TARGET_LIBC_HAS_FUNCTION, TARGET_HAS_IFUNC_P):
+ Define appropriately for Linux and uClinux targets.
+
+2013-12-07 Maxim Kuvyrkov <maxim@kugelworks.com>
+
+ * config/linux.c (linux_has_ifunc_p): Use correct test.
+
+2013-12-07 Maxim Kuvyrkov <maxim@kugelworks.com>
+
+ * config/linux.c (linux_android_has_ifunc_p): Rename to
+ linux_has_ifunc_p.
+ (linux_android_libc_has_function): Rename to linux_libc_has_function.
+ * config/linux-protos.h (linux_android_has_ifunc_p,)
+ (linux_android_libc_has_function): Update declarations.
+ * config/linux.h, config/linux-android.h, config/alpha/linux.h,
+ * config/rs6000/linux.h, config/rs6000/linux64.h: Update.
+
+2013-12-07 Maxim Kuvyrkov <maxim@kugelworks.com>
+
+ * linux-android.c: Rename to linux.c.
+	* t-linux-android: Rename to t-linux.  Update references
+	to linux-android.c.
+ * config.gcc: Update references to t-linux-android and linux-android.o.
+
+2013-12-07 Alan Modra <amodra@gmail.com>
+
+ * config/rs6000/rs6000.md (bswapdi2_32bit): Remove ?? from r->r
+ alternative.
+
+2013-12-07 Ralf Corsépius <ralf.corsepius@rtems.org>
+
+ * config.gcc (microblaze*-*-rtems*): Add TARGET_BIG_ENDIAN_DEFAULT.
+
+2013-12-06 Vladimir Makarov <vmakarov@redhat.com>
+
+	* config/rs6000/rs6000.md (*bswapdi2_64bit): Remove ?? from the
+ constraint.
+
+2013-12-06 Caroline Tice <cmtice@google.com>
+
+ Submitting patch from Stephen Checkoway, s@cs.jhu.edu
+ * vtable-verify.c (verify_bb_vtables): Replace all uses of verified
+ vtable pointer with the results of the verification call, rather than
+ only the uses in the next statement.
+
+2013-12-06 Andrew Pinski <apinski@cavium.com>
+
+ PR target/59092
+ * config/aarch64/aarch64.md (trap): New pattern.
+
+2013-12-06 Jakub Jelinek <jakub@redhat.com>
+
+ PR tree-optimization/59388
+ * tree-ssa-reassoc.c (update_range_test): If op == range->exp,
+ gimplify tem after stmt rather than before it.
+
+ * tree-data-ref.c (struct data_ref_loc_d): Replace pos field with ref.
+ (get_references_in_stmt): Don't record operand addresses, but
+ operands themselves.
+ (find_data_references_in_stmt, graphite_find_data_references_in_stmt):
+ Adjust for the pos -> ref change.
+
+2013-12-06 H.J. Lu <hongjiu.lu@intel.com>
+
+ * config.gcc: Change --with-cpu=ia to --with-cpu=intel.
+
+ * config/i386/i386.c (cpu_names): Replace "ia" with "intel".
+ (processor_alias_table): Likewise.
+ (ix86_option_override_internal): Likewise.
+ * config/i386/i386.h (target_cpu_default): Replace
+ TARGET_CPU_DEFAULT_ia with TARGET_CPU_DEFAULT_intel.
+
+ * doc/invoke.texi: Replace -mtune=ia with -mtune=intel.
+
+2013-12-06 Uros Bizjak <ubizjak@gmail.com>
+
+ PR target/59405
+ * config/i386/i386.c (type_natural_mode): Properly handle
+ size 8 for !TARGET_64BIT.
+
+2013-12-06 Trevor Saunders <tsaunders@mozilla.com>
+
+ * tree-ssa-pre.c (compute_antic_aux): Remove redundant call to
+ vec::release.
+
+2013-12-06 Ian Bolton <ian.bolton@arm.com>
+ Mark Mitchell <mark@codesourcery.com>
+
+ PR target/59091
+ * config/arm/arm.md (trap): New pattern.
+ * config/arm/types.md: Added a type for trap.
+
+2013-12-06 Bernd Edlinger <bernd.edlinger@hotmail.de>
+
+ * expr.c (expand_assignment): Update bitregion_start and bitregion_end.
+
+2013-12-06 Eric Botcazou <ebotcazou@adacore.com>
+
+ PR target/59316
+ * config/sparc/sparc.h (SPARC_LOW_FE_EXCEPT_VALUES): Define.
+ * config/sparc/sol2.h (SPARC_LOW_FE_EXCEPT_VALUES): Redefine.
+ * config/sparc/sparc.c (TARGET_INIT_BUILTINS): Move around.
+ (TARGET_BUILTIN_DECL): Define.
+ (TARGET_ATOMIC_ASSIGN_EXPAND_FENV): Likewise.
+ (sparc32_initialize_trampoline): Adjust call to gen_flush.
+ (enum sparc_builtins): New enumeral type.
+ (sparc_builtins): New static array.
+ (sparc_builtins_icode): Likewise.
+ (def_builtin): Accept a separate icode and save the result.
+ (def_builtin_const): Likewise.
+ (sparc_fpu_init_builtins): New function.
+ (sparc_vis_init_builtins): Pass the builtin code.
+ (sparc_init_builtins): Call it if TARGET_FPU.
+ (sparc_builtin_decl): New function.
+ (sparc_expand_builtin): Deal with SPARC_BUILTIN_{LD,ST}FSR.
+ (sparc_handle_vis_mul8x16): Use the builtin code.
+ (sparc_fold_builtin): Likewise. Deal with SPARC_BUILTIN_{LD,ST}FSR
+ and SPARC_BUILTIN_PDISTN.
+ (compound_expr): New helper function.
+ (sparc_atomic_assign_expand_fenv): New function.
+ * config/sparc/sparc.md (unspecv): Reorder values, add UNSPECV_LDFSR
+ and UNSPECV_STFSR.
+ (flush, flushdi): Merge into single pattern.
+ (ldfsr): New instruction.
+ (stfsr): Likewise.
+
+2013-12-06 Oleg Endo <olegendo@gcc.gnu.org>
+
+ * asan.c: Remove struct tags when referring to class varpool_node.
+ * cgraph.h: Likewise.
+ * cgraphbuild.c: Likewise.
+ * cgraphunit.c: Likewise.
+ * dbxout.c: Likewise.
+ * dwarf2out.c: Likewise.
+ * gimple-fold.c: Likewise.
+ * ipa-devirt.c: Likewise.
+ * ipa-ref-inline.h: Likewise.
+ * ipa-ref.h: Likewise.
+ * ipa-reference.c: Likewise.
+ * ipa-utils.c: Likewise.
+ * ipa.c: Likewise.
+ * lto-cgraph.c: Likewise.
+ * lto-streamer-out.c: Likewise.
+ * lto-streamer.h: Likewise.
+ * passes.c: Likewise.
+ * toplev.c: Likewise.
+ * tree-eh.c: Likewise.
+ * tree-emutls.c: Likewise.
+ * tree-pass.h: Likewise.
+ * tree-ssa-structalias.c: Likewise.
+ * tree-vectorizer.c: Likewise.
+ * tree.c: Likewise.
+ * varasm.c: Likewise.
+ * varpool.c: Likewise.
+
+2013-12-06 Oleg Endo <olegendo@gcc.gnu.org>
+
+ * cgraphunit.c: Remove struct tags when referring to class
+ ipa_opt_pass_d or class opt_pass.
+ * function.h: Likewise.
+ * lto-cgraph.c: Likewise.
+ * pass_manager.h: Likewise.
+ * passes.c: Likewise.
+ * tree-pass.h: Likewise.
+
+2013-12-06 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/59058
+ * tree-vectorizer.h (struct _loop_vec_info): Add num_itersm1 member.
+ (LOOP_VINFO_NITERSM1): New macro.
+ * tree-vect-loop-manip.c (slpeel_tree_peel_loop_to_edge): Express
+ the vector loop entry test in terms of scalar latch executions.
+ (vect_do_peeling_for_alignment): Update LOOP_VINFO_NITERSM1.
+ * tree-vect-loop.c (vect_get_loop_niters): Also return the
+ number of latch executions.
+ (new_loop_vec_info): Initialize LOOP_VINFO_NITERSM1.
+ (vect_analyze_loop_form): Likewise.
+ (vect_generate_tmps_on_preheader): Compute the number of
+ vectorized iterations differently.
+
+2013-12-05 Jan-Benedict Glaw <jbglaw@lug-owl.de>
+
+ * config/score/score.c (score_force_temporary): Delete function.
+ (score_split_symbol): Ditto.
+ * config/score/score.h (ASM_OUTPUT_ADDR_DIFF_ELT): Add extra
+ parentheses to silence ambiguity warning and reindent.
+
+2013-12-05 Marek Polacek <polacek@redhat.com>
+
+ * doc/invoke.texi: Document -fsanitize=signed-integer-overflow.
+
+2013-12-05 H.J. Lu <hongjiu.lu@intel.com>
+
+ * config.gcc: Support --with-cpu=ia.
+
+ * config/i386/i386.c (cpu_names): Add "ia".
+ (processor_alias_table): Likewise.
+ (ix86_option_override_internal): Disallow -march=ia.
+ * config/i386/i386.h (target_cpu_default): Add TARGET_CPU_DEFAULT_ia.
+
+ * doc/invoke.texi: Document -mtune=ia.
+
+2013-12-05 Vladimir Makarov <vmakarov@redhat.com>
+
+ PR rtl-optimization/59317
+ * lra-constraints.c (in_class_p): Don't ignore insn with constant
+ as a source.
+
+2013-12-05 Martin Jambor <mjambor@suse.cz>
+
+ PR ipa/58253
+ * ipa-prop.c (ipa_modify_formal_parameters): Create decls of
+ non-BLKmode in their naturally aligned type.
+
+2013-12-05 Marek Polacek <polacek@redhat.com>
+
+ PR sanitizer/59333
+ PR sanitizer/59397
+ * ubsan.c: Include rtl.h and expr.h.
+ (ubsan_encode_value): Add new parameter. If expanding, assign
+ a stack slot for DECL_RTL of the temporary and call expand_assignment.
+ Handle BOOLEAN_TYPE and ENUMERAL_TYPE.
+ (ubsan_build_overflow_builtin): Adjust ubsan_encode_value call.
+ * ubsan.h (ubsan_encode_value): Adjust declaration.
+ * internal-fn.c (ubsan_expand_si_overflow_addsub_check): Move
+ ubsan_build_overflow_builtin above expand_normal call. Surround
+ this call with push_temp_slots and pop_temp_slots.
+ (ubsan_expand_si_overflow_neg_check): Likewise.
+ (ubsan_expand_si_overflow_mul_check): Likewise.
+
+2013-12-05 Yufeng Zhang <yufeng.zhang@arm.com>
+
+ * gimple-ssa-strength-reduction.c (find_basis_for_candidate): Guard
+ the get_alternative_base call with flag_expensive_optimizations.
+ (alloc_cand_and_find_basis): Likewise.
+
+2013-12-05 Tejas Belagod <tejas.belagod@arm.com>
+
+ * rtlanal.c (set_noop_p): Return nonzero in case of redundant
+ vec_select for overlapping register lanes.
+
+2013-12-05 Kirill Yukhin <kirill.yukhin@intel.com>
+
+ * config/i386/i386.c (ix86_expand_builtin): Generate
+ reg for readflags built-in when optimizing.
+ * config/i386/i386.md (*pushfl<mode>): Rename to ...
+ (pushfl<mode>2): This. Fix iterator.
+ (*popfl<mode>): Rename to ...
+ (*popfl<mode>1): This. Fix iterator.
+
+2013-12-05 Kirill Yukhin <kirill.yukhin@intel.com>
+
+ * config/i386/i386.c (IX86_BUILTIN_READ_FLAGS): New.
+ (IX86_BUILTIN_WRITE_FLAGS): Ditto.
+ (ix86_init_mmx_sse_builtins): Define
+ __builtin_ia32_writeeflags_u32, __builtin_ia32_writeeflags_u64,
+ __builtin_ia32_readeflags_u32, __builtin_ia32_readeflags_u64.
+ (ix86_expand_builtin): Expand them.
+ * config/i386/ia32intrin.h (__readeflags): New.
+ (__writeeflags): Ditto.
+ * config/i386/i386.md (*pushfl<mode>): Ditto.
+ (*popfl<mode>1): Ditto.
+
+2013-12-05 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/59374
+ * tree-vect-data-refs.c (vect_slp_analyze_data_ref_dependence):
+ Commonize known and unknown dependence case fixing the allowed
+ read-write dependence case and dropping code that should not matter.
+
+2013-12-05 Kirill Yukhin <kirill.yukhin@intel.com>
+
+ * config/ia64/ia64.md (prologue_allocate_stack): Block auto-
+ generation of predicated version.
+ (epilogue_deallocate_stack): Ditto.
+ (prologue_allocate_stack_pr): Add explicit predicated version.
+ (epilogue_deallocate_stack_pr): Ditto.
+ * config/ia64/ia64.c (ia64_single_set): Use explicit version.
+
+2013-12-05 Alan Modra <amodra@gmail.com>
+
+ * configure.ac (BUILD_CXXFLAGS) Don't use ALL_CXXFLAGS for
+ build != host.
+ <recursive call for build != host>: Clear GMPINC. Don't bother
+ saving CFLAGS.
+
+2013-12-04 Jakub Jelinek <jakub@redhat.com>
+ Marek Polacek <polacek@redhat.com>
+
+ * opts.c (common_handle_option): Handle
+ -fsanitize=signed-integer-overflow.
+ * config/i386/i386.md (addv<mode>4, subv<mode>4, mulv<mode>4,
+ negv<mode>3, negv<mode>3_1): Define expands.
+ (*addv<mode>4, *subv<mode>4, *mulv<mode>4, *negv<mode>3): Define insns.
+ * sanitizer.def (BUILT_IN_UBSAN_HANDLE_ADD_OVERFLOW,
+ BUILT_IN_UBSAN_HANDLE_SUB_OVERFLOW,
+ BUILT_IN_UBSAN_HANDLE_MUL_OVERFLOW,
+ BUILT_IN_UBSAN_HANDLE_NEGATE_OVERFLOW): Define.
+ * ubsan.h (PROB_VERY_UNLIKELY, PROB_EVEN, PROB_VERY_LIKELY,
+ PROB_ALWAYS): Define.
+ (ubsan_build_overflow_builtin): Declare.
+ * gimple-fold.c (gimple_fold_stmt_to_constant_1): Add folding of
+ internal functions.
+ * ubsan.c (PROB_VERY_UNLIKELY): Don't define here.
+ (ubsan_build_overflow_builtin): New function.
+ (instrument_si_overflow): Likewise.
+ (ubsan_pass): Add signed integer overflow checking.
+ (gate_ubsan): Enable the pass also when SANITIZE_SI_OVERFLOW.
+ * flag-types.h (enum sanitize_code): Add SANITIZE_SI_OVERFLOW.
+ * internal-fn.c: Include ubsan.h and target.h.
+ (ubsan_expand_si_overflow_addsub_check): New function.
+ (ubsan_expand_si_overflow_neg_check): Likewise.
+ (ubsan_expand_si_overflow_mul_check): Likewise.
+ (expand_UBSAN_CHECK_ADD): Likewise.
+ (expand_UBSAN_CHECK_SUB): Likewise.
+ (expand_UBSAN_CHECK_MUL): Likewise.
+ * fold-const.c (fold_binary_loc): Don't fold A + (-B) -> A - B and
+ (-A) + B -> B - A when doing the signed integer overflow checking.
+ * internal-fn.def (UBSAN_CHECK_ADD, UBSAN_CHECK_SUB, UBSAN_CHECK_MUL):
+ Define.
+ * tree-vrp.c (extract_range_basic): Handle internal calls.
+ * optabs.def (addv4_optab, subv4_optab, mulv4_optab, negv4_optab): New
+ optabs.
+ * asan.c: Include predict.h.
+ (PROB_VERY_UNLIKELY, PROB_ALWAYS): Don't define here.
+ * predict.c: Move the PROB_* macros...
+ * predict.h (enum br_predictor): ...here.
+ (PROB_LIKELY, PROB_UNLIKELY): Define.
+ * trans-mem.c: Include predict.h.
+ (PROB_VERY_UNLIKELY, PROB_ALWAYS, PROB_VERY_LIKELY,
+ PROB_LIKELY, PROB_UNLIKELY): Don't define here.
+
+2013-12-04 Jeff Law <law@redhat.com>
+
+ * common.opt: Split up -fisolate-erroneous-paths into
+ -fisolate-erroneous-paths-dereference and
+ -fisolate-erroneous-paths-attribute.
+ * invoke.texi: Corresponding changes.
+ * gimple.c (infer_nonnull_range): Add and use new arguments to control
+ what kind of statements can be used to infer a non-null range.
+ * gimple.h (infer_nonnull_range): Update prototype.
+ * tree-vrp.c (infer_value_range): Corresponding changes.
+ * opts.c (default_options_table): Update due to option split.
+ * gimple-ssa-isolate-paths.c: Fix trailing whitespace.
+ (find_implicit_erroneous_behaviour): Pass additional arguments
+ to infer_nonnull_range.
+ (find_explicit_erroneous_behaviour): Similarly.
+ (gate_isolate_erroneous_paths): Check both of the new options.
+
+2013-12-04 Jeff Law <law@redhat.com>
+
+ * expr.c (expand_assignment): Update comments.
+
+2013-12-04 Tobias Burnus <burnus@net-b.de>
+
+ PR debug/37132
+ * lto-streamer.h (LTO_tags): Add LTO_namelist_decl_ref.
+ * tree.def (NAMELIST_DECL): Add.
+ * tree.h (NAMELIST_DECL_ASSOCIATED_DECL): New macro.
+ * tree.c (initialize_tree_contains_struct): Add asserts for it.
+ * dwarf2out.c (gen_namelist_decl): New function.
+ (gen_decl_die, dwarf2out_decl): Call it.
+ (dwarf2out_imported_module_or_decl_1): Handle NAMELIST_DECL.
+ * lto-streamer-in.c (lto_input_tree_ref): Handle NAMELIST_DECL.
+ (lto_input_tree_ref, lto_input_tree_1): Update lto_tag_check_range
+ call.
+ * lto-streamer-out.c (lto_output_tree_ref): Handle NAMELIST_DECL.
+
+2013-12-03 Xinliang David Li <davidxl@google.com>
+
+ * tree-ssa-structalias.c (constraint_set_union): Change return type
+ from void to bool.
+ (merge_node_constraints): Ditto.
+ (unify_nodes): Update changed set when constraints set changes.
+
+2013-12-04 H.J. Lu <hongjiu.lu@intel.com>
+
+ * configure.ac: Append gdbasan.in to .gdbinit if CFLAGS contains
+ -fsanitize=address.
+ * configure: Regenerated.
+
+ * gdbasan.in: New file.
+
+2013-12-04 Jakub Jelinek <jakub@redhat.com>
+
+ PR rtl-optimization/58726
+ * combine.c (force_to_mode): Fix comment typo. Don't destructively
+ modify x for ROTATE, ROTATERT and IF_THEN_ELSE.
+
+2013-12-04 Jakub Jelinek <jakub@redhat.com>
+ Uros Bizjak <ubizjak@gmail.com>
+
+ PR target/59163
+ * config/i386/i386.c (ix86_legitimate_combined_insn): If for
+ !TARGET_AVX there is misaligned MEM operand with vector mode
+ and get_attr_ssememalign is 0, return false.
+ (ix86_expand_special_args_builtin): Add get_pointer_alignment
+ computed alignment and for non-temporal loads/stores also
+ at least GET_MODE_ALIGNMENT as MEM_ALIGN.
+ * config/i386/sse.md
+ (<sse>_loadu<ssemodesuffix><avxsizesuffix><mask_name>,
+ <sse>_storeu<ssemodesuffix><avxsizesuffix>,
+ <sse2_avx_avx512f>_loaddqu<mode><mask_name>,
+ <sse2_avx_avx512f>_storedqu<mode>, <sse3>_lddqu<avxsizesuffix>,
+ sse_vmrcpv4sf2, sse_vmrsqrtv4sf2, sse2_cvtdq2pd, sse_movhlps,
+ sse_movlhps, sse_storehps, sse_loadhps, sse_loadlps,
+ *vec_interleave_highv2df, *vec_interleave_lowv2df,
+ *vec_extractv2df_1_sse, sse2_movsd, sse4_1_<code>v8qiv8hi2,
+ sse4_1_<code>v4qiv4si2, sse4_1_<code>v4hiv4si2,
+ sse4_1_<code>v2qiv2di2, sse4_1_<code>v2hiv2di2,
+ sse4_1_<code>v2siv2di2, sse4_2_pcmpestr, *sse4_2_pcmpestr_unaligned,
+ sse4_2_pcmpestri, sse4_2_pcmpestrm, sse4_2_pcmpestr_cconly,
+ sse4_2_pcmpistr, *sse4_2_pcmpistr_unaligned, sse4_2_pcmpistri,
+ sse4_2_pcmpistrm, sse4_2_pcmpistr_cconly): Add ssememalign attribute.
+ * config/i386/i386.md (ssememalign): New define_attr.
+
2013-12-04 Jakub Jelinek <jakub@redhat.com>
PR tree-optimization/59355
- * ipa-devirt.c (gate_ipa_devirt): Return false if
- !flag_devirtualize.
+ * ipa-devirt.c (gate_ipa_devirt): Return false if !flag_devirtualize.
* opts.c (common_handle_option): Fix comment spelling.
2013-12-04 Yufeng Zhang <yufeng.zhang@arm.com>
@@ -47,9 +1664,9 @@
2013-12-03 Senthil Kumar Selvaraj <senthil_kumar.selvaraj@atmel.com>
- * config/avr/avr.c (avr_option_override): Warn if asked to generate
- position independent code.
- * config/avr/avr.h: Modify LINK_SPEC to reject -shared.
+ * config/avr/avr.c (avr_option_override): Warn if asked to generate
+ position independent code.
+ * config/avr/avr.h: Modify LINK_SPEC to reject -shared.
2013-12-03 H.J. Lu <hongjiu.lu@intel.com>
@@ -61,8 +1678,7 @@
2013-12-03 Marek Polacek <polacek@redhat.com>
PR middle-end/56344
- * calls.c (expand_call): Disallow passing huge arguments
- by value.
+ * calls.c (expand_call): Disallow passing huge arguments by value.
2013-12-03 Jakub Jelinek <jakub@redhat.com>
@@ -133,9 +1749,8 @@
(ix86_tune_memset_strategy): Ditto.
(ix86_tune_no_default): Ditto.
(ix86_veclibabi_type): Ditto.
- * config/i386/i386.c
- (function_specific_save): Save the above variables
- in gcc_options to cl_target_option.
+ * config/i386/i386.c (function_specific_save): Save the above
+ variables in gcc_options to cl_target_option.
(function_specific_restore): Do the reverse done in
function_specific_save.
(ix86_valid_target_attribute_tree): Change ix86_arch_string
@@ -152,9 +1767,8 @@
2013-12-02 Jakub Jelinek <jakub@redhat.com>
PR tree-optimization/59358
- * tree-vrp.c (union_ranges): To check for the partially
- overlapping ranges or adjacent ranges, also compare *vr0max
- with vr1max.
+ * tree-vrp.c (union_ranges): To check for the partially overlapping
+ ranges or adjacent ranges, also compare *vr0max with vr1max.
2013-12-02 Sterling Augustine  <saugustine@google.com>
@@ -198,9 +1812,8 @@
PR middle-end/59199
* tree-ssa-operands.c (opf_implicit): Remove.
(opf_address_taken): New flag.
- (get_expr_operands): Remove early out, pass down
- opf_address_taken for ADDR_EXPRs, add a use operand only
- for non-opf_address_taken bases.
+ (get_expr_operands): Remove early out, pass down opf_address_taken for
+ ADDR_EXPRs, add a use operand only for non-opf_address_taken bases.
(get_indirect_ref_operands): Rename to ...
(get_mem_ref_operands): ... this.
(get_asm_expr_operands): Rename to ...
@@ -222,9 +1835,9 @@
* config/arm/arm.c (arm_preferred_reload_class): Only return LO_REGS
when rclass is GENERAL_REGS.
-2013-12-02 Ganesh Gopalasubramanian <Ganesh.Gopalasubramanian@amd.com>
+2013-12-02 Ganesh Gopalasubramanian <Ganesh.Gopalasubramanian@amd.com>
- * loop-unroll.c (decide_unroll_constant_iterations): Check macro
+ * loop-unroll.c (decide_unroll_constant_iterations): Check macro
TARGET_LOOP_UNROLL_ADJUST while deciding unroll factor.
2013-12-01 Eric Botcazou <ebotcazou@adacore.com>
@@ -297,15 +1910,14 @@
2013-11-29 Yvan Roux <yvan.roux@linaro.org>
- * config/arm/arm.md (store_minmaxsi): Use only when
+ * config/arm/arm.md (store_minmaxsi): Use only when
optimize_function_for_size_p.
2013-11-29 Jakub Jelinek <jakub@redhat.com>
Yury Gribov <y.gribov@samsung.com>
PR sanitizer/59063
- * config/gnu-user.h: Removed old code for setting up sanitizer
- libs.
+ * config/gnu-user.h: Removed old code for setting up sanitizer libs.
* gcc.c: Using libsanitizer spec instead of explicit libs.
2013-11-29 Ilya Enkovich <ilya.enkovich@intel.com>
@@ -588,17 +2200,14 @@
(enum omp_clause_depend_kind): Add OMP_CLAUSE_DEPEND_LAST.
(enum omp_clause_map_kind): Add OMP_CLAUSE_MAP_LAST.
(enum omp_clause_proc_bind_kind): Add OMP_CLAUSE_PROC_BIND_LAST.
- * lto-streamer-out.c (lto_is_streamable): Allow streaming
- OMP_CLAUSE.
+ * lto-streamer-out.c (lto_is_streamable): Allow streaming OMP_CLAUSE.
(DFS_write_tree_body): Handle OMP_CLAUSE.
- * tree-streamer-out.c (pack_ts_omp_clause_value_fields): New
- function.
+ * tree-streamer-out.c (pack_ts_omp_clause_value_fields): New function.
(streamer_pack_tree_bitfields): Call it for OMP_CLAUSE.
(write_ts_omp_clause_tree_pointers): New function.
(streamer_write_tree_body): Call it for OMP_CLAUSE.
(streamer_write_tree_header): For OMP_CLAUSE stream OMP_CLAUSE_CODE.
- * tree-streamer-in.c (unpack_ts_omp_clause_value_fields): New
- function.
+ * tree-streamer-in.c (unpack_ts_omp_clause_value_fields): New function.
(unpack_value_fields): Call it for OMP_CLAUSE.
(streamer_alloc_tree): Handle OMP_CLAUSE.
(lto_input_ts_omp_clause_tree_pointers): New function.
@@ -619,8 +2228,7 @@
2013-11-28 Jakub Jelinek <jakub@redhat.com>
PR middle-end/59327
- * cfgexpand.c (expand_used_vars): Avoid warning on 32-bit
- HWI hosts.
+ * cfgexpand.c (expand_used_vars): Avoid warning on 32-bit HWI hosts.
2013-11-28 Vladimir Makarov <vmakarov@redhat.com>
@@ -669,8 +2277,7 @@
lower. Switch off lra_in_progress temporarily to generate regs
involved into elimination.
(lra_init_elimination): Rename to init_elimination. Make it
- static. Set up insn sp offset, check the offsets at the end of
- BBs.
+ static. Set up insn sp offset, check the offsets at the end of BBs.
(process_insn_for_elimination): Add parameter. Pass its value to
eliminate_regs_in_insn.
(lra_eliminate): : Add parameter. Pass its value to
@@ -765,7 +2372,7 @@
* tree-vect-loop.c (vect_estimate_min_profitable_iters): Ditto
plus added openmp-simd warining.
-2013-11-27 H.J. Lu <hongjiu.lu@intel.com>
+2013-11-27 H.J. Lu <hongjiu.lu@intel.com>
PR rtl-optimization/59311
* dwarf2cfi.c (dwf_regno): Assert reg isn't pseudo register.
@@ -793,8 +2400,8 @@
2013-11-27 Kenneth Zadeck <zadeck@naturalbridge.com>
- * fold-const.c (int_const_binop_1): Make INT_MIN % -1 return 0 with the
- overflow bit set.
+ * fold-const.c (int_const_binop_1): Make INT_MIN % -1 return 0 with
+ the overflow bit set.
2013-11-27 Richard Biener <rguenther@suse.de>
@@ -868,8 +2475,7 @@
(make_pass_omp_simd_clone): New function.
* passes.def (pass_omp_simd_clone): New.
* target.def (TARGET_SIMD_CLONE_COMPUTE_VECSIZE_AND_SIMDLEN,
- TARGET_SIMD_CLONE_ADJUST, TARGET_SIMD_CLONE_USABLE): New target
- hooks.
+ TARGET_SIMD_CLONE_ADJUST, TARGET_SIMD_CLONE_USABLE): New target hooks.
* target.h (struct cgraph_node, struct cgraph_simd_node): Declare.
* tree-core.h (OMP_CLAUSE_LINEAR_VARIABLE_STRIDE): Document.
* tree.h (OMP_CLAUSE_LINEAR_VARIABLE_STRIDE): Define.
@@ -882,8 +2488,8 @@
the call arguments or in lhs.
* tree-vect-loop.c (vect_determine_vectorization_factor): Handle
calls with no lhs.
- (vect_transform_loop): Allow NULL STMT_VINFO_VECTYPE for calls without
- lhs.
+ (vect_transform_loop): Allow NULL STMT_VINFO_VECTYPE for calls
+ without lhs.
* tree-vectorizer.h (enum stmt_vec_info_type): Add
call_simd_clone_vec_info_type.
(struct _stmt_vec_info): Add simd_clone_fndecl field.
@@ -965,10 +2571,8 @@
(GSS_OMP_PARALLEL_LAYOUT, gimple_statement_omp_parallel_layout):
...these.
(GSS_OMP_SINGLE, gimple_statement_omp_single): Rename to...
- (GSS_OMP_SINGLE_LAYOUT, gimple_statement_omp_single_layout):
- ...these.
- (GSS_OMP_ATOMIC_STORE, gimple_statement_omp_atomic_store): Rename
- to...
+ (GSS_OMP_SINGLE_LAYOUT, gimple_statement_omp_single_layout): ...these.
+ (GSS_OMP_ATOMIC_STORE, gimple_statement_omp_atomic_store): Rename to...
(GSS_OMP_ATOMIC_STORE_LAYOUT, gimple_statement_omp_atomic_store):
...these.
@@ -1057,8 +2661,7 @@
GIMPLE_OMP_RETURN.
(gimple_resx_region, gimple_resx_set_region): Replace bogus
downcasts to gimple_statement_eh_ctrl with downcasts to
- gimple_statement_resx, thus requiring that the code be
- GIMPLE_RESX.
+ gimple_statement_resx, thus requiring that the code be GIMPLE_RESX.
(gimple_eh_dispatch_region, gimple_eh_dispatch_set_region):
Replace bogus downcasts to const gimple_statement_eh_ctrl with
downcasts to gimple_statement_eh_dispatch, thus requiring that
@@ -1151,14 +2754,13 @@
* config/arm/arm.c (arm_new_rtx_costs): Initialise cost correctly
for zero_extend case.
-2013-11-26 H.J. Lu <hongjiu.lu@intel.com>
+2013-11-26 H.J. Lu <hongjiu.lu@intel.com>
PR bootstrap/55552
* configure.ac (install_gold_as_default): New. Set to yes for
--disable-ld or --enable-gold=default.
(gcc_cv_ld_gold_srcdir): New.
- (gcc_cv_ld): Also check in-tree gold if install_gold_as_default
- is yes.
+ (gcc_cv_ld): Also check in-tree gold if install_gold_as_default is yes.
(ORIGINAL_LD_BFD_FOR_TARGET): New AC_SUBST.
(ORIGINAL_LD_GOLD_FOR_TARGET): Likewise.
* configure: Regenerated.
@@ -1248,8 +2850,7 @@
* config/aarch64/aarch64-builtins.c
(VAR1): Use new naming scheme for aarch64_builtins.
- (aarch64_builtin_vectorized_function): Use new
- aarch64_builtins names.
+ (aarch64_builtin_vectorized_function): Use new aarch64_builtins names.
2013-11-26 Richard Biener <rguenther@suse.de>
@@ -1305,8 +2906,8 @@
* doc/invoke.texi (-mslow-flash-data): Document new option.
* config/arm/arm.opt (mslow-flash-data): New option.
- * config/arm/arm-protos.h (arm_max_const_double_inline_cost): Declare
- it.
+ * config/arm/arm-protos.h (arm_max_const_double_inline_cost):
+ Declare it.
* config/arm/arm.h (TARGET_USE_MOVT): Always true when literal pools
are disabled.
(arm_disable_literal_pool): Declare it.
@@ -1561,14 +3162,14 @@
2013-11-22 Yuri Rumyantsev <ysrumyan@gmail.com>
- * config/i386/i386.c(processor_alias_table): Enable PTA_AES,
+ * config/i386/i386.c (processor_alias_table): Enable PTA_AES,
PTA_PCLMUL and PTA_RDRND for Silvermont.
* config/i386/driver-i386.c (host_detect_local_cpu): Set up cpu
for Silvermont.
* doc/invoke.texi: Mention AES, PCLMUL and RDRND for Silvermont.
-2013-11-22 Andrew MacLeod <amacleod@redhat.com>
+2013-11-22 Andrew MacLeod <amacleod@redhat.com>
* hooks.h (hook_uint_mode_0): Add Prototype.
* hooks.c (hook_uint_mode_0): New default function.
@@ -1578,7 +3179,7 @@
* doc/tm.texi.in (TARGET_ATOMIC_ALIGN_FOR_MODE): Define.
* doc/tm.texi (TARGET_ATOMIC_ALIGN_FOR_MODE): Add description.
-2013-11-22 Andrew MacLeod <amacleod@redhat.com>
+2013-11-22 Andrew MacLeod <amacleod@redhat.com>
* gimple.h: Remove all includes.
(recalculate_side_effects): Move prototype to gimplify.h.
@@ -2337,7 +3938,7 @@
2013-11-20 James Greenhalgh <james.greenhalgh@arm.com>
- * gcc/config/aarch64/aarch64-builtins.c
+ * config/aarch64/aarch64-builtins.c
(aarch64_simd_itype): Remove.
(aarch64_simd_builtin_datum): Remove itype, add qualifiers pointer.
(VAR1): Use qualifiers.
@@ -3815,7 +5416,7 @@
* profile.c (compute_branch_probabilities): Do not sanity check
run_max.
-2013-11-18 Kenneth Zadeck <zadeck@naturalbridge.com>
+2013-11-18 Kenneth Zadeck <zadeck@naturalbridge.com>
* tree.c (int_fits_type_p): Change GET_MODE_BITSIZE to
GET_MODE_PRECISION.
@@ -4317,7 +5918,7 @@
* lto-streamer-in.c (input function): Call cgraph_create_node if
cgraph_get_node failed.
-2013-11-14 Olivier Hainque <hainque@adacore.com>
+2013-11-14 Olivier Hainque <hainque@adacore.com>
* cfgexpand.c (defer_stack_allocation): When optimization is enabled,
defer allocation of DECL_IGNORED_P variables at toplevel unless really
@@ -10134,7 +11735,7 @@
* config/iq2000/iq2000.c (init_cumulative_args): Likewise.
* config/rs6000/rs6000.c (init_cumulative_args): Likewise.
-2013-10-16 Ganesh Gopalasubramanian <Ganesh.Gopalasubramanian@amd.com>
+2013-10-16 Ganesh Gopalasubramanian <Ganesh.Gopalasubramanian@amd.com>
* config/i386/i386.c (ix86_option_override_internal): Enable FMA4
for AMD bdver3.
@@ -10144,7 +11745,7 @@
* config/cris/t-elfmulti (MULTILIB_OPTIONS, MULTILIB_DIRNAMES)
(MULTILIB_MATCHES): Add multilib for -march=v8.
-2013-10-15 Sriraman Tallam <tmsriram@google.com>
+2013-10-15 Sriraman Tallam <tmsriram@google.com>
PR target/57756
* optc-save-gen.awk: Add extra parameter to the save and restore
diff --git a/gcc/DATESTAMP b/gcc/DATESTAMP
index c026c978121..66b970db28d 100644
--- a/gcc/DATESTAMP
+++ b/gcc/DATESTAMP
@@ -1 +1 @@
-20131204
+20131213
diff --git a/gcc/ada/ChangeLog b/gcc/ada/ChangeLog
index f1f73169933..47279366d82 100644
--- a/gcc/ada/ChangeLog
+++ b/gcc/ada/ChangeLog
@@ -1,3 +1,32 @@
+2013-12-12 Eric Botcazou <ebotcazou@adacore.com>
+
+ * gcc-interface/Makefile.in (ARM linux, GNU eabi): Tweak regexp.
+
+2013-12-12 Eric Botcazou <ebotcazou@adacore.com>
+ Iain Sandoe <iain@codesourcery.com>
+
+ PR ada/55946
+ * gcc-interface/Make-lang.in (ada/doctools/xgnatugn): Use gnatmake.
+ * gcc-interface/Makefile.in (GCC_LINK): Add LDFLAGS.
+ (../../gnatmake): Remove LDFLAGS.
+ (../../gnatlink): Likewise.
+
+2013-12-04 Eric Botcazou <ebotcazou@adacore.com>
+
+ PR ada/59382
+ * indepsw-darwin.adb: New file.
+
+2013-12-04 Eric Botcazou <ebotcazou@adacore.com>
+
+ * gcc-interface/decl.c (components_to_record): Add specific handling
+ for fields with zero size and no representation clause.
+
+2013-12-04 Eric Botcazou <ebotcazou@adacore.com>
+
+ * gcc-interface/trans.c (Case_Statement_to_gnu): Do not push a binding
+ level for each branch if this is a case expression in Ada 2012.
+ (gnat_to_gnu) <case N_Expression_With_Actions>: Adjust comment.
+
2013-11-29 Eric Botcazou <ebotcazou@adacore.com>
PR ada/54040
diff --git a/gcc/ada/gcc-interface/Make-lang.in b/gcc/ada/gcc-interface/Make-lang.in
index cd3676f9447..8c87b2bba26 100644
--- a/gcc/ada/gcc-interface/Make-lang.in
+++ b/gcc/ada/gcc-interface/Make-lang.in
@@ -658,7 +658,7 @@ ada.tags: force
ada/doctools/xgnatugn$(build_exeext): ada/xgnatugn.adb
-$(MKDIR) ada/doctools
$(CP) $^ ada/doctools
- cd ada/doctools && $(GNATMAKE) -q xgnatugn
+ cd ada/doctools && gnatmake -q xgnatugn
# Note that doc/gnat_ugn.texi and doc/projects.texi do not depend on
# xgnatugn being built so we can distribute a pregenerated doc/gnat_ugn.info
diff --git a/gcc/ada/gcc-interface/Makefile.in b/gcc/ada/gcc-interface/Makefile.in
index 885a5edce89..02f6cb2832d 100644
--- a/gcc/ada/gcc-interface/Makefile.in
+++ b/gcc/ada/gcc-interface/Makefile.in
@@ -1903,7 +1903,7 @@ ifeq ($(strip $(filter-out powerpc% linux%,$(target_cpu) $(target_os))),)
endif
# ARM linux, GNU eabi
-ifeq ($(strip $(filter-out arm% linux-gnueabi,$(target_cpu) $(target_os))),)
+ifeq ($(strip $(filter-out arm% linux-gnueabi%,$(target_cpu) $(target_os))),)
LIBGNAT_TARGET_PAIRS = \
a-intnam.ads<a-intnam-linux.ads \
s-inmaop.adb<s-inmaop-posix.adb \
@@ -2415,7 +2415,7 @@ TOOLS_FLAGS_TO_PASS= \
"GNATLINK=$(GNATLINK)" \
"GNATBIND=$(GNATBIND)"
-GCC_LINK=$(CXX) $(GCC_LINK_FLAGS) $(ADA_INCLUDES)
+GCC_LINK=$(CXX) $(GCC_LINK_FLAGS) $(ADA_INCLUDES) $(LDFLAGS)
# Build directory for the tools. Let's copy the target-dependent
# sources using the same mechanism as for gnatlib. The other sources are
@@ -2537,12 +2537,10 @@ gnatlink-re: ../stamp-tools gnatmake-re
# Likewise for the tools
../../gnatmake$(exeext): $(P) b_gnatm.o $(GNATMAKE_OBJS)
- +$(GCC_LINK) $(ALL_CFLAGS) $(LDFLAGS) -o $@ b_gnatm.o $(GNATMAKE_OBJS) \
- $(TOOLS_LIBS)
+ +$(GCC_LINK) $(ALL_CFLAGS) -o $@ b_gnatm.o $(GNATMAKE_OBJS) $(TOOLS_LIBS)
../../gnatlink$(exeext): $(P) b_gnatl.o $(GNATLINK_OBJS)
- +$(GCC_LINK) $(ALL_CFLAGS) $(LDFLAGS) -o $@ b_gnatl.o $(GNATLINK_OBJS) \
- $(TOOLS_LIBS)
+ +$(GCC_LINK) $(ALL_CFLAGS) -o $@ b_gnatl.o $(GNATLINK_OBJS) $(TOOLS_LIBS)
../stamp-gnatlib-$(RTSDIR):
@if [ ! -f stamp-gnatlib-$(RTSDIR) ] ; \
diff --git a/gcc/ada/gcc-interface/decl.c b/gcc/ada/gcc-interface/decl.c
index 61b2239132b..7d2da96d191 100644
--- a/gcc/ada/gcc-interface/decl.c
+++ b/gcc/ada/gcc-interface/decl.c
@@ -6932,6 +6932,7 @@ components_to_record (tree gnu_record_type, Node_Id gnat_component_list,
tree gnu_rep_list = NULL_TREE;
tree gnu_var_list = NULL_TREE;
tree gnu_self_list = NULL_TREE;
+ tree gnu_zero_list = NULL_TREE;
/* For each component referenced in a component declaration create a GCC
field and add it to the list, skipping pragmas in the GNAT list. */
@@ -7262,6 +7263,10 @@ components_to_record (tree gnu_record_type, Node_Id gnat_component_list,
to do this in a separate pass since we want to handle the discriminants
but can't play with them until we've used them in debugging data above.
+ Similarly, pull out the fields with zero size and no rep clause, as they
+ would otherwise modify the layout and thus very likely run afoul of the
+ Ada semantics, which are different from those of C here.
+
??? If we reorder them, debugging information will be wrong but there is
nothing that can be done about this at the moment. */
gnu_last = NULL_TREE;
@@ -7300,6 +7305,19 @@ components_to_record (tree gnu_record_type, Node_Id gnat_component_list,
continue;
}
+ if (DECL_SIZE (gnu_field) && integer_zerop (DECL_SIZE (gnu_field)))
+ {
+ DECL_FIELD_OFFSET (gnu_field) = size_zero_node;
+ SET_DECL_OFFSET_ALIGN (gnu_field, BIGGEST_ALIGNMENT);
+ DECL_FIELD_BIT_OFFSET (gnu_field) = bitsize_zero_node;
+ if (field_is_aliased (gnu_field))
+ TYPE_ALIGN (gnu_record_type)
+ = MAX (TYPE_ALIGN (gnu_record_type),
+ TYPE_ALIGN (TREE_TYPE (gnu_field)));
+ MOVE_FROM_FIELD_LIST_TO (gnu_zero_list);
+ continue;
+ }
+
gnu_last = gnu_field;
}
@@ -7392,6 +7410,11 @@ components_to_record (tree gnu_record_type, Node_Id gnat_component_list,
finish_record_type (gnu_record_type, gnu_field_list, layout_with_rep ? 1 : 0,
debug_info && !maybe_unused);
+ /* Chain the fields with zero size at the beginning of the field list. */
+ if (gnu_zero_list)
+ TYPE_FIELDS (gnu_record_type)
+ = chainon (gnu_zero_list, TYPE_FIELDS (gnu_record_type));
+
return (gnu_rep_list && !p_gnu_rep_list) || variants_have_rep;
}
diff --git a/gcc/ada/gcc-interface/trans.c b/gcc/ada/gcc-interface/trans.c
index e533de6dcbf..2eae39976d8 100644
--- a/gcc/ada/gcc-interface/trans.c
+++ b/gcc/ada/gcc-interface/trans.c
@@ -2348,12 +2348,17 @@ Case_Statement_to_gnu (Node_Id gnat_node)
}
}
- /* Push a binding level here in case variables are declared as we want
- them to be local to this set of statements instead of to the block
- containing the Case statement. */
+ /* This construct doesn't define a scope so we shouldn't push a binding
+ level around the statement list. Except that we have always done so
+ historically and this makes it possible to reduce stack usage. As a
+ compromise, we keep doing it for case statements, for which this has
+ never been problematic, but not for case expressions in Ada 2012. */
if (choices_added_p)
{
- tree group = build_stmt_group (Statements (gnat_when), true);
+ const bool is_case_expression
+ = (Nkind (Parent (gnat_node)) == N_Expression_With_Actions);
+ tree group
+ = build_stmt_group (Statements (gnat_when), !is_case_expression);
bool group_may_fallthru = block_may_fallthru (group);
add_stmt (group);
if (group_may_fallthru)
@@ -7002,8 +7007,8 @@ gnat_to_gnu (Node_Id gnat_node)
/****************/
case N_Expression_With_Actions:
- /* This construct doesn't define a scope so we don't wrap the statement
- list in a BIND_EXPR; however, we wrap it in a SAVE_EXPR to protect it
+ /* This construct doesn't define a scope so we don't push a binding level
+ around the statement list; but we wrap it in a SAVE_EXPR to protect it
from unsharing. */
gnu_result = build_stmt_group (Actions (gnat_node), false);
gnu_result = build1 (SAVE_EXPR, void_type_node, gnu_result);
diff --git a/gcc/ada/indepsw-darwin.adb b/gcc/ada/indepsw-darwin.adb
new file mode 100644
index 00000000000..e25e9049200
--- /dev/null
+++ b/gcc/ada/indepsw-darwin.adb
@@ -0,0 +1,67 @@
+------------------------------------------------------------------------------
+-- --
+-- GNAT COMPILER COMPONENTS --
+-- --
+-- I N D E P S W --
+-- --
+-- B o d y --
+-- (Darwin version) --
+-- --
+-- Copyright (C) 2013, Free Software Foundation, Inc. --
+-- --
+-- GNAT is free software; you can redistribute it and/or modify it under --
+-- terms of the GNU General Public License as published by the Free Soft- --
+-- ware Foundation; either version 3, or (at your option) any later ver- --
+-- sion. GNAT is distributed in the hope that it will be useful, but WITH- --
+-- OUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY --
+-- or FITNESS FOR A PARTICULAR PURPOSE. --
+-- --
+-- As a special exception under Section 7 of GPL version 3, you are granted --
+-- additional permissions described in the GCC Runtime Library Exception, --
+-- version 3.1, as published by the Free Software Foundation. --
+-- --
+-- You should have received a copy of the GNU General Public License and --
+-- a copy of the GCC Runtime Library Exception along with this program; --
+-- see the files COPYING3 and COPYING.RUNTIME respectively. If not, see --
+-- <http://www.gnu.org/licenses/>. --
+-- --
+-- GNAT was originally developed by the GNAT team at New York University. --
+-- Extensive contributions were provided by Ada Core Technologies Inc. --
+-- --
+------------------------------------------------------------------------------
+
+-- This is the Darwin version
+
+package body Indepsw is
+
+ Map_Switch : aliased constant String := "-Wl,-map,";
+
+ -------------
+ -- Convert --
+ -------------
+
+ procedure Convert
+ (Switch : Switch_Kind;
+ Argument : String;
+ To : out String_List_Access)
+ is
+ begin
+ case Switch is
+ when Map_File =>
+ To := new Argument_List'(1 => new String'(Map_Switch & Argument));
+ end case;
+ end Convert;
+
+ ------------------
+ -- Is_Supported --
+ ------------------
+
+ function Is_Supported (Switch : Switch_Kind) return Boolean is
+ begin
+ case Switch is
+ when Map_File =>
+ return True;
+ end case;
+ end Is_Supported;
+
+end Indepsw;
diff --git a/gcc/alias.c b/gcc/alias.c
index 97815f00fba..ce53089fa19 100644
--- a/gcc/alias.c
+++ b/gcc/alias.c
@@ -2995,7 +2995,7 @@ init_alias_analysis (void)
/* Walk the insns adding values to the new_reg_base_value array. */
for (i = 0; i < rpo_cnt; i++)
{
- basic_block bb = BASIC_BLOCK (rpo[i]);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
FOR_BB_INSNS (bb, insn)
{
if (NONDEBUG_INSN_P (insn))
diff --git a/gcc/asan.c b/gcc/asan.c
index 2245d6dd807..1394e1314c5 100644
--- a/gcc/asan.c
+++ b/gcc/asan.c
@@ -52,6 +52,7 @@ along with GCC; see the file COPYING3. If not see
#include "cfgloop.h"
#include "gimple-builder.h"
#include "ubsan.h"
+#include "predict.h"
/* AddressSanitizer finds out-of-bounds and use-after-free bugs
with <2x slowdown on average.
@@ -1311,9 +1312,6 @@ report_error_func (bool is_store, int size_in_bytes)
return builtin_decl_implicit (report[is_store][exact_log2 (size_in_bytes)]);
}
-#define PROB_VERY_UNLIKELY (REG_BR_PROB_BASE / 2000 - 1)
-#define PROB_ALWAYS (REG_BR_PROB_BASE)
-
/* Split the current basic block and create a condition statement
insertion point right before or after the statement pointed to by
ITER. Return an iterator to the point at which the caller might
@@ -1634,7 +1632,7 @@ instrument_derefs (gimple_stmt_iterator *iter, tree t,
{
/* For static vars if they are known not to be dynamically
initialized, they will be always accessible. */
- struct varpool_node *vnode = varpool_get_node (inner);
+ varpool_node *vnode = varpool_get_node (inner);
if (vnode && !vnode->dynamically_initialized)
return;
}
@@ -2043,9 +2041,9 @@ transform_statements (void)
{
basic_block bb, last_bb = NULL;
gimple_stmt_iterator i;
- int saved_last_basic_block = last_basic_block;
+ int saved_last_basic_block = last_basic_block_for_fn (cfun);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
basic_block prev_bb = bb;
@@ -2214,7 +2212,7 @@ asan_add_global (tree decl, tree type, vec<constructor_elt, va_gc> *v)
fold_convert (const_ptr_type_node, str_cst));
CONSTRUCTOR_APPEND_ELT (vinner, NULL_TREE,
fold_convert (const_ptr_type_node, module_name_cst));
- struct varpool_node *vnode = varpool_get_node (decl);
+ varpool_node *vnode = varpool_get_node (decl);
int has_dynamic_init = vnode ? vnode->dynamically_initialized : 0;
CONSTRUCTOR_APPEND_ELT (vinner, NULL_TREE,
build_int_cst (uptr, has_dynamic_init));
@@ -2380,7 +2378,7 @@ static GTY(()) tree asan_ctor_statements;
void
asan_finish_file (void)
{
- struct varpool_node *vnode;
+ varpool_node *vnode;
unsigned HOST_WIDE_INT gcount = 0;
if (shadow_ptr_types[0] == NULL_TREE)
@@ -2559,7 +2557,7 @@ execute_sanopt (void)
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
diff --git a/gcc/auto-inc-dec.c b/gcc/auto-inc-dec.c
index 6006b70c085..be7fdf81f18 100644
--- a/gcc/auto-inc-dec.c
+++ b/gcc/auto-inc-dec.c
@@ -1480,7 +1480,7 @@ rest_of_handle_auto_inc_dec (void)
reg_next_use = XCNEWVEC (rtx, max_reg);
reg_next_inc_use = XCNEWVEC (rtx, max_reg);
reg_next_def = XCNEWVEC (rtx, max_reg);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
merge_in_block (max_reg, bb);
free (reg_next_use);
diff --git a/gcc/basic-block.h b/gcc/basic-block.h
index 58bacc33f87..3fa319b4d4c 100644
--- a/gcc/basic-block.h
+++ b/gcc/basic-block.h
@@ -314,26 +314,17 @@ struct GTY(()) control_flow_graph {
/* Defines for accessing the fields of the CFG structure for function FN. */
#define ENTRY_BLOCK_PTR_FOR_FN(FN) ((FN)->cfg->x_entry_block_ptr)
#define EXIT_BLOCK_PTR_FOR_FN(FN) ((FN)->cfg->x_exit_block_ptr)
-#define basic_block_info_for_function(FN) ((FN)->cfg->x_basic_block_info)
+#define basic_block_info_for_fn(FN) ((FN)->cfg->x_basic_block_info)
#define n_basic_blocks_for_fn(FN) ((FN)->cfg->x_n_basic_blocks)
#define n_edges_for_fn(FN) ((FN)->cfg->x_n_edges)
-#define last_basic_block_for_function(FN) ((FN)->cfg->x_last_basic_block)
-#define label_to_block_map_for_function(FN) ((FN)->cfg->x_label_to_block_map)
-#define profile_status_for_function(FN) ((FN)->cfg->x_profile_status)
+#define last_basic_block_for_fn(FN) ((FN)->cfg->x_last_basic_block)
+#define label_to_block_map_for_fn(FN) ((FN)->cfg->x_label_to_block_map)
+#define profile_status_for_fn(FN) ((FN)->cfg->x_profile_status)
-#define BASIC_BLOCK_FOR_FUNCTION(FN,N) \
- ((*basic_block_info_for_function (FN))[(N)])
-#define SET_BASIC_BLOCK_FOR_FUNCTION(FN,N,BB) \
- ((*basic_block_info_for_function (FN))[(N)] = (BB))
-
-/* Defines for textual backward source compatibility. */
-#define basic_block_info (cfun->cfg->x_basic_block_info)
-#define last_basic_block (cfun->cfg->x_last_basic_block)
-#define label_to_block_map (cfun->cfg->x_label_to_block_map)
-#define profile_status (cfun->cfg->x_profile_status)
-
-#define BASIC_BLOCK(N) ((*basic_block_info)[(N)])
-#define SET_BASIC_BLOCK(N,BB) ((*basic_block_info)[(N)] = (BB))
+#define BASIC_BLOCK_FOR_FN(FN,N) \
+ ((*basic_block_info_for_fn (FN))[(N)])
+#define SET_BASIC_BLOCK_FOR_FN(FN,N,BB) \
+ ((*basic_block_info_for_fn (FN))[(N)] = (BB))
/* For iterating over basic blocks. */
#define FOR_BB_BETWEEN(BB, FROM, TO, DIR) \
@@ -342,13 +333,9 @@ struct GTY(()) control_flow_graph {
#define FOR_EACH_BB_FN(BB, FN) \
FOR_BB_BETWEEN (BB, (FN)->cfg->x_entry_block_ptr->next_bb, (FN)->cfg->x_exit_block_ptr, next_bb)
-#define FOR_EACH_BB(BB) FOR_EACH_BB_FN (BB, cfun)
-
#define FOR_EACH_BB_REVERSE_FN(BB, FN) \
FOR_BB_BETWEEN (BB, (FN)->cfg->x_exit_block_ptr->prev_bb, (FN)->cfg->x_entry_block_ptr, prev_bb)
-#define FOR_EACH_BB_REVERSE(BB) FOR_EACH_BB_REVERSE_FN (BB, cfun)
-
/* For iterating over insns in basic block. */
#define FOR_BB_INSNS(BB, INSN) \
for ((INSN) = BB_HEAD (BB); \
@@ -375,9 +362,6 @@ struct GTY(()) control_flow_graph {
/* Cycles through _all_ basic blocks, even the fake ones (entry and
exit block). */
-#define FOR_ALL_BB(BB) \
- for (BB = ENTRY_BLOCK_PTR_FOR_FN (cfun); BB; BB = BB->next_bb)
-
#define FOR_ALL_BB_FN(BB, FN) \
for (BB = ENTRY_BLOCK_PTR_FOR_FN (FN); BB; BB = BB->next_bb)
@@ -434,7 +418,7 @@ extern void scale_bbs_frequencies_gcov_type (basic_block *, int, gcov_type,
needs to be in a public file in case the IFCVT macros call
functions passing the ce_if_block data structure. */
-typedef struct ce_if_block
+struct ce_if_block
{
basic_block test_bb; /* First test block. */
basic_block then_bb; /* THEN block. */
@@ -449,7 +433,7 @@ typedef struct ce_if_block
int num_then_insns; /* # of insns in THEN block. */
int num_else_insns; /* # of insns in ELSE block. */
int pass; /* Pass number. */
-} ce_if_block_t;
+};
/* This structure maintains an edge list vector. */
/* FIXME: Make this a vec<edge>. */
@@ -580,10 +564,10 @@ single_pred (const_basic_block bb)
/* Iterator object for edges. */
-typedef struct {
+struct edge_iterator {
unsigned index;
vec<edge, va_gc> **container;
-} edge_iterator;
+};
static inline vec<edge, va_gc> *
ei_container (edge_iterator i)
@@ -797,8 +781,8 @@ extern int pre_and_rev_post_order_compute (int *, int *, bool);
extern int dfs_enumerate_from (basic_block, int,
bool (*)(const_basic_block, const void *),
basic_block *, int, const void *);
-extern void compute_dominance_frontiers (struct bitmap_head_def *);
-extern bitmap compute_idf (bitmap, struct bitmap_head_def *);
+extern void compute_dominance_frontiers (struct bitmap_head *);
+extern bitmap compute_idf (bitmap, struct bitmap_head *);
extern basic_block * single_pred_before_succ_order (void);
/* In cfgrtl.c */
diff --git a/gcc/bb-reorder.c b/gcc/bb-reorder.c
index fc7b5b758ea..7f8ea075e1b 100644
--- a/gcc/bb-reorder.c
+++ b/gcc/bb-reorder.c
@@ -826,12 +826,13 @@ copy_bb (basic_block old_bb, edge e, basic_block bb, int trace)
"Duplicated bb %d (created bb %d)\n",
old_bb->index, new_bb->index);
- if (new_bb->index >= array_size || last_basic_block > array_size)
+ if (new_bb->index >= array_size
+ || last_basic_block_for_fn (cfun) > array_size)
{
int i;
int new_size;
- new_size = MAX (last_basic_block, new_bb->index + 1);
+ new_size = MAX (last_basic_block_for_fn (cfun), new_bb->index + 1);
new_size = GET_ARRAY_SIZE (new_size);
bbd = XRESIZEVEC (bbro_basic_block_data, bbd, new_size);
for (i = array_size; i < new_size; i++)
@@ -1565,7 +1566,7 @@ find_rarely_executed_basic_blocks_and_crossing_edges (void)
vec<basic_block> bbs_in_hot_partition = vNULL;
/* Mark which partition (hot/cold) each basic block belongs in. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
bool cold_bb = false;
@@ -1657,7 +1658,7 @@ find_rarely_executed_basic_blocks_and_crossing_edges (void)
/* Mark every edge that crosses between sections. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
FOR_EACH_EDGE (e, ei, bb->succs)
{
unsigned int flags = e->flags;
@@ -1690,7 +1691,7 @@ set_edge_can_fallthru_flag (void)
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
edge e;
edge_iterator ei;
@@ -1791,7 +1792,7 @@ fix_up_fall_thru_edges (void)
rtx old_jump;
rtx fall_thru_label;
- FOR_EACH_BB (cur_bb)
+ FOR_EACH_BB_FN (cur_bb, cfun)
{
fall_thru = NULL;
if (EDGE_COUNT (cur_bb->succs) > 0)
@@ -1991,7 +1992,7 @@ fix_crossing_conditional_branches (void)
rtx old_label = NULL_RTX;
rtx new_label;
- FOR_EACH_BB (cur_bb)
+ FOR_EACH_BB_FN (cur_bb, cfun)
{
crossing_edge = NULL;
if (EDGE_COUNT (cur_bb->succs) > 0)
@@ -2122,7 +2123,7 @@ fix_crossing_unconditional_branches (void)
rtx cur_insn;
edge succ;
- FOR_EACH_BB (cur_bb)
+ FOR_EACH_BB_FN (cur_bb, cfun)
{
last_insn = BB_END (cur_bb);
@@ -2200,7 +2201,7 @@ add_reg_crossing_jump_notes (void)
edge e;
edge_iterator ei;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
FOR_EACH_EDGE (e, ei, bb->succs)
if ((e->flags & EDGE_CROSSING)
&& JUMP_P (BB_END (e->src))
@@ -2234,7 +2235,7 @@ reorder_basic_blocks (void)
uncond_jump_length = get_uncond_jump_length ();
/* We need to know some information for each basic block. */
- array_size = GET_ARRAY_SIZE (last_basic_block);
+ array_size = GET_ARRAY_SIZE (last_basic_block_for_fn (cfun));
bbd = XNEWVEC (bbro_basic_block_data, array_size);
for (i = 0; i < array_size; i++)
{
@@ -2285,7 +2286,7 @@ insert_section_boundary_note (void)
if (!crtl->has_bb_partition)
return;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
if (!current_partition)
current_partition = BB_PARTITION (bb);
@@ -2320,7 +2321,7 @@ rest_of_handle_reorder_blocks (void)
reorder_basic_blocks ();
cleanup_cfg (CLEANUP_EXPENSIVE);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
bb->aux = bb->next_bb;
cfg_layout_finalize ();
@@ -2409,7 +2410,7 @@ duplicate_computed_gotos (void)
/* Look for blocks that end in a computed jump, and see if such blocks
are suitable for unfactoring. If a block is a candidate for unfactoring,
mark it in the candidates. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn;
edge e;
@@ -2456,7 +2457,7 @@ duplicate_computed_gotos (void)
goto done;
/* Duplicate computed gotos. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
if (bb->flags & BB_VISITED)
continue;
diff --git a/gcc/bitmap.c b/gcc/bitmap.c
index ecaca42d00e..f1a845915f9 100644
--- a/gcc/bitmap.c
+++ b/gcc/bitmap.c
@@ -244,7 +244,7 @@ bitmap_element_allocate (bitmap head)
/* Inner list was just a singleton. */
bitmap_ggc_free = element->prev;
else
- element = ggc_alloc_bitmap_element_def ();
+ element = ggc_alloc_bitmap_element ();
}
if (GATHER_STATISTICS)
@@ -370,7 +370,7 @@ bitmap_obstack_alloc_stat (bitmap_obstack *bit_obstack MEM_STAT_DECL)
bit_obstack = &bitmap_default_obstack;
map = bit_obstack->heads;
if (map)
- bit_obstack->heads = (struct bitmap_head_def *) map->first;
+ bit_obstack->heads = (struct bitmap_head *) map->first;
else
map = XOBNEW (&bit_obstack->obstack, bitmap_head);
bitmap_initialize_stat (map, bit_obstack PASS_MEM_STAT);
@@ -388,7 +388,7 @@ bitmap_gc_alloc_stat (ALONE_MEM_STAT_DECL)
{
bitmap map;
- map = ggc_alloc_bitmap_head_def ();
+ map = ggc_alloc_bitmap_head ();
bitmap_initialize_stat (map, NULL PASS_MEM_STAT);
if (GATHER_STATISTICS)
@@ -2207,13 +2207,13 @@ dump_bitmap_statistics (void)
}
DEBUG_FUNCTION void
-debug (const bitmap_head_def &ref)
+debug (const bitmap_head &ref)
{
dump_bitmap (stderr, &ref);
}
DEBUG_FUNCTION void
-debug (const bitmap_head_def *ptr)
+debug (const bitmap_head *ptr)
{
if (ptr)
debug (*ptr);
diff --git a/gcc/bitmap.h b/gcc/bitmap.h
index b3cb5da98e9..2c14080e089 100644
--- a/gcc/bitmap.h
+++ b/gcc/bitmap.h
@@ -24,7 +24,7 @@ along with GCC; see the file COPYING3. If not see
This sparse set representation is suitable for sparse sets with an
unknown (a priori) universe. The set is represented as a double-linked
- list of container nodes (struct bitmap_element_def). Each node consists
+ list of container nodes (struct bitmap_element). Each node consists
of an index for the first member that could be held in the container,
a small array of integers that represent the members in the container,
and pointers to the next and previous element in the linked list. The
@@ -149,11 +149,11 @@ typedef unsigned long BITMAP_WORD;
#define BITMAP_ELEMENT_ALL_BITS (BITMAP_ELEMENT_WORDS * BITMAP_WORD_BITS)
/* Obstack for allocating bitmaps and elements from. */
-typedef struct GTY (()) bitmap_obstack {
- struct bitmap_element_def *elements;
- struct bitmap_head_def *heads;
+struct GTY (()) bitmap_obstack {
+ struct bitmap_element *elements;
+ struct bitmap_head *heads;
struct obstack GTY ((skip)) obstack;
-} bitmap_obstack;
+};
/* Bitmap set element. We use a linked list to hold only the bits that
are set. This allows for use to grow the bitset dynamically without
@@ -167,17 +167,17 @@ typedef struct GTY (()) bitmap_obstack {
bitmap_elt_clear_from to be implemented in unit time rather than
linear in the number of elements to be freed. */
-typedef struct GTY((chain_next ("%h.next"), chain_prev ("%h.prev"))) bitmap_element_def {
- struct bitmap_element_def *next; /* Next element. */
- struct bitmap_element_def *prev; /* Previous element. */
+struct GTY((chain_next ("%h.next"), chain_prev ("%h.prev"))) bitmap_element {
+ struct bitmap_element *next; /* Next element. */
+ struct bitmap_element *prev; /* Previous element. */
unsigned int indx; /* regno/BITMAP_ELEMENT_ALL_BITS. */
BITMAP_WORD bits[BITMAP_ELEMENT_WORDS]; /* Bits that are set. */
-} bitmap_element;
+};
/* Head of bitmap linked list. The 'current' member points to something
already pointed to by the chain started by first, so GTY((skip)) it. */
-typedef struct GTY(()) bitmap_head_def {
+struct GTY(()) bitmap_head {
unsigned int indx; /* Index of last element looked at. */
unsigned int descriptor_id; /* Unique identifier for the allocation
site of this bitmap, for detailed
@@ -186,7 +186,7 @@ typedef struct GTY(()) bitmap_head_def {
bitmap_element * GTY((skip(""))) current; /* Last element looked at. */
bitmap_obstack *obstack; /* Obstack to allocate elements from.
If NULL, then use GGC allocation. */
-} bitmap_head;
+};
/* Global data */
extern bitmap_element bitmap_zero_bits; /* Zero bitmap element */
@@ -293,8 +293,8 @@ inline void dump_bitmap (FILE *file, const_bitmap map)
{
bitmap_print (file, map, "", "\n");
}
-extern void debug (const bitmap_head_def &ref);
-extern void debug (const bitmap_head_def *ptr);
+extern void debug (const bitmap_head &ref);
+extern void debug (const bitmap_head *ptr);
extern unsigned bitmap_first_set_bit (const_bitmap);
extern unsigned bitmap_last_set_bit (const_bitmap);
@@ -314,7 +314,7 @@ extern hashval_t bitmap_hash (const_bitmap);
/* Iterator for bitmaps. */
-typedef struct
+struct bitmap_iterator
{
/* Pointer to the current bitmap element. */
bitmap_element *elt1;
@@ -329,7 +329,7 @@ typedef struct
it is shifted right, so that the actual bit is always the least
significant bit of ACTUAL. */
BITMAP_WORD bits;
-} bitmap_iterator;
+};
/* Initialize a single bitmap iterator. START_BIT is the first bit to
iterate from. */
diff --git a/gcc/bt-load.c b/gcc/bt-load.c
index 09eea06e379..83b3ebabee2 100644
--- a/gcc/bt-load.c
+++ b/gcc/bt-load.c
@@ -457,10 +457,10 @@ compute_defs_uses_and_gen (fibheap_t all_btr_defs, btr_def *def_array,
btr_def_group all_btr_def_groups = NULL;
defs_uses_info info;
- bitmap_vector_clear (bb_gen, last_basic_block);
- for (i = NUM_FIXED_BLOCKS; i < last_basic_block; i++)
+ bitmap_vector_clear (bb_gen, last_basic_block_for_fn (cfun));
+ for (i = NUM_FIXED_BLOCKS; i < last_basic_block_for_fn (cfun); i++)
{
- basic_block bb = BASIC_BLOCK (i);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
int reg;
btr_def defs_this_bb = NULL;
rtx insn;
@@ -618,8 +618,8 @@ compute_kill (sbitmap *bb_kill, sbitmap *btr_defset,
/* For each basic block, form the set BB_KILL - the set
of definitions that the block kills. */
- bitmap_vector_clear (bb_kill, last_basic_block);
- for (i = NUM_FIXED_BLOCKS; i < last_basic_block; i++)
+ bitmap_vector_clear (bb_kill, last_basic_block_for_fn (cfun));
+ for (i = NUM_FIXED_BLOCKS; i < last_basic_block_for_fn (cfun); i++)
{
for (regno = first_btr; regno <= last_btr; regno++)
if (TEST_HARD_REG_BIT (all_btrs, regno)
@@ -642,16 +642,16 @@ compute_out (sbitmap *bb_out, sbitmap *bb_gen, sbitmap *bb_kill, int max_uid)
int changed;
sbitmap bb_in = sbitmap_alloc (max_uid);
- for (i = NUM_FIXED_BLOCKS; i < last_basic_block; i++)
+ for (i = NUM_FIXED_BLOCKS; i < last_basic_block_for_fn (cfun); i++)
bitmap_copy (bb_out[i], bb_gen[i]);
changed = 1;
while (changed)
{
changed = 0;
- for (i = NUM_FIXED_BLOCKS; i < last_basic_block; i++)
+ for (i = NUM_FIXED_BLOCKS; i < last_basic_block_for_fn (cfun); i++)
{
- bitmap_union_of_preds (bb_in, bb_out, BASIC_BLOCK (i));
+ bitmap_union_of_preds (bb_in, bb_out, BASIC_BLOCK_FOR_FN (cfun, i));
changed |= bitmap_ior_and_compl (bb_out[i], bb_gen[i],
bb_in, bb_kill[i]);
}
@@ -668,13 +668,13 @@ link_btr_uses (btr_def *def_array, btr_user *use_array, sbitmap *bb_out,
/* Link uses to the uses lists of all of their reaching defs.
Count up the number of reaching defs of each use. */
- for (i = NUM_FIXED_BLOCKS; i < last_basic_block; i++)
+ for (i = NUM_FIXED_BLOCKS; i < last_basic_block_for_fn (cfun); i++)
{
- basic_block bb = BASIC_BLOCK (i);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
rtx insn;
rtx last;
- bitmap_union_of_preds (reaching_defs, bb_out, BASIC_BLOCK (i));
+ bitmap_union_of_preds (reaching_defs, bb_out, BASIC_BLOCK_FOR_FN (cfun, i));
for (insn = BB_HEAD (bb), last = NEXT_INSN (BB_END (bb));
insn != last;
insn = NEXT_INSN (insn))
@@ -780,8 +780,10 @@ build_btr_def_use_webs (fibheap_t all_btr_defs)
btr_user *use_array = XCNEWVEC (btr_user, max_uid);
sbitmap *btr_defset = sbitmap_vector_alloc (
(last_btr - first_btr) + 1, max_uid);
- sbitmap *bb_gen = sbitmap_vector_alloc (last_basic_block, max_uid);
- HARD_REG_SET *btrs_written = XCNEWVEC (HARD_REG_SET, last_basic_block);
+ sbitmap *bb_gen = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
+ max_uid);
+ HARD_REG_SET *btrs_written = XCNEWVEC (HARD_REG_SET,
+ last_basic_block_for_fn (cfun));
sbitmap *bb_kill;
sbitmap *bb_out;
@@ -790,11 +792,11 @@ build_btr_def_use_webs (fibheap_t all_btr_defs)
compute_defs_uses_and_gen (all_btr_defs, def_array, use_array, btr_defset,
bb_gen, btrs_written);
- bb_kill = sbitmap_vector_alloc (last_basic_block, max_uid);
+ bb_kill = sbitmap_vector_alloc (last_basic_block_for_fn (cfun), max_uid);
compute_kill (bb_kill, btr_defset, btrs_written);
free (btrs_written);
- bb_out = sbitmap_vector_alloc (last_basic_block, max_uid);
+ bb_out = sbitmap_vector_alloc (last_basic_block_for_fn (cfun), max_uid);
compute_out (bb_out, bb_gen, bb_kill, max_uid);
sbitmap_vector_free (bb_gen);
@@ -814,13 +816,14 @@ build_btr_def_use_webs (fibheap_t all_btr_defs)
static int
block_at_edge_of_live_range_p (int bb, btr_def def)
{
- if (def->other_btr_uses_before_def && BASIC_BLOCK (bb) == def->bb)
+ if (def->other_btr_uses_before_def
+ && BASIC_BLOCK_FOR_FN (cfun, bb) == def->bb)
return 1;
else if (def->other_btr_uses_after_use)
{
btr_user user;
for (user = def->uses; user != NULL; user = user->next)
- if (BASIC_BLOCK (bb) == user->bb)
+ if (BASIC_BLOCK_FOR_FN (cfun, bb) == user->bb)
return 1;
}
return 0;
@@ -1404,9 +1407,9 @@ migrate_btr_defs (enum reg_class btr_class, int allow_callee_save)
{
int i;
- for (i = NUM_FIXED_BLOCKS; i < last_basic_block; i++)
+ for (i = NUM_FIXED_BLOCKS; i < last_basic_block_for_fn (cfun); i++)
{
- basic_block bb = BASIC_BLOCK (i);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
fprintf (dump_file,
"Basic block %d: count = " HOST_WIDEST_INT_PRINT_DEC
" loop-depth = %d idom = %d\n",
@@ -1427,8 +1430,8 @@ migrate_btr_defs (enum reg_class btr_class, int allow_callee_save)
first_btr = reg;
}
- btrs_live = XCNEWVEC (HARD_REG_SET, last_basic_block);
- btrs_live_at_end = XCNEWVEC (HARD_REG_SET, last_basic_block);
+ btrs_live = XCNEWVEC (HARD_REG_SET, last_basic_block_for_fn (cfun));
+ btrs_live_at_end = XCNEWVEC (HARD_REG_SET, last_basic_block_for_fn (cfun));
build_btr_def_use_webs (all_btr_defs);
diff --git a/gcc/c-family/ChangeLog b/gcc/c-family/ChangeLog
index 616a74209c0..462b4b18dd0 100644
--- a/gcc/c-family/ChangeLog
+++ b/gcc/c-family/ChangeLog
@@ -1,3 +1,44 @@
+2013-12-11 Balaji V. Iyer <balaji.v.iyer@intel.com>
+
+ * cilk.c (cilk_outline): Made this function non-static.
+ (gimplify_cilk_spawn): Removed pre_p and post_p arguments.
+ (create_cilk_wrapper): Added a new parameter: a function pointer.
+ (c_install_body_w_frame_cleanup): Remove
+ (extract_free_variables): Added VEC_INIT_EXPR and CONSTRUCTOR case.
+ * c-common.h (cilk_outline): New prototype.
+ (gimplify_cilk_spawn): Removed two parameters.
+ (cilk_install_body_with_frame_cleanup): New prototype.
+ * c-gimplify.c (c_gimplify_expr): Added MODIFY_EXPR, CALL_EXPR and
+ CILK_SPAWN_STMT case.
+
+2013-12-11 Bernd Schmidt <bernds@codesourcery.com>
+
+ * c-common.c (c_fully_fold_internal): Handle ADDR_SPACE_CONVERT_EXPR.
+
+ * c-common.h (enum c_tree_index): Remove CTI_INT_ARRAY_TYPE.
+ (int_array_type_node): Remove.
+ * c-common.c (c_common_nodes_and_builtins): Don't build it.
+
+2013-12-05 Marek Polacek <polacek@redhat.com>
+
+ PR c/52023
+ * c-common.c (c_sizeof_or_alignof_type): Move a declaration into
+ [ADJUST_FIELD_ALIGN].
+
+2013-12-04 Joseph Myers <joseph@codesourcery.com>
+
+ PR c/52023
+ * c-common.c (c_sizeof_or_alignof_type): Add parameter min_alignof
+ and check field alignment if set.
+ * c-common.h (c_sizeof_or_alignof_type): Update prototype.
+ (c_sizeof, c_alignof): Update calls to c_sizeof_or_alignof_type.
+
+2013-12-04 Jakub Jelinek <jakub@redhat.com>
+ Marek Polacek <polacek@redhat.com>
+
+ * c-gimplify.c (c_gimplify_expr): If doing the integer-overflow
+ sanitization, call unsigned_type_for only when !TYPE_OVERFLOW_WRAPS.
+
2013-11-29 H.J. Lu <hongjiu.lu@intel.com>
PR c/59309
diff --git a/gcc/c-family/c-common.c b/gcc/c-family/c-common.c
index da4837c1dfe..b8dac842cf8 100644
--- a/gcc/c-family/c-common.c
+++ b/gcc/c-family/c-common.c
@@ -122,11 +122,6 @@ cpp_reader *parse_in; /* Declared in c-pragma.h. */
tree char_array_type_node;
- Type `int[SOMENUMBER]' or something like it.
- Used when an array of int needed and the size is irrelevant.
-
- tree int_array_type_node;
-
Type `wchar_t[SOMENUMBER]' or something like it.
Used when a wide string literal is created.
@@ -1329,6 +1324,7 @@ c_fully_fold_internal (tree expr, bool in_init, bool *maybe_const_operands,
case FIX_TRUNC_EXPR:
case FLOAT_EXPR:
CASE_CONVERT:
+ case ADDR_SPACE_CONVERT_EXPR:
case VIEW_CONVERT_EXPR:
case NON_LVALUE_EXPR:
case NEGATE_EXPR:
@@ -4914,14 +4910,17 @@ c_common_get_alias_set (tree t)
}
/* Compute the value of 'sizeof (TYPE)' or '__alignof__ (TYPE)', where
- the second parameter indicates which OPERATOR is being applied.
+ the IS_SIZEOF parameter indicates which operator is being applied.
The COMPLAIN flag controls whether we should diagnose possibly
ill-formed constructs or not. LOC is the location of the SIZEOF or
- TYPEOF operator. */
+ TYPEOF operator. If MIN_ALIGNOF, the least alignment required for
+ a type in any context should be returned, rather than the normal
+ alignment for that type. */
tree
c_sizeof_or_alignof_type (location_t loc,
- tree type, bool is_sizeof, int complain)
+ tree type, bool is_sizeof, bool min_alignof,
+ int complain)
{
const char *op_name;
tree value = NULL;
@@ -4987,6 +4986,22 @@ c_sizeof_or_alignof_type (location_t loc,
value = size_binop_loc (loc, CEIL_DIV_EXPR, TYPE_SIZE_UNIT (type),
size_int (TYPE_PRECISION (char_type_node)
/ BITS_PER_UNIT));
+ else if (min_alignof)
+ {
+ unsigned int align = TYPE_ALIGN (type);
+ align = MIN (align, BIGGEST_ALIGNMENT);
+#ifdef BIGGEST_FIELD_ALIGNMENT
+ align = MIN (align, BIGGEST_FIELD_ALIGNMENT);
+#endif
+ unsigned int field_align = align;
+#ifdef ADJUST_FIELD_ALIGN
+ tree field = build_decl (UNKNOWN_LOCATION, FIELD_DECL, NULL_TREE,
+ type);
+ field_align = ADJUST_FIELD_ALIGN (field, field_align);
+#endif
+ align = MIN (align, field_align);
+ value = size_int (align / BITS_PER_UNIT);
+ }
else
value = size_int (TYPE_ALIGN_UNIT (type));
}
@@ -5493,10 +5508,6 @@ c_common_nodes_and_builtins (void)
char_array_type_node
= build_array_type (char_type_node, array_domain_type);
- /* Likewise for arrays of ints. */
- int_array_type_node
- = build_array_type (integer_type_node, array_domain_type);
-
string_type_node = build_pointer_type (char_type_node);
const_string_type_node
= build_pointer_type (build_qualified_type
diff --git a/gcc/c-family/c-common.h b/gcc/c-family/c-common.h
index 664e9287a1b..4357d1fab93 100644
--- a/gcc/c-family/c-common.h
+++ b/gcc/c-family/c-common.h
@@ -281,7 +281,6 @@ enum c_tree_index
CTI_CHAR16_ARRAY_TYPE,
CTI_CHAR32_ARRAY_TYPE,
CTI_WCHAR_ARRAY_TYPE,
- CTI_INT_ARRAY_TYPE,
CTI_STRING_TYPE,
CTI_CONST_STRING_TYPE,
@@ -421,7 +420,6 @@ extern const unsigned int num_c_common_reswords;
#define char16_array_type_node c_global_trees[CTI_CHAR16_ARRAY_TYPE]
#define char32_array_type_node c_global_trees[CTI_CHAR32_ARRAY_TYPE]
#define wchar_array_type_node c_global_trees[CTI_WCHAR_ARRAY_TYPE]
-#define int_array_type_node c_global_trees[CTI_INT_ARRAY_TYPE]
#define string_type_node c_global_trees[CTI_STRING_TYPE]
#define const_string_type_node c_global_trees[CTI_CONST_STRING_TYPE]
@@ -759,7 +757,7 @@ extern tree c_wrap_maybe_const (tree, bool);
extern tree c_save_expr (tree);
extern tree c_common_truthvalue_conversion (location_t, tree);
extern void c_apply_type_quals_to_decl (int, tree);
-extern tree c_sizeof_or_alignof_type (location_t, tree, bool, int);
+extern tree c_sizeof_or_alignof_type (location_t, tree, bool, bool, int);
extern tree c_alignof_expr (location_t, tree);
/* Print an error message for invalid operands to arith operation CODE.
NOP_EXPR is used as a special case (see truthvalue_conversion). */
@@ -792,8 +790,8 @@ extern bool keyword_is_type_qualifier (enum rid);
extern bool keyword_is_decl_specifier (enum rid);
extern bool cxx_fundamental_alignment_p (unsigned);
-#define c_sizeof(LOC, T) c_sizeof_or_alignof_type (LOC, T, true, 1)
-#define c_alignof(LOC, T) c_sizeof_or_alignof_type (LOC, T, false, 1)
+#define c_sizeof(LOC, T) c_sizeof_or_alignof_type (LOC, T, true, false, 1)
+#define c_alignof(LOC, T) c_sizeof_or_alignof_type (LOC, T, false, false, 1)
/* Subroutine of build_binary_op, used for certain operations. */
extern tree shorten_binary_op (tree result_type, tree op0, tree op1, bool bitwise);
@@ -1378,8 +1376,8 @@ extern vec <tree, va_gc> *fix_sec_implicit_args
/* In cilk.c. */
extern tree insert_cilk_frame (tree);
extern void cilk_init_builtins (void);
-extern int gimplify_cilk_spawn (tree *, gimple_seq *, gimple_seq *);
-extern void c_cilk_install_body_w_frame_cleanup (tree, tree);
+extern int gimplify_cilk_spawn (tree *);
+extern void cilk_install_body_with_frame_cleanup (tree, tree, void *);
extern bool cilk_detect_spawn_and_unwrap (tree *);
extern bool cilk_set_spawn_marker (location_t, tree);
extern tree build_cilk_sync (void);
@@ -1387,5 +1385,5 @@ extern tree build_cilk_spawn (location_t, tree);
extern tree make_cilk_frame (tree);
extern tree create_cilk_function_exit (tree, bool, bool);
extern tree cilk_install_body_pedigree_operations (tree);
-
+extern void cilk_outline (tree, tree *, void *);
#endif /* ! GCC_C_COMMON_H */
diff --git a/gcc/c-family/c-gimplify.c b/gcc/c-family/c-gimplify.c
index d047c65b4ee..b919737b8ee 100644
--- a/gcc/c-family/c-gimplify.c
+++ b/gcc/c-family/c-gimplify.c
@@ -44,7 +44,7 @@ along with GCC; see the file COPYING3. If not see
#include "dumpfile.h"
#include "c-pretty-print.h"
#include "cgraph.h"
-
+#include "cilk.h"
/* The gimplification pass converts the language-dependent trees
(ld-trees) emitted by the parser into language-independent trees
@@ -199,12 +199,34 @@ c_gimplify_expr (tree *expr_p, gimple_seq *pre_p ATTRIBUTE_UNUSED,
tree type = TREE_TYPE (TREE_OPERAND (*expr_p, 0));
if (INTEGRAL_TYPE_P (type) && c_promoting_integer_type_p (type))
{
- if (TYPE_OVERFLOW_UNDEFINED (type))
+ if (TYPE_OVERFLOW_UNDEFINED (type)
+ || ((flag_sanitize & SANITIZE_SI_OVERFLOW)
+ && !TYPE_OVERFLOW_WRAPS (type)))
type = unsigned_type_for (type);
return gimplify_self_mod_expr (expr_p, pre_p, post_p, 1, type);
}
break;
}
+
+ case CILK_SPAWN_STMT:
+ gcc_assert
+ (fn_contains_cilk_spawn_p (cfun)
+ && cilk_detect_spawn_and_unwrap (expr_p));
+
+ /* If errors are seen, then just process it as a CALL_EXPR. */
+ if (!seen_error ())
+ return (enum gimplify_status) gimplify_cilk_spawn (expr_p);
+
+ case MODIFY_EXPR:
+ case INIT_EXPR:
+ case CALL_EXPR:
+ if (fn_contains_cilk_spawn_p (cfun)
+ && cilk_detect_spawn_and_unwrap (expr_p)
+ /* If an error is found, the spawn wrapper is removed and the
+ original expression (MODIFY/INIT/CALL_EXPR) is processes as
+ it is supposed to be. */
+ && !seen_error ())
+ return (enum gimplify_status) gimplify_cilk_spawn (expr_p);
default:;
}
diff --git a/gcc/c-family/cilk.c b/gcc/c-family/cilk.c
index 13b27f1787b..1cc000a75ac 100644
--- a/gcc/c-family/cilk.c
+++ b/gcc/c-family/cilk.c
@@ -477,9 +477,10 @@ wrapper_local_cb (const void *k_v, void **vp, void *data)
/* Alter a tree STMT from OUTER_FN to form the body of INNER_FN. */
-static void
-cilk_outline (tree inner_fn, tree *stmt_p, struct wrapper_data *wd)
+void
+cilk_outline (tree inner_fn, tree *stmt_p, void *w)
{
+ struct wrapper_data *wd = (struct wrapper_data *) w;
const tree outer_fn = wd->context;
const bool nested = (wd->type == CILK_BLOCK_FOR);
copy_body_data id;
@@ -512,8 +513,7 @@ cilk_outline (tree inner_fn, tree *stmt_p, struct wrapper_data *wd)
/* We don't want the private variables any more. */
pointer_map_traverse (wd->decl_map, nested ? for_local_cb : wrapper_local_cb,
&id);
-
- walk_tree (stmt_p, copy_tree_body_r, &id, NULL);
+ walk_tree (stmt_p, copy_tree_body_r, (void *) &id, NULL);
/* See if this function can throw or calls something that should
not be spawned. The exception part is only necessary if
@@ -554,10 +554,8 @@ create_cilk_wrapper_body (tree stmt, struct wrapper_data *wd)
for (p = wd->parms; p; p = TREE_CHAIN (p))
DECL_CONTEXT (p) = fndecl;
- cilk_outline (fndecl, &stmt, wd);
- stmt = fold_build_cleanup_point_expr (void_type_node, stmt);
gcc_assert (!DECL_SAVED_TREE (fndecl));
- lang_hooks.cilkplus.install_body_with_frame_cleanup (fndecl, stmt);
+ cilk_install_body_with_frame_cleanup (fndecl, stmt, (void *) wd);
gcc_assert (DECL_SAVED_TREE (fndecl));
pop_cfun_to (outer);
@@ -732,8 +730,7 @@ create_cilk_wrapper (tree exp, tree *args_out)
and GS_UNHANDLED, otherwise. */
int
-gimplify_cilk_spawn (tree *spawn_p, gimple_seq *before ATTRIBUTE_UNUSED,
- gimple_seq *after ATTRIBUTE_UNUSED)
+gimplify_cilk_spawn (tree *spawn_p)
{
tree expr = *spawn_p;
tree function, call1, call2, new_args;
@@ -877,30 +874,6 @@ cilk_install_body_pedigree_operations (tree frame_ptr)
return body_list;
}
-/* Inserts "cleanup" functions after the function-body of FNDECL. FNDECL is a
- spawn-helper and BODY is the newly created body for FNDECL. */
-
-void
-c_cilk_install_body_w_frame_cleanup (tree fndecl, tree body)
-{
- tree list = alloc_stmt_list ();
- tree frame = make_cilk_frame (fndecl);
- tree dtor = create_cilk_function_exit (frame, false, true);
- add_local_decl (cfun, frame);
-
- DECL_SAVED_TREE (fndecl) = list;
- tree frame_ptr = build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (frame)),
- frame);
- tree body_list = cilk_install_body_pedigree_operations (frame_ptr);
- gcc_assert (TREE_CODE (body_list) == STATEMENT_LIST);
-
- tree detach_expr = build_call_expr (cilk_detach_fndecl, 1, frame_ptr);
- append_to_statement_list (detach_expr, &body_list);
- append_to_statement_list (body, &body_list);
- append_to_statement_list (build_stmt (EXPR_LOCATION (body), TRY_FINALLY_EXPR,
- body_list, dtor), &list);
-}
-
/* Add a new variable, VAR to a variable list in WD->DECL_MAP. HOW indicates
whether the variable is previously defined, currently defined, or a variable
that is being written to. */
@@ -1061,6 +1034,7 @@ extract_free_variables (tree t, struct wrapper_data *wd,
extract_free_variables (TREE_OPERAND (t, 0), wd, ADD_READ);
return;
+ case VEC_INIT_EXPR:
case INIT_EXPR:
extract_free_variables (TREE_OPERAND (t, 0), wd, ADD_BIND);
extract_free_variables (TREE_OPERAND (t, 1), wd, ADD_READ);
@@ -1221,6 +1195,15 @@ extract_free_variables (tree t, struct wrapper_data *wd,
break;
}
+ case CONSTRUCTOR:
+ {
+ unsigned HOST_WIDE_INT idx = 0;
+ constructor_elt *ce;
+ for (idx = 0; vec_safe_iterate (CONSTRUCTOR_ELTS (t), idx, &ce); idx++)
+ extract_free_variables (ce->value, wd, ADD_READ);
+ break;
+ }
+
default:
if (is_expr)
{
@@ -1237,7 +1220,6 @@ extract_free_variables (tree t, struct wrapper_data *wd,
}
}
-
/* Add appropriate frames needed for a Cilk spawned function call, FNDECL.
Returns the __cilkrts_stack_frame * variable. */
diff --git a/gcc/c/ChangeLog b/gcc/c/ChangeLog
index b97e65ec249..17ca2c5d8dd 100644
--- a/gcc/c/ChangeLog
+++ b/gcc/c/ChangeLog
@@ -1,3 +1,24 @@
+2013-12-11 Balaji V. Iyer <balaji.v.iyer@intel.com>
+
+ * c-objc-common.h (LANG_HOOKS_CILKPLUS_FRAME_CLEANUP): Remove.
+ (LANG_HOOKS_CILKPLUS_DETECT_SPAWN_AND_UNWRAP): Likewise.
+ (LANG_HOOKS_CILKPLUS_CILKPLUS_GIMPLIFY_SPAWN): Likewise.
+ * c-typeck.c (cilk_install_body_with_frame_cleanup): New function.
+
+2013-12-04 Joseph Myers <joseph@codesourcery.com>
+
+ PR c/52023
+ * c-parser.c (c_parser_alignas_specifier): Use
+ c_sizeof_or_alignof_type instead of c_alignof.
+ (c_parser_alignof_expression): Likewise, with min_alignof
+ parameter depending on alignof spelling used.
+
+2013-12-04 Marek Polacek <polacek@redhat.com>
+
+ PR c/54113
+ * c-decl.c (start_function): Don't warn for missing prototype for
+ inline functions.
+
2013-12-03 Marek Polacek <polacek@redhat.com>
PR c/59351
diff --git a/gcc/c/c-decl.c b/gcc/c/c-decl.c
index d31361b33a3..8adb41244ac 100644
--- a/gcc/c/c-decl.c
+++ b/gcc/c/c-decl.c
@@ -7975,7 +7975,8 @@ start_function (struct c_declspecs *declspecs, struct c_declarator *declarator,
&& old_decl != error_mark_node
&& TREE_PUBLIC (decl1)
&& !MAIN_NAME_P (DECL_NAME (decl1))
- && C_DECL_ISNT_PROTOTYPE (old_decl))
+ && C_DECL_ISNT_PROTOTYPE (old_decl)
+ && !DECL_DECLARED_INLINE_P (decl1))
warning_at (loc, OPT_Wmissing_prototypes,
"no previous prototype for %qD", decl1);
/* Optionally warn of any def with no previous prototype
diff --git a/gcc/c/c-objc-common.h b/gcc/c/c-objc-common.h
index 6ae7b3e0fb7..4b7987d0d2b 100644
--- a/gcc/c/c-objc-common.h
+++ b/gcc/c/c-objc-common.h
@@ -104,14 +104,4 @@ along with GCC; see the file COPYING3. If not see
#undef LANG_HOOKS_TREE_INLINING_VAR_MOD_TYPE_P
#define LANG_HOOKS_TREE_INLINING_VAR_MOD_TYPE_P c_vla_unspec_p
-
-#undef LANG_HOOKS_CILKPLUS_GIMPLIFY_SPAWN
-#define LANG_HOOKS_CILKPLUS_GIMPLIFY_SPAWN gimplify_cilk_spawn
-
-#undef LANG_HOOKS_CILKPLUS_FRAME_CLEANUP
-#define LANG_HOOKS_CILKPLUS_FRAME_CLEANUP c_cilk_install_body_w_frame_cleanup
-
-#undef LANG_HOOKS_CILKPLUS_DETECT_SPAWN_AND_UNWRAP
-#define LANG_HOOKS_CILKPLUS_DETECT_SPAWN_AND_UNWRAP \
- cilk_detect_spawn_and_unwrap
#endif /* GCC_C_OBJC_COMMON */
diff --git a/gcc/c/c-parser.c b/gcc/c/c-parser.c
index fb3b01cd4a2..af425b2aea9 100644
--- a/gcc/c/c-parser.c
+++ b/gcc/c/c-parser.c
@@ -3045,7 +3045,8 @@ c_parser_alignas_specifier (c_parser * parser)
{
struct c_type_name *type = c_parser_type_name (parser);
if (type != NULL)
- ret = c_alignof (loc, groktypename (type, NULL, NULL));
+ ret = c_sizeof_or_alignof_type (loc, groktypename (type, NULL, NULL),
+ false, true, 1);
}
else
ret = c_parser_expr_no_commas (parser, NULL).value;
@@ -6446,11 +6447,12 @@ c_parser_alignof_expression (c_parser *parser)
location_t loc = c_parser_peek_token (parser)->location;
tree alignof_spelling = c_parser_peek_token (parser)->value;
gcc_assert (c_parser_next_token_is_keyword (parser, RID_ALIGNOF));
+ bool is_c11_alignof = strcmp (IDENTIFIER_POINTER (alignof_spelling),
+ "_Alignof") == 0;
/* A diagnostic is not required for the use of this identifier in
the implementation namespace; only diagnose it for the C11
spelling because of existing code using the other spellings. */
- if (!flag_isoc11
- && strcmp (IDENTIFIER_POINTER (alignof_spelling), "_Alignof") == 0)
+ if (!flag_isoc11 && is_c11_alignof)
{
if (flag_isoc99)
pedwarn (loc, OPT_Wpedantic, "ISO C99 does not support %qE",
@@ -6494,7 +6496,9 @@ c_parser_alignof_expression (c_parser *parser)
/* alignof ( type-name ). */
c_inhibit_evaluation_warnings--;
in_alignof--;
- ret.value = c_alignof (loc, groktypename (type_name, NULL, NULL));
+ ret.value = c_sizeof_or_alignof_type (loc, groktypename (type_name,
+ NULL, NULL),
+ false, is_c11_alignof, 1);
ret.original_code = ERROR_MARK;
ret.original_type = NULL;
return ret;
diff --git a/gcc/c/c-typeck.c b/gcc/c/c-typeck.c
index d3753fbcca6..de98f4d1b2d 100644
--- a/gcc/c/c-typeck.c
+++ b/gcc/c/c-typeck.c
@@ -49,6 +49,7 @@ along with GCC; see the file COPYING3. If not see
#include "c-family/c-objc.h"
#include "c-family/c-common.h"
#include "c-family/c-ubsan.h"
+#include "cilk.h"
#include "wide-int.h"
/* Possible cases of implicit bad conversions. Used to select
@@ -12438,3 +12439,31 @@ c_tree_equal (tree t1, tree t2)
/* We can get here with --disable-checking. */
return false;
}
+
+/* Inserts "cleanup" functions after the function-body of FNDECL. FNDECL is a
+ spawn-helper and BODY is the newly created body for FNDECL. */
+
+void
+cilk_install_body_with_frame_cleanup (tree fndecl, tree body, void *w)
+{
+ tree list = alloc_stmt_list ();
+ tree frame = make_cilk_frame (fndecl);
+ tree dtor = create_cilk_function_exit (frame, false, true);
+ add_local_decl (cfun, frame);
+
+ DECL_SAVED_TREE (fndecl) = list;
+ tree frame_ptr = build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (frame)),
+ frame);
+ tree body_list = cilk_install_body_pedigree_operations (frame_ptr);
+ gcc_assert (TREE_CODE (body_list) == STATEMENT_LIST);
+
+ tree detach_expr = build_call_expr (cilk_detach_fndecl, 1, frame_ptr);
+ append_to_statement_list (detach_expr, &body_list);
+
+ cilk_outline (fndecl, &body, (struct wrapper_data *) w);
+ body = fold_build_cleanup_point_expr (void_type_node, body);
+
+ append_to_statement_list (body, &body_list);
+ append_to_statement_list (build_stmt (EXPR_LOCATION (body), TRY_FINALLY_EXPR,
+ body_list, dtor), &list);
+}
diff --git a/gcc/caller-save.c b/gcc/caller-save.c
index b134cde1131..628fc0b3e3e 100644
--- a/gcc/caller-save.c
+++ b/gcc/caller-save.c
@@ -1414,8 +1414,8 @@ insert_one_insn (struct insn_chain *chain, int before_p, int code, rtx pat)
&new_chain->live_throughout);
CLEAR_REG_SET (&new_chain->dead_or_set);
- if (chain->insn == BB_HEAD (BASIC_BLOCK (chain->block)))
- BB_HEAD (BASIC_BLOCK (chain->block)) = new_chain->insn;
+ if (chain->insn == BB_HEAD (BASIC_BLOCK_FOR_FN (cfun, chain->block)))
+ BB_HEAD (BASIC_BLOCK_FOR_FN (cfun, chain->block)) = new_chain->insn;
}
else
{
@@ -1434,8 +1434,8 @@ insert_one_insn (struct insn_chain *chain, int before_p, int code, rtx pat)
note_stores (PATTERN (chain->insn), add_stored_regs,
&new_chain->live_throughout);
CLEAR_REG_SET (&new_chain->dead_or_set);
- if (chain->insn == BB_END (BASIC_BLOCK (chain->block)))
- BB_END (BASIC_BLOCK (chain->block)) = new_chain->insn;
+ if (chain->insn == BB_END (BASIC_BLOCK_FOR_FN (cfun, chain->block)))
+ BB_END (BASIC_BLOCK_FOR_FN (cfun, chain->block)) = new_chain->insn;
}
new_chain->block = chain->block;
new_chain->is_caller_save_insn = 1;
diff --git a/gcc/cfg.c b/gcc/cfg.c
index 6bceca5ed26..d4d00a48424 100644
--- a/gcc/cfg.c
+++ b/gcc/cfg.c
@@ -101,7 +101,7 @@ clear_edges (void)
edge e;
edge_iterator ei;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
FOR_EACH_EDGE (e, ei, bb->succs)
free_edge (e);
@@ -153,8 +153,8 @@ compact_blocks (void)
{
int i;
- SET_BASIC_BLOCK (ENTRY_BLOCK, ENTRY_BLOCK_PTR_FOR_FN (cfun));
- SET_BASIC_BLOCK (EXIT_BLOCK, EXIT_BLOCK_PTR_FOR_FN (cfun));
+ SET_BASIC_BLOCK_FOR_FN (cfun, ENTRY_BLOCK, ENTRY_BLOCK_PTR_FOR_FN (cfun));
+ SET_BASIC_BLOCK_FOR_FN (cfun, EXIT_BLOCK, EXIT_BLOCK_PTR_FOR_FN (cfun));
if (df)
df_compact_blocks ();
@@ -163,18 +163,18 @@ compact_blocks (void)
basic_block bb;
i = NUM_FIXED_BLOCKS;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
- SET_BASIC_BLOCK (i, bb);
+ SET_BASIC_BLOCK_FOR_FN (cfun, i, bb);
bb->index = i;
i++;
}
gcc_assert (i == n_basic_blocks_for_fn (cfun));
- for (; i < last_basic_block; i++)
- SET_BASIC_BLOCK (i, NULL);
+ for (; i < last_basic_block_for_fn (cfun); i++)
+ SET_BASIC_BLOCK_FOR_FN (cfun, i, NULL);
}
- last_basic_block = n_basic_blocks_for_fn (cfun);
+ last_basic_block_for_fn (cfun) = n_basic_blocks_for_fn (cfun);
}
/* Remove block B from the basic block array. */
@@ -183,7 +183,7 @@ void
expunge_block (basic_block b)
{
unlink_block (b);
- SET_BASIC_BLOCK (b->index, NULL);
+ SET_BASIC_BLOCK_FOR_FN (cfun, b->index, NULL);
n_basic_blocks_for_fn (cfun)--;
/* We should be able to ggc_free here, but we are not.
The dead SSA_NAMES are left pointing to dead statements that are pointing
@@ -408,7 +408,7 @@ check_bb_profile (basic_block bb, FILE * file, int indent, int flags)
memset ((void *) s_indent, ' ', (size_t) indent);
s_indent[indent] = '\0';
- if (profile_status_for_function (fun) == PROFILE_ABSENT)
+ if (profile_status_for_fn (fun) == PROFILE_ABSENT)
return;
if (bb != EXIT_BLOCK_PTR_FOR_FN (fun))
@@ -576,7 +576,7 @@ alloc_aux_for_blocks (int size)
{
basic_block bb;
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
alloc_aux_for_block (bb, size);
}
}
@@ -588,7 +588,7 @@ clear_aux_for_blocks (void)
{
basic_block bb;
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
bb->aux = NULL;
}
@@ -690,7 +690,7 @@ debug_bb (basic_block bb)
DEBUG_FUNCTION basic_block
debug_bb_n (int n)
{
- basic_block bb = BASIC_BLOCK (n);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, n);
debug_bb (bb);
return bb;
}
@@ -828,7 +828,7 @@ brief_dump_cfg (FILE *file, int flags)
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
dump_bb_info (file, bb, 0,
flags & (TDF_COMMENT | TDF_DETAILS),
@@ -1139,7 +1139,7 @@ get_bb_original (basic_block bb)
key.index1 = bb->index;
entry = bb_original.find (&key);
if (entry)
- return BASIC_BLOCK (entry->index2);
+ return BASIC_BLOCK_FOR_FN (cfun, entry->index2);
else
return NULL;
}
@@ -1164,7 +1164,7 @@ get_bb_copy (basic_block bb)
key.index1 = bb->index;
entry = bb_copy.find (&key);
if (entry)
- return BASIC_BLOCK (entry->index2);
+ return BASIC_BLOCK_FOR_FN (cfun, entry->index2);
else
return NULL;
}
diff --git a/gcc/cfganal.c b/gcc/cfganal.c
index ad5928a40b4..d7e03822fb8 100644
--- a/gcc/cfganal.c
+++ b/gcc/cfganal.c
@@ -72,15 +72,15 @@ mark_dfs_back_edges (void)
bool found = false;
/* Allocate the preorder and postorder number arrays. */
- pre = XCNEWVEC (int, last_basic_block);
- post = XCNEWVEC (int, last_basic_block);
+ pre = XCNEWVEC (int, last_basic_block_for_fn (cfun));
+ post = XCNEWVEC (int, last_basic_block_for_fn (cfun));
/* Allocate stack for back-tracking up CFG. */
stack = XNEWVEC (edge_iterator, n_basic_blocks_for_fn (cfun) + 1);
sp = 0;
/* Allocate bitmap to track nodes that have been visited. */
- visited = sbitmap_alloc (last_basic_block);
+ visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
/* None of the nodes in the CFG have been visited yet. */
bitmap_clear (visited);
@@ -159,7 +159,7 @@ find_unreachable_blocks (void)
/* Clear all the reachability flags. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
bb->flags &= ~BB_REACHABLE;
/* Add our starting points to the worklist. Almost always there will
@@ -428,8 +428,8 @@ control_dependences::control_dependences (struct edge_list *edges)
: m_el (edges)
{
timevar_push (TV_CONTROL_DEPENDENCES);
- control_dependence_map.create (last_basic_block);
- for (int i = 0; i < last_basic_block; ++i)
+ control_dependence_map.create (last_basic_block_for_fn (cfun));
+ for (int i = 0; i < last_basic_block_for_fn (cfun); ++i)
control_dependence_map.quick_push (BITMAP_ALLOC (NULL));
for (int i = 0; i < NUM_EDGES (m_el); ++i)
find_control_dependence (i);
@@ -554,7 +554,7 @@ add_noreturn_fake_exit_edges (void)
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (EDGE_COUNT (bb->succs) == 0)
make_single_succ_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), EDGE_FAKE);
}
@@ -622,7 +622,7 @@ post_order_compute (int *post_order, bool include_entry_exit,
sp = 0;
/* Allocate bitmap to track nodes that have been visited. */
- visited = sbitmap_alloc (last_basic_block);
+ visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
/* None of the nodes in the CFG have been visited yet. */
bitmap_clear (visited);
@@ -778,13 +778,13 @@ inverted_post_order_compute (int *post_order)
sp = 0;
/* Allocate bitmap to track nodes that have been visited. */
- visited = sbitmap_alloc (last_basic_block);
+ visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
/* None of the nodes in the CFG have been visited yet. */
bitmap_clear (visited);
/* Put all blocks that have no successor into the initial work list. */
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
if (EDGE_COUNT (bb->succs) == 0)
{
/* Push the initial edge on to the stack. */
@@ -931,7 +931,7 @@ pre_and_rev_post_order_compute_fn (struct function *fn,
rev_post_order_num -= NUM_FIXED_BLOCKS;
/* Allocate bitmap to track nodes that have been visited. */
- visited = sbitmap_alloc (last_basic_block);
+ visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
/* None of the nodes in the CFG have been visited yet. */
bitmap_clear (visited);
@@ -1062,7 +1062,7 @@ flow_dfs_compute_reverse_init (depth_first_search_ds data)
data->sp = 0;
/* Allocate bitmap to track nodes that have been visited. */
- data->visited_blocks = sbitmap_alloc (last_basic_block);
+ data->visited_blocks = sbitmap_alloc (last_basic_block_for_fn (cfun));
/* None of the nodes in the CFG have been visited yet. */
bitmap_clear (data->visited_blocks);
@@ -1147,7 +1147,7 @@ dfs_enumerate_from (basic_block bb, int reverse,
#define VISITED_P(BB) (bitmap_bit_p (visited, (BB)->index))
/* Resize the VISITED sbitmap if necessary. */
- size = last_basic_block;
+ size = last_basic_block_for_fn (cfun);
if (size < 10)
size = 10;
@@ -1236,7 +1236,7 @@ compute_dominance_frontiers_1 (bitmap_head *frontiers)
edge p;
edge_iterator ei;
basic_block b;
- FOR_EACH_BB (b)
+ FOR_EACH_BB_FN (b, cfun)
{
if (EDGE_COUNT (b->preds) >= 2)
{
@@ -1313,7 +1313,8 @@ compute_idf (bitmap def_blocks, bitmap_head *dfs)
form, the basic blocks where new and/or old names are defined
may have disappeared by CFG cleanup calls. In this case,
we may pull a non-existing block from the work stack. */
- gcc_checking_assert (bb_index < (unsigned) last_basic_block);
+ gcc_checking_assert (bb_index
+ < (unsigned) last_basic_block_for_fn (cfun));
EXECUTE_IF_AND_COMPL_IN_BITMAP (&dfs[bb_index], phi_insertion_points,
0, i, bi)
@@ -1508,7 +1509,7 @@ single_pred_before_succ_order (void)
basic_block *order = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
unsigned n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
unsigned np, i;
- sbitmap visited = sbitmap_alloc (last_basic_block);
+ sbitmap visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
#define MARK_VISITED(BB) (bitmap_set_bit (visited, (BB)->index))
#define VISITED_P(BB) (bitmap_bit_p (visited, (BB)->index))
@@ -1516,7 +1517,7 @@ single_pred_before_succ_order (void)
bitmap_clear (visited);
MARK_VISITED (ENTRY_BLOCK_PTR_FOR_FN (cfun));
- FOR_EACH_BB (x)
+ FOR_EACH_BB_FN (x, cfun)
{
if (VISITED_P (x))
continue;
diff --git a/gcc/cfgbuild.c b/gcc/cfgbuild.c
index 08534d4bdde..acfc73be640 100644
--- a/gcc/cfgbuild.c
+++ b/gcc/cfgbuild.c
@@ -209,7 +209,7 @@ make_edges (basic_block min, basic_block max, int update_p)
nearly fully-connected CFGs. In that case we spend a significant
amount of time searching the edge lists for duplicates. */
if (forced_labels || cfun->cfg->max_jumptable_ents > 100)
- edge_cache = sbitmap_alloc (last_basic_block);
+ edge_cache = sbitmap_alloc (last_basic_block_for_fn (cfun));
/* By nature of the way these get numbered, ENTRY_BLOCK_PTR->next_bb block
is always the entry. */
@@ -595,15 +595,15 @@ find_many_sub_basic_blocks (sbitmap blocks)
{
basic_block bb, min, max;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
SET_STATE (bb,
bitmap_bit_p (blocks, bb->index) ? BLOCK_TO_SPLIT : BLOCK_ORIGINAL);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (STATE (bb) == BLOCK_TO_SPLIT)
find_bb_boundaries (bb);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (STATE (bb) != BLOCK_ORIGINAL)
break;
@@ -618,7 +618,7 @@ find_many_sub_basic_blocks (sbitmap blocks)
/* Update branch probabilities. Expect only (un)conditional jumps
to be created with only the forward edges. */
- if (profile_status != PROFILE_ABSENT)
+ if (profile_status_for_fn (cfun) != PROFILE_ABSENT)
FOR_BB_BETWEEN (bb, min, max->next_bb, next_bb)
{
edge e;
@@ -640,6 +640,6 @@ find_many_sub_basic_blocks (sbitmap blocks)
compute_outgoing_frequencies (bb);
}
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
SET_STATE (bb, 0);
}
diff --git a/gcc/cfgcleanup.c b/gcc/cfgcleanup.c
index 234e5b64fe7..684ab0fa22a 100644
--- a/gcc/cfgcleanup.c
+++ b/gcc/cfgcleanup.c
@@ -2613,7 +2613,7 @@ try_optimize_cfg (int mode)
crossjumps_occured = false;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
update_forwarder_flag (bb);
if (! targetm.cannot_modify_jumps_p ())
@@ -2864,7 +2864,7 @@ try_optimize_cfg (int mode)
while (changed);
}
- FOR_ALL_BB (b)
+ FOR_ALL_BB_FN (b, cfun)
b->flags &= ~(BB_FORWARDER_BLOCK | BB_NONTHREADABLE_BLOCK);
return changed_overall;
@@ -2955,7 +2955,7 @@ delete_dead_jumptables (void)
/* A dead jump table does not belong to any basic block. Scan insns
between two adjacent basic blocks. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn, next;
diff --git a/gcc/cfgexpand.c b/gcc/cfgexpand.c
index 853ace2c2a1..a73bd411926 100644
--- a/gcc/cfgexpand.c
+++ b/gcc/cfgexpand.c
@@ -498,10 +498,10 @@ add_scope_conflicts (void)
We then do a mostly classical bitmap liveness algorithm. */
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
bb->aux = BITMAP_ALLOC (&stack_var_bitmap_obstack);
- rpo = XNEWVEC (int, last_basic_block);
+ rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
n_bbs = pre_and_rev_post_order_compute (NULL, rpo, false);
changed = true;
@@ -512,7 +512,7 @@ add_scope_conflicts (void)
for (i = 0; i < n_bbs; i++)
{
bitmap active;
- bb = BASIC_BLOCK (rpo[i]);
+ bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
active = (bitmap)bb->aux;
add_scope_conflicts_1 (bb, work, false);
if (bitmap_ior_into (active, work))
@@ -520,12 +520,12 @@ add_scope_conflicts (void)
}
}
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
add_scope_conflicts_1 (bb, work, true);
free (rpo);
BITMAP_FREE (work);
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
BITMAP_FREE (bb->aux);
}
@@ -5378,7 +5378,7 @@ discover_nonconstant_array_refs (void)
basic_block bb;
gimple_stmt_iterator gsi;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
gimple stmt = gsi_stmt (gsi);
@@ -5809,7 +5809,7 @@ gimple_expand_cfg (void)
}
}
- blocks = sbitmap_alloc (last_basic_block);
+ blocks = sbitmap_alloc (last_basic_block_for_fn (cfun));
bitmap_ones (blocks);
find_many_sub_basic_blocks (blocks);
sbitmap_free (blocks);
diff --git a/gcc/cfghooks.c b/gcc/cfghooks.c
index 2535c9027be..7a16887e458 100644
--- a/gcc/cfghooks.c
+++ b/gcc/cfghooks.c
@@ -98,15 +98,15 @@ verify_flow_info (void)
basic_block *last_visited;
timevar_push (TV_CFG_VERIFY);
- last_visited = XCNEWVEC (basic_block, last_basic_block);
- edge_checksum = XCNEWVEC (size_t, last_basic_block);
+ last_visited = XCNEWVEC (basic_block, last_basic_block_for_fn (cfun));
+ edge_checksum = XCNEWVEC (size_t, last_basic_block_for_fn (cfun));
/* Check bb chain & numbers. */
last_bb_seen = ENTRY_BLOCK_PTR_FOR_FN (cfun);
FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb, NULL, next_bb)
{
if (bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
- && bb != BASIC_BLOCK (bb->index))
+ && bb != BASIC_BLOCK_FOR_FN (cfun, bb->index))
{
error ("bb %d on wrong place", bb->index);
err = 1;
@@ -123,7 +123,7 @@ verify_flow_info (void)
}
/* Now check the basic blocks (boundaries etc.) */
- FOR_EACH_BB_REVERSE (bb)
+ FOR_EACH_BB_REVERSE_FN (bb, cfun)
{
int n_fallthru = 0;
edge e;
@@ -325,7 +325,7 @@ dump_flow_info (FILE *file, int flags)
fprintf (file, "\n%d basic blocks, %d edges.\n", n_basic_blocks_for_fn (cfun),
n_edges_for_fn (cfun));
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
dump_bb (file, bb, 0, flags);
putc ('\n', file);
@@ -1408,10 +1408,10 @@ account_profile_record (struct profile_record *record, int after_pass)
int sum;
gcov_type lsum;
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
{
if (bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
- && profile_status != PROFILE_ABSENT)
+ && profile_status_for_fn (cfun) != PROFILE_ABSENT)
{
sum = 0;
FOR_EACH_EDGE (e, ei, bb->succs)
@@ -1426,7 +1426,7 @@ account_profile_record (struct profile_record *record, int after_pass)
record->num_mismatched_count_out[after_pass]++;
}
if (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun)
- && profile_status != PROFILE_ABSENT)
+ && profile_status_for_fn (cfun) != PROFILE_ABSENT)
{
sum = 0;
FOR_EACH_EDGE (e, ei, bb->preds)
diff --git a/gcc/cfgloop.c b/gcc/cfgloop.c
index 94238badacf..4182262e415 100644
--- a/gcc/cfgloop.c
+++ b/gcc/cfgloop.c
@@ -50,7 +50,7 @@ flow_loops_cfg_dump (FILE *file)
if (!file)
return;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
edge succ;
edge_iterator ei;
@@ -440,7 +440,7 @@ flow_loops_find (struct loops *loops)
auto_vec<loop_p> larray (loops->larray->length ());
for (b = 0; b < n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS; b++)
{
- basic_block header = BASIC_BLOCK (rc_order[b]);
+ basic_block header = BASIC_BLOCK_FOR_FN (cfun, rc_order[b]);
if (bb_loop_header_p (header))
{
struct loop *loop;
@@ -835,7 +835,7 @@ get_loop_body (const struct loop *loop)
gcc_assert (loop->num_nodes == (unsigned) n_basic_blocks_for_fn (cfun));
body[tv++] = loop->header;
body[tv++] = EXIT_BLOCK_PTR_FOR_FN (cfun);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
body[tv++] = bb;
}
else
@@ -1083,7 +1083,7 @@ record_loop_exits (void)
loop_exit_hash, loop_exit_eq,
loop_exit_free);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
FOR_EACH_EDGE (e, ei, bb->succs)
{
@@ -1344,7 +1344,7 @@ verify_loop_structure (void)
verify_dominators (CDI_DOMINATORS);
/* Check the headers. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (bb_loop_header_p (bb))
{
if (bb->loop_father->header == NULL)
@@ -1365,7 +1365,7 @@ verify_loop_structure (void)
}
/* Check the recorded loop father and sizes of loops. */
- visited = sbitmap_alloc (last_basic_block);
+ visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
bitmap_clear (visited);
bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
@@ -1479,8 +1479,8 @@ verify_loop_structure (void)
if (loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
{
/* Record old info. */
- irreds = sbitmap_alloc (last_basic_block);
- FOR_EACH_BB (bb)
+ irreds = sbitmap_alloc (last_basic_block_for_fn (cfun));
+ FOR_EACH_BB_FN (bb, cfun)
{
edge_iterator ei;
if (bb->flags & BB_IRREDUCIBLE_LOOP)
@@ -1496,7 +1496,7 @@ verify_loop_structure (void)
mark_irreducible_loops ();
/* Compare. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
edge_iterator ei;
@@ -1579,7 +1579,7 @@ verify_loop_structure (void)
sizes = XCNEWVEC (unsigned, num);
memset (sizes, 0, sizeof (unsigned) * num);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
edge_iterator ei;
if (bb->loop_father == current_loops->tree_root)
diff --git a/gcc/cfgloop.h b/gcc/cfgloop.h
index 69fa996c6ff..f02166c5f35 100644
--- a/gcc/cfgloop.h
+++ b/gcc/cfgloop.h
@@ -177,6 +177,9 @@ struct GTY ((chain_next ("%h.next"))) loop {
/* True if we should try harder to vectorize this loop. */
bool force_vect;
+ /* True if this loop should never be vectorized. */
+ bool dont_vectorize;
+
/* For SIMD loops, this is a unique identifier of the loop, referenced
by IFN_GOMP_SIMD_VF, IFN_GOMP_SIMD_LANE and IFN_GOMP_SIMD_LAST_LANE
builtins. */
diff --git a/gcc/cfgloopanal.c b/gcc/cfgloopanal.c
index 0cee6c68b28..5e89cb1cd79 100644
--- a/gcc/cfgloopanal.c
+++ b/gcc/cfgloopanal.c
@@ -64,7 +64,7 @@ just_once_each_iteration_p (const struct loop *loop, const_basic_block bb)
LOOPS is the loop tree. */
-#define LOOP_REPR(LOOP) ((LOOP)->num + last_basic_block)
+#define LOOP_REPR(LOOP) ((LOOP)->num + last_basic_block_for_fn (cfun))
#define BB_REPR(BB) ((BB)->index + 1)
bool
@@ -94,7 +94,7 @@ mark_irreducible_loops (void)
}
/* Create the edge lists. */
- g = new_graph (last_basic_block + num);
+ g = new_graph (last_basic_block_for_fn (cfun) + num);
FOR_BB_BETWEEN (act, ENTRY_BLOCK_PTR_FOR_FN (cfun),
EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
@@ -432,7 +432,7 @@ mark_loop_exit_edges (void)
if (number_of_loops (cfun) <= 1)
return;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
edge_iterator ei;
@@ -470,7 +470,7 @@ single_likely_exit (struct loop *loop)
ruled out by this test. The static branch prediction algorithm
will not assign such a low probability to conditionals for usual
reasons. */
- if (profile_status != PROFILE_ABSENT
+ if (profile_status_for_fn (cfun) != PROFILE_ABSENT
&& ex->probability < 5 && !ex->count)
continue;
if (!found)
diff --git a/gcc/cfgloopmanip.c b/gcc/cfgloopmanip.c
index 7a6b20172f4..2bb8b6a2c75 100644
--- a/gcc/cfgloopmanip.c
+++ b/gcc/cfgloopmanip.c
@@ -204,7 +204,7 @@ fix_bb_placements (basic_block from,
|| from == base_loop->header)
return;
- in_queue = sbitmap_alloc (last_basic_block);
+ in_queue = sbitmap_alloc (last_basic_block_for_fn (cfun));
bitmap_clear (in_queue);
bitmap_set_bit (in_queue, from->index);
/* Prevent us from going out of the base_loop. */
@@ -348,7 +348,7 @@ remove_path (edge e)
n_bord_bbs = 0;
bord_bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
- seen = sbitmap_alloc (last_basic_block);
+ seen = sbitmap_alloc (last_basic_block_for_fn (cfun));
bitmap_clear (seen);
/* Find "border" hexes -- i.e. those with predecessor in removed path. */
@@ -623,7 +623,7 @@ update_dominators_in_loop (struct loop *loop)
basic_block *body;
unsigned i;
- seen = sbitmap_alloc (last_basic_block);
+ seen = sbitmap_alloc (last_basic_block_for_fn (cfun));
bitmap_clear (seen);
body = get_loop_body (loop);
diff --git a/gcc/cfgrtl.c b/gcc/cfgrtl.c
index 63f44afbbae..1a632498a64 100644
--- a/gcc/cfgrtl.c
+++ b/gcc/cfgrtl.c
@@ -328,10 +328,10 @@ create_basic_block_structure (rtx head, rtx end, rtx bb_note, basic_block after)
BB_HEAD (bb) = head;
BB_END (bb) = end;
- bb->index = last_basic_block++;
+ bb->index = last_basic_block_for_fn (cfun)++;
bb->flags = BB_NEW | BB_RTL;
link_block (bb, after);
- SET_BASIC_BLOCK (bb->index, bb);
+ SET_BASIC_BLOCK_FOR_FN (cfun, bb->index, bb);
df_bb_refs_record (bb->index, false);
update_bb_for_insn (bb);
BB_SET_PARTITION (bb, BB_UNPARTITIONED);
@@ -355,10 +355,13 @@ rtl_create_basic_block (void *headp, void *endp, basic_block after)
basic_block bb;
/* Grow the basic block array if needed. */
- if ((size_t) last_basic_block >= basic_block_info->length ())
+ if ((size_t) last_basic_block_for_fn (cfun)
+ >= basic_block_info_for_fn (cfun)->length ())
{
- size_t new_size = last_basic_block + (last_basic_block + 3) / 4;
- vec_safe_grow_cleared (basic_block_info, new_size);
+ size_t new_size =
+ (last_basic_block_for_fn (cfun)
+ + (last_basic_block_for_fn (cfun) + 3) / 4);
+ vec_safe_grow_cleared (basic_block_info_for_fn (cfun), new_size);
}
n_basic_blocks_for_fn (cfun)++;
@@ -413,7 +416,7 @@ compute_bb_for_insn (void)
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx end = BB_END (bb);
rtx insn;
@@ -2150,7 +2153,7 @@ print_rtl_with_bb (FILE *outf, const_rtx rtx_first, int flags)
if (flags & TDF_BLOCKS)
{
- FOR_EACH_BB_REVERSE (bb)
+ FOR_EACH_BB_REVERSE_FN (bb, cfun)
{
rtx x;
@@ -2272,7 +2275,7 @@ find_partition_fixes (bool flag_only)
/* Callers check this. */
gcc_checking_assert (crtl->has_bb_partition);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if ((BB_PARTITION (bb) == BB_COLD_PARTITION))
bbs_in_cold_partition.safe_push (bb);
@@ -2369,7 +2372,7 @@ verify_hot_cold_block_grouping (void)
|| current_ir_type () != IR_RTL_CFGRTL)
return err;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
if (current_partition != BB_UNPARTITIONED
&& BB_PARTITION (bb) != current_partition)
@@ -2405,7 +2408,7 @@ rtl_verify_edges (void)
int err = 0;
basic_block bb;
- FOR_EACH_BB_REVERSE (bb)
+ FOR_EACH_BB_REVERSE_FN (bb, cfun)
{
int n_fallthru = 0, n_branch = 0, n_abnormal_call = 0, n_sibcall = 0;
int n_eh = 0, n_abnormal = 0;
@@ -2420,7 +2423,7 @@ rtl_verify_edges (void)
&& any_condjump_p (BB_END (bb)))
{
if (XINT (note, 0) != BRANCH_EDGE (bb)->probability
- && profile_status != PROFILE_ABSENT)
+ && profile_status_for_fn (cfun) != PROFILE_ABSENT)
{
error ("verify_flow_info: REG_BR_PROB does not match cfg %i %i",
XINT (note, 0), BRANCH_EDGE (bb)->probability);
@@ -2583,7 +2586,7 @@ rtl_verify_bb_insns (void)
int err = 0;
basic_block bb;
- FOR_EACH_BB_REVERSE (bb)
+ FOR_EACH_BB_REVERSE_FN (bb, cfun)
{
/* Now check the header of basic
block. It ought to contain optional CODE_LABEL followed
@@ -2646,7 +2649,7 @@ rtl_verify_bb_pointers (void)
basic_block bb;
/* Check the general integrity of the basic blocks. */
- FOR_EACH_BB_REVERSE (bb)
+ FOR_EACH_BB_REVERSE_FN (bb, cfun)
{
rtx insn;
@@ -2736,7 +2739,7 @@ rtl_verify_bb_insn_chain (void)
bb_info = XCNEWVEC (basic_block, max_uid);
- FOR_EACH_BB_REVERSE (bb)
+ FOR_EACH_BB_REVERSE_FN (bb, cfun)
{
rtx head = BB_HEAD (bb);
rtx end = BB_END (bb);
@@ -2818,7 +2821,7 @@ rtl_verify_fallthru (void)
basic_block bb;
int err = 0;
- FOR_EACH_BB_REVERSE (bb)
+ FOR_EACH_BB_REVERSE_FN (bb, cfun)
{
edge e;
@@ -3198,7 +3201,7 @@ purge_all_dead_edges (void)
int purged = false;
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
bool purged_here = purge_dead_edges (bb);
@@ -3223,7 +3226,7 @@ fixup_abnormal_edges (void)
bool inserted = false;
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
edge e;
edge_iterator ei;
@@ -3446,7 +3449,7 @@ record_effective_endpoints (void)
cfg_layout_function_header = NULL_RTX;
next_insn = get_insns ();
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx end;
@@ -3476,7 +3479,7 @@ outof_cfg_layout_mode (void)
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
bb->aux = bb->next_bb;
@@ -3616,7 +3619,7 @@ relink_block_chain (bool stay_in_cfglayout_mode)
EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb = prev_bb;
/* Then, clean up the aux fields. */
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
{
bb->aux = NULL;
if (!stay_in_cfglayout_mode)
@@ -3854,7 +3857,7 @@ fixup_reorder_chain (void)
relink_block_chain (/*stay_in_cfglayout_mode=*/false);
/* Annoying special case - jump around dead jumptables left in the code. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
edge e = find_fallthru_edge (bb->succs);
@@ -3865,7 +3868,7 @@ fixup_reorder_chain (void)
/* Ensure goto_locus from edges has some instructions with that locus
in RTL. */
if (!optimize)
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
edge e;
edge_iterator ei;
@@ -4044,7 +4047,7 @@ force_one_exit_fallthru (void)
/* Fix up the chain of blocks -- make FORWARDER immediately precede the
exit block. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
if (bb->aux == NULL && bb != forwarder)
{
@@ -4252,10 +4255,10 @@ break_superblocks (void)
bool need = false;
basic_block bb;
- superblocks = sbitmap_alloc (last_basic_block);
+ superblocks = sbitmap_alloc (last_basic_block_for_fn (cfun));
bitmap_clear (superblocks);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (bb->flags & BB_SUPERBLOCK)
{
bb->flags &= ~BB_SUPERBLOCK;
@@ -4778,7 +4781,7 @@ rtl_flow_call_edges_add (sbitmap blocks)
{
int i;
int blocks_split = 0;
- int last_bb = last_basic_block;
+ int last_bb = last_basic_block_for_fn (cfun);
bool check_last_block = false;
if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
@@ -4831,7 +4834,7 @@ rtl_flow_call_edges_add (sbitmap blocks)
for (i = NUM_FIXED_BLOCKS; i < last_bb; i++)
{
- basic_block bb = BASIC_BLOCK (i);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
rtx insn;
rtx prev_insn;
@@ -5011,10 +5014,10 @@ rtl_account_profile_record (basic_block bb, int after_pass,
{
record->size[after_pass]
+= insn_rtx_cost (PATTERN (insn), false);
- if (profile_status == PROFILE_READ)
+ if (profile_status_for_fn (cfun) == PROFILE_READ)
record->time[after_pass]
+= insn_rtx_cost (PATTERN (insn), true) * bb->count;
- else if (profile_status == PROFILE_GUESSED)
+ else if (profile_status_for_fn (cfun) == PROFILE_GUESSED)
record->time[after_pass]
+= insn_rtx_cost (PATTERN (insn), true) * bb->frequency;
}
diff --git a/gcc/cgraph.h b/gcc/cgraph.h
index 0d8166a961f..0a88da3889e 100644
--- a/gcc/cgraph.h
+++ b/gcc/cgraph.h
@@ -483,7 +483,8 @@ struct cgraph_node_set_def
vec<cgraph_node_ptr> nodes;
};
-typedef struct varpool_node *varpool_node_ptr;
+class varpool_node;
+typedef varpool_node *varpool_node_ptr;
/* A varpool node set is a collection of varpool nodes. A varpool node
@@ -501,25 +502,25 @@ typedef struct varpool_node_set_def *varpool_node_set;
/* Iterator structure for cgraph node sets. */
-typedef struct
+struct cgraph_node_set_iterator
{
cgraph_node_set set;
unsigned index;
-} cgraph_node_set_iterator;
+};
/* Iterator structure for varpool node sets. */
-typedef struct
+struct varpool_node_set_iterator
{
varpool_node_set set;
unsigned index;
-} varpool_node_set_iterator;
+};
#define DEFCIFCODE(code, string) CIF_ ## code,
/* Reasons for inlining failures. */
-typedef enum cgraph_inline_failed_enum {
+enum cgraph_inline_failed_t {
#include "cif-code.def"
CIF_N_REASONS
-} cgraph_inline_failed_t;
+};
/* Structure containing additional information about an indirect call. */
@@ -574,7 +575,7 @@ struct GTY((chain_next ("%h.next_caller"), chain_prev ("%h.prev_caller"))) cgrap
PTR GTY ((skip (""))) aux;
/* When equal to CIF_OK, inline this call. Otherwise, points to the
explanation why function was not inlined. */
- cgraph_inline_failed_t inline_failed;
+ enum cgraph_inline_failed_t inline_failed;
/* The stmt_uid of call_stmt. This is used by LTO to recover the call_stmt
when the function is serialized in. */
unsigned int lto_stmt_uid;
@@ -796,7 +797,7 @@ void cgraph_mark_address_taken_node (struct cgraph_node *);
typedef void (*cgraph_edge_hook)(struct cgraph_edge *, void *);
typedef void (*cgraph_node_hook)(struct cgraph_node *, void *);
-typedef void (*varpool_node_hook)(struct varpool_node *, void *);
+typedef void (*varpool_node_hook)(varpool_node *, void *);
typedef void (*cgraph_2edge_hook)(struct cgraph_edge *, struct cgraph_edge *,
void *);
typedef void (*cgraph_2node_hook)(struct cgraph_node *, struct cgraph_node *,
@@ -910,50 +911,50 @@ void cgraph_build_static_cdtor (char which, tree body, int priority);
varpool_node_set varpool_node_set_new (void);
varpool_node_set_iterator varpool_node_set_find (varpool_node_set,
- struct varpool_node *);
-void varpool_node_set_add (varpool_node_set, struct varpool_node *);
-void varpool_node_set_remove (varpool_node_set, struct varpool_node *);
+ varpool_node *);
+void varpool_node_set_add (varpool_node_set, varpool_node *);
+void varpool_node_set_remove (varpool_node_set, varpool_node *);
void dump_varpool_node_set (FILE *, varpool_node_set);
void debug_varpool_node_set (varpool_node_set);
void free_varpool_node_set (varpool_node_set);
void ipa_discover_readonly_nonaddressable_vars (void);
-bool varpool_externally_visible_p (struct varpool_node *);
+bool varpool_externally_visible_p (varpool_node *);
/* In predict.c */
bool cgraph_maybe_hot_edge_p (struct cgraph_edge *e);
bool cgraph_optimize_for_size_p (struct cgraph_node *);
/* In varpool.c */
-struct varpool_node *varpool_create_empty_node (void);
-struct varpool_node *varpool_node_for_decl (tree);
-struct varpool_node *varpool_node_for_asm (tree asmname);
-void varpool_mark_needed_node (struct varpool_node *);
+varpool_node *varpool_create_empty_node (void);
+varpool_node *varpool_node_for_decl (tree);
+varpool_node *varpool_node_for_asm (tree asmname);
+void varpool_mark_needed_node (varpool_node *);
void debug_varpool (void);
void dump_varpool (FILE *);
-void dump_varpool_node (FILE *, struct varpool_node *);
+void dump_varpool_node (FILE *, varpool_node *);
void varpool_finalize_decl (tree);
-enum availability cgraph_variable_initializer_availability (struct varpool_node *);
+enum availability cgraph_variable_initializer_availability (varpool_node *);
void cgraph_make_node_local (struct cgraph_node *);
bool cgraph_node_can_be_local_p (struct cgraph_node *);
-void varpool_remove_node (struct varpool_node *node);
-void varpool_finalize_named_section_flags (struct varpool_node *node);
+void varpool_remove_node (varpool_node *node);
+void varpool_finalize_named_section_flags (varpool_node *node);
bool varpool_output_variables (void);
-bool varpool_assemble_decl (struct varpool_node *node);
-void varpool_analyze_node (struct varpool_node *);
-struct varpool_node * varpool_extra_name_alias (tree, tree);
-struct varpool_node * varpool_create_variable_alias (tree, tree);
+bool varpool_assemble_decl (varpool_node *node);
+void varpool_analyze_node (varpool_node *);
+varpool_node * varpool_extra_name_alias (tree, tree);
+varpool_node * varpool_create_variable_alias (tree, tree);
void varpool_reset_queue (void);
tree ctor_for_folding (tree);
-bool varpool_for_node_and_aliases (struct varpool_node *,
- bool (*) (struct varpool_node *, void *),
+bool varpool_for_node_and_aliases (varpool_node *,
+ bool (*) (varpool_node *, void *),
void *, bool);
void varpool_add_new_variable (tree);
void symtab_initialize_asm_name_hash (void);
void symtab_prevail_in_asm_name_hash (symtab_node *node);
-void varpool_remove_initializer (struct varpool_node *);
+void varpool_remove_initializer (varpool_node *);
/* In cgraph.c */
extern void change_decl_assembler_name (tree, tree);
@@ -967,11 +968,11 @@ cgraph (symtab_node *node)
}
/* Return varpool node for given symbol and check it is a variable. */
-static inline struct varpool_node *
+static inline varpool_node *
varpool (symtab_node *node)
{
gcc_checking_assert (!node || node->type == SYMTAB_VARIABLE);
- return (struct varpool_node *)node;
+ return (varpool_node *)node;
}
/* Return callgraph node for given symbol and check it is a function. */
@@ -983,7 +984,7 @@ cgraph_get_node (const_tree decl)
}
/* Return varpool node for given symbol and check it is a function. */
-static inline struct varpool_node *
+static inline varpool_node *
varpool_get_node (const_tree decl)
{
gcc_checking_assert (TREE_CODE (decl) == VAR_DECL);
@@ -996,7 +997,7 @@ varpool_get_node (const_tree decl)
/* Return first variable. */
-static inline struct varpool_node *
+static inline varpool_node *
varpool_first_variable (void)
{
symtab_node *node;
@@ -1007,8 +1008,8 @@ varpool_first_variable (void)
}
/* Return next variable after NODE. */
-static inline struct varpool_node *
-varpool_next_variable (struct varpool_node *node)
+static inline varpool_node *
+varpool_next_variable (varpool_node *node)
{
symtab_node *node1 = node->next;
for (; node1; node1 = node1->next)
@@ -1023,7 +1024,7 @@ varpool_next_variable (struct varpool_node *node)
(node) = varpool_next_variable ((node)))
/* Return first reachable static variable with initializer. */
-static inline struct varpool_node *
+static inline varpool_node *
varpool_first_static_initializer (void)
{
symtab_node *node;
@@ -1037,8 +1038,8 @@ varpool_first_static_initializer (void)
}
/* Return next reachable static variable with initializer after NODE. */
-static inline struct varpool_node *
-varpool_next_static_initializer (struct varpool_node *node)
+static inline varpool_node *
+varpool_next_static_initializer (varpool_node *node)
{
symtab_node *node1 = node->next;
for (; node1; node1 = node1->next)
@@ -1056,7 +1057,7 @@ varpool_next_static_initializer (struct varpool_node *node)
(node) = varpool_next_static_initializer (node))
/* Return first reachable static variable with initializer. */
-static inline struct varpool_node *
+static inline varpool_node *
varpool_first_defined_variable (void)
{
symtab_node *node;
@@ -1070,8 +1071,8 @@ varpool_first_defined_variable (void)
}
/* Return next reachable static variable with initializer after NODE. */
-static inline struct varpool_node *
-varpool_next_defined_variable (struct varpool_node *node)
+static inline varpool_node *
+varpool_next_defined_variable (varpool_node *node)
{
symtab_node *node1 = node->next;
for (; node1; node1 = node1->next)
@@ -1257,7 +1258,7 @@ vsi_next (varpool_node_set_iterator *vsi)
}
/* Return the node pointed to by VSI. */
-static inline struct varpool_node *
+static inline varpool_node *
vsi_node (varpool_node_set_iterator vsi)
{
return vsi.set->nodes[vsi.index];
@@ -1276,7 +1277,7 @@ vsi_start (varpool_node_set set)
/* Return true if SET contains NODE. */
static inline bool
-varpool_node_in_set_p (struct varpool_node *node, varpool_node_set set)
+varpool_node_in_set_p (varpool_node *node, varpool_node_set set)
{
varpool_node_set_iterator vsi;
vsi = varpool_node_set_find (set, node);
@@ -1341,7 +1342,7 @@ cgraph_only_called_directly_or_aliased_p (struct cgraph_node *node)
if all direct calls are eliminated. */
static inline bool
-varpool_can_remove_if_no_refs (struct varpool_node *node)
+varpool_can_remove_if_no_refs (varpool_node *node)
{
if (DECL_EXTERNAL (node->decl))
return true;
@@ -1359,7 +1360,7 @@ varpool_can_remove_if_no_refs (struct varpool_node *node)
The magic uses are all summarized in force_output flag. */
static inline bool
-varpool_all_refs_explicit_p (struct varpool_node *vnode)
+varpool_all_refs_explicit_p (varpool_node *vnode)
{
return (vnode->definition
&& !vnode->externally_visible
@@ -1390,8 +1391,8 @@ cgraph_alias_target (struct cgraph_node *n)
return dyn_cast <cgraph_node> (symtab_alias_target (n));
}
-static inline struct varpool_node *
-varpool_alias_target (struct varpool_node *n)
+static inline varpool_node *
+varpool_alias_target (varpool_node *n)
{
return dyn_cast <varpool_node> (symtab_alias_target (n));
}
@@ -1416,11 +1417,11 @@ cgraph_function_or_thunk_node (struct cgraph_node *node,
Do not walk through thunks.
When AVAILABILITY is non-NULL, get minimal availability in the chain. */
-static inline struct varpool_node *
-varpool_variable_node (struct varpool_node *node,
+static inline varpool_node *
+varpool_variable_node (varpool_node *node,
enum availability *availability = NULL)
{
- struct varpool_node *n;
+ varpool_node *n;
n = dyn_cast <varpool_node> (symtab_alias_ultimate_target (node,
availability));
diff --git a/gcc/cgraphbuild.c b/gcc/cgraphbuild.c
index 9a63982d9cb..f1595fd0aff 100644
--- a/gcc/cgraphbuild.c
+++ b/gcc/cgraphbuild.c
@@ -44,7 +44,7 @@ along with GCC; see the file COPYING3. If not see
struct record_reference_ctx
{
bool only_vars;
- struct varpool_node *varpool_node;
+ class varpool_node *varpool_node;
};
/* Walk tree and record all calls and references to functions/variables.
@@ -89,7 +89,7 @@ record_reference (tree *tp, int *walk_subtrees, void *data)
if (TREE_CODE (decl) == VAR_DECL)
{
- struct varpool_node *vnode = varpool_node_for_decl (decl);
+ varpool_node *vnode = varpool_node_for_decl (decl);
ipa_record_reference (ctx->varpool_node,
vnode,
IPA_REF_ADDR, NULL);
@@ -128,7 +128,7 @@ record_type_list (struct cgraph_node *node, tree list)
type = TREE_OPERAND (type, 0);
if (TREE_CODE (type) == VAR_DECL)
{
- struct varpool_node *vnode = varpool_node_for_decl (type);
+ varpool_node *vnode = varpool_node_for_decl (type);
ipa_record_reference (node,
vnode,
IPA_REF_ADDR, NULL);
@@ -208,7 +208,7 @@ compute_call_stmt_bb_frequency (tree decl, basic_block bb)
(DECL_STRUCT_FUNCTION (decl))->frequency;
int freq = bb->frequency;
- if (profile_status_for_function (DECL_STRUCT_FUNCTION (decl)) == PROFILE_ABSENT)
+ if (profile_status_for_fn (DECL_STRUCT_FUNCTION (decl)) == PROFILE_ABSENT)
return CGRAPH_FREQ_BASE;
if (!entry_freq)
@@ -238,7 +238,7 @@ mark_address (gimple stmt, tree addr, void *data)
else if (addr && TREE_CODE (addr) == VAR_DECL
&& (TREE_STATIC (addr) || DECL_EXTERNAL (addr)))
{
- struct varpool_node *vnode = varpool_node_for_decl (addr);
+ varpool_node *vnode = varpool_node_for_decl (addr);
ipa_record_reference ((symtab_node *)data,
vnode,
@@ -267,7 +267,7 @@ mark_load (gimple stmt, tree t, void *data)
else if (t && TREE_CODE (t) == VAR_DECL
&& (TREE_STATIC (t) || DECL_EXTERNAL (t)))
{
- struct varpool_node *vnode = varpool_node_for_decl (t);
+ varpool_node *vnode = varpool_node_for_decl (t);
ipa_record_reference ((symtab_node *)data,
vnode,
@@ -285,7 +285,7 @@ mark_store (gimple stmt, tree t, void *data)
if (t && TREE_CODE (t) == VAR_DECL
&& (TREE_STATIC (t) || DECL_EXTERNAL (t)))
{
- struct varpool_node *vnode = varpool_node_for_decl (t);
+ varpool_node *vnode = varpool_node_for_decl (t);
ipa_record_reference ((symtab_node *)data,
vnode,
@@ -317,7 +317,7 @@ build_cgraph_edges (void)
/* Create the callgraph edges and record the nodes referenced by the function.
body. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
@@ -426,7 +426,7 @@ void
record_references_in_initializer (tree decl, bool only_vars)
{
struct pointer_set_t *visited_nodes = pointer_set_create ();
- struct varpool_node *node = varpool_node_for_decl (decl);
+ varpool_node *node = varpool_node_for_decl (decl);
struct record_reference_ctx ctx = {false, NULL};
ctx.varpool_node = node;
@@ -451,7 +451,7 @@ rebuild_cgraph_edges (void)
node->count = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
@@ -505,7 +505,7 @@ cgraph_rebuild_references (void)
node->count = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
ipa_record_stmt_references (node, gsi_stmt (gsi));
diff --git a/gcc/cgraphunit.c b/gcc/cgraphunit.c
index 2cbed673212..44f3afd6e4a 100644
--- a/gcc/cgraphunit.c
+++ b/gcc/cgraphunit.c
@@ -737,10 +737,10 @@ process_common_attributes (tree decl)
static void
process_function_and_variable_attributes (struct cgraph_node *first,
- struct varpool_node *first_var)
+ varpool_node *first_var)
{
struct cgraph_node *node;
- struct varpool_node *vnode;
+ varpool_node *vnode;
for (node = cgraph_first_function (); node != first;
node = cgraph_next_function (node))
@@ -813,7 +813,7 @@ process_function_and_variable_attributes (struct cgraph_node *first,
void
varpool_finalize_decl (tree decl)
{
- struct varpool_node *node = varpool_node_for_decl (decl);
+ varpool_node *node = varpool_node_for_decl (decl);
gcc_assert (TREE_STATIC (decl) || DECL_EXTERNAL (decl));
@@ -928,8 +928,8 @@ analyze_functions (void)
intermodule optimization. */
static struct cgraph_node *first_analyzed;
struct cgraph_node *first_handled = first_analyzed;
- static struct varpool_node *first_analyzed_var;
- struct varpool_node *first_handled_var = first_analyzed_var;
+ static varpool_node *first_analyzed_var;
+ varpool_node *first_handled_var = first_analyzed_var;
struct pointer_set_t *reachable_call_targets = pointer_set_create ();
symtab_node *node;
@@ -1891,7 +1891,7 @@ struct cgraph_order_sort
union
{
struct cgraph_node *f;
- struct varpool_node *v;
+ varpool_node *v;
struct asm_node *a;
} u;
};
@@ -1909,7 +1909,7 @@ output_in_order (void)
struct cgraph_order_sort *nodes;
int i;
struct cgraph_node *pf;
- struct varpool_node *pv;
+ varpool_node *pv;
struct asm_node *pa;
max = symtab_order;
@@ -2019,7 +2019,7 @@ ipa_passes (void)
cgraph_process_new_functions ();
execute_ipa_summary_passes
- ((struct ipa_opt_pass_d *) passes->all_regular_ipa_passes);
+ ((ipa_opt_pass_d *) passes->all_regular_ipa_passes);
}
/* Some targets need to handle LTO assembler output specially. */
diff --git a/gcc/cilk.h b/gcc/cilk.h
index 99b4d782af4..e990992cf27 100644
--- a/gcc/cilk.h
+++ b/gcc/cilk.h
@@ -90,6 +90,7 @@ extern tree cilk_dot (tree, int, bool);
extern void cilk_init_builtins (void);
extern void gimplify_cilk_sync (tree *, gimple_seq *);
extern tree cilk_call_setjmp (tree);
+
/* Returns true if Cilk Plus is enabled and if F->cilk_frame_decl is not
NULL_TREE. */
@@ -99,4 +100,5 @@ fn_contains_cilk_spawn_p (function *f)
return (flag_enable_cilkplus
&& (f->calls_cilk_spawn || f->cilk_frame_decl != NULL_TREE));
}
+
#endif
diff --git a/gcc/combine-stack-adj.c b/gcc/combine-stack-adj.c
index 5ca131f346a..5c897cf106d 100644
--- a/gcc/combine-stack-adj.c
+++ b/gcc/combine-stack-adj.c
@@ -95,7 +95,7 @@ combine_stack_adjustments (void)
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
combine_stack_adjustments_for_block (bb);
}
diff --git a/gcc/combine.c b/gcc/combine.c
index 0f4030e53ce..f09533e6e94 100644
--- a/gcc/combine.c
+++ b/gcc/combine.c
@@ -960,7 +960,7 @@ delete_noop_moves (void)
rtx insn, next;
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
{
@@ -997,7 +997,7 @@ create_log_links (void)
usage -- these are taken from original flow.c did. Don't ask me why it is
done this way; I don't know and if it works, I don't want to know. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
FOR_BB_INSNS_REVERSE (bb, insn)
{
@@ -1160,7 +1160,7 @@ combine_instructions (rtx f, unsigned int nregs)
last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
create_log_links ();
- FOR_EACH_BB (this_basic_block)
+ FOR_EACH_BB_FN (this_basic_block, cfun)
{
optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
last_call_luid = 0;
@@ -1211,7 +1211,7 @@ combine_instructions (rtx f, unsigned int nregs)
setup_incoming_promotions (first);
last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
- FOR_EACH_BB (this_basic_block)
+ FOR_EACH_BB_FN (this_basic_block, cfun)
{
rtx last_combined_insn = NULL_RTX;
optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
@@ -8022,7 +8022,7 @@ force_to_mode (rtx x, enum machine_mode mode, unsigned HOST_WIDE_INT mask,
if (code == CALL || code == ASM_OPERANDS || code == CLOBBER)
return x;
- /* We want to perform the operation is its present mode unless we know
+ /* We want to perform the operation in its present mode unless we know
that the operation is valid in MODE, in which case we do the operation
in MODE. */
op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x))
@@ -8453,9 +8453,10 @@ force_to_mode (rtx x, enum machine_mode mode, unsigned HOST_WIDE_INT mask,
gen_int_mode (mask, GET_MODE (x)),
XEXP (x, 1));
if (temp && CONST_INT_P (temp))
- SUBST (XEXP (x, 0),
- force_to_mode (XEXP (x, 0), GET_MODE (x),
- INTVAL (temp), next_select));
+ x = simplify_gen_binary (code, GET_MODE (x),
+ force_to_mode (XEXP (x, 0), GET_MODE (x),
+ INTVAL (temp), next_select),
+ XEXP (x, 1));
}
break;
@@ -8523,14 +8524,16 @@ force_to_mode (rtx x, enum machine_mode mode, unsigned HOST_WIDE_INT mask,
/* We have no way of knowing if the IF_THEN_ELSE can itself be
written in a narrower mode. We play it safe and do not do so. */
- SUBST (XEXP (x, 1),
- gen_lowpart_or_truncate (GET_MODE (x),
- force_to_mode (XEXP (x, 1), mode,
- mask, next_select)));
- SUBST (XEXP (x, 2),
- gen_lowpart_or_truncate (GET_MODE (x),
- force_to_mode (XEXP (x, 2), mode,
- mask, next_select)));
+ op0 = gen_lowpart_or_truncate (GET_MODE (x),
+ force_to_mode (XEXP (x, 1), mode,
+ mask, next_select));
+ op1 = gen_lowpart_or_truncate (GET_MODE (x),
+ force_to_mode (XEXP (x, 2), mode,
+ mask, next_select));
+ if (op0 != XEXP (x, 1) || op1 != XEXP (x, 2))
+ x = simplify_gen_ternary (IF_THEN_ELSE, GET_MODE (x),
+ GET_MODE (XEXP (x, 0)), XEXP (x, 0),
+ op0, op1);
break;
default:
diff --git a/gcc/common.opt b/gcc/common.opt
index 9ece6832467..0cd1fddd4ad 100644
--- a/gcc/common.opt
+++ b/gcc/common.opt
@@ -2112,11 +2112,18 @@ foptimize-strlen
Common Report Var(flag_optimize_strlen) Optimization
Enable string length optimizations on trees
-fisolate-erroneous-paths
-Common Report Var(flag_isolate_erroneous_paths) Optimization
-Detect paths which trigger erroneous or undefined behaviour. Isolate those
-paths from the main control flow and turn the statement with erroneous or
-undefined behaviour into a trap.
+fisolate-erroneous-paths-dereference
+Common Report Var(flag_isolate_erroneous_paths_dereference) Optimization
+Detect paths which trigger erroneous or undefined behaviour due to
+dereferencing a NULL pointer. Isolate those paths from the main control
+flow and turn the statement with erroneous or undefined behaviour into a trap.
+
+fisolate-erroneous-paths-attribute
+Common Report Var(flag_isolate_erroneous_paths_attribute) Optimization
+Detect paths which trigger erroneous or undefined behaviour due a NULL value
+being used in a way which is forbidden by a returns_nonnull or nonnull
+attribute. Isolate those paths from the main control flow and turn the
+statement with erroneous or undefined behaviour into a trap.
ftree-loop-distribution
Common Report Var(flag_tree_loop_distribution) Optimization
diff --git a/gcc/common/config/sh/sh-common.c b/gcc/common/config/sh/sh-common.c
index f7fb9f5641c..cd4295a1138 100644
--- a/gcc/common/config/sh/sh-common.c
+++ b/gcc/common/config/sh/sh-common.c
@@ -34,7 +34,6 @@ static const struct default_options sh_option_optimization_table[] =
{ OPT_LEVELS_1_PLUS_SPEED_ONLY, OPT_mdiv_, "inv:minlat", 1 },
{ OPT_LEVELS_SIZE, OPT_mdiv_, SH_DIV_STR_FOR_SIZE, 1 },
{ OPT_LEVELS_0_ONLY, OPT_mdiv_, "", 1 },
- { OPT_LEVELS_SIZE, OPT_mcbranchdi, NULL, 0 },
/* We can't meaningfully test TARGET_SHMEDIA here, because -m
options haven't been parsed yet, hence we'd read only the
default. sh_target_reg_class will return NO_REGS if this is
diff --git a/gcc/conditions.h b/gcc/conditions.h
index f6acc4f05f8..0798bc62a5e 100644
--- a/gcc/conditions.h
+++ b/gcc/conditions.h
@@ -51,7 +51,7 @@ along with GCC; see the file COPYING3. If not see
#define CC_STATUS_MDEP_INIT 0
#endif
-typedef struct {int flags; rtx value1, value2; CC_STATUS_MDEP mdep;} CC_STATUS;
+struct CC_STATUS {int flags; rtx value1, value2; CC_STATUS_MDEP mdep;};
/* While outputting an insn as assembler code,
this is the status BEFORE that insn. */
diff --git a/gcc/config.gcc b/gcc/config.gcc
index 1f20f18aa2c..8464d8fdfee 100644
--- a/gcc/config.gcc
+++ b/gcc/config.gcc
@@ -676,11 +676,15 @@ case ${target} in
native_system_header_dir=/include
;;
esac
- # glibc / uclibc / bionic switch.
+ # Linux C libraries selection switch: glibc / uclibc / bionic.
# uclibc and bionic aren't usable for GNU/Hurd and neither for GNU/k*BSD.
case $target in
*linux*)
- extra_options="$extra_options linux.opt";;
+ tm_p_file="${tm_p_file} linux-protos.h"
+ tmake_file="${tmake_file} t-linux"
+ extra_objs="${extra_objs} linux.o"
+ extra_options="${extra_options} linux.opt"
+ ;;
esac
case $target in
*-*-*android*)
@@ -698,16 +702,6 @@ case ${target} in
default_use_cxa_atexit=yes
use_gcc_tgmath=no
use_gcc_stdint=wrap
- # Add Android userspace support to Linux targets.
- case $target in
- *linux*)
- tm_p_file="${tm_p_file} linux-protos.h"
- tmake_file="${tmake_file} t-linux-android"
- tm_file="$tm_file linux-android.h"
- extra_options="$extra_options linux-android.opt"
- extra_objs="$extra_objs linux-android.o"
- ;;
- esac
# Enable compilation for Android by default for *android* targets.
case $target in
*-*-*android*)
@@ -993,6 +987,7 @@ arm*-*-netbsdelf*)
;;
arm*-*-linux-*) # ARM GNU/Linux with ELF
tm_file="dbxelf.h elfos.h gnu-user.h linux.h linux-android.h glibc-stdint.h arm/elf.h arm/linux-gas.h arm/linux-elf.h"
+ extra_options="${extra_options} linux-android.opt"
case $target in
arm*b-*-linux*)
tm_defines="${tm_defines} TARGET_BIG_ENDIAN_DEFAULT=1"
@@ -1000,7 +995,6 @@ arm*-*-linux-*) # ARM GNU/Linux with ELF
esac
tmake_file="${tmake_file} arm/t-arm arm/t-arm-elf arm/t-bpabi arm/t-linux-eabi"
tm_file="$tm_file arm/bpabi.h arm/linux-eabi.h arm/aout.h arm/arm.h"
- extra_objs="$extra_objs linux-android.o"
# Define multilib configuration for arm-linux-androideabi.
case ${target} in
*-androideabi)
@@ -1074,7 +1068,7 @@ bfin*-uclinux*)
;;
bfin*-linux-uclibc*)
tm_file="${tm_file} dbxelf.h elfos.h bfin/elf.h gnu-user.h linux.h glibc-stdint.h bfin/linux.h ./linux-sysroot-suffix.h"
- tmake_file="bfin/t-bfin-linux t-slibgcc t-linux-android"
+ tmake_file="bfin/t-bfin-linux t-slibgcc t-linux"
use_collect2=no
;;
bfin*-rtems*)
@@ -1109,7 +1103,7 @@ cris-*-elf | cris-*-none)
crisv32-*-linux* | cris-*-linux*)
tm_file="dbxelf.h elfos.h ${tm_file} gnu-user.h linux.h glibc-stdint.h cris/linux.h"
# We need to avoid using t-linux, so override default tmake_file
- tmake_file="cris/t-cris cris/t-linux t-slibgcc t-linux-android"
+ tmake_file="cris/t-cris cris/t-linux t-slibgcc t-linux"
extra_options="${extra_options} cris/linux.opt"
case $target in
cris-*-*)
@@ -1373,7 +1367,8 @@ i[34567]86-*-linux* | i[34567]86-*-kfreebsd*-gnu | i[34567]86-*-knetbsd*-gnu | i
tm_file="${tm_file} i386/unix.h i386/att.h dbxelf.h elfos.h gnu-user.h glibc-stdint.h"
case ${target} in
i[34567]86-*-linux*)
- tm_file="${tm_file} linux.h"
+ tm_file="${tm_file} linux.h linux-android.h"
+ extra_options="${extra_options} linux-android.opt"
# Assume modern glibc
default_gnu_indirect_function=yes
if test x$enable_targets = xall; then
@@ -1398,7 +1393,7 @@ i[34567]86-*-linux* | i[34567]86-*-kfreebsd*-gnu | i[34567]86-*-knetbsd*-gnu | i
TM_MULTILIB_CONFIG=`echo $TM_MULTILIB_CONFIG | sed 's/^,//'`
need_64bit_isa=yes
case X"${with_cpu}" in
- Xgeneric|Xatom|Xslm|Xcore2|Xcorei7|Xcorei7-avx|Xnocona|Xx86-64|Xbdver4|Xbdver3|Xbdver2|Xbdver1|Xbtver2|Xbtver1|Xamdfam10|Xbarcelona|Xk8|Xopteron|Xathlon64|Xathlon-fx|Xathlon64-sse3|Xk8-sse3|Xopteron-sse3)
+ Xgeneric|Xintel|Xatom|Xslm|Xcore2|Xcorei7|Xcorei7-avx|Xnocona|Xx86-64|Xbdver4|Xbdver3|Xbdver2|Xbdver1|Xbtver2|Xbtver1|Xamdfam10|Xbarcelona|Xk8|Xopteron|Xathlon64|Xathlon-fx|Xathlon64-sse3|Xk8-sse3|Xopteron-sse3)
;;
X)
if test x$with_cpu_64 = x; then
@@ -1407,7 +1402,7 @@ i[34567]86-*-linux* | i[34567]86-*-kfreebsd*-gnu | i[34567]86-*-knetbsd*-gnu | i
;;
*)
echo "Unsupported CPU used in --with-cpu=$with_cpu, supported values:" 1>&2
- echo "generic atom slm core2 corei7 corei7-avx nocona x86-64 bdver4 bdver3 bdver2 bdver1 btver2 btver1 amdfam10 barcelona k8 opteron athlon64 athlon-fx athlon64-sse3 k8-sse3 opteron-sse3" 1>&2
+ echo "generic intel atom slm core2 corei7 corei7-avx nocona x86-64 bdver4 bdver3 bdver2 bdver1 btver2 btver1 amdfam10 barcelona k8 opteron athlon64 athlon-fx athlon64-sse3 k8-sse3 opteron-sse3" 1>&2
exit 1
;;
esac
@@ -1434,7 +1429,8 @@ x86_64-*-linux* | x86_64-*-kfreebsd*-gnu | x86_64-*-knetbsd*-gnu)
i386/x86-64.h i386/gnu-user-common.h i386/gnu-user64.h"
case ${target} in
x86_64-*-linux*)
- tm_file="${tm_file} linux.h i386/linux-common.h i386/linux64.h"
+ tm_file="${tm_file} linux.h linux-android.h i386/linux-common.h i386/linux64.h"
+ extra_options="${extra_options} linux-android.opt"
# Assume modern glibc
default_gnu_indirect_function=yes
;;
@@ -1519,7 +1515,7 @@ i[34567]86-*-solaris2* | x86_64-*-solaris2.1[0-9]*)
tmake_file="$tmake_file i386/t-sol2-64"
need_64bit_isa=yes
case X"${with_cpu}" in
- Xgeneric|Xatom|Xslm|Xcore2|Xcorei7|Xcorei7-avx|Xnocona|Xx86-64|Xbdver4|Xbdver3|Xbdver2|Xbdver1|Xbtver2|Xbtver1|Xamdfam10|Xbarcelona|Xk8|Xopteron|Xathlon64|Xathlon-fx|Xathlon64-sse3|Xk8-sse3|Xopteron-sse3)
+ Xgeneric|Xintel|Xatom|Xslm|Xcore2|Xcorei7|Xcorei7-avx|Xnocona|Xx86-64|Xbdver4|Xbdver3|Xbdver2|Xbdver1|Xbtver2|Xbtver1|Xamdfam10|Xbarcelona|Xk8|Xopteron|Xathlon64|Xathlon-fx|Xathlon64-sse3|Xk8-sse3|Xopteron-sse3)
;;
X)
if test x$with_cpu_64 = x; then
@@ -1528,7 +1524,7 @@ i[34567]86-*-solaris2* | x86_64-*-solaris2.1[0-9]*)
;;
*)
echo "Unsupported CPU used in --with-cpu=$with_cpu, supported values:" 1>&2
- echo "generic atom slm core2 corei7 corei7-avx nocona x86-64 bdver4 bdver3 bdver2 bdver1 btver2 btver1 amdfam10 barcelona k8 opteron athlon64 athlon-fx athlon64-sse3 k8-sse3 opteron-sse3" 1>&2
+ echo "generic intel atom slm core2 corei7 corei7-avx nocona x86-64 bdver4 bdver3 bdver2 bdver1 btver2 btver1 amdfam10 barcelona k8 opteron athlon64 athlon-fx athlon64-sse3 k8-sse3 opteron-sse3" 1>&2
exit 1
;;
esac
@@ -1604,7 +1600,7 @@ i[34567]86-*-mingw* | x86_64-*-mingw*)
if test x$enable_targets = xall; then
tm_defines="${tm_defines} TARGET_BI_ARCH=1"
case X"${with_cpu}" in
- Xgeneric|Xatom|Xslm|Xcore2|Xcorei7|Xcorei7-avx|Xnocona|Xx86-64|Xbdver4|Xbdver3|Xbdver2|Xbdver1|Xbtver2|Xbtver1|Xamdfam10|Xbarcelona|Xk8|Xopteron|Xathlon64|Xathlon-fx|Xathlon64-sse3|Xk8-sse3|Xopteron-sse3)
+ Xgeneric|Xintel|Xatom|Xslm|Xcore2|Xcorei7|Xcorei7-avx|Xnocona|Xx86-64|Xbdver4|Xbdver3|Xbdver2|Xbdver1|Xbtver2|Xbtver1|Xamdfam10|Xbarcelona|Xk8|Xopteron|Xathlon64|Xathlon-fx|Xathlon64-sse3|Xk8-sse3|Xopteron-sse3)
;;
X)
if test x$with_cpu_64 = x; then
@@ -1613,7 +1609,7 @@ i[34567]86-*-mingw* | x86_64-*-mingw*)
;;
*)
echo "Unsupported CPU used in --with-cpu=$with_cpu, supported values:" 1>&2
- echo "generic atom slm core2 corei7 Xcorei7-avx nocona x86-64 bdver4 bdver3 bdver2 bdver1 btver2 btver1 amdfam10 barcelona k8 opteron athlon64 athlon-fx athlon64-sse3 k8-sse3 opteron-sse3" 1>&2
+ echo "generic intel atom slm core2 corei7 Xcorei7-avx nocona x86-64 bdver4 bdver3 bdver2 bdver1 btver2 btver1 amdfam10 barcelona k8 opteron athlon64 athlon-fx athlon64-sse3 k8-sse3 opteron-sse3" 1>&2
exit 1
;;
esac
@@ -1887,6 +1883,14 @@ microblaze*-linux*)
tmake_file="${tmake_file} microblaze/t-microblaze-linux"
;;
microblaze*-*-rtems*)
+ case $target in
+ microblazeel-*)
+ tm_defines="${tm_defines} TARGET_BIG_ENDIAN_DEFAULT=0"
+ ;;
+ microblaze-*)
+ tm_defines="${tm_defines} TARGET_BIG_ENDIAN_DEFAULT=4321"
+ ;;
+ esac
tm_file="${tm_file} dbxelf.h"
tm_file="${tm_file} microblaze/rtems.h rtems.h newlib-stdint.h"
c_target_objs="${c_target_objs} microblaze-c.o"
@@ -1914,14 +1918,16 @@ mips*-*-netbsd*) # NetBSD/mips, either endian.
extra_options="${extra_options} netbsd.opt netbsd-elf.opt"
;;
mips*-mti-linux*)
- tm_file="dbxelf.h elfos.h gnu-user.h linux.h glibc-stdint.h ${tm_file} mips/gnu-user.h mips/gnu-user64.h mips/linux64.h mips/linux-common.h mips/mti-linux.h"
+ tm_file="dbxelf.h elfos.h gnu-user.h linux.h linux-android.h glibc-stdint.h ${tm_file} mips/gnu-user.h mips/gnu-user64.h mips/linux64.h mips/linux-common.h mips/mti-linux.h"
+ extra_options="${extra_options} linux-android.opt"
tmake_file="${tmake_file} mips/t-mti-linux"
tm_defines="${tm_defines} MIPS_ISA_DEFAULT=33 MIPS_ABI_DEFAULT=ABI_32"
gnu_ld=yes
gas=yes
;;
mips64*-*-linux* | mipsisa64*-*-linux*)
- tm_file="dbxelf.h elfos.h gnu-user.h linux.h glibc-stdint.h ${tm_file} mips/gnu-user.h mips/gnu-user64.h mips/linux64.h mips/linux-common.h"
+ tm_file="dbxelf.h elfos.h gnu-user.h linux.h linux-android.h glibc-stdint.h ${tm_file} mips/gnu-user.h mips/gnu-user64.h mips/linux64.h mips/linux-common.h"
+ extra_options="${extra_options} linux-android.opt"
tmake_file="${tmake_file} mips/t-linux64"
tm_defines="${tm_defines} MIPS_ABI_DEFAULT=ABI_N32"
case ${target} in
@@ -1941,7 +1947,8 @@ mips64*-*-linux* | mipsisa64*-*-linux*)
gas=yes
;;
mips*-*-linux*) # Linux MIPS, either endian.
- tm_file="dbxelf.h elfos.h gnu-user.h linux.h glibc-stdint.h ${tm_file} mips/gnu-user.h mips/linux.h"
+ tm_file="dbxelf.h elfos.h gnu-user.h linux.h linux-android.h glibc-stdint.h ${tm_file} mips/gnu-user.h mips/linux.h"
+ extra_options="${extra_options} linux-android.opt"
if test x$enable_targets = xall; then
tm_file="${tm_file} mips/gnu-user64.h mips/linux64.h"
tmake_file="${tmake_file} mips/t-linux64"
@@ -2382,7 +2389,7 @@ s390-*-linux*)
s390x-*-linux*)
default_gnu_indirect_function=yes
tm_file="s390/s390x.h s390/s390.h dbxelf.h elfos.h gnu-user.h linux.h glibc-stdint.h s390/linux.h"
- tm_p_file=s390/s390-protos.h
+ tm_p_file="linux-protos.h s390/s390-protos.h"
md_file=s390/s390.md
extra_modes=s390/s390-modes.def
out_file=s390/s390.c
@@ -3664,7 +3671,7 @@ case "${target}" in
esac
# OK
;;
- "" | x86-64 | generic | native \
+ "" | x86-64 | generic | intel | native \
| k8 | k8-sse3 | athlon64 | athlon64-sse3 | opteron \
| opteron-sse3 | athlon-fx | bdver4 | bdver3 | bdver2 \
| bdver1 | btver2 | btver1 | amdfam10 | barcelona \
diff --git a/gcc/config.in b/gcc/config.in
index 7bb5be8fdf1..3aefa06708e 100644
--- a/gcc/config.in
+++ b/gcc/config.in
@@ -411,6 +411,12 @@
#endif
+/* Define if your assembler supports the -mabi option. */
+#ifndef USED_FOR_TARGET
+#undef HAVE_AS_MABI_OPTION
+#endif
+
+
/* Define if your assembler supports mfcr field. */
#ifndef USED_FOR_TARGET
#undef HAVE_AS_MFCRF
diff --git a/gcc/config/aarch64/aarch64-elf.h b/gcc/config/aarch64/aarch64-elf.h
index 4757d22473b..a66c3dbe9a1 100644
--- a/gcc/config/aarch64/aarch64-elf.h
+++ b/gcc/config/aarch64/aarch64-elf.h
@@ -134,13 +134,19 @@
" %{!mbig-endian:%{!mlittle-endian:" ENDIAN_SPEC "}}" \
" %{!mabi=*:" ABI_SPEC "}"
+#ifdef HAVE_AS_MABI_OPTION
+#define ASM_MABI_SPEC "%{mabi=*:-mabi=%*}"
+#else
+#define ASM_MABI_SPEC "%{mabi=lp64:}"
+#endif
+
#ifndef ASM_SPEC
#define ASM_SPEC "\
%{mbig-endian:-EB} \
%{mlittle-endian:-EL} \
%{mcpu=*:-mcpu=%*} \
-%{march=*:-march=%*} \
-%{mabi=*:-mabi=%*}"
+%{march=*:-march=%*}" \
+ASM_MABI_SPEC
#endif
#undef TYPE_OPERAND_FMT
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index 28080fdca3d..1df931d08b6 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -5187,6 +5187,13 @@ aarch64_override_options (void)
aarch64_parse_tune ();
}
+#ifndef HAVE_AS_MABI_OPTION
+ /* The compiler may have been configured with 2.23.* binutils, which does
+ not have support for ILP32. */
+ if (TARGET_ILP32)
+ error ("Assembler does not support -mabi=ilp32");
+#endif
+
initialize_aarch64_code_model ();
aarch64_build_bitmask_table ();
diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
index 8b3dbd7550e..c83622d6cad 100644
--- a/gcc/config/aarch64/aarch64.md
+++ b/gcc/config/aarch64/aarch64.md
@@ -290,6 +290,12 @@
[(set_attr "type" "no_insn")]
)
+(define_insn "trap"
+ [(trap_if (const_int 1) (const_int 8))]
+ ""
+ "brk #1000"
+ [(set_attr "type" "trap")])
+
(define_expand "prologue"
[(clobber (const_int 0))]
""
diff --git a/gcc/config/aarch64/t-aarch64 b/gcc/config/aarch64/t-aarch64
index 9f8d8cd6e0d..98a30d86acd 100644
--- a/gcc/config/aarch64/t-aarch64
+++ b/gcc/config/aarch64/t-aarch64
@@ -41,5 +41,5 @@ aarch-common.o: $(srcdir)/config/arm/aarch-common.c $(CONFIG_H) $(SYSTEM_H) \
$(srcdir)/config/arm/aarch-common.c
comma=,
-MULTILIB_OPTIONS = $(patsubst %, mabi=%, $(subst $(comma), ,$(TM_MULTILIB_CONFIG)))
+MULTILIB_OPTIONS = $(subst $(comma),/, $(patsubst %, mabi=%, $(subst $(comma),$(comma)mabi=,$(TM_MULTILIB_CONFIG))))
MULTILIB_DIRNAMES = $(subst $(comma), ,$(TM_MULTILIB_CONFIG))
diff --git a/gcc/config/alpha/linux.h b/gcc/config/alpha/linux.h
index da5842fda85..f1e058d8f39 100644
--- a/gcc/config/alpha/linux.h
+++ b/gcc/config/alpha/linux.h
@@ -70,7 +70,7 @@ along with GCC; see the file COPYING3. If not see
/* Determine what functions are present at the runtime;
this includes full c99 runtime and sincos. */
#undef TARGET_LIBC_HAS_FUNCTION
-#define TARGET_LIBC_HAS_FUNCTION linux_android_libc_has_function
+#define TARGET_LIBC_HAS_FUNCTION linux_libc_has_function
#define TARGET_POSIX_IO
diff --git a/gcc/config/arc/arc.h b/gcc/config/arc/arc.h
index cc49c553c55..88102f0a532 100644
--- a/gcc/config/arc/arc.h
+++ b/gcc/config/arc/arc.h
@@ -303,9 +303,6 @@ along with GCC; see the file COPYING3. If not see
numbered. */
#define WORDS_BIG_ENDIAN (TARGET_BIG_ENDIAN)
-/* Number of bits in an addressable storage unit. */
-#define BITS_PER_UNIT 8
-
/* Width in bits of a "word", which is the contents of a machine register.
Note that this is not necessarily the width of data type `int';
if using 16-bit ints on a 68000, this would still be 32.
diff --git a/gcc/config/arm/arm-cores.def b/gcc/config/arm/arm-cores.def
index 119dc336fe1..e7cea63beae 100644
--- a/gcc/config/arm/arm-cores.def
+++ b/gcc/config/arm/arm-cores.def
@@ -128,6 +128,7 @@ ARM_CORE("cortex-a5", cortexa5, 7A, FL_LDSCHED, cortex_a5)
ARM_CORE("cortex-a7", cortexa7, 7A, FL_LDSCHED | FL_THUMB_DIV | FL_ARM_DIV, cortex_a7)
ARM_CORE("cortex-a8", cortexa8, 7A, FL_LDSCHED, cortex)
ARM_CORE("cortex-a9", cortexa9, 7A, FL_LDSCHED, cortex_a9)
+ARM_CORE("cortex-a12", cortexa12, 7A, FL_LDSCHED | FL_THUMB_DIV | FL_ARM_DIV, cortex_a12)
ARM_CORE("cortex-a15", cortexa15, 7A, FL_LDSCHED | FL_THUMB_DIV | FL_ARM_DIV, cortex_a15)
ARM_CORE("cortex-a53", cortexa53, 8A, FL_LDSCHED, cortex_a53)
ARM_CORE("cortex-r4", cortexr4, 7R, FL_LDSCHED, cortex)
diff --git a/gcc/config/arm/arm-tables.opt b/gcc/config/arm/arm-tables.opt
index bf206959569..b3e7a7c62d7 100644
--- a/gcc/config/arm/arm-tables.opt
+++ b/gcc/config/arm/arm-tables.opt
@@ -247,6 +247,9 @@ EnumValue
Enum(processor_type) String(cortex-a9) Value(cortexa9)
EnumValue
+Enum(processor_type) String(cortex-a12) Value(cortexa12)
+
+EnumValue
Enum(processor_type) String(cortex-a15) Value(cortexa15)
EnumValue
diff --git a/gcc/config/arm/arm-tune.md b/gcc/config/arm/arm-tune.md
index e4da0988b22..e10d0aa9544 100644
--- a/gcc/config/arm/arm-tune.md
+++ b/gcc/config/arm/arm-tune.md
@@ -1,5 +1,5 @@
;; -*- buffer-read-only: t -*-
;; Generated automatically by gentune.sh from arm-cores.def
(define_attr "tune"
- "arm2,arm250,arm3,arm6,arm60,arm600,arm610,arm620,arm7,arm7d,arm7di,arm70,arm700,arm700i,arm710,arm720,arm710c,arm7100,arm7500,arm7500fe,arm7m,arm7dm,arm7dmi,arm8,arm810,strongarm,strongarm110,strongarm1100,strongarm1110,fa526,fa626,arm7tdmi,arm7tdmis,arm710t,arm720t,arm740t,arm9,arm9tdmi,arm920,arm920t,arm922t,arm940t,ep9312,arm10tdmi,arm1020t,arm9e,arm946es,arm966es,arm968es,arm10e,arm1020e,arm1022e,xscale,iwmmxt,iwmmxt2,fa606te,fa626te,fmp626,fa726te,arm926ejs,arm1026ejs,arm1136js,arm1136jfs,arm1176jzs,arm1176jzfs,mpcorenovfp,mpcore,arm1156t2s,arm1156t2fs,genericv7a,cortexa5,cortexa7,cortexa8,cortexa9,cortexa15,cortexa53,cortexr4,cortexr4f,cortexr5,cortexr7,cortexm4,cortexm3,cortexm1,cortexm0,cortexm0plus,marvell_pj4"
+ "arm2,arm250,arm3,arm6,arm60,arm600,arm610,arm620,arm7,arm7d,arm7di,arm70,arm700,arm700i,arm710,arm720,arm710c,arm7100,arm7500,arm7500fe,arm7m,arm7dm,arm7dmi,arm8,arm810,strongarm,strongarm110,strongarm1100,strongarm1110,fa526,fa626,arm7tdmi,arm7tdmis,arm710t,arm720t,arm740t,arm9,arm9tdmi,arm920,arm920t,arm922t,arm940t,ep9312,arm10tdmi,arm1020t,arm9e,arm946es,arm966es,arm968es,arm10e,arm1020e,arm1022e,xscale,iwmmxt,iwmmxt2,fa606te,fa626te,fmp626,fa726te,arm926ejs,arm1026ejs,arm1136js,arm1136jfs,arm1176jzs,arm1176jzfs,mpcorenovfp,mpcore,arm1156t2s,arm1156t2fs,genericv7a,cortexa5,cortexa7,cortexa8,cortexa9,cortexa12,cortexa15,cortexa53,cortexr4,cortexr4f,cortexr5,cortexr7,cortexm4,cortexm3,cortexm1,cortexm0,cortexm0plus,marvell_pj4"
(const (symbol_ref "((enum attr_tune) arm_tune)")))
diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c
index 7251ebdab3c..ee0de684956 100644
--- a/gcc/config/arm/arm.c
+++ b/gcc/config/arm/arm.c
@@ -1164,6 +1164,106 @@ const struct cpu_cost_table cortexa7_extra_costs =
}
};
+const struct cpu_cost_table cortexa12_extra_costs =
+{
+ /* ALU */
+ {
+ 0, /* Arith. */
+ 0, /* Logical. */
+ 0, /* Shift. */
+ COSTS_N_INSNS (1), /* Shift_reg. */
+ COSTS_N_INSNS (1), /* Arith_shift. */
+ COSTS_N_INSNS (1), /* Arith_shift_reg. */
+ COSTS_N_INSNS (1), /* Log_shift. */
+ COSTS_N_INSNS (1), /* Log_shift_reg. */
+ 0, /* Extend. */
+ COSTS_N_INSNS (1), /* Extend_arith. */
+ 0, /* Bfi. */
+ COSTS_N_INSNS (1), /* Bfx. */
+ COSTS_N_INSNS (1), /* Clz. */
+ 0, /* non_exec. */
+ true /* non_exec_costs_exec. */
+ },
+ /* MULT SImode */
+ {
+ {
+ COSTS_N_INSNS (2), /* Simple. */
+ COSTS_N_INSNS (3), /* Flag_setting. */
+ COSTS_N_INSNS (2), /* Extend. */
+ COSTS_N_INSNS (3), /* Add. */
+ COSTS_N_INSNS (2), /* Extend_add. */
+ COSTS_N_INSNS (18) /* Idiv. */
+ },
+ /* MULT DImode */
+ {
+ 0, /* Simple (N/A). */
+ 0, /* Flag_setting (N/A). */
+ COSTS_N_INSNS (3), /* Extend. */
+ 0, /* Add (N/A). */
+ COSTS_N_INSNS (3), /* Extend_add. */
+ 0 /* Idiv (N/A). */
+ }
+ },
+ /* LD/ST */
+ {
+ COSTS_N_INSNS (3), /* Load. */
+ COSTS_N_INSNS (3), /* Load_sign_extend. */
+ COSTS_N_INSNS (3), /* Ldrd. */
+ COSTS_N_INSNS (3), /* Ldm_1st. */
+ 1, /* Ldm_regs_per_insn_1st. */
+ 2, /* Ldm_regs_per_insn_subsequent. */
+ COSTS_N_INSNS (3), /* Loadf. */
+ COSTS_N_INSNS (3), /* Loadd. */
+ 0, /* Load_unaligned. */
+ 0, /* Store. */
+ 0, /* Strd. */
+ 0, /* Stm_1st. */
+ 1, /* Stm_regs_per_insn_1st. */
+ 2, /* Stm_regs_per_insn_subsequent. */
+ COSTS_N_INSNS (2), /* Storef. */
+ COSTS_N_INSNS (2), /* Stored. */
+ 0 /* Store_unaligned. */
+ },
+ {
+ /* FP SFmode */
+ {
+ COSTS_N_INSNS (17), /* Div. */
+ COSTS_N_INSNS (4), /* Mult. */
+ COSTS_N_INSNS (8), /* Mult_addsub. */
+ COSTS_N_INSNS (8), /* Fma. */
+ COSTS_N_INSNS (4), /* Addsub. */
+ COSTS_N_INSNS (2), /* Fpconst. */
+ COSTS_N_INSNS (2), /* Neg. */
+ COSTS_N_INSNS (2), /* Compare. */
+ COSTS_N_INSNS (4), /* Widen. */
+ COSTS_N_INSNS (4), /* Narrow. */
+ COSTS_N_INSNS (4), /* Toint. */
+ COSTS_N_INSNS (4), /* Fromint. */
+ COSTS_N_INSNS (4) /* Roundint. */
+ },
+ /* FP DFmode */
+ {
+ COSTS_N_INSNS (31), /* Div. */
+ COSTS_N_INSNS (4), /* Mult. */
+ COSTS_N_INSNS (8), /* Mult_addsub. */
+ COSTS_N_INSNS (8), /* Fma. */
+ COSTS_N_INSNS (4), /* Addsub. */
+ COSTS_N_INSNS (2), /* Fpconst. */
+ COSTS_N_INSNS (2), /* Neg. */
+ COSTS_N_INSNS (2), /* Compare. */
+ COSTS_N_INSNS (4), /* Widen. */
+ COSTS_N_INSNS (4), /* Narrow. */
+ COSTS_N_INSNS (4), /* Toint. */
+ COSTS_N_INSNS (4), /* Fromint. */
+ COSTS_N_INSNS (4) /* Roundint. */
+ }
+ },
+ /* Vector */
+ {
+ COSTS_N_INSNS (1) /* Alu. */
+ }
+};
+
const struct cpu_cost_table cortexa15_extra_costs =
{
/* ALU */
@@ -1563,6 +1663,22 @@ const struct tune_params arm_cortex_a9_tune =
false /* Prefer Neon for 64-bits bitops. */
};
+const struct tune_params arm_cortex_a12_tune =
+{
+ arm_9e_rtx_costs,
+ &cortexa12_extra_costs,
+ NULL,
+ 1, /* Constant limit. */
+ 5, /* Max cond insns. */
+ ARM_PREFETCH_BENEFICIAL(4,32,32),
+ false, /* Prefer constant pool. */
+ arm_default_branch_cost,
+ true, /* Prefer LDRD/STRD. */
+ {true, true}, /* Prefer non short circuit. */
+ &arm_default_vec_cost, /* Vectorizer costs. */
+ false /* Prefer Neon for 64-bits bitops. */
+};
+
/* armv7m tuning. On Cortex-M4 cores for example, MOVW/MOVT take a single
cycle to execute each. An LDR from the constant pool also takes two cycles
to execute, but mildly increases pipelining opportunity (consecutive
@@ -15169,28 +15285,37 @@ operands_ok_ldrd_strd (rtx rt, rtx rt2, rtx rn, HOST_WIDE_INT offset,
}
/* Helper for gen_operands_ldrd_strd. Returns true iff the memory
- operand ADDR is an immediate offset from the base register and is
- not volatile, in which case it sets BASE and OFFSET
- accordingly. */
-bool
-mem_ok_for_ldrd_strd (rtx addr, rtx *base, rtx *offset)
+ operand MEM's address contains an immediate offset from the base
+ register and has no side effects, in which case it sets BASE and
+ OFFSET accordingly. */
+static bool
+mem_ok_for_ldrd_strd (rtx mem, rtx *base, rtx *offset)
{
+ rtx addr;
+
+ gcc_assert (base != NULL && offset != NULL);
+
/* TODO: Handle more general memory operand patterns, such as
PRE_DEC and PRE_INC. */
- /* Convert a subreg of mem into mem itself. */
- if (GET_CODE (addr) == SUBREG)
- addr = alter_subreg (&addr, true);
-
- gcc_assert (MEM_P (addr));
+ if (side_effects_p (mem))
+ return false;
- /* Don't modify volatile memory accesses. */
- if (MEM_VOLATILE_P (addr))
+ /* Can't deal with subregs. */
+ if (GET_CODE (mem) == SUBREG)
return false;
+ gcc_assert (MEM_P (mem));
+
*offset = const0_rtx;
- addr = XEXP (addr, 0);
+ addr = XEXP (mem, 0);
+
+ /* If addr isn't valid for DImode, then we can't handle it. */
+ if (!arm_legitimate_address_p (DImode, addr,
+ reload_in_progress || reload_completed))
+ return false;
+
if (REG_P (addr))
{
*base = addr;
@@ -16548,7 +16673,7 @@ thumb1_reorg (void)
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx dest, src;
rtx pat, op0, set = NULL;
@@ -16626,7 +16751,7 @@ thumb2_reorg (void)
compute_bb_for_insn ();
df_analyze ();
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn;
diff --git a/gcc/config/arm/arm.md b/gcc/config/arm/arm.md
index 841c624d485..46fc4422d5c 100644
--- a/gcc/config/arm/arm.md
+++ b/gcc/config/arm/arm.md
@@ -469,7 +469,7 @@
(define_attr "generic_sched" "yes,no"
(const (if_then_else
- (ior (eq_attr "tune" "fa526,fa626,fa606te,fa626te,fmp626,fa726te,arm926ejs,arm1020e,arm1026ejs,arm1136js,arm1136jfs,cortexa5,cortexa7,cortexa8,cortexa9,cortexa15,cortexa53,cortexm4,marvell_pj4")
+ (ior (eq_attr "tune" "fa526,fa626,fa606te,fa626te,fmp626,fa726te,arm926ejs,arm1020e,arm1026ejs,arm1136js,arm1136jfs,cortexa5,cortexa7,cortexa8,cortexa9,cortexa12,cortexa15,cortexa53,cortexm4,marvell_pj4")
(eq_attr "tune_cortexr4" "yes"))
(const_string "no")
(const_string "yes"))))
@@ -477,7 +477,7 @@
(define_attr "generic_vfp" "yes,no"
(const (if_then_else
(and (eq_attr "fpu" "vfp")
- (eq_attr "tune" "!arm1020e,arm1022e,cortexa5,cortexa7,cortexa8,cortexa9,cortexa53,cortexm4,marvell_pj4")
+ (eq_attr "tune" "!arm1020e,arm1022e,cortexa5,cortexa7,cortexa8,cortexa9,cortexa12,cortexa53,cortexm4,marvell_pj4")
(eq_attr "tune_cortexr4" "no"))
(const_string "yes")
(const_string "no"))))
@@ -9927,6 +9927,23 @@
(set_attr "type" "mov_reg")]
)
+(define_insn "trap"
+ [(trap_if (const_int 1) (const_int 0))]
+ ""
+ "*
+ if (TARGET_ARM)
+ return \".inst\\t0xe7f000f0\";
+ else
+ return \".inst\\t0xdeff\";
+ "
+ [(set (attr "length")
+ (if_then_else (eq_attr "is_thumb" "yes")
+ (const_int 2)
+ (const_int 4)))
+ (set_attr "type" "trap")
+ (set_attr "conds" "unconditional")]
+)
+
;; Patterns to allow combination of arithmetic, cond code and shifts
diff --git a/gcc/config/arm/arm.opt b/gcc/config/arm/arm.opt
index 24e5b062c0b..5fbac7becf2 100644
--- a/gcc/config/arm/arm.opt
+++ b/gcc/config/arm/arm.opt
@@ -144,7 +144,7 @@ Target RejectNegative Joined Enum(arm_fpu) Var(arm_fpu_index)
Specify the name of the target floating point hardware/format
mlra
-Target Report Var(arm_lra_flag) Init(0) Save
+Target Report Var(arm_lra_flag) Init(1) Save
Use LRA instead of reload (transitional)
mhard-float
diff --git a/gcc/config/arm/bpabi.h b/gcc/config/arm/bpabi.h
index ff89633d788..b39c4a91a9d 100644
--- a/gcc/config/arm/bpabi.h
+++ b/gcc/config/arm/bpabi.h
@@ -59,6 +59,7 @@
" %{!mlittle-endian:%{march=armv7-a|mcpu=cortex-a5 \
|mcpu=cortex-a7 \
|mcpu=cortex-a8|mcpu=cortex-a9|mcpu=cortex-a15 \
+ |mcpu=cortex-a12 \
|mcpu=marvell-pj4 \
|mcpu=cortex-a53 \
|mcpu=generic-armv7-a \
@@ -72,6 +73,7 @@
" %{mbig-endian:%{march=armv7-a|mcpu=cortex-a5 \
|mcpu=cortex-a7 \
|mcpu=cortex-a8|mcpu=cortex-a9|mcpu=cortex-a15 \
+ |mcpu=cortex-a12 \
|mcpu=cortex-a53 \
|mcpu=marvell-pj4 \
|mcpu=generic-armv7-a \
diff --git a/gcc/config/arm/types.md b/gcc/config/arm/types.md
index 1c4b9e33c7e..6351f080b32 100644
--- a/gcc/config/arm/types.md
+++ b/gcc/config/arm/types.md
@@ -152,6 +152,7 @@
; store2 store 2 words to memory from arm registers.
; store3 store 3 words to memory from arm registers.
; store4 store 4 (or more) words to memory from arm registers.
+; trap cause a trap in the kernel.
; udiv unsigned division.
; umaal unsigned multiply accumulate accumulate long.
; umlal unsigned multiply accumulate long.
@@ -645,6 +646,7 @@
store2,\
store3,\
store4,\
+ trap,\
udiv,\
umaal,\
umlal,\
diff --git a/gcc/config/bfin/bfin.c b/gcc/config/bfin/bfin.c
index 60da450976a..dc8d5b36f1c 100644
--- a/gcc/config/bfin/bfin.c
+++ b/gcc/config/bfin/bfin.c
@@ -3956,7 +3956,7 @@ static void
bfin_gen_bundles (void)
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn, next;
rtx slot[3];
@@ -4035,7 +4035,7 @@ static void
reorder_var_tracking_notes (void)
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn, next;
rtx queue = NULL_RTX;
diff --git a/gcc/config/bfin/bfin.h b/gcc/config/bfin/bfin.h
index cc175062ebc..85ab0b0f5b8 100644
--- a/gcc/config/bfin/bfin.h
+++ b/gcc/config/bfin/bfin.h
@@ -859,9 +859,6 @@ typedef struct {
/* Define this if most significant word of a multiword number is numbered. */
#define WORDS_BIG_ENDIAN 0
-/* number of bits in an addressable storage unit */
-#define BITS_PER_UNIT 8
-
/* Width in bits of a "word", which is the contents of a machine register.
Note that this is not necessarily the width of data type `int';
if using 16-bit ints on a 68000, this would still be 32.
diff --git a/gcc/config/bfin/uclinux.h b/gcc/config/bfin/uclinux.h
index 848515c1bbc..10a52f353e7 100644
--- a/gcc/config/bfin/uclinux.h
+++ b/gcc/config/bfin/uclinux.h
@@ -36,6 +36,3 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
#define TARGET_SUPPORTS_SYNC_CALLS 1
#define SUBTARGET_FDPIC_NOT_SUPPORTED
-
-#undef TARGET_LIBC_HAS_FUNCTION
-#define TARGET_LIBC_HAS_FUNCTION no_c99_libc_has_function
diff --git a/gcc/config/c6x/c6x.c b/gcc/config/c6x/c6x.c
index af310bac8dc..6f80bc8aa4b 100644
--- a/gcc/config/c6x/c6x.c
+++ b/gcc/config/c6x/c6x.c
@@ -4629,7 +4629,7 @@ c6x_gen_bundles (void)
basic_block bb;
rtx insn, next, last_call;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn, next;
/* The machine is eight insns wide. We can have up to six shadow
@@ -5383,7 +5383,7 @@ conditionalize_after_sched (void)
{
basic_block bb;
rtx insn;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
FOR_BB_INSNS (bb, insn)
{
unsigned uid = INSN_UID (insn);
@@ -5959,7 +5959,7 @@ c6x_reorg (void)
if (c6x_flag_schedule_insns2)
{
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if ((bb->flags & BB_DISABLE_SCHEDULE) == 0)
assign_reservations (BB_HEAD (bb), BB_END (bb));
}
diff --git a/gcc/config/c6x/uclinux-elf.h b/gcc/config/c6x/uclinux-elf.h
index fa0937ed268..928c2b9ec06 100644
--- a/gcc/config/c6x/uclinux-elf.h
+++ b/gcc/config/c6x/uclinux-elf.h
@@ -61,6 +61,3 @@
: "=a" (_beg) \
: "0" (_beg), "b" (_end), "b" (_scno)); \
}
-
-#undef TARGET_LIBC_HAS_FUNCTION
-#define TARGET_LIBC_HAS_FUNCTION no_c99_libc_has_function
diff --git a/gcc/config/epiphany/epiphany.md b/gcc/config/epiphany/epiphany.md
index e8756ad8e23..fb7d6301adf 100644
--- a/gcc/config/epiphany/epiphany.md
+++ b/gcc/config/epiphany/epiphany.md
@@ -1787,14 +1787,14 @@
(define_peephole2
[(parallel
- [(set (match_operand:SI 0 "gpr_operand" "=r")
- (logical_op:SI (match_operand:SI 1 "gpr_operand" "r")
- (match_operand:SI 2 "gpr_operand" "%r")))
+ [(set (match_operand:SI 0 "gpr_operand")
+ (logical_op:SI (match_operand:SI 1 "gpr_operand")
+ (match_operand:SI 2 "gpr_operand")))
(clobber (reg:CC CC_REGNUM))])
(parallel
[(set (reg:CC CC_REGNUM)
(compare:CC (and:SI (match_dup 0) (match_dup 0)) (const_int 0)))
- (set (match_operand:SI 3 "gpr_operand" "=r")
+ (set (match_operand:SI 3 "gpr_operand")
(and:SI (match_dup 0) (match_dup 0)))])]
"peep2_reg_dead_p (2, operands[0])"
[(parallel
@@ -1805,14 +1805,14 @@
(define_peephole2
[(parallel
- [(set (match_operand:SI 0 "gpr_operand" "=r")
- (logical_op:SI (match_operand:SI 1 "gpr_operand" "r")
- (match_operand:SI 2 "gpr_operand" "%r")))
+ [(set (match_operand:SI 0 "gpr_operand")
+ (logical_op:SI (match_operand:SI 1 "gpr_operand")
+ (match_operand:SI 2 "gpr_operand")))
(clobber (reg:CC CC_REGNUM))])
(parallel
[(set (reg:CC CC_REGNUM)
(compare:CC (and:SI (match_dup 0) (match_dup 0)) (const_int 0)))
- (set (match_operand:SI 3 "gpr_operand" "=r")
+ (set (match_operand:SI 3 "gpr_operand")
(and:SI (match_dup 0) (match_dup 0)))])]
"peep2_reg_dead_p (2, operands[3])"
[(parallel
@@ -1823,14 +1823,14 @@
(define_peephole2
[(parallel
- [(set (match_operand:SI 0 "gpr_operand" "=r")
- (logical_op:SI (match_operand:SI 1 "gpr_operand" "r")
- (match_operand:SI 2 "gpr_operand" "%r")))
+ [(set (match_operand:SI 0 "gpr_operand")
+ (logical_op:SI (match_operand:SI 1 "gpr_operand")
+ (match_operand:SI 2 "gpr_operand")))
(clobber (reg:CC CC_REGNUM))])
(parallel
[(set (reg:CC CC_REGNUM)
(compare:CC (match_dup 0) (const_int 0)))
- (clobber (match_operand:SI 3 "gpr_operand" "=r"))])]
+ (clobber (match_operand:SI 3 "gpr_operand"))])]
""
[(parallel
[(set (reg:CC CC_REGNUM)
diff --git a/gcc/config/epiphany/resolve-sw-modes.c b/gcc/config/epiphany/resolve-sw-modes.c
index b43b4d953cd..30f6920aba0 100644
--- a/gcc/config/epiphany/resolve-sw-modes.c
+++ b/gcc/config/epiphany/resolve-sw-modes.c
@@ -61,15 +61,15 @@ resolve_sw_modes (void)
bool need_commit = false;
bool finalize_fp_sets = (MACHINE_FUNCTION (cfun)->unknown_mode_sets == 0);
- todo.create (last_basic_block);
- pushed = sbitmap_alloc (last_basic_block);
+ todo.create (last_basic_block_for_fn (cfun));
+ pushed = sbitmap_alloc (last_basic_block_for_fn (cfun));
bitmap_clear (pushed);
if (!finalize_fp_sets)
{
df_note_add_problem ();
df_analyze ();
}
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
FOR_BB_INSNS (bb, insn)
{
enum attr_fp_mode selected_mode;
diff --git a/gcc/config/frv/frv-protos.h b/gcc/config/frv/frv-protos.h
index e589211c90f..03033e91528 100644
--- a/gcc/config/frv/frv-protos.h
+++ b/gcc/config/frv/frv-protos.h
@@ -62,14 +62,14 @@ extern rtx frv_split_abs (rtx *);
extern void frv_split_double_load (rtx, rtx);
extern void frv_split_double_store (rtx, rtx);
#ifdef BB_HEAD
-extern void frv_ifcvt_init_extra_fields (ce_if_block_t *);
-extern void frv_ifcvt_modify_tests (ce_if_block_t *, rtx *, rtx *);
+extern void frv_ifcvt_init_extra_fields (ce_if_block *);
+extern void frv_ifcvt_modify_tests (ce_if_block *, rtx *, rtx *);
extern void frv_ifcvt_modify_multiple_tests
- (ce_if_block_t *, basic_block,
+ (ce_if_block *, basic_block,
rtx *, rtx *);
-extern rtx frv_ifcvt_modify_insn (ce_if_block_t *, rtx, rtx);
-extern void frv_ifcvt_modify_final (ce_if_block_t *);
-extern void frv_ifcvt_modify_cancel (ce_if_block_t *);
+extern rtx frv_ifcvt_modify_insn (ce_if_block *, rtx, rtx);
+extern void frv_ifcvt_modify_final (ce_if_block *);
+extern void frv_ifcvt_modify_cancel (ce_if_block *);
#endif
extern enum reg_class frv_secondary_reload_class
(enum reg_class,
diff --git a/gcc/config/frv/frv.c b/gcc/config/frv/frv.c
index a5eb2c1c844..8d659fe05c2 100644
--- a/gcc/config/frv/frv.c
+++ b/gcc/config/frv/frv.c
@@ -5272,7 +5272,7 @@ frv_ifcvt_add_insn (rtx pattern, rtx insn, int before_p)
tests cannot be converted. */
void
-frv_ifcvt_modify_tests (ce_if_block_t *ce_info, rtx *p_true, rtx *p_false)
+frv_ifcvt_modify_tests (ce_if_block *ce_info, rtx *p_true, rtx *p_false)
{
basic_block test_bb = ce_info->test_bb; /* test basic block */
basic_block then_bb = ce_info->then_bb; /* THEN */
@@ -5629,7 +5629,7 @@ frv_ifcvt_modify_tests (ce_if_block_t *ce_info, rtx *p_true, rtx *p_false)
(const_int 0))) */
void
-frv_ifcvt_modify_multiple_tests (ce_if_block_t *ce_info,
+frv_ifcvt_modify_multiple_tests (ce_if_block *ce_info,
basic_block bb,
rtx *p_true,
rtx *p_false)
@@ -5923,7 +5923,7 @@ single_set_pattern (rtx pattern)
insn cannot be converted to be executed conditionally. */
rtx
-frv_ifcvt_modify_insn (ce_if_block_t *ce_info,
+frv_ifcvt_modify_insn (ce_if_block *ce_info,
rtx pattern,
rtx insn)
{
@@ -6188,7 +6188,7 @@ frv_ifcvt_modify_insn (ce_if_block_t *ce_info,
conditional if information CE_INFO. */
void
-frv_ifcvt_modify_final (ce_if_block_t *ce_info ATTRIBUTE_UNUSED)
+frv_ifcvt_modify_final (ce_if_block *ce_info ATTRIBUTE_UNUSED)
{
rtx existing_insn;
rtx check_insn;
@@ -6243,7 +6243,7 @@ frv_ifcvt_modify_final (ce_if_block_t *ce_info ATTRIBUTE_UNUSED)
information CE_INFO. */
void
-frv_ifcvt_modify_cancel (ce_if_block_t *ce_info ATTRIBUTE_UNUSED)
+frv_ifcvt_modify_cancel (ce_if_block *ce_info ATTRIBUTE_UNUSED)
{
int i;
rtx p = frv_ifcvt.added_insns_list;
@@ -8067,14 +8067,14 @@ frv_optimize_membar (void)
rtx *last_membar;
compute_bb_for_insn ();
- first_io = XCNEWVEC (struct frv_io, last_basic_block);
- last_membar = XCNEWVEC (rtx, last_basic_block);
+ first_io = XCNEWVEC (struct frv_io, last_basic_block_for_fn (cfun));
+ last_membar = XCNEWVEC (rtx, last_basic_block_for_fn (cfun));
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
frv_optimize_membar_local (bb, &first_io[bb->index],
&last_membar[bb->index]);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (last_membar[bb->index] != 0)
frv_optimize_membar_global (bb, first_io, last_membar[bb->index]);
diff --git a/gcc/config/h8300/h8300.md b/gcc/config/h8300/h8300.md
index 19b0d456a86..3b07f5f8fd7 100644
--- a/gcc/config/h8300/h8300.md
+++ b/gcc/config/h8300/h8300.md
@@ -1703,9 +1703,9 @@
[(set_attr "length" "8")])
(define_split
- [(set (match_operand:HI 0 "bit_register_indirect_operand" "=U")
- (and:HI (match_operand:HI 1 "bit_register_indirect_operand" "%0")
- (match_operand:HI 2 "single_zero_operand" "Y0")))]
+ [(set (match_operand:HI 0 "bit_register_indirect_operand")
+ (and:HI (match_operand:HI 1 "bit_register_indirect_operand")
+ (match_operand:HI 2 "single_zero_operand")))]
"TARGET_H8300SX"
[(set (match_dup 0)
(and:QI (match_dup 1)
@@ -1844,9 +1844,9 @@
[(set_attr "length" "8")])
(define_split
- [(set (match_operand:HI 0 "bit_register_indirect_operand" "=U")
- (ior:HI (match_operand:HI 1 "bit_register_indirect_operand" "%0")
- (match_operand:HI 2 "single_one_operand" "Y2")))]
+ [(set (match_operand:HI 0 "bit_register_indirect_operand")
+ (ior:HI (match_operand:HI 1 "bit_register_indirect_operand")
+ (match_operand:HI 2 "single_one_operand")))]
"TARGET_H8300SX"
[(set (match_dup 0)
(ior:QI (match_dup 1)
@@ -1922,9 +1922,9 @@
[(set_attr "length" "8")])
(define_split
- [(set (match_operand:HI 0 "bit_register_indirect_operand" "=U")
- (xor:HI (match_operand:HI 1 "bit_register_indirect_operand" "%0")
- (match_operand:HI 2 "single_one_operand" "Y2")))]
+ [(set (match_operand:HI 0 "bit_register_indirect_operand")
+ (xor:HI (match_operand:HI 1 "bit_register_indirect_operand")
+ (match_operand:HI 2 "single_one_operand")))]
"TARGET_H8300SX"
[(set (match_dup 0)
(xor:QI (match_dup 1)
diff --git a/gcc/config/i386/i386-modes.def b/gcc/config/i386/i386-modes.def
index e0b8fc826ab..57d08fb1b68 100644
--- a/gcc/config/i386/i386-modes.def
+++ b/gcc/config/i386/i386-modes.def
@@ -90,5 +90,10 @@ VECTOR_MODE (INT, QI, 2); /* V2QI */
INT_MODE (OI, 32);
INT_MODE (XI, 64);
+/* Keep the OI and XI modes from confusing the compiler into thinking
+ that these modes could actually be used for computation. They are
+ only holders for vectors during data movement. */
+#define MAX_BITSIZE_MODE_ANY_INT (128)
+
/* The symbol Pmode stands for one of the above machine modes (usually SImode).
The tm.h file specifies which one. It is not a distinct mode. */
diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index 548aec74093..71fe38a1a50 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -1685,7 +1685,7 @@ struct processor_costs slm_cost = {
COSTS_N_INSNS (1), /* variable shift costs */
COSTS_N_INSNS (1), /* constant shift costs */
{COSTS_N_INSNS (3), /* cost of starting multiply for QI */
- COSTS_N_INSNS (4), /* HI */
+ COSTS_N_INSNS (3), /* HI */
COSTS_N_INSNS (3), /* SI */
COSTS_N_INSNS (4), /* DI */
COSTS_N_INSNS (2)}, /* other */
@@ -2435,6 +2435,7 @@ static const char *const cpu_names[TARGET_CPU_DEFAULT_max] =
"core-avx2",
"atom",
"slm",
+ "intel",
"geode",
"k6",
"k6-2",
@@ -3143,6 +3144,9 @@ ix86_option_override_internal (bool main_args_p,
PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3 | PTA_SSSE3
| PTA_SSE4_1 | PTA_SSE4_2 | PTA_CX16 | PTA_POPCNT | PTA_AES
| PTA_PCLMUL | PTA_RDRND | PTA_MOVBE | PTA_FXSR},
+ {"intel", PROCESSOR_SLM, CPU_SLM,
+ PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3 | PTA_SSSE3
+ | PTA_SSE4_1 | PTA_SSE4_2 | PTA_CX16 | PTA_POPCNT | PTA_FXSR},
{"geode", PROCESSOR_GEODE, CPU_GEODE,
PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE | PTA_PRFCHW},
{"k6", PROCESSOR_K6, CPU_K6, PTA_MMX},
@@ -3629,6 +3633,9 @@ ix86_option_override_internal (bool main_args_p,
if (!strcmp (opts->x_ix86_arch_string, "generic"))
error ("generic CPU can be used only for %stune=%s %s",
prefix, suffix, sw);
+ else if (!strcmp (ix86_arch_string, "intel"))
+ error ("intel CPU can be used only for %stune=%s %s",
+ prefix, suffix, sw);
else if (!strncmp (opts->x_ix86_arch_string, "generic", 7) || i == pta_size)
error ("bad value (%s) for %sarch=%s %s",
opts->x_ix86_arch_string, prefix, suffix, sw);
@@ -3692,6 +3699,10 @@ ix86_option_override_internal (bool main_args_p,
{
if (opts->x_optimize >= 1 && !opts_set->x_flag_omit_frame_pointer)
opts->x_flag_omit_frame_pointer = !USE_X86_64_FRAME_POINTER;
+ if (opts->x_flag_asynchronous_unwind_tables
+ && !opts_set->x_flag_unwind_tables
+ && TARGET_64BIT_MS_ABI)
+ opts->x_flag_unwind_tables = 1;
if (opts->x_flag_asynchronous_unwind_tables == 2)
opts->x_flag_unwind_tables
= opts->x_flag_asynchronous_unwind_tables = 1;
@@ -5734,6 +5745,17 @@ ix86_legitimate_combined_insn (rtx insn)
bool win;
int j;
+ /* For pre-AVX disallow unaligned loads/stores where the
+ instructions don't support it. */
+ if (!TARGET_AVX
+ && VECTOR_MODE_P (GET_MODE (op))
+ && misaligned_operand (op, GET_MODE (op)))
+ {
+ int min_align = get_attr_ssememalign (insn);
+ if (min_align == 0)
+ return false;
+ }
+
/* A unary operator may be accepted by the predicate, but it
is irrelevant for matching constraints. */
if (UNARY_P (op))
@@ -6155,7 +6177,8 @@ type_natural_mode (const_tree type, const CUMULATIVE_ARGS *cum)
}
return TYPE_MODE (type);
}
- else if ((size == 8 || size == 16) && !TARGET_SSE)
+ else if (((size == 8 && TARGET_64BIT) || size == 16)
+ && !TARGET_SSE)
{
static bool warnedsse;
@@ -6167,10 +6190,21 @@ type_natural_mode (const_tree type, const CUMULATIVE_ARGS *cum)
warning (0, "SSE vector argument without SSE "
"enabled changes the ABI");
}
- return mode;
}
- else
- return mode;
+ else if ((size == 8 && !TARGET_64BIT) && !TARGET_MMX)
+ {
+ static bool warnedmmx;
+
+ if (cum
+ && !warnedmmx
+ && cum->warn_mmx)
+ {
+ warnedmmx = true;
+ warning (0, "MMX vector argument without MMX "
+ "enabled changes the ABI");
+ }
+ }
+ return mode;
}
gcc_unreachable ();
@@ -10471,7 +10505,7 @@ ix86_finalize_stack_realign_flags (void)
add_to_hard_reg_set (&set_up_by_prologue, Pmode, ARG_POINTER_REGNUM);
add_to_hard_reg_set (&set_up_by_prologue, Pmode,
HARD_FRAME_POINTER_REGNUM);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn;
FOR_BB_INSNS (bb, insn)
@@ -10917,18 +10951,21 @@ ix86_expand_prologue (void)
}
m->fs.sp_offset += allocate;
+ /* Use stack_pointer_rtx for relative addressing so that code
+ works for realigned stack, too. */
if (r10_live && eax_live)
{
- t = choose_baseaddr (m->fs.sp_offset - allocate);
+ t = plus_constant (Pmode, stack_pointer_rtx, allocate);
emit_move_insn (gen_rtx_REG (word_mode, R10_REG),
gen_frame_mem (word_mode, t));
- t = choose_baseaddr (m->fs.sp_offset - allocate - UNITS_PER_WORD);
+ t = plus_constant (Pmode, stack_pointer_rtx,
+ allocate - UNITS_PER_WORD);
emit_move_insn (gen_rtx_REG (word_mode, AX_REG),
gen_frame_mem (word_mode, t));
}
else if (eax_live || r10_live)
{
- t = choose_baseaddr (m->fs.sp_offset - allocate);
+ t = plus_constant (Pmode, stack_pointer_rtx, allocate);
emit_move_insn (gen_rtx_REG (word_mode,
(eax_live ? AX_REG : R10_REG)),
gen_frame_mem (word_mode, t));
@@ -27909,6 +27946,10 @@ enum ix86_builtins
IX86_BUILTIN_CPU_IS,
IX86_BUILTIN_CPU_SUPPORTS,
+ /* Read/write FLAGS register built-ins. */
+ IX86_BUILTIN_READ_FLAGS,
+ IX86_BUILTIN_WRITE_FLAGS,
+
IX86_BUILTIN_MAX
};
@@ -29750,6 +29791,17 @@ ix86_init_mmx_sse_builtins (void)
UCHAR_FTYPE_UCHAR_ULONGLONG_ULONGLONG_PULONGLONG,
IX86_BUILTIN_ADDCARRYX64);
+ /* Read/write FLAGS. */
+ def_builtin (~OPTION_MASK_ISA_64BIT, "__builtin_ia32_readeflags_u32",
+ UNSIGNED_FTYPE_VOID, IX86_BUILTIN_READ_FLAGS);
+ def_builtin (OPTION_MASK_ISA_64BIT, "__builtin_ia32_readeflags_u64",
+ UINT64_FTYPE_VOID, IX86_BUILTIN_READ_FLAGS);
+ def_builtin (~OPTION_MASK_ISA_64BIT, "__builtin_ia32_writeeflags_u32",
+ VOID_FTYPE_UNSIGNED, IX86_BUILTIN_WRITE_FLAGS);
+ def_builtin (OPTION_MASK_ISA_64BIT, "__builtin_ia32_writeeflags_u64",
+ VOID_FTYPE_UINT64, IX86_BUILTIN_WRITE_FLAGS);
+
+
/* Add FMA4 multi-arg argument instructions */
for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
{
@@ -32481,11 +32533,12 @@ ix86_expand_args_builtin (const struct builtin_description *d,
static rtx
ix86_expand_special_args_builtin (const struct builtin_description *d,
- tree exp, rtx target)
+ tree exp, rtx target)
{
tree arg;
rtx pat, op;
unsigned int i, nargs, arg_adjust, memory;
+ bool aligned_mem = false;
struct
{
rtx op;
@@ -32531,6 +32584,15 @@ ix86_expand_special_args_builtin (const struct builtin_description *d,
nargs = 1;
klass = load;
memory = 0;
+ switch (icode)
+ {
+ case CODE_FOR_sse4_1_movntdqa:
+ case CODE_FOR_avx2_movntdqa:
+ aligned_mem = true;
+ break;
+ default:
+ break;
+ }
break;
case VOID_FTYPE_PV2SF_V4SF:
case VOID_FTYPE_PV4DI_V4DI:
@@ -32548,6 +32610,26 @@ ix86_expand_special_args_builtin (const struct builtin_description *d,
klass = store;
/* Reserve memory operand for target. */
memory = ARRAY_SIZE (args);
+ switch (icode)
+ {
+ /* These builtins and instructions require the memory
+ to be properly aligned. */
+ case CODE_FOR_avx_movntv4di:
+ case CODE_FOR_sse2_movntv2di:
+ case CODE_FOR_avx_movntv8sf:
+ case CODE_FOR_sse_movntv4sf:
+ case CODE_FOR_sse4a_vmmovntv4sf:
+ case CODE_FOR_avx_movntv4df:
+ case CODE_FOR_sse2_movntv2df:
+ case CODE_FOR_sse4a_vmmovntv2df:
+ case CODE_FOR_sse2_movntidi:
+ case CODE_FOR_sse_movntq:
+ case CODE_FOR_sse2_movntisi:
+ aligned_mem = true;
+ break;
+ default:
+ break;
+ }
break;
case V4SF_FTYPE_V4SF_PCV2SF:
case V2DF_FTYPE_V2DF_PCDOUBLE:
@@ -32604,6 +32686,17 @@ ix86_expand_special_args_builtin (const struct builtin_description *d,
{
op = ix86_zero_extend_to_Pmode (op);
target = gen_rtx_MEM (tmode, op);
+ /* target at this point has just BITS_PER_UNIT MEM_ALIGN
+ on it. Try to improve it using get_pointer_alignment,
+ and if the special builtin is one that requires strict
+ mode alignment, also from it's GET_MODE_ALIGNMENT.
+ Failure to do so could lead to ix86_legitimate_combined_insn
+ rejecting all changes to such insns. */
+ unsigned int align = get_pointer_alignment (arg);
+ if (aligned_mem && align < GET_MODE_ALIGNMENT (tmode))
+ align = GET_MODE_ALIGNMENT (tmode);
+ if (MEM_ALIGN (target) < align)
+ set_mem_align (target, align);
}
else
target = force_reg (tmode, op);
@@ -32649,8 +32742,17 @@ ix86_expand_special_args_builtin (const struct builtin_description *d,
/* This must be the memory operand. */
op = ix86_zero_extend_to_Pmode (op);
op = gen_rtx_MEM (mode, op);
- gcc_assert (GET_MODE (op) == mode
- || GET_MODE (op) == VOIDmode);
+ /* op at this point has just BITS_PER_UNIT MEM_ALIGN
+ on it. Try to improve it using get_pointer_alignment,
+ and if the special builtin is one that requires strict
+ mode alignment, also from it's GET_MODE_ALIGNMENT.
+ Failure to do so could lead to ix86_legitimate_combined_insn
+ rejecting all changes to such insns. */
+ unsigned int align = get_pointer_alignment (arg);
+ if (aligned_mem && align < GET_MODE_ALIGNMENT (mode))
+ align = GET_MODE_ALIGNMENT (mode);
+ if (MEM_ALIGN (op) < align)
+ set_mem_align (op, align);
}
else
{
@@ -33378,6 +33480,29 @@ addcarryx:
emit_insn (gen_rtx_SET (VOIDmode, target, pat));
return target;
+ case IX86_BUILTIN_READ_FLAGS:
+ emit_insn (gen_push (gen_rtx_REG (word_mode, FLAGS_REG)));
+
+ if (optimize
+ || target == NULL_RTX
+ || !nonimmediate_operand (target, word_mode)
+ || GET_MODE (target) != word_mode)
+ target = gen_reg_rtx (word_mode);
+
+ emit_insn (gen_pop (target));
+ return target;
+
+ case IX86_BUILTIN_WRITE_FLAGS:
+
+ arg0 = CALL_EXPR_ARG (exp, 0);
+ op0 = expand_normal (arg0);
+ if (!general_no_elim_operand (op0, word_mode))
+ op0 = copy_to_mode_reg (word_mode, op0);
+
+ emit_insn (gen_push (op0));
+ emit_insn (gen_pop (gen_rtx_REG (word_mode, FLAGS_REG)));
+ return 0;
+
case IX86_BUILTIN_GATHERSIV2DF:
icode = CODE_FOR_avx2_gathersiv2df;
goto gather_gen;
@@ -33656,6 +33781,31 @@ addcarryx:
gcc_unreachable ();
}
+/* This returns the target-specific builtin with code CODE if
+ current_function_decl has visibility on this builtin, which is checked
+ using isa flags. Returns NULL_TREE otherwise. */
+
+static tree ix86_get_builtin (enum ix86_builtins code)
+{
+ struct cl_target_option *opts;
+ tree target_tree = NULL_TREE;
+
+ /* Determine the isa flags of current_function_decl. */
+
+ if (current_function_decl)
+ target_tree = DECL_FUNCTION_SPECIFIC_TARGET (current_function_decl);
+
+ if (target_tree == NULL)
+ target_tree = target_option_default_node;
+
+ opts = TREE_TARGET_OPTION (target_tree);
+
+ if (ix86_builtins_isa[(int) code].isa & opts->x_ix86_isa_flags)
+ return ix86_builtin_decl (code, true);
+ else
+ return NULL_TREE;
+}
+
/* Returns a function decl for a vectorized version of the builtin function
with builtin function code FN and the result vector type TYPE, or NULL_TREE
if it is not available. */
@@ -33684,9 +33834,9 @@ ix86_builtin_vectorized_function (tree fndecl, tree type_out,
if (out_mode == DFmode && in_mode == DFmode)
{
if (out_n == 2 && in_n == 2)
- return ix86_builtins[IX86_BUILTIN_SQRTPD];
+ return ix86_get_builtin (IX86_BUILTIN_SQRTPD);
else if (out_n == 4 && in_n == 4)
- return ix86_builtins[IX86_BUILTIN_SQRTPD256];
+ return ix86_get_builtin (IX86_BUILTIN_SQRTPD256);
}
break;
@@ -33694,9 +33844,9 @@ ix86_builtin_vectorized_function (tree fndecl, tree type_out,
if (out_mode == SFmode && in_mode == SFmode)
{
if (out_n == 4 && in_n == 4)
- return ix86_builtins[IX86_BUILTIN_SQRTPS_NR];
+ return ix86_get_builtin (IX86_BUILTIN_SQRTPS_NR);
else if (out_n == 8 && in_n == 8)
- return ix86_builtins[IX86_BUILTIN_SQRTPS_NR256];
+ return ix86_get_builtin (IX86_BUILTIN_SQRTPS_NR256);
}
break;
@@ -33710,9 +33860,9 @@ ix86_builtin_vectorized_function (tree fndecl, tree type_out,
if (out_mode == SImode && in_mode == DFmode)
{
if (out_n == 4 && in_n == 2)
- return ix86_builtins[IX86_BUILTIN_FLOORPD_VEC_PACK_SFIX];
+ return ix86_get_builtin (IX86_BUILTIN_FLOORPD_VEC_PACK_SFIX);
else if (out_n == 8 && in_n == 4)
- return ix86_builtins[IX86_BUILTIN_FLOORPD_VEC_PACK_SFIX256];
+ return ix86_get_builtin (IX86_BUILTIN_FLOORPD_VEC_PACK_SFIX256);
}
break;
@@ -33726,9 +33876,9 @@ ix86_builtin_vectorized_function (tree fndecl, tree type_out,
if (out_mode == SImode && in_mode == SFmode)
{
if (out_n == 4 && in_n == 4)
- return ix86_builtins[IX86_BUILTIN_FLOORPS_SFIX];
+ return ix86_get_builtin (IX86_BUILTIN_FLOORPS_SFIX);
else if (out_n == 8 && in_n == 8)
- return ix86_builtins[IX86_BUILTIN_FLOORPS_SFIX256];
+ return ix86_get_builtin (IX86_BUILTIN_FLOORPS_SFIX256);
}
break;
@@ -33742,9 +33892,9 @@ ix86_builtin_vectorized_function (tree fndecl, tree type_out,
if (out_mode == SImode && in_mode == DFmode)
{
if (out_n == 4 && in_n == 2)
- return ix86_builtins[IX86_BUILTIN_CEILPD_VEC_PACK_SFIX];
+ return ix86_get_builtin (IX86_BUILTIN_CEILPD_VEC_PACK_SFIX);
else if (out_n == 8 && in_n == 4)
- return ix86_builtins[IX86_BUILTIN_CEILPD_VEC_PACK_SFIX256];
+ return ix86_get_builtin (IX86_BUILTIN_CEILPD_VEC_PACK_SFIX256);
}
break;
@@ -33758,9 +33908,9 @@ ix86_builtin_vectorized_function (tree fndecl, tree type_out,
if (out_mode == SImode && in_mode == SFmode)
{
if (out_n == 4 && in_n == 4)
- return ix86_builtins[IX86_BUILTIN_CEILPS_SFIX];
+ return ix86_get_builtin (IX86_BUILTIN_CEILPS_SFIX);
else if (out_n == 8 && in_n == 8)
- return ix86_builtins[IX86_BUILTIN_CEILPS_SFIX256];
+ return ix86_get_builtin (IX86_BUILTIN_CEILPS_SFIX256);
}
break;
@@ -33770,9 +33920,9 @@ ix86_builtin_vectorized_function (tree fndecl, tree type_out,
if (out_mode == SImode && in_mode == DFmode)
{
if (out_n == 4 && in_n == 2)
- return ix86_builtins[IX86_BUILTIN_VEC_PACK_SFIX];
+ return ix86_get_builtin (IX86_BUILTIN_VEC_PACK_SFIX);
else if (out_n == 8 && in_n == 4)
- return ix86_builtins[IX86_BUILTIN_VEC_PACK_SFIX256];
+ return ix86_get_builtin (IX86_BUILTIN_VEC_PACK_SFIX256);
}
break;
@@ -33782,9 +33932,9 @@ ix86_builtin_vectorized_function (tree fndecl, tree type_out,
if (out_mode == SImode && in_mode == SFmode)
{
if (out_n == 4 && in_n == 4)
- return ix86_builtins[IX86_BUILTIN_CVTPS2DQ];
+ return ix86_get_builtin (IX86_BUILTIN_CVTPS2DQ);
else if (out_n == 8 && in_n == 8)
- return ix86_builtins[IX86_BUILTIN_CVTPS2DQ256];
+ return ix86_get_builtin (IX86_BUILTIN_CVTPS2DQ256);
}
break;
@@ -33798,9 +33948,9 @@ ix86_builtin_vectorized_function (tree fndecl, tree type_out,
if (out_mode == SImode && in_mode == DFmode)
{
if (out_n == 4 && in_n == 2)
- return ix86_builtins[IX86_BUILTIN_ROUNDPD_AZ_VEC_PACK_SFIX];
+ return ix86_get_builtin (IX86_BUILTIN_ROUNDPD_AZ_VEC_PACK_SFIX);
else if (out_n == 8 && in_n == 4)
- return ix86_builtins[IX86_BUILTIN_ROUNDPD_AZ_VEC_PACK_SFIX256];
+ return ix86_get_builtin (IX86_BUILTIN_ROUNDPD_AZ_VEC_PACK_SFIX256);
}
break;
@@ -33814,9 +33964,9 @@ ix86_builtin_vectorized_function (tree fndecl, tree type_out,
if (out_mode == SImode && in_mode == SFmode)
{
if (out_n == 4 && in_n == 4)
- return ix86_builtins[IX86_BUILTIN_ROUNDPS_AZ_SFIX];
+ return ix86_get_builtin (IX86_BUILTIN_ROUNDPS_AZ_SFIX);
else if (out_n == 8 && in_n == 8)
- return ix86_builtins[IX86_BUILTIN_ROUNDPS_AZ_SFIX256];
+ return ix86_get_builtin (IX86_BUILTIN_ROUNDPS_AZ_SFIX256);
}
break;
@@ -33824,9 +33974,9 @@ ix86_builtin_vectorized_function (tree fndecl, tree type_out,
if (out_mode == DFmode && in_mode == DFmode)
{
if (out_n == 2 && in_n == 2)
- return ix86_builtins[IX86_BUILTIN_CPYSGNPD];
+ return ix86_get_builtin (IX86_BUILTIN_CPYSGNPD);
else if (out_n == 4 && in_n == 4)
- return ix86_builtins[IX86_BUILTIN_CPYSGNPD256];
+ return ix86_get_builtin (IX86_BUILTIN_CPYSGNPD256);
}
break;
@@ -33834,9 +33984,9 @@ ix86_builtin_vectorized_function (tree fndecl, tree type_out,
if (out_mode == SFmode && in_mode == SFmode)
{
if (out_n == 4 && in_n == 4)
- return ix86_builtins[IX86_BUILTIN_CPYSGNPS];
+ return ix86_get_builtin (IX86_BUILTIN_CPYSGNPS);
else if (out_n == 8 && in_n == 8)
- return ix86_builtins[IX86_BUILTIN_CPYSGNPS256];
+ return ix86_get_builtin (IX86_BUILTIN_CPYSGNPS256);
}
break;
@@ -33848,9 +33998,9 @@ ix86_builtin_vectorized_function (tree fndecl, tree type_out,
if (out_mode == DFmode && in_mode == DFmode)
{
if (out_n == 2 && in_n == 2)
- return ix86_builtins[IX86_BUILTIN_FLOORPD];
+ return ix86_get_builtin (IX86_BUILTIN_FLOORPD);
else if (out_n == 4 && in_n == 4)
- return ix86_builtins[IX86_BUILTIN_FLOORPD256];
+ return ix86_get_builtin (IX86_BUILTIN_FLOORPD256);
}
break;
@@ -33862,9 +34012,9 @@ ix86_builtin_vectorized_function (tree fndecl, tree type_out,
if (out_mode == SFmode && in_mode == SFmode)
{
if (out_n == 4 && in_n == 4)
- return ix86_builtins[IX86_BUILTIN_FLOORPS];
+ return ix86_get_builtin (IX86_BUILTIN_FLOORPS);
else if (out_n == 8 && in_n == 8)
- return ix86_builtins[IX86_BUILTIN_FLOORPS256];
+ return ix86_get_builtin (IX86_BUILTIN_FLOORPS256);
}
break;
@@ -33876,9 +34026,9 @@ ix86_builtin_vectorized_function (tree fndecl, tree type_out,
if (out_mode == DFmode && in_mode == DFmode)
{
if (out_n == 2 && in_n == 2)
- return ix86_builtins[IX86_BUILTIN_CEILPD];
+ return ix86_get_builtin (IX86_BUILTIN_CEILPD);
else if (out_n == 4 && in_n == 4)
- return ix86_builtins[IX86_BUILTIN_CEILPD256];
+ return ix86_get_builtin (IX86_BUILTIN_CEILPD256);
}
break;
@@ -33890,9 +34040,9 @@ ix86_builtin_vectorized_function (tree fndecl, tree type_out,
if (out_mode == SFmode && in_mode == SFmode)
{
if (out_n == 4 && in_n == 4)
- return ix86_builtins[IX86_BUILTIN_CEILPS];
+ return ix86_get_builtin (IX86_BUILTIN_CEILPS);
else if (out_n == 8 && in_n == 8)
- return ix86_builtins[IX86_BUILTIN_CEILPS256];
+ return ix86_get_builtin (IX86_BUILTIN_CEILPS256);
}
break;
@@ -33904,9 +34054,9 @@ ix86_builtin_vectorized_function (tree fndecl, tree type_out,
if (out_mode == DFmode && in_mode == DFmode)
{
if (out_n == 2 && in_n == 2)
- return ix86_builtins[IX86_BUILTIN_TRUNCPD];
+ return ix86_get_builtin (IX86_BUILTIN_TRUNCPD);
else if (out_n == 4 && in_n == 4)
- return ix86_builtins[IX86_BUILTIN_TRUNCPD256];
+ return ix86_get_builtin (IX86_BUILTIN_TRUNCPD256);
}
break;
@@ -33918,9 +34068,9 @@ ix86_builtin_vectorized_function (tree fndecl, tree type_out,
if (out_mode == SFmode && in_mode == SFmode)
{
if (out_n == 4 && in_n == 4)
- return ix86_builtins[IX86_BUILTIN_TRUNCPS];
+ return ix86_get_builtin (IX86_BUILTIN_TRUNCPS);
else if (out_n == 8 && in_n == 8)
- return ix86_builtins[IX86_BUILTIN_TRUNCPS256];
+ return ix86_get_builtin (IX86_BUILTIN_TRUNCPS256);
}
break;
@@ -33932,9 +34082,9 @@ ix86_builtin_vectorized_function (tree fndecl, tree type_out,
if (out_mode == DFmode && in_mode == DFmode)
{
if (out_n == 2 && in_n == 2)
- return ix86_builtins[IX86_BUILTIN_RINTPD];
+ return ix86_get_builtin (IX86_BUILTIN_RINTPD);
else if (out_n == 4 && in_n == 4)
- return ix86_builtins[IX86_BUILTIN_RINTPD256];
+ return ix86_get_builtin (IX86_BUILTIN_RINTPD256);
}
break;
@@ -33946,9 +34096,9 @@ ix86_builtin_vectorized_function (tree fndecl, tree type_out,
if (out_mode == SFmode && in_mode == SFmode)
{
if (out_n == 4 && in_n == 4)
- return ix86_builtins[IX86_BUILTIN_RINTPS];
+ return ix86_get_builtin (IX86_BUILTIN_RINTPS);
else if (out_n == 8 && in_n == 8)
- return ix86_builtins[IX86_BUILTIN_RINTPS256];
+ return ix86_get_builtin (IX86_BUILTIN_RINTPS256);
}
break;
@@ -33960,9 +34110,9 @@ ix86_builtin_vectorized_function (tree fndecl, tree type_out,
if (out_mode == DFmode && in_mode == DFmode)
{
if (out_n == 2 && in_n == 2)
- return ix86_builtins[IX86_BUILTIN_ROUNDPD_AZ];
+ return ix86_get_builtin (IX86_BUILTIN_ROUNDPD_AZ);
else if (out_n == 4 && in_n == 4)
- return ix86_builtins[IX86_BUILTIN_ROUNDPD_AZ256];
+ return ix86_get_builtin (IX86_BUILTIN_ROUNDPD_AZ256);
}
break;
@@ -33974,9 +34124,9 @@ ix86_builtin_vectorized_function (tree fndecl, tree type_out,
if (out_mode == SFmode && in_mode == SFmode)
{
if (out_n == 4 && in_n == 4)
- return ix86_builtins[IX86_BUILTIN_ROUNDPS_AZ];
+ return ix86_get_builtin (IX86_BUILTIN_ROUNDPS_AZ);
else if (out_n == 8 && in_n == 8)
- return ix86_builtins[IX86_BUILTIN_ROUNDPS_AZ256];
+ return ix86_get_builtin (IX86_BUILTIN_ROUNDPS_AZ256);
}
break;
@@ -33984,9 +34134,9 @@ ix86_builtin_vectorized_function (tree fndecl, tree type_out,
if (out_mode == DFmode && in_mode == DFmode)
{
if (out_n == 2 && in_n == 2)
- return ix86_builtins[IX86_BUILTIN_VFMADDPD];
+ return ix86_get_builtin (IX86_BUILTIN_VFMADDPD);
if (out_n == 4 && in_n == 4)
- return ix86_builtins[IX86_BUILTIN_VFMADDPD256];
+ return ix86_get_builtin (IX86_BUILTIN_VFMADDPD256);
}
break;
@@ -33994,9 +34144,9 @@ ix86_builtin_vectorized_function (tree fndecl, tree type_out,
if (out_mode == SFmode && in_mode == SFmode)
{
if (out_n == 4 && in_n == 4)
- return ix86_builtins[IX86_BUILTIN_VFMADDPS];
+ return ix86_get_builtin (IX86_BUILTIN_VFMADDPS);
if (out_n == 8 && in_n == 8)
- return ix86_builtins[IX86_BUILTIN_VFMADDPS256];
+ return ix86_get_builtin (IX86_BUILTIN_VFMADDPS256);
}
break;
@@ -34276,7 +34426,7 @@ ix86_vectorize_builtin_gather (const_tree mem_vectype,
return NULL_TREE;
}
- return ix86_builtins[code];
+ return ix86_get_builtin (code);
}
/* Returns a code for a target-specific builtin that implements
@@ -34297,10 +34447,10 @@ ix86_builtin_reciprocal (unsigned int fn, bool md_fn,
{
/* Vectorized version of sqrt to rsqrt conversion. */
case IX86_BUILTIN_SQRTPS_NR:
- return ix86_builtins[IX86_BUILTIN_RSQRTPS_NR];
+ return ix86_get_builtin (IX86_BUILTIN_RSQRTPS_NR);
case IX86_BUILTIN_SQRTPS_NR256:
- return ix86_builtins[IX86_BUILTIN_RSQRTPS_NR256];
+ return ix86_get_builtin (IX86_BUILTIN_RSQRTPS_NR256);
default:
return NULL_TREE;
@@ -34311,7 +34461,7 @@ ix86_builtin_reciprocal (unsigned int fn, bool md_fn,
{
/* Sqrt to rsqrt conversion. */
case BUILT_IN_SQRTF:
- return ix86_builtins[IX86_BUILTIN_RSQRTF];
+ return ix86_get_builtin (IX86_BUILTIN_RSQRTF);
default:
return NULL_TREE;
diff --git a/gcc/config/i386/i386.h b/gcc/config/i386/i386.h
index 113c75e0813..7efd1e01f4e 100644
--- a/gcc/config/i386/i386.h
+++ b/gcc/config/i386/i386.h
@@ -626,6 +626,7 @@ enum target_cpu_default
TARGET_CPU_DEFAULT_haswell,
TARGET_CPU_DEFAULT_atom,
TARGET_CPU_DEFAULT_slm,
+ TARGET_CPU_DEFAULT_intel,
TARGET_CPU_DEFAULT_geode,
TARGET_CPU_DEFAULT_k6,
diff --git a/gcc/config/i386/i386.md b/gcc/config/i386/i386.md
index 6976124d4a8..ab5b33f6399 100644
--- a/gcc/config/i386/i386.md
+++ b/gcc/config/i386/i386.md
@@ -402,6 +402,13 @@
(const_string "unknown")]
(const_string "integer")))
+;; The minimum required alignment of vector mode memory operands of the SSE
+;; (non-VEX/EVEX) instruction in bits, if it is different from
+;; GET_MODE_ALIGNMENT of the operand, otherwise 0. If an instruction has
+;; multiple alternatives, this should be conservative maximum of those minimum
+;; required alignments.
+(define_attr "ssememalign" "" (const_int 0))
+
;; The (bounding maximum) length of an instruction immediate.
(define_attr "length_immediate" ""
(cond [(eq_attr "type" "incdec,setcc,icmov,str,lea,other,multi,idiv,leave,
@@ -898,8 +905,8 @@
(TI "TARGET_64BIT")])
;; Double word integer modes as mode attribute.
-(define_mode_attr DWI [(SI "DI") (DI "TI")])
-(define_mode_attr dwi [(SI "di") (DI "ti")])
+(define_mode_attr DWI [(QI "HI") (HI "SI") (SI "DI") (DI "TI")])
+(define_mode_attr dwi [(QI "hi") (HI "si") (SI "di") (DI "ti")])
;; Half mode for double word integer modes.
(define_mode_iterator DWIH [(SI "!TARGET_64BIT")
@@ -1714,6 +1721,23 @@
"pop{<imodesuffix>}\t%0"
[(set_attr "type" "pop")
(set_attr "mode" "<MODE>")])
+
+(define_insn "*pushfl<mode>2"
+ [(set (match_operand:W 0 "push_operand" "=<")
+ (match_operand:W 1 "flags_reg_operand"))]
+ ""
+ "pushf{<imodesuffix>}"
+ [(set_attr "type" "push")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "*popfl<mode>1"
+ [(set (match_operand:W 0 "flags_reg_operand")
+ (match_operand:W 1 "pop_operand" ">"))]
+ ""
+ "popf{<imodesuffix>}"
+ [(set_attr "type" "pop")
+ (set_attr "mode" "<MODE>")])
+
;; Move instructions.
@@ -3102,7 +3126,7 @@
(const_string "1")
(const_string "*")))
(set (attr "mode")
- (cond [(eq_attr "alternative" "3,4,9,10,14,15")
+ (cond [(eq_attr "alternative" "3,4,9,10,13,14,15")
(const_string "SI")
(eq_attr "alternative" "11")
(const_string "DI")
@@ -6153,6 +6177,41 @@
[(set_attr "type" "alu")
(set_attr "mode" "QI")])
+;; Add with jump on overflow.
+(define_expand "addv<mode>4"
+ [(parallel [(set (reg:CCO FLAGS_REG)
+ (eq:CCO (plus:<DWI>
+ (sign_extend:<DWI>
+ (match_operand:SWI 1 "nonimmediate_operand"))
+ (sign_extend:<DWI>
+ (match_operand:SWI 2 "<general_operand>")))
+ (sign_extend:<DWI>
+ (plus:SWI (match_dup 1) (match_dup 2)))))
+ (set (match_operand:SWI 0 "register_operand")
+ (plus:SWI (match_dup 1) (match_dup 2)))])
+ (set (pc) (if_then_else
+ (eq (reg:CCO FLAGS_REG) (const_int 0))
+ (label_ref (match_operand 3))
+ (pc)))]
+ ""
+ "ix86_fixup_binary_operands_no_copy (PLUS, <MODE>mode, operands);")
+
+(define_insn "*addv<mode>4"
+ [(set (reg:CCO FLAGS_REG)
+ (eq:CCO (plus:<DWI>
+ (sign_extend:<DWI>
+ (match_operand:SWI 1 "nonimmediate_operand" "%0,0"))
+ (sign_extend:<DWI>
+ (match_operand:SWI 2 "<general_operand>" "<g>,<r><i>")))
+ (sign_extend:<DWI>
+ (plus:SWI (match_dup 1) (match_dup 2)))))
+ (set (match_operand:SWI 0 "nonimmediate_operand" "=<r>,<r>m")
+ (plus:SWI (match_dup 1) (match_dup 2)))]
+ "ix86_binary_operator_ok (PLUS, <MODE>mode, operands)"
+ "add{<imodesuffix>}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "<MODE>")])
+
;; The lea patterns for modes less than 32 bits need to be matched by
;; several insns converted to real lea by splitters.
@@ -6390,6 +6449,41 @@
[(set_attr "type" "alu")
(set_attr "mode" "SI")])
+;; Subtract with jump on overflow.
+(define_expand "subv<mode>4"
+ [(parallel [(set (reg:CCO FLAGS_REG)
+ (eq:CCO (minus:<DWI>
+ (sign_extend:<DWI>
+ (match_operand:SWI 1 "nonimmediate_operand"))
+ (sign_extend:<DWI>
+ (match_operand:SWI 2 "<general_operand>")))
+ (sign_extend:<DWI>
+ (minus:SWI (match_dup 1) (match_dup 2)))))
+ (set (match_operand:SWI 0 "register_operand")
+ (minus:SWI (match_dup 1) (match_dup 2)))])
+ (set (pc) (if_then_else
+ (eq (reg:CCO FLAGS_REG) (const_int 0))
+ (label_ref (match_operand 3))
+ (pc)))]
+ ""
+ "ix86_fixup_binary_operands_no_copy (MINUS, <MODE>mode, operands);")
+
+(define_insn "*subv<mode>4"
+ [(set (reg:CCO FLAGS_REG)
+ (eq:CCO (minus:<DWI>
+ (sign_extend:<DWI>
+ (match_operand:SWI 1 "nonimmediate_operand" "0,0"))
+ (sign_extend:<DWI>
+ (match_operand:SWI 2 "<general_operand>" "<r><i>,<r>m")))
+ (sign_extend:<DWI>
+ (minus:SWI (match_dup 1) (match_dup 2)))))
+ (set (match_operand:SWI 0 "nonimmediate_operand" "=<r>m,<r>")
+ (minus:SWI (match_dup 1) (match_dup 2)))]
+ "ix86_binary_operator_ok (MINUS, <MODE>mode, operands)"
+ "sub{<imodesuffix>}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "<MODE>")])
+
(define_insn "*sub<mode>_3"
[(set (reg FLAGS_REG)
(compare (match_operand:SWI 1 "nonimmediate_operand" "0,0")
@@ -6704,6 +6798,58 @@
(set_attr "bdver1_decode" "direct")
(set_attr "mode" "QI")])
+;; Multiply with jump on overflow.
+(define_expand "mulv<mode>4"
+ [(parallel [(set (reg:CCO FLAGS_REG)
+ (eq:CCO (mult:<DWI>
+ (sign_extend:<DWI>
+ (match_operand:SWI48 1 "register_operand"))
+ (sign_extend:<DWI>
+ (match_operand:SWI48 2 "<general_operand>")))
+ (sign_extend:<DWI>
+ (mult:SWI48 (match_dup 1) (match_dup 2)))))
+ (set (match_operand:SWI48 0 "register_operand")
+ (mult:SWI48 (match_dup 1) (match_dup 2)))])
+ (set (pc) (if_then_else
+ (eq (reg:CCO FLAGS_REG) (const_int 0))
+ (label_ref (match_operand 3))
+ (pc)))])
+
+(define_insn "*mulv<mode>4"
+ [(set (reg:CCO FLAGS_REG)
+ (eq:CCO (mult:<DWI>
+ (sign_extend:<DWI>
+ (match_operand:SWI 1 "nonimmediate_operand" "%rm,rm,0"))
+ (sign_extend:<DWI>
+ (match_operand:SWI 2 "<general_operand>" "K,<i>,mr")))
+ (sign_extend:<DWI>
+ (mult:SWI (match_dup 1) (match_dup 2)))))
+ (set (match_operand:SWI 0 "register_operand" "=r,r,r")
+ (mult:SWI (match_dup 1) (match_dup 2)))]
+ "!(MEM_P (operands[1]) && MEM_P (operands[2]))"
+ "@
+ imul{<imodesuffix>}\t{%2, %1, %0|%0, %1, %2}
+ imul{<imodesuffix>}\t{%2, %1, %0|%0, %1, %2}
+ imul{<imodesuffix>}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "imul")
+ (set_attr "prefix_0f" "0,0,1")
+ (set (attr "athlon_decode")
+ (cond [(eq_attr "cpu" "athlon")
+ (const_string "vector")
+ (eq_attr "alternative" "1")
+ (const_string "vector")
+ (and (eq_attr "alternative" "2")
+ (match_operand 1 "memory_operand"))
+ (const_string "vector")]
+ (const_string "direct")))
+ (set (attr "amdfam10_decode")
+ (cond [(and (eq_attr "alternative" "0,1")
+ (match_operand 1 "memory_operand"))
+ (const_string "vector")]
+ (const_string "direct")))
+ (set_attr "bdver1_decode" "direct")
+ (set_attr "mode" "<MODE>")])
+
(define_expand "<u>mul<mode><dwi>3"
[(parallel [(set (match_operand:<DWI> 0 "register_operand")
(mult:<DWI>
@@ -8617,6 +8763,36 @@
[(set_attr "type" "negnot")
(set_attr "mode" "SI")])
+;; Negate with jump on overflow.
+(define_expand "negv<mode>3"
+ [(parallel [(set (reg:CCO FLAGS_REG)
+ (ne:CCO (match_operand:SWI 1 "register_operand")
+ (match_dup 3)))
+ (set (match_operand:SWI 0 "register_operand")
+ (neg:SWI (match_dup 1)))])
+ (set (pc) (if_then_else
+ (eq (reg:CCO FLAGS_REG) (const_int 0))
+ (label_ref (match_operand 2))
+ (pc)))]
+ ""
+{
+ operands[3]
+ = gen_int_mode (HOST_WIDE_INT_1U << (GET_MODE_BITSIZE (<MODE>mode) - 1),
+ <MODE>mode);
+})
+
+(define_insn "*negv<mode>3"
+ [(set (reg:CCO FLAGS_REG)
+ (ne:CCO (match_operand:SWI 1 "nonimmediate_operand" "0")
+ (match_operand:SWI 2 "const_int_operand")))
+ (set (match_operand:SWI 0 "nonimmediate_operand" "=<r>m")
+ (neg:SWI (match_dup 1)))]
+ "ix86_unary_operator_ok (NEG, <MODE>mode, operands)
+ && mode_signbit_p (<MODE>mode, operands[2])"
+ "neg{<imodesuffix>}\t%0"
+ [(set_attr "type" "negnot")
+ (set_attr "mode" "<MODE>")])
+
;; Changing of sign for FP values is doable using integer unit too.
(define_expand "<code><mode>2"
@@ -17288,7 +17464,7 @@
&& REGNO (operands[0]) == REGNO (operands[1])
&& peep2_regno_dead_p (0, FLAGS_REG)"
[(parallel [(set (match_dup 0)
- (zero_extend (ashift:SI (match_dup 1) (match_dup 2))))
+ (zero_extend:DI (ashift:SI (match_dup 1) (match_dup 2))))
(clobber (reg:CC FLAGS_REG))])]
"operands[2] = GEN_INT (exact_log2 (INTVAL (operands[2])));")
diff --git a/gcc/config/i386/ia32intrin.h b/gcc/config/i386/ia32intrin.h
index b26dc46d256..65642e46023 100644
--- a/gcc/config/i386/ia32intrin.h
+++ b/gcc/config/i386/ia32intrin.h
@@ -238,6 +238,22 @@ __rorq (unsigned long long __X, int __C)
return (__X >> __C) | (__X << (64 - __C));
}
+/* Read flags register */
+extern __inline unsigned long long
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+__readeflags (void)
+{
+ return __builtin_ia32_readeflags_u64 ();
+}
+
+/* Write flags register */
+extern __inline void
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+__writeeflags (unsigned long long X)
+{
+ __builtin_ia32_writeeflags_u64 (X);
+}
+
#define _bswap64(a) __bswapq(a)
#define _popcnt64(a) __popcntq(a)
#define _lrotl(a,b) __rolq((a), (b))
@@ -245,6 +261,23 @@ __rorq (unsigned long long __X, int __C)
#else
#define _lrotl(a,b) __rold((a), (b))
#define _lrotr(a,b) __rord((a), (b))
+
+/* Read flags register */
+extern __inline unsigned int
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+__readeflags (void)
+{
+ return __builtin_ia32_readeflags_u32 ();
+}
+
+/* Write flags register */
+extern __inline void
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+__writeeflags (unsigned int X)
+{
+ __builtin_ia32_writeeflags_u32 (X);
+}
+
#endif
#define _bit_scan_forward(a) __bsfd(a)
diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md
index 7a47f27e94e..30895c67c09 100644
--- a/gcc/config/i386/sse.md
+++ b/gcc/config/i386/sse.md
@@ -931,6 +931,7 @@
}
[(set_attr "type" "ssemov")
(set_attr "movu" "1")
+ (set_attr "ssememalign" "8")
(set_attr "prefix" "maybe_vex")
(set (attr "mode")
(cond [(match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL")
@@ -961,6 +962,7 @@
}
[(set_attr "type" "ssemov")
(set_attr "movu" "1")
+ (set_attr "ssememalign" "8")
(set_attr "prefix" "maybe_vex")
(set (attr "mode")
(cond [(ior (match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL")
@@ -1020,6 +1022,7 @@
}
[(set_attr "type" "ssemov")
(set_attr "movu" "1")
+ (set_attr "ssememalign" "8")
(set (attr "prefix_data16")
(if_then_else
(match_test "TARGET_AVX")
@@ -1059,6 +1062,7 @@
}
[(set_attr "type" "ssemov")
(set_attr "movu" "1")
+ (set_attr "ssememalign" "8")
(set (attr "prefix_data16")
(if_then_else
(match_test "TARGET_AVX")
@@ -1105,6 +1109,7 @@
"%vlddqu\t{%1, %0|%0, %1}"
[(set_attr "type" "ssemov")
(set_attr "movu" "1")
+ (set_attr "ssememalign" "8")
(set (attr "prefix_data16")
(if_then_else
(match_test "TARGET_AVX")
@@ -1369,6 +1374,7 @@
vrcpss\t{%1, %2, %0|%0, %2, %k1}"
[(set_attr "isa" "noavx,avx")
(set_attr "type" "sse")
+ (set_attr "ssememalign" "32")
(set_attr "atom_sse_attr" "rcp")
(set_attr "btver2_sse_attr" "rcp")
(set_attr "prefix" "orig,vex")
@@ -1509,6 +1515,7 @@
vrsqrtss\t{%1, %2, %0|%0, %2, %k1}"
[(set_attr "isa" "noavx,avx")
(set_attr "type" "sse")
+ (set_attr "ssememalign" "32")
(set_attr "prefix" "orig,vex")
(set_attr "mode" "SF")])
@@ -3853,6 +3860,7 @@
"%vcvtdq2pd\t{%1, %0|%0, %q1}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "maybe_vex")
+ (set_attr "ssememalign" "64")
(set_attr "mode" "V2DF")])
(define_insn "<mask_codefor>avx512f_cvtpd2dq512<mask_name>"
@@ -4725,6 +4733,7 @@
%vmovhps\t{%2, %0|%q0, %2}"
[(set_attr "isa" "noavx,avx,noavx,avx,*")
(set_attr "type" "ssemov")
+ (set_attr "ssememalign" "64")
(set_attr "prefix" "orig,vex,orig,vex,maybe_vex")
(set_attr "mode" "V4SF,V4SF,V2SF,V2SF,V2SF")])
@@ -4770,6 +4779,7 @@
%vmovlps\t{%2, %H0|%H0, %2}"
[(set_attr "isa" "noavx,avx,noavx,avx,*")
(set_attr "type" "ssemov")
+ (set_attr "ssememalign" "64")
(set_attr "prefix" "orig,vex,orig,vex,maybe_vex")
(set_attr "mode" "V4SF,V4SF,V2SF,V2SF,V2SF")])
@@ -5174,6 +5184,7 @@
%vmovhlps\t{%1, %d0|%d0, %1}
%vmovlps\t{%H1, %d0|%d0, %H1}"
[(set_attr "type" "ssemov")
+ (set_attr "ssememalign" "64")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "V2SF,V4SF,V2SF")])
@@ -5213,6 +5224,7 @@
%vmovlps\t{%2, %H0|%H0, %2}"
[(set_attr "isa" "noavx,avx,noavx,avx,*")
(set_attr "type" "ssemov")
+ (set_attr "ssememalign" "64")
(set_attr "prefix" "orig,vex,orig,vex,maybe_vex")
(set_attr "mode" "V2SF,V2SF,V4SF,V4SF,V2SF")])
@@ -5266,6 +5278,7 @@
%vmovlps\t{%2, %0|%q0, %2}"
[(set_attr "isa" "noavx,avx,noavx,avx,*")
(set_attr "type" "sseshuf,sseshuf,ssemov,ssemov,ssemov")
+ (set_attr "ssememalign" "64")
(set_attr "length_immediate" "1,1,*,*,*")
(set_attr "prefix" "orig,vex,orig,vex,maybe_vex")
(set_attr "mode" "V4SF,V4SF,V2SF,V2SF,V2SF")])
@@ -6224,7 +6237,8 @@
vmovlpd\t{%H1, %2, %0|%0, %2, %H1}
%vmovhpd\t{%1, %0|%q0, %1}"
[(set_attr "isa" "noavx,avx,sse3,noavx,avx,*")
- (set_attr "type" "sselog,sselog,sselog,ssemov,ssemov,ssemov")
+ (set_attr "type" "sselog,sselog,sselog,ssemov,ssemov,ssemov")
+ (set_attr "ssememalign" "64")
(set_attr "prefix_data16" "*,*,*,1,*,1")
(set_attr "prefix" "orig,vex,maybe_vex,orig,vex,maybe_vex")
(set_attr "mode" "V2DF,V2DF,DF,V1DF,V1DF,V1DF")])
@@ -6368,6 +6382,7 @@
%vmovlpd\t{%2, %H0|%H0, %2}"
[(set_attr "isa" "noavx,avx,sse3,noavx,avx,*")
(set_attr "type" "sselog,sselog,sselog,ssemov,ssemov,ssemov")
+ (set_attr "ssememalign" "64")
(set_attr "prefix_data16" "*,*,*,1,*,1")
(set_attr "prefix" "orig,vex,maybe_vex,orig,vex,maybe_vex")
(set_attr "mode" "V2DF,V2DF,DF,V1DF,V1DF,V1DF")])
@@ -6959,6 +6974,7 @@
movhlps\t{%1, %0|%0, %1}
movlps\t{%H1, %0|%0, %H1}"
[(set_attr "type" "ssemov")
+ (set_attr "ssememalign" "64")
(set_attr "mode" "V2SF,V4SF,V2SF")])
;; Avoid combining registers from different units in a single alternative,
@@ -7051,6 +7067,7 @@
#"
[(set_attr "isa" "noavx,avx,noavx,avx,*,*,*")
(set_attr "type" "ssemov,ssemov,sselog,sselog,ssemov,fmov,imov")
+ (set_attr "ssememalign" "64")
(set_attr "prefix_data16" "1,*,*,*,*,*,*")
(set_attr "prefix" "orig,vex,orig,vex,*,*,*")
(set_attr "mode" "V1DF,V1DF,V2DF,V2DF,DF,DF,DF")])
@@ -7119,6 +7136,7 @@
(const_string "imov")
]
(const_string "ssemov")))
+ (set_attr "ssememalign" "64")
(set_attr "prefix_data16" "*,1,*,*,*,*,1,*,*,*,*")
(set_attr "length_immediate" "*,*,*,*,*,1,*,*,*,*,*")
(set_attr "prefix" "maybe_vex,orig,vex,orig,vex,orig,orig,vex,*,*,*")
@@ -7163,6 +7181,7 @@
(const_string "1")
(const_string "*")))
(set_attr "length_immediate" "*,*,*,*,*,1,*,*,*")
+ (set_attr "ssememalign" "64")
(set_attr "prefix" "orig,vex,orig,vex,maybe_vex,orig,orig,vex,maybe_vex")
(set_attr "mode" "DF,DF,V1DF,V1DF,V1DF,V2DF,V1DF,V1DF,V1DF")])
@@ -11459,6 +11478,7 @@
"TARGET_SSE4_1"
"%vpmov<extsuffix>bw\t{%1, %0|%0, %q1}"
[(set_attr "type" "ssemov")
+ (set_attr "ssememalign" "64")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "TI")])
@@ -11499,6 +11519,7 @@
"TARGET_SSE4_1"
"%vpmov<extsuffix>bd\t{%1, %0|%0, %k1}"
[(set_attr "type" "ssemov")
+ (set_attr "ssememalign" "32")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "TI")])
@@ -11534,6 +11555,7 @@
"TARGET_SSE4_1"
"%vpmov<extsuffix>wd\t{%1, %0|%0, %q1}"
[(set_attr "type" "ssemov")
+ (set_attr "ssememalign" "64")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "TI")])
@@ -11576,6 +11598,7 @@
"TARGET_SSE4_1"
"%vpmov<extsuffix>bq\t{%1, %0|%0, %w1}"
[(set_attr "type" "ssemov")
+ (set_attr "ssememalign" "16")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "TI")])
@@ -11613,6 +11636,7 @@
"TARGET_SSE4_1"
"%vpmov<extsuffix>wq\t{%1, %0|%0, %k1}"
[(set_attr "type" "ssemov")
+ (set_attr "ssememalign" "32")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "TI")])
@@ -11646,6 +11670,7 @@
"TARGET_SSE4_1"
"%vpmov<extsuffix>dq\t{%1, %0|%0, %q1}"
[(set_attr "type" "ssemov")
+ (set_attr "ssememalign" "64")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "TI")])
@@ -11939,6 +11964,7 @@
[(set_attr "type" "sselog")
(set_attr "prefix_data16" "1")
(set_attr "prefix_extra" "1")
+ (set_attr "ssememalign" "8")
(set_attr "length_immediate" "1")
(set_attr "memory" "none,load")
(set_attr "mode" "TI")])
@@ -12001,6 +12027,7 @@
[(set_attr "type" "sselog")
(set_attr "prefix_data16" "1")
(set_attr "prefix_extra" "1")
+ (set_attr "ssememalign" "8")
(set_attr "length_immediate" "1")
(set_attr "memory" "load")
(set_attr "mode" "TI")])
@@ -12028,6 +12055,7 @@
(set_attr "prefix_data16" "1")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "maybe_vex")
+ (set_attr "ssememalign" "8")
(set_attr "length_immediate" "1")
(set_attr "btver2_decode" "vector")
(set_attr "memory" "none,load")
@@ -12055,6 +12083,7 @@
[(set_attr "type" "sselog")
(set_attr "prefix_data16" "1")
(set_attr "prefix_extra" "1")
+ (set_attr "ssememalign" "8")
(set_attr "length_immediate" "1")
(set_attr "prefix" "maybe_vex")
(set_attr "btver2_decode" "vector")
@@ -12081,6 +12110,7 @@
[(set_attr "type" "sselog")
(set_attr "prefix_data16" "1")
(set_attr "prefix_extra" "1")
+ (set_attr "ssememalign" "8")
(set_attr "length_immediate" "1")
(set_attr "memory" "none,load,none,load")
(set_attr "btver2_decode" "vector,vector,vector,vector")
@@ -12134,6 +12164,7 @@
[(set_attr "type" "sselog")
(set_attr "prefix_data16" "1")
(set_attr "prefix_extra" "1")
+ (set_attr "ssememalign" "8")
(set_attr "length_immediate" "1")
(set_attr "memory" "none,load")
(set_attr "mode" "TI")])
@@ -12187,6 +12218,7 @@
[(set_attr "type" "sselog")
(set_attr "prefix_data16" "1")
(set_attr "prefix_extra" "1")
+ (set_attr "ssememalign" "8")
(set_attr "length_immediate" "1")
(set_attr "memory" "load")
(set_attr "mode" "TI")])
@@ -12209,6 +12241,7 @@
[(set_attr "type" "sselog")
(set_attr "prefix_data16" "1")
(set_attr "prefix_extra" "1")
+ (set_attr "ssememalign" "8")
(set_attr "length_immediate" "1")
(set_attr "prefix" "maybe_vex")
(set_attr "memory" "none,load")
@@ -12233,6 +12266,7 @@
[(set_attr "type" "sselog")
(set_attr "prefix_data16" "1")
(set_attr "prefix_extra" "1")
+ (set_attr "ssememalign" "8")
(set_attr "length_immediate" "1")
(set_attr "prefix" "maybe_vex")
(set_attr "memory" "none,load")
@@ -12257,6 +12291,7 @@
[(set_attr "type" "sselog")
(set_attr "prefix_data16" "1")
(set_attr "prefix_extra" "1")
+ (set_attr "ssememalign" "8")
(set_attr "length_immediate" "1")
(set_attr "memory" "none,load,none,load")
(set_attr "prefix" "maybe_vex")
@@ -14218,6 +14253,23 @@
(set_attr "btver2_decode" "vector")
(set_attr "mode" "<sseinsnmode>")])
+(define_expand "maskload<mode>"
+ [(set (match_operand:V48_AVX2 0 "register_operand")
+ (unspec:V48_AVX2
+ [(match_operand:<sseintvecmode> 2 "register_operand")
+ (match_operand:V48_AVX2 1 "memory_operand")]
+ UNSPEC_MASKMOV))]
+ "TARGET_AVX")
+
+(define_expand "maskstore<mode>"
+ [(set (match_operand:V48_AVX2 0 "memory_operand")
+ (unspec:V48_AVX2
+ [(match_operand:<sseintvecmode> 2 "register_operand")
+ (match_operand:V48_AVX2 1 "register_operand")
+ (match_dup 0)]
+ UNSPEC_MASKMOV))]
+ "TARGET_AVX")
+
(define_insn_and_split "avx_<castmode><avxsizesuffix>_<castmode>"
[(set (match_operand:AVX256MODE2P 0 "nonimmediate_operand" "=x,m")
(unspec:AVX256MODE2P
diff --git a/gcc/config/ia64/ia64.c b/gcc/config/ia64/ia64.c
index df4a4b02d4c..99bc094132c 100644
--- a/gcc/config/ia64/ia64.c
+++ b/gcc/config/ia64/ia64.c
@@ -7159,7 +7159,9 @@ ia64_single_set (rtx insn)
switch (recog_memoized (insn))
{
case CODE_FOR_prologue_allocate_stack:
+ case CODE_FOR_prologue_allocate_stack_pr:
case CODE_FOR_epilogue_deallocate_stack:
+ case CODE_FOR_epilogue_deallocate_stack_pr:
ret = XVECEXP (x, 0, 0);
break;
@@ -9611,7 +9613,7 @@ emit_predicate_relation_info (void)
{
basic_block bb;
- FOR_EACH_BB_REVERSE (bb)
+ FOR_EACH_BB_REVERSE_FN (bb, cfun)
{
int r;
rtx head = BB_HEAD (bb);
@@ -9639,7 +9641,7 @@ emit_predicate_relation_info (void)
relations around them. Otherwise the assembler will assume the call
returns, and complain about uses of call-clobbered predicates after
the call. */
- FOR_EACH_BB_REVERSE (bb)
+ FOR_EACH_BB_REVERSE_FN (bb, cfun)
{
rtx insn = BB_HEAD (bb);
@@ -9686,7 +9688,7 @@ ia64_reorg (void)
/* We can't let modulo-sched prevent us from scheduling any bbs,
since we need the final schedule to produce bundle information. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
bb->flags &= ~BB_DISABLE_SCHEDULE;
initiate_bundle_states ();
diff --git a/gcc/config/ia64/ia64.md b/gcc/config/ia64/ia64.md
index 4d9d4e0129f..bc4e8cbfd1b 100644
--- a/gcc/config/ia64/ia64.md
+++ b/gcc/config/ia64/ia64.md
@@ -4652,6 +4652,8 @@
;; This prevents the scheduler from moving the SP decrement past FP-relative
;; stack accesses. This is the same as adddi3 plus the extra set.
+;; Explicit predicated version of insn needed to check by CODE_FOR_
+;; in ia64_single_set, where despite of 2 sets this define_insn should be OK.
(define_insn "prologue_allocate_stack"
[(set (match_operand:DI 0 "register_operand" "=r,r,r")
@@ -4664,10 +4666,31 @@
add %0 = %1, %2
adds %0 = %2, %1
addl %0 = %2, %1"
- [(set_attr "itanium_class" "ialu")])
+ [(set_attr "itanium_class" "ialu")
+ (set_attr "predicable" "no")])
+
+(define_insn "prologue_allocate_stack_pr"
+ [(cond_exec (match_operator 0 ("predicate_operator")
+ [(match_operand:BI 1 ("register_operand") ("c,c,c"))
+ (const_int 0)])
+ (parallel
+ [(set (match_operand:DI 2 "register_operand" "=r,r,r")
+ (plus:DI (match_operand:DI 3 "register_operand" "%r,r,a")
+ (match_operand:DI 4 "gr_reg_or_22bit_operand" "r,I,J")))
+ (set (match_operand:DI 5 "register_operand" "+r,r,r")
+ (match_dup 5))]))]
+ ""
+ "@
+ (%J0) add %2 = %3, %4
+ (%J0) adds %2 = %3, %4
+ (%J0) addl %2 = %3, %4"
+ [(set_attr "itanium_class" "ialu")
+ (set_attr "predicable" "no")])
;; This prevents the scheduler from moving the SP restore past FP-relative
;; stack accesses. This is similar to movdi plus the extra set.
+;; Explicitly predicated version of the insn, recognized via its CODE_FOR_
+;; code in ia64_single_set, where this define_insn is OK despite having 2 sets.
(define_insn "epilogue_deallocate_stack"
[(set (match_operand:DI 0 "register_operand" "=r")
@@ -4675,7 +4698,21 @@
(set (match_dup 1) (match_dup 1))]
""
"mov %0 = %1"
- [(set_attr "itanium_class" "ialu")])
+ [(set_attr "itanium_class" "ialu")
+ (set_attr "predicable" "no")])
+
+(define_insn "epilogue_deallocate_stack_pr"
+ [(cond_exec (match_operator 0 ("predicate_operator")
+ [(match_operand:BI 1 ("register_operand") ("c"))
+ (const_int 0)])
+ (parallel
+ [(set (match_operand:DI 2 "register_operand" "=r")
+ (match_operand:DI 3 "register_operand" "+r"))
+ (set (match_dup 3) (match_dup 3))]))]
+ ""
+ "(%J0) mov %2 = %3"
+ [(set_attr "itanium_class" "ialu")
+ (set_attr "predicable" "no")])
;; As USE insns aren't meaningful after reload, this is used instead
;; to prevent deleting instructions setting registers for EH handling
diff --git a/gcc/config/linux-android.h b/gcc/config/linux-android.h
index 831a19c416e..2c87c846cd7 100644
--- a/gcc/config/linux-android.h
+++ b/gcc/config/linux-android.h
@@ -57,6 +57,3 @@
#define ANDROID_ENDFILE_SPEC \
"%{shared: crtend_so%O%s;: crtend_android%O%s}"
-
-#undef TARGET_HAS_IFUNC_P
-#define TARGET_HAS_IFUNC_P linux_android_has_ifunc_p
diff --git a/gcc/config/linux-protos.h b/gcc/config/linux-protos.h
index d1f0f926367..cfc660ab4df 100644
--- a/gcc/config/linux-protos.h
+++ b/gcc/config/linux-protos.h
@@ -18,6 +18,6 @@ You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
-extern bool linux_android_has_ifunc_p (void);
+extern bool linux_has_ifunc_p (void);
-extern bool linux_android_libc_has_function (enum function_class fn_class);
+extern bool linux_libc_has_function (enum function_class fn_class);
diff --git a/gcc/config/linux-android.c b/gcc/config/linux.c
index 4a4b48d9882..ffaf614bc1a 100644
--- a/gcc/config/linux-android.c
+++ b/gcc/config/linux.c
@@ -27,13 +27,13 @@ along with GCC; see the file COPYING3. If not see
/* Android does not support GNU indirect functions. */
bool
-linux_android_has_ifunc_p (void)
+linux_has_ifunc_p (void)
{
- return TARGET_ANDROID ? false : HAVE_GNU_INDIRECT_FUNCTION;
+ return OPTION_BIONIC ? false : HAVE_GNU_INDIRECT_FUNCTION;
}
bool
-linux_android_libc_has_function (enum function_class fn_class)
+linux_libc_has_function (enum function_class fn_class)
{
if (OPTION_GLIBC)
return true;
diff --git a/gcc/config/linux.h b/gcc/config/linux.h
index 8116e698d94..fb1a8de7dfd 100644
--- a/gcc/config/linux.h
+++ b/gcc/config/linux.h
@@ -99,7 +99,31 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
#undef TARGET_HAS_BIONIC
#define TARGET_HAS_BIONIC (OPTION_BIONIC)
+#if (DEFAULT_LIBC == LIBC_UCLIBC) && defined (SINGLE_LIBC) /* uClinux */
+/* This is a *uclinux* target.  We don't define the macros below to the normal
+   Linux versions, because doing so would require *uclinux* targets to include
+   linux.c, linux-protos.h, linux.opt, etc.  We could, alternatively, add these
+   files to *uclinux* targets, but that would only pollute the option list
+   (add -mglibc, etc.) without adding any useful support. */
+
+/* Define TARGET_LIBC_HAS_FUNCTION for *uclinux* targets to
+   no_c99_libc_has_function, because uClibc does not normally have a C99
+   runtime.  If, in special cases, uClibc does have a C99 runtime, this
+   should be defined to a new hook.  Also please note that for targets like
+   *-linux-uclibc a similar check will also need to be added to
+   linux_libc_has_function. */
+# undef TARGET_LIBC_HAS_FUNCTION
+# define TARGET_LIBC_HAS_FUNCTION no_c99_libc_has_function
+
+#else /* !uClinux, i.e., normal Linux */
+
+/* IFUNCs are supported by glibc, but not by uClibc or Bionic. */
+# undef TARGET_HAS_IFUNC_P
+# define TARGET_HAS_IFUNC_P linux_has_ifunc_p
+
/* Determine what functions are present at the runtime;
this includes full c99 runtime and sincos. */
-#undef TARGET_LIBC_HAS_FUNCTION
-#define TARGET_LIBC_HAS_FUNCTION linux_android_libc_has_function
+# undef TARGET_LIBC_HAS_FUNCTION
+# define TARGET_LIBC_HAS_FUNCTION linux_libc_has_function
+
+#endif
diff --git a/gcc/config/lm32/lm32.h b/gcc/config/lm32/lm32.h
index d19adea8822..edb96b7b0b2 100644
--- a/gcc/config/lm32/lm32.h
+++ b/gcc/config/lm32/lm32.h
@@ -73,7 +73,6 @@
#define BYTES_BIG_ENDIAN 1
#define WORDS_BIG_ENDIAN 1
-#define BITS_PER_UNIT 8
#define BITS_PER_WORD 32
#define UNITS_PER_WORD 4
diff --git a/gcc/config/lm32/uclinux-elf.h b/gcc/config/lm32/uclinux-elf.h
index a5e8163cf6f..f2a94f19a23 100644
--- a/gcc/config/lm32/uclinux-elf.h
+++ b/gcc/config/lm32/uclinux-elf.h
@@ -76,6 +76,3 @@
#undef CC1_SPEC
#define CC1_SPEC "%{G*} %{!fno-PIC:-fPIC}"
-
-#undef TARGET_LIBC_HAS_FUNCTION
-#define TARGET_LIBC_HAS_FUNCTION no_c99_libc_has_function
diff --git a/gcc/config/m32c/m32c.h b/gcc/config/m32c/m32c.h
index 3ceb093f7d9..b7b5aa46924 100644
--- a/gcc/config/m32c/m32c.h
+++ b/gcc/config/m32c/m32c.h
@@ -140,7 +140,6 @@ machine_function;
matches "int". Pointers are 16 bits for R8C/M16C (when TARGET_A16
is true) and 24 bits for M32CM/M32C (when TARGET_A24 is true), but
24-bit pointers are stored in 32-bit words. */
-#define BITS_PER_UNIT 8
#define UNITS_PER_WORD 2
#define POINTER_SIZE (TARGET_A16 ? 16 : 32)
#define POINTERS_EXTEND_UNSIGNED 1
diff --git a/gcc/config/m68k/uclinux.h b/gcc/config/m68k/uclinux.h
index b1af7d2c585..8d743126547 100644
--- a/gcc/config/m68k/uclinux.h
+++ b/gcc/config/m68k/uclinux.h
@@ -67,6 +67,3 @@ along with GCC; see the file COPYING3. If not see
sections. */
#undef M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P
#define M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P 1
-
-#undef TARGET_LIBC_HAS_FUNCTION
-#define TARGET_LIBC_HAS_FUNCTION no_c99_libc_has_function
diff --git a/gcc/config/mcore/mcore.md b/gcc/config/mcore/mcore.md
index c0568d338d5..9ac68c40644 100644
--- a/gcc/config/mcore/mcore.md
+++ b/gcc/config/mcore/mcore.md
@@ -1288,7 +1288,7 @@
}")
(define_insn "movdi_i"
- [(set (match_operand:DI 0 "general_operand" "=r,r,r,r,a,r,m")
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,r,a,r,m")
(match_operand:DI 1 "mcore_general_movsrc_operand" "I,M,N,r,R,m,r"))]
""
"* return mcore_output_movedouble (operands, DImode);"
@@ -1307,7 +1307,7 @@
}")
(define_insn "movsf_i"
- [(set (match_operand:SF 0 "general_operand" "=r,r,m")
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,m")
(match_operand:SF 1 "general_operand" "r,m,r"))]
""
"@
@@ -1329,7 +1329,7 @@
}")
(define_insn "movdf_k"
- [(set (match_operand:DF 0 "general_operand" "=r,r,m")
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=r,r,m")
(match_operand:DF 1 "general_operand" "r,m,r"))]
""
"* return mcore_output_movedouble (operands, DFmode);"
diff --git a/gcc/config/microblaze/microblaze.h b/gcc/config/microblaze/microblaze.h
index eb8e45ce17b..dcca4ac9f5f 100644
--- a/gcc/config/microblaze/microblaze.h
+++ b/gcc/config/microblaze/microblaze.h
@@ -193,7 +193,6 @@ extern enum pipeline_type microblaze_pipe;
#define BITS_BIG_ENDIAN 0
#define BYTES_BIG_ENDIAN (TARGET_LITTLE_ENDIAN == 0)
#define WORDS_BIG_ENDIAN (BYTES_BIG_ENDIAN)
-#define BITS_PER_UNIT 8
#define BITS_PER_WORD 32
#define UNITS_PER_WORD 4
#define MIN_UNITS_PER_WORD 4
diff --git a/gcc/config/mips/mips.c b/gcc/config/mips/mips.c
index 36ba6df7a4c..e65dc6bda6a 100644
--- a/gcc/config/mips/mips.c
+++ b/gcc/config/mips/mips.c
@@ -15071,15 +15071,15 @@ r10k_insert_cache_barriers (void)
/* Bit X of PROTECTED_BBS is set if the last operation in basic block
X is protected by a cache barrier. */
- protected_bbs = sbitmap_alloc (last_basic_block);
+ protected_bbs = sbitmap_alloc (last_basic_block_for_fn (cfun));
bitmap_clear (protected_bbs);
/* Iterate over the basic blocks in reverse post-order. */
- rev_post_order = XNEWVEC (int, last_basic_block);
+ rev_post_order = XNEWVEC (int, last_basic_block_for_fn (cfun));
n = pre_and_rev_post_order_compute (NULL, rev_post_order, false);
for (i = 0; i < n; i++)
{
- bb = BASIC_BLOCK (rev_post_order[i]);
+ bb = BASIC_BLOCK_FOR_FN (cfun, rev_post_order[i]);
/* If this block is only reached by unconditional edges, and if the
source of every edge is protected, the beginning of the block is
@@ -15332,7 +15332,7 @@ mips_annotate_pic_calls (void)
basic_block bb;
rtx insn;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
FOR_BB_INSNS (bb, insn)
{
rtx call, reg, symbol, second_call;
diff --git a/gcc/config/moxie/moxie.md b/gcc/config/moxie/moxie.md
index 92f65c19c8a..2e6a699e941 100644
--- a/gcc/config/moxie/moxie.md
+++ b/gcc/config/moxie/moxie.md
@@ -223,7 +223,7 @@
}")
(define_insn "*movsi"
- [(set (match_operand:SI 0 "general_operand" "=r,r,r,W,A,r,r,B,r")
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,W,A,r,r,B,r")
(match_operand:SI 1 "moxie_general_movsrc_operand" "O,r,i,r,r,W,A,r,B"))]
"register_operand (operands[0], SImode)
|| register_operand (operands[1], SImode)"
@@ -251,7 +251,7 @@
}")
(define_insn "*movqi"
- [(set (match_operand:QI 0 "general_operand" "=r,r,r,W,A,r,r,B,r")
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,r,W,A,r,r,B,r")
(match_operand:QI 1 "moxie_general_movsrc_operand" "O,r,i,r,r,W,A,r,B"))]
"register_operand (operands[0], QImode)
|| register_operand (operands[1], QImode)"
@@ -279,7 +279,7 @@
}")
(define_insn "*movhi"
- [(set (match_operand:HI 0 "general_operand" "=r,r,r,W,A,r,r,B,r")
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,r,W,A,r,r,B,r")
(match_operand:HI 1 "moxie_general_movsrc_operand" "O,r,i,r,r,W,A,r,B"))]
"(register_operand (operands[0], HImode)
|| register_operand (operands[1], HImode))"
diff --git a/gcc/config/moxie/uclinux.h b/gcc/config/moxie/uclinux.h
index fb8c92542b5..a29d38075c8 100644
--- a/gcc/config/moxie/uclinux.h
+++ b/gcc/config/moxie/uclinux.h
@@ -32,3 +32,11 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
#undef TARGET_LIBC_HAS_FUNCTION
#define TARGET_LIBC_HAS_FUNCTION no_c99_libc_has_function
+
+/* Like the definition in gcc.c, but for purposes of uClinux, every link is
+ static. */
+#define MFWRAP_SPEC " %{fmudflap|fmudflapth: \
+ --wrap=malloc --wrap=free --wrap=calloc --wrap=realloc\
+ --wrap=mmap --wrap=munmap --wrap=alloca\
+ %{fmudflapth: --wrap=pthread_create\
+}} %{fmudflap|fmudflapth: --wrap=main}"
diff --git a/gcc/config/msp430/msp430.c b/gcc/config/msp430/msp430.c
index e3f6712596a..b2d8953c0da 100644
--- a/gcc/config/msp430/msp430.c
+++ b/gcc/config/msp430/msp430.c
@@ -188,7 +188,7 @@ msp430_mcu_name (void)
mcu_name[i] = TOUPPER (mcu_name[i]);
return mcu_name;
}
-
+
return msp430x ? "__MSP430XGENERIC__" : "__MSP430GENERIC__";
}
@@ -966,6 +966,12 @@ msp430_is_interrupt_func (void)
return is_attr_func ("interrupt");
}
+static bool
+is_wakeup_func (void)
+{
+ return msp430_is_interrupt_func () && is_attr_func ("wakeup");
+}
+
static inline bool
is_naked_func (void)
{
@@ -1005,6 +1011,8 @@ msp430_start_function (FILE *outfile, HOST_WIDE_INT hwi_local ATTRIBUTE_UNUSED)
fprintf (outfile, "reentrant ");
if (is_critical_func ())
fprintf (outfile, "critical ");
+ if (is_wakeup_func ())
+ fprintf (outfile, "wakeup ");
fprintf (outfile, "\n");
}
@@ -1131,6 +1139,7 @@ const struct attribute_spec msp430_attribute_table[] =
{ "naked", 0, 0, true, false, false, msp430_attr, false },
{ "reentrant", 0, 0, true, false, false, msp430_attr, false },
{ "critical", 0, 0, true, false, false, msp430_attr, false },
+ { "wakeup", 0, 0, true, false, false, msp430_attr, false },
{ NULL, 0, 0, false, false, false, NULL, false }
};
@@ -1409,6 +1418,14 @@ msp430_expand_epilogue (int is_eh)
emit_insn (gen_epilogue_start_marker ());
+ if (is_wakeup_func ())
+ /* Clear the SCG1, SCG0, OSCOFF and CPUOFF bits in the saved copy of the
+ status register currently residing on the stack. When this function
+ executes its RETI instruction the SR will be updated with this saved
+ value, thus ensuring that the processor is woken up from any low power
+ state in which it may be residing. */
+ emit_insn (gen_bic_SR (GEN_INT (0xf0)));
+
fs = cfun->machine->framesize_locals + cfun->machine->framesize_outgoing;
increment_stack (fs);
@@ -1828,7 +1845,7 @@ msp430_output_labelref (FILE *file, const char *name)
static void
msp430_print_operand_raw (FILE * file, rtx op)
{
- int i;
+ HOST_WIDE_INT i;
switch (GET_CODE (op))
{
@@ -1839,9 +1856,9 @@ msp430_print_operand_raw (FILE * file, rtx op)
case CONST_INT:
i = INTVAL (op);
if (TARGET_ASM_HEX)
- fprintf (file, "%#x", i);
+ fprintf (file, "%#" HOST_WIDE_INT_PRINT "x", i);
else
- fprintf (file, "%d", i);
+ fprintf (file, "%" HOST_WIDE_INT_PRINT "d", i);
break;
case CONST:
diff --git a/gcc/config/msp430/msp430.md b/gcc/config/msp430/msp430.md
index 22a3953bf97..21720a47c11 100644
--- a/gcc/config/msp430/msp430.md
+++ b/gcc/config/msp430/msp430.md
@@ -362,9 +362,9 @@
; so that gcc knows when it can and can't optimize away the two
; halves.
(define_split
- [(set (match_operand:SI 0 "msp430_nonsubreg_operand" "=&rm")
- (plus:SI (match_operand:SI 1 "nonimmediate_operand" "%0")
- (match_operand:SI 2 "general_operand" "rmi")))
+ [(set (match_operand:SI 0 "msp430_nonsubreg_operand")
+ (plus:SI (match_operand:SI 1 "nonimmediate_operand")
+ (match_operand:SI 2 "general_operand")))
]
""
[(parallel [(set (match_operand:HI 3 "nonimmediate_operand" "=&rm")
@@ -1253,11 +1253,11 @@
"1"
"NOP"
)
-
+
(define_insn "disable_interrupts"
[(unspec_volatile [(const_int 0)] UNS_DINT)]
""
- "DINT"
+ "DINT \; NOP"
)
(define_insn "enable_interrupts"
diff --git a/gcc/config/pdp11/predicates.md b/gcc/config/pdp11/predicates.md
index 7ae9ee4f03d..e3b205187ae 100644
--- a/gcc/config/pdp11/predicates.md
+++ b/gcc/config/pdp11/predicates.md
@@ -42,7 +42,7 @@
(ior
(match_test "REGNO_REG_CLASS (REGNO (op)) == LOAD_FPU_REGS")
(match_test "REGNO_REG_CLASS (REGNO (op)) == NO_LOAD_FPU_REGS"))
- (match_test "general_operand (op, mode)")))
+ (match_operand 0 "general_operand")))
;; Accept anything nonimmediate_operand accepts, except that registers must
;; be FPU registers.
@@ -51,4 +51,4 @@
(ior
(match_test "REGNO_REG_CLASS (REGNO (op)) == LOAD_FPU_REGS")
(match_test "REGNO_REG_CLASS (REGNO (op)) == NO_LOAD_FPU_REGS"))
- (match_test "nonimmediate_operand (op, mode)")))
+ (match_operand 0 "nonimmediate_operand")))
diff --git a/gcc/config/picochip/picochip.c b/gcc/config/picochip/picochip.c
index 4756cb78b72..8861ffc7706 100644
--- a/gcc/config/picochip/picochip.c
+++ b/gcc/config/picochip/picochip.c
@@ -3174,7 +3174,7 @@ reorder_var_tracking_notes (void)
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn, next, last_insn = NULL_RTX;
rtx queue = NULL_RTX;
diff --git a/gcc/config/picochip/picochip.h b/gcc/config/picochip/picochip.h
index 13414c6cc9c..3621f3d1b1c 100644
--- a/gcc/config/picochip/picochip.h
+++ b/gcc/config/picochip/picochip.h
@@ -92,8 +92,6 @@ extern enum picochip_dfa_type picochip_schedule_type;
#define BYTES_BIG_ENDIAN 0
#define WORDS_BIG_ENDIAN 0
-#define BITS_PER_UNIT 8
-
#define BITS_PER_WORD 16
#define UNITS_PER_WORD (BITS_PER_WORD / BITS_PER_UNIT)
diff --git a/gcc/config/rs6000/linux.h b/gcc/config/rs6000/linux.h
index f7df111b57e..694367030df 100644
--- a/gcc/config/rs6000/linux.h
+++ b/gcc/config/rs6000/linux.h
@@ -39,7 +39,7 @@
/* Determine what functions are present at the runtime;
this includes full c99 runtime and sincos. */
#undef TARGET_LIBC_HAS_FUNCTION
-#define TARGET_LIBC_HAS_FUNCTION linux_android_libc_has_function
+#define TARGET_LIBC_HAS_FUNCTION linux_libc_has_function
#undef TARGET_OS_CPP_BUILTINS
#define TARGET_OS_CPP_BUILTINS() \
diff --git a/gcc/config/rs6000/linux64.h b/gcc/config/rs6000/linux64.h
index 66b483ec116..1870c327da0 100644
--- a/gcc/config/rs6000/linux64.h
+++ b/gcc/config/rs6000/linux64.h
@@ -312,7 +312,7 @@ extern int dot_symbols;
/* Determine what functions are present at the runtime;
this includes full c99 runtime and sincos. */
#undef TARGET_LIBC_HAS_FUNCTION
-#define TARGET_LIBC_HAS_FUNCTION linux_android_libc_has_function
+#define TARGET_LIBC_HAS_FUNCTION linux_libc_has_function
#undef TARGET_OS_CPP_BUILTINS
#define TARGET_OS_CPP_BUILTINS() \
diff --git a/gcc/config/rs6000/rs6000.c b/gcc/config/rs6000/rs6000.c
index 00143a7ed74..542476453e3 100644
--- a/gcc/config/rs6000/rs6000.c
+++ b/gcc/config/rs6000/rs6000.c
@@ -16387,7 +16387,7 @@ rs6000_alloc_sdmode_stack_slot (void)
if (TARGET_NO_SDMODE_STACK)
return;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
tree ret = walk_gimple_op (gsi_stmt (gsi), rs6000_check_sdmode, NULL);
diff --git a/gcc/config/rs6000/rs6000.md b/gcc/config/rs6000/rs6000.md
index 089a229ab4e..7038fb7bf7f 100644
--- a/gcc/config/rs6000/rs6000.md
+++ b/gcc/config/rs6000/rs6000.md
@@ -2379,7 +2379,7 @@
;; Non-power7/cell, fall back to use lwbrx/stwbrx
(define_insn "*bswapdi2_64bit"
- [(set (match_operand:DI 0 "reg_or_mem_operand" "=&r,Z,??&r")
+ [(set (match_operand:DI 0 "reg_or_mem_operand" "=&r,Z,&r")
(bswap:DI (match_operand:DI 1 "reg_or_mem_operand" "Z,r,r")))
(clobber (match_scratch:DI 2 "=&b,&b,&r"))
(clobber (match_scratch:DI 3 "=&r,&r,&r"))
@@ -2544,7 +2544,7 @@
}")
(define_insn "bswapdi2_32bit"
- [(set (match_operand:DI 0 "reg_or_mem_operand" "=&r,Z,??&r")
+ [(set (match_operand:DI 0 "reg_or_mem_operand" "=&r,Z,&r")
(bswap:DI (match_operand:DI 1 "reg_or_mem_operand" "Z,r,r")))
(clobber (match_scratch:SI 2 "=&b,&b,X"))]
"!TARGET_POWERPC64 && (REG_P (operands[0]) || REG_P (operands[1]))"
diff --git a/gcc/config/s390/s390.c b/gcc/config/s390/s390.c
index a435b2dcbdc..f9b7cd0f741 100644
--- a/gcc/config/s390/s390.c
+++ b/gcc/config/s390/s390.c
@@ -7458,7 +7458,7 @@ s390_regs_ever_clobbered (char regs_ever_clobbered[])
if (!call_really_used_regs[i])
regs_ever_clobbered[i] = 1;
- FOR_EACH_BB (cur_bb)
+ FOR_EACH_BB_FN (cur_bb, cfun)
{
FOR_BB_INSNS (cur_bb, cur_insn)
{
@@ -7982,7 +7982,7 @@ s390_optimize_nonescaping_tx (void)
for (bb_index = 0; bb_index < n_basic_blocks_for_fn (cfun); bb_index++)
{
- bb = BASIC_BLOCK (bb_index);
+ bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
if (!bb)
continue;
diff --git a/gcc/config/score/score.c b/gcc/config/score/score.c
index 3fdf2ea9050..30b49edb8c3 100644
--- a/gcc/config/score/score.c
+++ b/gcc/config/score/score.c
@@ -516,30 +516,6 @@ score_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
reload_completed = 0;
}
-/* Copy VALUE to a register and return that register. If new psuedos
- are allowed, copy it into a new register, otherwise use DEST. */
-static rtx
-score_force_temporary (rtx dest, rtx value)
-{
- if (can_create_pseudo_p ())
- return force_reg (Pmode, value);
- else
- {
- emit_move_insn (copy_rtx (dest), value);
- return dest;
- }
-}
-
-/* Return a LO_SUM expression for ADDR. TEMP is as for score_force_temporary
- and is used to load the high part into a register. */
-static rtx
-score_split_symbol (rtx temp, rtx addr)
-{
- rtx high = score_force_temporary (temp,
- gen_rtx_HIGH (Pmode, copy_rtx (addr)));
- return gen_rtx_LO_SUM (Pmode, high, addr);
-}
-
/* Fill INFO with information about a single argument. CUM is the
cumulative state for earlier arguments. MODE is the mode of this
argument and TYPE is its type (if known). NAMED is true if this
diff --git a/gcc/config/score/score.h b/gcc/config/score/score.h
index 5ab78752c39..ca73401fc59 100644
--- a/gcc/config/score/score.h
+++ b/gcc/config/score/score.h
@@ -755,13 +755,15 @@ typedef struct score_args
/* Output of Dispatch Tables. */
/* This is how to output an element of a case-vector. We can make the
entries PC-relative in GP-relative when .gp(d)word is supported. */
-#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM, BODY, VALUE, REL) \
- do { \
- if (TARGET_SCORE7) \
- if (flag_pic) \
- fprintf (STREAM, "\t.gpword %sL%d\n", LOCAL_LABEL_PREFIX, VALUE); \
- else \
- fprintf (STREAM, "\t.word %sL%d\n", LOCAL_LABEL_PREFIX, VALUE); \
+#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM, BODY, VALUE, REL) \
+ do { \
+ if (TARGET_SCORE7) \
+ { \
+ if (flag_pic) \
+ fprintf (STREAM, "\t.gpword %sL%d\n", LOCAL_LABEL_PREFIX, VALUE); \
+ else \
+ fprintf (STREAM, "\t.word %sL%d\n", LOCAL_LABEL_PREFIX, VALUE); \
+ } \
} while (0)
/* Jump table alignment is explicit in ASM_OUTPUT_CASE_LABEL. */
diff --git a/gcc/config/sh/sh.c b/gcc/config/sh/sh.c
index 3e907b24a9d..864b04e9b0f 100644
--- a/gcc/config/sh/sh.c
+++ b/gcc/config/sh/sh.c
@@ -771,6 +771,11 @@ sh_option_override (void)
SUBTARGET_OVERRIDE_OPTIONS;
if (optimize > 1 && !optimize_size)
target_flags |= MASK_SAVE_ALL_TARGET_REGS;
+
+ /* Set default values of TARGET_CBRANCHDI4 and TARGET_CMPEQDI_T. */
+ TARGET_CBRANCHDI4 = 1;
+ TARGET_CMPEQDI_T = 0;
+
sh_cpu = PROCESSOR_SH1;
assembler_dialect = 0;
if (TARGET_SH2)
@@ -11110,7 +11115,7 @@ sh_md_init_global (FILE *dump ATTRIBUTE_UNUSED,
regmode_weight[1] = (short *) xcalloc (old_max_uid, sizeof (short));
r0_life_regions = 0;
- FOR_EACH_BB_REVERSE (b)
+ FOR_EACH_BB_REVERSE_FN (b, cfun)
{
find_regmode_weight (b, SImode);
find_regmode_weight (b, SFmode);
diff --git a/gcc/config/sh/sh.opt b/gcc/config/sh/sh.opt
index 8a6788eb3d1..2a782c0e596 100644
--- a/gcc/config/sh/sh.opt
+++ b/gcc/config/sh/sh.opt
@@ -233,11 +233,11 @@ Target Var(TARGET_ZDCBRANCH)
Assume that zero displacement conditional branches are fast
mcbranchdi
-Target Var(TARGET_CBRANCHDI4)
+Target Undocumented Var(TARGET_CBRANCHDI4) Warn(%qs is deprecated and has no effect)
Enable cbranchdi4 pattern
mcmpeqdi
-Target Var(TARGET_CMPEQDI_T)
+Target Undocumented Var(TARGET_CMPEQDI_T) Warn(%qs is deprecated and has no effect)
Emit cmpeqdi_t pattern even when -mcbranchdi is in effect.
mcut2-workaround
diff --git a/gcc/config/sh/sh_optimize_sett_clrt.cc b/gcc/config/sh/sh_optimize_sett_clrt.cc
index fc58bf9efdf..8d40ce1270b 100644
--- a/gcc/config/sh/sh_optimize_sett_clrt.cc
+++ b/gcc/config/sh/sh_optimize_sett_clrt.cc
@@ -206,7 +206,7 @@ sh_optimize_sett_clrt::execute (void)
// Look for insns that set the ccreg to a constant value and see if it can
// be optimized.
basic_block bb;
- FOR_EACH_BB_REVERSE (bb)
+ FOR_EACH_BB_REVERSE_FN (bb, cfun)
for (rtx next_i, i = NEXT_INSN (BB_HEAD (bb));
i != NULL_RTX && i != BB_END (bb); i = next_i)
{
diff --git a/gcc/config/sh/sh_treg_combine.cc b/gcc/config/sh/sh_treg_combine.cc
index 0f9027ec763..fc4a1c03bce 100644
--- a/gcc/config/sh/sh_treg_combine.cc
+++ b/gcc/config/sh/sh_treg_combine.cc
@@ -1469,7 +1469,7 @@ sh_treg_combine::execute (void)
// Look for basic blocks that end with a conditional branch and try to
// optimize them.
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx i = BB_END (bb);
if (any_condjump_p (i) && onlyjump_p (i))
diff --git a/gcc/config/sparc/sol2.h b/gcc/config/sparc/sol2.h
index 4010939df6e..c9a3f43fd40 100644
--- a/gcc/config/sparc/sol2.h
+++ b/gcc/config/sparc/sol2.h
@@ -406,3 +406,6 @@ extern const char *host_detect_local_cpu (int argc, const char **argv);
#undef SUN_INTEGER_MULTIPLY_64
#define SUN_INTEGER_MULTIPLY_64 1
+
+#undef SPARC_LOW_FE_EXCEPT_VALUES
+#define SPARC_LOW_FE_EXCEPT_VALUES 1
diff --git a/gcc/config/sparc/sparc.c b/gcc/config/sparc/sparc.c
index abca70a47b0..ec66b4f7f53 100644
--- a/gcc/config/sparc/sparc.c
+++ b/gcc/config/sparc/sparc.c
@@ -571,11 +571,11 @@ static void emit_hard_tfmode_operation (enum rtx_code, rtx *);
static bool sparc_function_ok_for_sibcall (tree, tree);
static void sparc_init_libfuncs (void);
static void sparc_init_builtins (void);
+static void sparc_fpu_init_builtins (void);
static void sparc_vis_init_builtins (void);
+static tree sparc_builtin_decl (unsigned, bool);
static rtx sparc_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static tree sparc_fold_builtin (tree, int, tree *, bool);
-static int sparc_vis_mul8x16 (int, int);
-static void sparc_handle_vis_mul8x16 (tree *, int, tree, tree, tree);
static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
HOST_WIDE_INT, tree);
static bool sparc_can_output_mi_thunk (const_tree, HOST_WIDE_INT,
@@ -639,6 +639,7 @@ static reg_class_t sparc_secondary_reload (bool, rtx, reg_class_t,
enum machine_mode,
secondary_reload_info *);
static enum machine_mode sparc_cstore_mode (enum insn_code icode);
+static void sparc_atomic_assign_expand_fenv (tree *, tree *, tree *);
#ifdef SUBTARGET_ATTRIBUTE_TABLE
/* Table of valid machine attributes. */
@@ -694,8 +695,6 @@ char sparc_hard_reg_printed[8];
#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS sparc_init_libfuncs
-#undef TARGET_INIT_BUILTINS
-#define TARGET_INIT_BUILTINS sparc_init_builtins
#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS sparc_legitimize_address
@@ -704,6 +703,10 @@ char sparc_hard_reg_printed[8];
#undef TARGET_MODE_DEPENDENT_ADDRESS_P
#define TARGET_MODE_DEPENDENT_ADDRESS_P sparc_mode_dependent_address_p
+#undef TARGET_INIT_BUILTINS
+#define TARGET_INIT_BUILTINS sparc_init_builtins
+#undef TARGET_BUILTIN_DECL
+#define TARGET_BUILTIN_DECL sparc_builtin_decl
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN sparc_expand_builtin
#undef TARGET_FOLD_BUILTIN
@@ -844,6 +847,9 @@ char sparc_hard_reg_printed[8];
#undef TARGET_CSTORE_MODE
#define TARGET_CSTORE_MODE sparc_cstore_mode
+#undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
+#define TARGET_ATOMIC_ASSIGN_EXPAND_FENV sparc_atomic_assign_expand_fenv
+
struct gcc_target targetm = TARGET_INITIALIZER;
/* Return the memory reference contained in X if any, zero otherwise. */
@@ -9238,14 +9244,14 @@ sparc32_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
/* On UltraSPARC a flush flushes an entire cache line. The trampoline is
aligned on a 16 byte boundary so one flush clears it all. */
- emit_insn (gen_flush (validize_mem (adjust_address (m_tramp, SImode, 0))));
+ emit_insn (gen_flushsi (validize_mem (adjust_address (m_tramp, SImode, 0))));
if (sparc_cpu != PROCESSOR_ULTRASPARC
&& sparc_cpu != PROCESSOR_ULTRASPARC3
&& sparc_cpu != PROCESSOR_NIAGARA
&& sparc_cpu != PROCESSOR_NIAGARA2
&& sparc_cpu != PROCESSOR_NIAGARA3
&& sparc_cpu != PROCESSOR_NIAGARA4)
- emit_insn (gen_flush (validize_mem (adjust_address (m_tramp, SImode, 8))));
+ emit_insn (gen_flushsi (validize_mem (adjust_address (m_tramp, SImode, 8))));
/* Call __enable_execute_stack after writing onto the stack to make sure
the stack address is accessible. */
@@ -9970,15 +9976,145 @@ sparc_init_libfuncs (void)
}
}
-static tree def_builtin(const char *name, int code, tree type)
+/* SPARC builtins. */
+enum sparc_builtins
+{
+ /* FPU builtins. */
+ SPARC_BUILTIN_LDFSR,
+ SPARC_BUILTIN_STFSR,
+
+ /* VIS 1.0 builtins. */
+ SPARC_BUILTIN_FPACK16,
+ SPARC_BUILTIN_FPACK32,
+ SPARC_BUILTIN_FPACKFIX,
+ SPARC_BUILTIN_FEXPAND,
+ SPARC_BUILTIN_FPMERGE,
+ SPARC_BUILTIN_FMUL8X16,
+ SPARC_BUILTIN_FMUL8X16AU,
+ SPARC_BUILTIN_FMUL8X16AL,
+ SPARC_BUILTIN_FMUL8SUX16,
+ SPARC_BUILTIN_FMUL8ULX16,
+ SPARC_BUILTIN_FMULD8SUX16,
+ SPARC_BUILTIN_FMULD8ULX16,
+ SPARC_BUILTIN_FALIGNDATAV4HI,
+ SPARC_BUILTIN_FALIGNDATAV8QI,
+ SPARC_BUILTIN_FALIGNDATAV2SI,
+ SPARC_BUILTIN_FALIGNDATADI,
+ SPARC_BUILTIN_WRGSR,
+ SPARC_BUILTIN_RDGSR,
+ SPARC_BUILTIN_ALIGNADDR,
+ SPARC_BUILTIN_ALIGNADDRL,
+ SPARC_BUILTIN_PDIST,
+ SPARC_BUILTIN_EDGE8,
+ SPARC_BUILTIN_EDGE8L,
+ SPARC_BUILTIN_EDGE16,
+ SPARC_BUILTIN_EDGE16L,
+ SPARC_BUILTIN_EDGE32,
+ SPARC_BUILTIN_EDGE32L,
+ SPARC_BUILTIN_FCMPLE16,
+ SPARC_BUILTIN_FCMPLE32,
+ SPARC_BUILTIN_FCMPNE16,
+ SPARC_BUILTIN_FCMPNE32,
+ SPARC_BUILTIN_FCMPGT16,
+ SPARC_BUILTIN_FCMPGT32,
+ SPARC_BUILTIN_FCMPEQ16,
+ SPARC_BUILTIN_FCMPEQ32,
+ SPARC_BUILTIN_FPADD16,
+ SPARC_BUILTIN_FPADD16S,
+ SPARC_BUILTIN_FPADD32,
+ SPARC_BUILTIN_FPADD32S,
+ SPARC_BUILTIN_FPSUB16,
+ SPARC_BUILTIN_FPSUB16S,
+ SPARC_BUILTIN_FPSUB32,
+ SPARC_BUILTIN_FPSUB32S,
+ SPARC_BUILTIN_ARRAY8,
+ SPARC_BUILTIN_ARRAY16,
+ SPARC_BUILTIN_ARRAY32,
+
+ /* VIS 2.0 builtins. */
+ SPARC_BUILTIN_EDGE8N,
+ SPARC_BUILTIN_EDGE8LN,
+ SPARC_BUILTIN_EDGE16N,
+ SPARC_BUILTIN_EDGE16LN,
+ SPARC_BUILTIN_EDGE32N,
+ SPARC_BUILTIN_EDGE32LN,
+ SPARC_BUILTIN_BMASK,
+ SPARC_BUILTIN_BSHUFFLEV4HI,
+ SPARC_BUILTIN_BSHUFFLEV8QI,
+ SPARC_BUILTIN_BSHUFFLEV2SI,
+ SPARC_BUILTIN_BSHUFFLEDI,
+
+ /* VIS 3.0 builtins. */
+ SPARC_BUILTIN_CMASK8,
+ SPARC_BUILTIN_CMASK16,
+ SPARC_BUILTIN_CMASK32,
+ SPARC_BUILTIN_FCHKSM16,
+ SPARC_BUILTIN_FSLL16,
+ SPARC_BUILTIN_FSLAS16,
+ SPARC_BUILTIN_FSRL16,
+ SPARC_BUILTIN_FSRA16,
+ SPARC_BUILTIN_FSLL32,
+ SPARC_BUILTIN_FSLAS32,
+ SPARC_BUILTIN_FSRL32,
+ SPARC_BUILTIN_FSRA32,
+ SPARC_BUILTIN_PDISTN,
+ SPARC_BUILTIN_FMEAN16,
+ SPARC_BUILTIN_FPADD64,
+ SPARC_BUILTIN_FPSUB64,
+ SPARC_BUILTIN_FPADDS16,
+ SPARC_BUILTIN_FPADDS16S,
+ SPARC_BUILTIN_FPSUBS16,
+ SPARC_BUILTIN_FPSUBS16S,
+ SPARC_BUILTIN_FPADDS32,
+ SPARC_BUILTIN_FPADDS32S,
+ SPARC_BUILTIN_FPSUBS32,
+ SPARC_BUILTIN_FPSUBS32S,
+ SPARC_BUILTIN_FUCMPLE8,
+ SPARC_BUILTIN_FUCMPNE8,
+ SPARC_BUILTIN_FUCMPGT8,
+ SPARC_BUILTIN_FUCMPEQ8,
+ SPARC_BUILTIN_FHADDS,
+ SPARC_BUILTIN_FHADDD,
+ SPARC_BUILTIN_FHSUBS,
+ SPARC_BUILTIN_FHSUBD,
+ SPARC_BUILTIN_FNHADDS,
+ SPARC_BUILTIN_FNHADDD,
+ SPARC_BUILTIN_UMULXHI,
+ SPARC_BUILTIN_XMULX,
+ SPARC_BUILTIN_XMULXHI,
+
+ SPARC_BUILTIN_MAX
+};
+
+static GTY (()) tree sparc_builtins[(int) SPARC_BUILTIN_MAX];
+static enum insn_code sparc_builtins_icode[(int) SPARC_BUILTIN_MAX];
+
+/* Add a SPARC builtin function with NAME, ICODE, CODE and TYPE. Return the
+ function decl or NULL_TREE if the builtin was not added. */
+
+static tree
+def_builtin (const char *name, enum insn_code icode, enum sparc_builtins code,
+ tree type)
{
- return add_builtin_function(name, type, code, BUILT_IN_MD, NULL,
- NULL_TREE);
+ tree t
+ = add_builtin_function (name, type, code, BUILT_IN_MD, NULL, NULL_TREE);
+
+ if (t)
+ {
+ sparc_builtins[code] = t;
+ sparc_builtins_icode[code] = icode;
+ }
+
+ return t;
}
-static tree def_builtin_const(const char *name, int code, tree type)
+/* Likewise, but also marks the function as "const". */
+
+static tree
+def_builtin_const (const char *name, enum insn_code icode,
+ enum sparc_builtins code, tree type)
{
- tree t = def_builtin(name, code, type);
+ tree t = def_builtin (name, icode, code, type);
if (t)
TREE_READONLY (t) = 1;
@@ -9992,11 +10128,28 @@ static tree def_builtin_const(const char *name, int code, tree type)
static void
sparc_init_builtins (void)
{
+ if (TARGET_FPU)
+ sparc_fpu_init_builtins ();
+
if (TARGET_VIS)
sparc_vis_init_builtins ();
}
-/* Create builtin functions for VIS 1.0 instructions. */
+/* Create builtin functions for FPU instructions. */
+
+static void
+sparc_fpu_init_builtins (void)
+{
+ tree ftype
+ = build_function_type_list (void_type_node,
+ build_pointer_type (unsigned_type_node), 0);
+ def_builtin ("__builtin_load_fsr", CODE_FOR_ldfsr,
+ SPARC_BUILTIN_LDFSR, ftype);
+ def_builtin ("__builtin_store_fsr", CODE_FOR_stfsr,
+ SPARC_BUILTIN_STFSR, ftype);
+}
+
+/* Create builtin functions for VIS instructions. */
static void
sparc_vis_init_builtins (void)
@@ -10070,223 +10223,225 @@ sparc_vis_init_builtins (void)
/* Packing and expanding vectors. */
def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis,
- v4qi_ftype_v4hi);
+ SPARC_BUILTIN_FPACK16, v4qi_ftype_v4hi);
def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis,
- v8qi_ftype_v2si_v8qi);
+ SPARC_BUILTIN_FPACK32, v8qi_ftype_v2si_v8qi);
def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis,
- v2hi_ftype_v2si);
+ SPARC_BUILTIN_FPACKFIX, v2hi_ftype_v2si);
def_builtin_const ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis,
- v4hi_ftype_v4qi);
+ SPARC_BUILTIN_FEXPAND, v4hi_ftype_v4qi);
def_builtin_const ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis,
- v8qi_ftype_v4qi_v4qi);
+ SPARC_BUILTIN_FPMERGE, v8qi_ftype_v4qi_v4qi);
/* Multiplications. */
def_builtin_const ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis,
- v4hi_ftype_v4qi_v4hi);
+ SPARC_BUILTIN_FMUL8X16, v4hi_ftype_v4qi_v4hi);
def_builtin_const ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis,
- v4hi_ftype_v4qi_v2hi);
+ SPARC_BUILTIN_FMUL8X16AU, v4hi_ftype_v4qi_v2hi);
def_builtin_const ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis,
- v4hi_ftype_v4qi_v2hi);
+ SPARC_BUILTIN_FMUL8X16AL, v4hi_ftype_v4qi_v2hi);
def_builtin_const ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis,
- v4hi_ftype_v8qi_v4hi);
+ SPARC_BUILTIN_FMUL8SUX16, v4hi_ftype_v8qi_v4hi);
def_builtin_const ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis,
- v4hi_ftype_v8qi_v4hi);
+ SPARC_BUILTIN_FMUL8ULX16, v4hi_ftype_v8qi_v4hi);
def_builtin_const ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis,
- v2si_ftype_v4qi_v2hi);
+ SPARC_BUILTIN_FMULD8SUX16, v2si_ftype_v4qi_v2hi);
def_builtin_const ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis,
- v2si_ftype_v4qi_v2hi);
+ SPARC_BUILTIN_FMULD8ULX16, v2si_ftype_v4qi_v2hi);
/* Data aligning. */
def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis,
- v4hi_ftype_v4hi_v4hi);
+ SPARC_BUILTIN_FALIGNDATAV4HI, v4hi_ftype_v4hi_v4hi);
def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis,
- v8qi_ftype_v8qi_v8qi);
+ SPARC_BUILTIN_FALIGNDATAV8QI, v8qi_ftype_v8qi_v8qi);
def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis,
- v2si_ftype_v2si_v2si);
+ SPARC_BUILTIN_FALIGNDATAV2SI, v2si_ftype_v2si_v2si);
def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatav1di_vis,
- di_ftype_di_di);
+ SPARC_BUILTIN_FALIGNDATADI, di_ftype_di_di);
def_builtin ("__builtin_vis_write_gsr", CODE_FOR_wrgsr_vis,
- void_ftype_di);
+ SPARC_BUILTIN_WRGSR, void_ftype_di);
def_builtin ("__builtin_vis_read_gsr", CODE_FOR_rdgsr_vis,
- di_ftype_void);
+ SPARC_BUILTIN_RDGSR, di_ftype_void);
if (TARGET_ARCH64)
{
def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis,
- ptr_ftype_ptr_di);
+ SPARC_BUILTIN_ALIGNADDR, ptr_ftype_ptr_di);
def_builtin ("__builtin_vis_alignaddrl", CODE_FOR_alignaddrldi_vis,
- ptr_ftype_ptr_di);
+ SPARC_BUILTIN_ALIGNADDRL, ptr_ftype_ptr_di);
}
else
{
def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis,
- ptr_ftype_ptr_si);
+ SPARC_BUILTIN_ALIGNADDR, ptr_ftype_ptr_si);
def_builtin ("__builtin_vis_alignaddrl", CODE_FOR_alignaddrlsi_vis,
- ptr_ftype_ptr_si);
+ SPARC_BUILTIN_ALIGNADDRL, ptr_ftype_ptr_si);
}
/* Pixel distance. */
def_builtin_const ("__builtin_vis_pdist", CODE_FOR_pdist_vis,
- di_ftype_v8qi_v8qi_di);
+ SPARC_BUILTIN_PDIST, di_ftype_v8qi_v8qi_di);
/* Edge handling. */
if (TARGET_ARCH64)
{
def_builtin_const ("__builtin_vis_edge8", CODE_FOR_edge8di_vis,
- di_ftype_ptr_ptr);
+ SPARC_BUILTIN_EDGE8, di_ftype_ptr_ptr);
def_builtin_const ("__builtin_vis_edge8l", CODE_FOR_edge8ldi_vis,
- di_ftype_ptr_ptr);
+ SPARC_BUILTIN_EDGE8L, di_ftype_ptr_ptr);
def_builtin_const ("__builtin_vis_edge16", CODE_FOR_edge16di_vis,
- di_ftype_ptr_ptr);
+ SPARC_BUILTIN_EDGE16, di_ftype_ptr_ptr);
def_builtin_const ("__builtin_vis_edge16l", CODE_FOR_edge16ldi_vis,
- di_ftype_ptr_ptr);
+ SPARC_BUILTIN_EDGE16L, di_ftype_ptr_ptr);
def_builtin_const ("__builtin_vis_edge32", CODE_FOR_edge32di_vis,
- di_ftype_ptr_ptr);
+ SPARC_BUILTIN_EDGE32, di_ftype_ptr_ptr);
def_builtin_const ("__builtin_vis_edge32l", CODE_FOR_edge32ldi_vis,
- di_ftype_ptr_ptr);
- if (TARGET_VIS2)
- {
- def_builtin_const ("__builtin_vis_edge8n", CODE_FOR_edge8ndi_vis,
- di_ftype_ptr_ptr);
- def_builtin_const ("__builtin_vis_edge8ln", CODE_FOR_edge8lndi_vis,
- di_ftype_ptr_ptr);
- def_builtin_const ("__builtin_vis_edge16n", CODE_FOR_edge16ndi_vis,
- di_ftype_ptr_ptr);
- def_builtin_const ("__builtin_vis_edge16ln", CODE_FOR_edge16lndi_vis,
- di_ftype_ptr_ptr);
- def_builtin_const ("__builtin_vis_edge32n", CODE_FOR_edge32ndi_vis,
- di_ftype_ptr_ptr);
- def_builtin_const ("__builtin_vis_edge32ln", CODE_FOR_edge32lndi_vis,
- di_ftype_ptr_ptr);
- }
+ SPARC_BUILTIN_EDGE32L, di_ftype_ptr_ptr);
}
else
{
def_builtin_const ("__builtin_vis_edge8", CODE_FOR_edge8si_vis,
- si_ftype_ptr_ptr);
+ SPARC_BUILTIN_EDGE8, si_ftype_ptr_ptr);
def_builtin_const ("__builtin_vis_edge8l", CODE_FOR_edge8lsi_vis,
- si_ftype_ptr_ptr);
+ SPARC_BUILTIN_EDGE8L, si_ftype_ptr_ptr);
def_builtin_const ("__builtin_vis_edge16", CODE_FOR_edge16si_vis,
- si_ftype_ptr_ptr);
+ SPARC_BUILTIN_EDGE16, si_ftype_ptr_ptr);
def_builtin_const ("__builtin_vis_edge16l", CODE_FOR_edge16lsi_vis,
- si_ftype_ptr_ptr);
+ SPARC_BUILTIN_EDGE16L, si_ftype_ptr_ptr);
def_builtin_const ("__builtin_vis_edge32", CODE_FOR_edge32si_vis,
- si_ftype_ptr_ptr);
+ SPARC_BUILTIN_EDGE32, si_ftype_ptr_ptr);
def_builtin_const ("__builtin_vis_edge32l", CODE_FOR_edge32lsi_vis,
- si_ftype_ptr_ptr);
- if (TARGET_VIS2)
- {
- def_builtin_const ("__builtin_vis_edge8n", CODE_FOR_edge8nsi_vis,
- si_ftype_ptr_ptr);
- def_builtin_const ("__builtin_vis_edge8ln", CODE_FOR_edge8lnsi_vis,
- si_ftype_ptr_ptr);
- def_builtin_const ("__builtin_vis_edge16n", CODE_FOR_edge16nsi_vis,
- si_ftype_ptr_ptr);
- def_builtin_const ("__builtin_vis_edge16ln", CODE_FOR_edge16lnsi_vis,
- si_ftype_ptr_ptr);
- def_builtin_const ("__builtin_vis_edge32n", CODE_FOR_edge32nsi_vis,
- si_ftype_ptr_ptr);
- def_builtin_const ("__builtin_vis_edge32ln", CODE_FOR_edge32lnsi_vis,
- si_ftype_ptr_ptr);
- }
+ SPARC_BUILTIN_EDGE32L, si_ftype_ptr_ptr);
}
/* Pixel compare. */
if (TARGET_ARCH64)
{
def_builtin_const ("__builtin_vis_fcmple16", CODE_FOR_fcmple16di_vis,
- di_ftype_v4hi_v4hi);
+ SPARC_BUILTIN_FCMPLE16, di_ftype_v4hi_v4hi);
def_builtin_const ("__builtin_vis_fcmple32", CODE_FOR_fcmple32di_vis,
- di_ftype_v2si_v2si);
+ SPARC_BUILTIN_FCMPLE32, di_ftype_v2si_v2si);
def_builtin_const ("__builtin_vis_fcmpne16", CODE_FOR_fcmpne16di_vis,
- di_ftype_v4hi_v4hi);
+ SPARC_BUILTIN_FCMPNE16, di_ftype_v4hi_v4hi);
def_builtin_const ("__builtin_vis_fcmpne32", CODE_FOR_fcmpne32di_vis,
- di_ftype_v2si_v2si);
+ SPARC_BUILTIN_FCMPNE32, di_ftype_v2si_v2si);
def_builtin_const ("__builtin_vis_fcmpgt16", CODE_FOR_fcmpgt16di_vis,
- di_ftype_v4hi_v4hi);
+ SPARC_BUILTIN_FCMPGT16, di_ftype_v4hi_v4hi);
def_builtin_const ("__builtin_vis_fcmpgt32", CODE_FOR_fcmpgt32di_vis,
- di_ftype_v2si_v2si);
+ SPARC_BUILTIN_FCMPGT32, di_ftype_v2si_v2si);
def_builtin_const ("__builtin_vis_fcmpeq16", CODE_FOR_fcmpeq16di_vis,
- di_ftype_v4hi_v4hi);
+ SPARC_BUILTIN_FCMPEQ16, di_ftype_v4hi_v4hi);
def_builtin_const ("__builtin_vis_fcmpeq32", CODE_FOR_fcmpeq32di_vis,
- di_ftype_v2si_v2si);
+ SPARC_BUILTIN_FCMPEQ32, di_ftype_v2si_v2si);
}
else
{
def_builtin_const ("__builtin_vis_fcmple16", CODE_FOR_fcmple16si_vis,
- si_ftype_v4hi_v4hi);
+ SPARC_BUILTIN_FCMPLE16, si_ftype_v4hi_v4hi);
def_builtin_const ("__builtin_vis_fcmple32", CODE_FOR_fcmple32si_vis,
- si_ftype_v2si_v2si);
+ SPARC_BUILTIN_FCMPLE32, si_ftype_v2si_v2si);
def_builtin_const ("__builtin_vis_fcmpne16", CODE_FOR_fcmpne16si_vis,
- si_ftype_v4hi_v4hi);
+ SPARC_BUILTIN_FCMPNE16, si_ftype_v4hi_v4hi);
def_builtin_const ("__builtin_vis_fcmpne32", CODE_FOR_fcmpne32si_vis,
- si_ftype_v2si_v2si);
+ SPARC_BUILTIN_FCMPNE32, si_ftype_v2si_v2si);
def_builtin_const ("__builtin_vis_fcmpgt16", CODE_FOR_fcmpgt16si_vis,
- si_ftype_v4hi_v4hi);
+ SPARC_BUILTIN_FCMPGT16, si_ftype_v4hi_v4hi);
def_builtin_const ("__builtin_vis_fcmpgt32", CODE_FOR_fcmpgt32si_vis,
- si_ftype_v2si_v2si);
+ SPARC_BUILTIN_FCMPGT32, si_ftype_v2si_v2si);
def_builtin_const ("__builtin_vis_fcmpeq16", CODE_FOR_fcmpeq16si_vis,
- si_ftype_v4hi_v4hi);
+ SPARC_BUILTIN_FCMPEQ16, si_ftype_v4hi_v4hi);
def_builtin_const ("__builtin_vis_fcmpeq32", CODE_FOR_fcmpeq32si_vis,
- si_ftype_v2si_v2si);
+ SPARC_BUILTIN_FCMPEQ32, si_ftype_v2si_v2si);
}
/* Addition and subtraction. */
def_builtin_const ("__builtin_vis_fpadd16", CODE_FOR_addv4hi3,
- v4hi_ftype_v4hi_v4hi);
+ SPARC_BUILTIN_FPADD16, v4hi_ftype_v4hi_v4hi);
def_builtin_const ("__builtin_vis_fpadd16s", CODE_FOR_addv2hi3,
- v2hi_ftype_v2hi_v2hi);
+ SPARC_BUILTIN_FPADD16S, v2hi_ftype_v2hi_v2hi);
def_builtin_const ("__builtin_vis_fpadd32", CODE_FOR_addv2si3,
- v2si_ftype_v2si_v2si);
+ SPARC_BUILTIN_FPADD32, v2si_ftype_v2si_v2si);
def_builtin_const ("__builtin_vis_fpadd32s", CODE_FOR_addv1si3,
- v1si_ftype_v1si_v1si);
+ SPARC_BUILTIN_FPADD32S, v1si_ftype_v1si_v1si);
def_builtin_const ("__builtin_vis_fpsub16", CODE_FOR_subv4hi3,
- v4hi_ftype_v4hi_v4hi);
+ SPARC_BUILTIN_FPSUB16, v4hi_ftype_v4hi_v4hi);
def_builtin_const ("__builtin_vis_fpsub16s", CODE_FOR_subv2hi3,
- v2hi_ftype_v2hi_v2hi);
+ SPARC_BUILTIN_FPSUB16S, v2hi_ftype_v2hi_v2hi);
def_builtin_const ("__builtin_vis_fpsub32", CODE_FOR_subv2si3,
- v2si_ftype_v2si_v2si);
+ SPARC_BUILTIN_FPSUB32, v2si_ftype_v2si_v2si);
def_builtin_const ("__builtin_vis_fpsub32s", CODE_FOR_subv1si3,
- v1si_ftype_v1si_v1si);
+ SPARC_BUILTIN_FPSUB32S, v1si_ftype_v1si_v1si);
/* Three-dimensional array addressing. */
if (TARGET_ARCH64)
{
def_builtin_const ("__builtin_vis_array8", CODE_FOR_array8di_vis,
- di_ftype_di_di);
+ SPARC_BUILTIN_ARRAY8, di_ftype_di_di);
def_builtin_const ("__builtin_vis_array16", CODE_FOR_array16di_vis,
- di_ftype_di_di);
+ SPARC_BUILTIN_ARRAY16, di_ftype_di_di);
def_builtin_const ("__builtin_vis_array32", CODE_FOR_array32di_vis,
- di_ftype_di_di);
+ SPARC_BUILTIN_ARRAY32, di_ftype_di_di);
}
else
{
def_builtin_const ("__builtin_vis_array8", CODE_FOR_array8si_vis,
- si_ftype_si_si);
+ SPARC_BUILTIN_ARRAY8, si_ftype_si_si);
def_builtin_const ("__builtin_vis_array16", CODE_FOR_array16si_vis,
- si_ftype_si_si);
+ SPARC_BUILTIN_ARRAY16, si_ftype_si_si);
def_builtin_const ("__builtin_vis_array32", CODE_FOR_array32si_vis,
- si_ftype_si_si);
- }
+ SPARC_BUILTIN_ARRAY32, si_ftype_si_si);
+ }
if (TARGET_VIS2)
{
- /* Byte mask and shuffle */
+ /* Edge handling. */
+ if (TARGET_ARCH64)
+ {
+ def_builtin_const ("__builtin_vis_edge8n", CODE_FOR_edge8ndi_vis,
+ SPARC_BUILTIN_EDGE8N, di_ftype_ptr_ptr);
+ def_builtin_const ("__builtin_vis_edge8ln", CODE_FOR_edge8lndi_vis,
+ SPARC_BUILTIN_EDGE8LN, di_ftype_ptr_ptr);
+ def_builtin_const ("__builtin_vis_edge16n", CODE_FOR_edge16ndi_vis,
+ SPARC_BUILTIN_EDGE16N, di_ftype_ptr_ptr);
+ def_builtin_const ("__builtin_vis_edge16ln", CODE_FOR_edge16lndi_vis,
+ SPARC_BUILTIN_EDGE16LN, di_ftype_ptr_ptr);
+ def_builtin_const ("__builtin_vis_edge32n", CODE_FOR_edge32ndi_vis,
+ SPARC_BUILTIN_EDGE32N, di_ftype_ptr_ptr);
+ def_builtin_const ("__builtin_vis_edge32ln", CODE_FOR_edge32lndi_vis,
+ SPARC_BUILTIN_EDGE32LN, di_ftype_ptr_ptr);
+ }
+ else
+ {
+ def_builtin_const ("__builtin_vis_edge8n", CODE_FOR_edge8nsi_vis,
+ SPARC_BUILTIN_EDGE8N, si_ftype_ptr_ptr);
+ def_builtin_const ("__builtin_vis_edge8ln", CODE_FOR_edge8lnsi_vis,
+ SPARC_BUILTIN_EDGE8LN, si_ftype_ptr_ptr);
+ def_builtin_const ("__builtin_vis_edge16n", CODE_FOR_edge16nsi_vis,
+ SPARC_BUILTIN_EDGE16N, si_ftype_ptr_ptr);
+ def_builtin_const ("__builtin_vis_edge16ln", CODE_FOR_edge16lnsi_vis,
+ SPARC_BUILTIN_EDGE16LN, si_ftype_ptr_ptr);
+ def_builtin_const ("__builtin_vis_edge32n", CODE_FOR_edge32nsi_vis,
+ SPARC_BUILTIN_EDGE32N, si_ftype_ptr_ptr);
+ def_builtin_const ("__builtin_vis_edge32ln", CODE_FOR_edge32lnsi_vis,
+ SPARC_BUILTIN_EDGE32LN, si_ftype_ptr_ptr);
+ }
+
+ /* Byte mask and shuffle. */
if (TARGET_ARCH64)
def_builtin ("__builtin_vis_bmask", CODE_FOR_bmaskdi_vis,
- di_ftype_di_di);
+ SPARC_BUILTIN_BMASK, di_ftype_di_di);
else
def_builtin ("__builtin_vis_bmask", CODE_FOR_bmasksi_vis,
- si_ftype_si_si);
+ SPARC_BUILTIN_BMASK, si_ftype_si_si);
def_builtin ("__builtin_vis_bshufflev4hi", CODE_FOR_bshufflev4hi_vis,
- v4hi_ftype_v4hi_v4hi);
+ SPARC_BUILTIN_BSHUFFLEV4HI, v4hi_ftype_v4hi_v4hi);
def_builtin ("__builtin_vis_bshufflev8qi", CODE_FOR_bshufflev8qi_vis,
- v8qi_ftype_v8qi_v8qi);
+ SPARC_BUILTIN_BSHUFFLEV8QI, v8qi_ftype_v8qi_v8qi);
def_builtin ("__builtin_vis_bshufflev2si", CODE_FOR_bshufflev2si_vis,
- v2si_ftype_v2si_v2si);
+ SPARC_BUILTIN_BSHUFFLEV2SI, v2si_ftype_v2si_v2si);
def_builtin ("__builtin_vis_bshuffledi", CODE_FOR_bshufflev1di_vis,
- di_ftype_di_di);
+ SPARC_BUILTIN_BSHUFFLEDI, di_ftype_di_di);
}
if (TARGET_VIS3)
@@ -10294,120 +10449,130 @@ sparc_vis_init_builtins (void)
if (TARGET_ARCH64)
{
def_builtin ("__builtin_vis_cmask8", CODE_FOR_cmask8di_vis,
- void_ftype_di);
+ SPARC_BUILTIN_CMASK8, void_ftype_di);
def_builtin ("__builtin_vis_cmask16", CODE_FOR_cmask16di_vis,
- void_ftype_di);
+ SPARC_BUILTIN_CMASK16, void_ftype_di);
def_builtin ("__builtin_vis_cmask32", CODE_FOR_cmask32di_vis,
- void_ftype_di);
+ SPARC_BUILTIN_CMASK32, void_ftype_di);
}
else
{
def_builtin ("__builtin_vis_cmask8", CODE_FOR_cmask8si_vis,
- void_ftype_si);
+ SPARC_BUILTIN_CMASK8, void_ftype_si);
def_builtin ("__builtin_vis_cmask16", CODE_FOR_cmask16si_vis,
- void_ftype_si);
+ SPARC_BUILTIN_CMASK16, void_ftype_si);
def_builtin ("__builtin_vis_cmask32", CODE_FOR_cmask32si_vis,
- void_ftype_si);
+ SPARC_BUILTIN_CMASK32, void_ftype_si);
}
def_builtin_const ("__builtin_vis_fchksm16", CODE_FOR_fchksm16_vis,
- v4hi_ftype_v4hi_v4hi);
+ SPARC_BUILTIN_FCHKSM16, v4hi_ftype_v4hi_v4hi);
def_builtin_const ("__builtin_vis_fsll16", CODE_FOR_vashlv4hi3,
- v4hi_ftype_v4hi_v4hi);
+ SPARC_BUILTIN_FSLL16, v4hi_ftype_v4hi_v4hi);
def_builtin_const ("__builtin_vis_fslas16", CODE_FOR_vssashlv4hi3,
- v4hi_ftype_v4hi_v4hi);
+ SPARC_BUILTIN_FSLAS16, v4hi_ftype_v4hi_v4hi);
def_builtin_const ("__builtin_vis_fsrl16", CODE_FOR_vlshrv4hi3,
- v4hi_ftype_v4hi_v4hi);
+ SPARC_BUILTIN_FSRL16, v4hi_ftype_v4hi_v4hi);
def_builtin_const ("__builtin_vis_fsra16", CODE_FOR_vashrv4hi3,
- v4hi_ftype_v4hi_v4hi);
+ SPARC_BUILTIN_FSRA16, v4hi_ftype_v4hi_v4hi);
def_builtin_const ("__builtin_vis_fsll32", CODE_FOR_vashlv2si3,
- v2si_ftype_v2si_v2si);
+ SPARC_BUILTIN_FSLL32, v2si_ftype_v2si_v2si);
def_builtin_const ("__builtin_vis_fslas32", CODE_FOR_vssashlv2si3,
- v2si_ftype_v2si_v2si);
+ SPARC_BUILTIN_FSLAS32, v2si_ftype_v2si_v2si);
def_builtin_const ("__builtin_vis_fsrl32", CODE_FOR_vlshrv2si3,
- v2si_ftype_v2si_v2si);
+ SPARC_BUILTIN_FSRL32, v2si_ftype_v2si_v2si);
def_builtin_const ("__builtin_vis_fsra32", CODE_FOR_vashrv2si3,
- v2si_ftype_v2si_v2si);
+ SPARC_BUILTIN_FSRA32, v2si_ftype_v2si_v2si);
if (TARGET_ARCH64)
def_builtin_const ("__builtin_vis_pdistn", CODE_FOR_pdistndi_vis,
- di_ftype_v8qi_v8qi);
+ SPARC_BUILTIN_PDISTN, di_ftype_v8qi_v8qi);
else
def_builtin_const ("__builtin_vis_pdistn", CODE_FOR_pdistnsi_vis,
- si_ftype_v8qi_v8qi);
+ SPARC_BUILTIN_PDISTN, si_ftype_v8qi_v8qi);
def_builtin_const ("__builtin_vis_fmean16", CODE_FOR_fmean16_vis,
- v4hi_ftype_v4hi_v4hi);
+ SPARC_BUILTIN_FMEAN16, v4hi_ftype_v4hi_v4hi);
def_builtin_const ("__builtin_vis_fpadd64", CODE_FOR_fpadd64_vis,
- di_ftype_di_di);
+ SPARC_BUILTIN_FPADD64, di_ftype_di_di);
def_builtin_const ("__builtin_vis_fpsub64", CODE_FOR_fpsub64_vis,
- di_ftype_di_di);
+ SPARC_BUILTIN_FPSUB64, di_ftype_di_di);
def_builtin_const ("__builtin_vis_fpadds16", CODE_FOR_ssaddv4hi3,
- v4hi_ftype_v4hi_v4hi);
+ SPARC_BUILTIN_FPADDS16, v4hi_ftype_v4hi_v4hi);
def_builtin_const ("__builtin_vis_fpadds16s", CODE_FOR_ssaddv2hi3,
- v2hi_ftype_v2hi_v2hi);
+ SPARC_BUILTIN_FPADDS16S, v2hi_ftype_v2hi_v2hi);
def_builtin_const ("__builtin_vis_fpsubs16", CODE_FOR_sssubv4hi3,
- v4hi_ftype_v4hi_v4hi);
+ SPARC_BUILTIN_FPSUBS16, v4hi_ftype_v4hi_v4hi);
def_builtin_const ("__builtin_vis_fpsubs16s", CODE_FOR_sssubv2hi3,
- v2hi_ftype_v2hi_v2hi);
+ SPARC_BUILTIN_FPSUBS16S, v2hi_ftype_v2hi_v2hi);
def_builtin_const ("__builtin_vis_fpadds32", CODE_FOR_ssaddv2si3,
- v2si_ftype_v2si_v2si);
+ SPARC_BUILTIN_FPADDS32, v2si_ftype_v2si_v2si);
def_builtin_const ("__builtin_vis_fpadds32s", CODE_FOR_ssaddv1si3,
- v1si_ftype_v1si_v1si);
+ SPARC_BUILTIN_FPADDS32S, v1si_ftype_v1si_v1si);
def_builtin_const ("__builtin_vis_fpsubs32", CODE_FOR_sssubv2si3,
- v2si_ftype_v2si_v2si);
+ SPARC_BUILTIN_FPSUBS32, v2si_ftype_v2si_v2si);
def_builtin_const ("__builtin_vis_fpsubs32s", CODE_FOR_sssubv1si3,
- v1si_ftype_v1si_v1si);
+ SPARC_BUILTIN_FPSUBS32S, v1si_ftype_v1si_v1si);
if (TARGET_ARCH64)
{
def_builtin_const ("__builtin_vis_fucmple8", CODE_FOR_fucmple8di_vis,
- di_ftype_v8qi_v8qi);
+ SPARC_BUILTIN_FUCMPLE8, di_ftype_v8qi_v8qi);
def_builtin_const ("__builtin_vis_fucmpne8", CODE_FOR_fucmpne8di_vis,
- di_ftype_v8qi_v8qi);
+ SPARC_BUILTIN_FUCMPNE8, di_ftype_v8qi_v8qi);
def_builtin_const ("__builtin_vis_fucmpgt8", CODE_FOR_fucmpgt8di_vis,
- di_ftype_v8qi_v8qi);
+ SPARC_BUILTIN_FUCMPGT8, di_ftype_v8qi_v8qi);
def_builtin_const ("__builtin_vis_fucmpeq8", CODE_FOR_fucmpeq8di_vis,
- di_ftype_v8qi_v8qi);
+ SPARC_BUILTIN_FUCMPEQ8, di_ftype_v8qi_v8qi);
}
else
{
def_builtin_const ("__builtin_vis_fucmple8", CODE_FOR_fucmple8si_vis,
- si_ftype_v8qi_v8qi);
+ SPARC_BUILTIN_FUCMPLE8, si_ftype_v8qi_v8qi);
def_builtin_const ("__builtin_vis_fucmpne8", CODE_FOR_fucmpne8si_vis,
- si_ftype_v8qi_v8qi);
+ SPARC_BUILTIN_FUCMPNE8, si_ftype_v8qi_v8qi);
def_builtin_const ("__builtin_vis_fucmpgt8", CODE_FOR_fucmpgt8si_vis,
- si_ftype_v8qi_v8qi);
+ SPARC_BUILTIN_FUCMPGT8, si_ftype_v8qi_v8qi);
def_builtin_const ("__builtin_vis_fucmpeq8", CODE_FOR_fucmpeq8si_vis,
- si_ftype_v8qi_v8qi);
+ SPARC_BUILTIN_FUCMPEQ8, si_ftype_v8qi_v8qi);
}
def_builtin_const ("__builtin_vis_fhadds", CODE_FOR_fhaddsf_vis,
- sf_ftype_sf_sf);
+ SPARC_BUILTIN_FHADDS, sf_ftype_sf_sf);
def_builtin_const ("__builtin_vis_fhaddd", CODE_FOR_fhadddf_vis,
- df_ftype_df_df);
+ SPARC_BUILTIN_FHADDD, df_ftype_df_df);
def_builtin_const ("__builtin_vis_fhsubs", CODE_FOR_fhsubsf_vis,
- sf_ftype_sf_sf);
+ SPARC_BUILTIN_FHSUBS, sf_ftype_sf_sf);
def_builtin_const ("__builtin_vis_fhsubd", CODE_FOR_fhsubdf_vis,
- df_ftype_df_df);
+ SPARC_BUILTIN_FHSUBD, df_ftype_df_df);
def_builtin_const ("__builtin_vis_fnhadds", CODE_FOR_fnhaddsf_vis,
- sf_ftype_sf_sf);
+ SPARC_BUILTIN_FNHADDS, sf_ftype_sf_sf);
def_builtin_const ("__builtin_vis_fnhaddd", CODE_FOR_fnhadddf_vis,
- df_ftype_df_df);
+ SPARC_BUILTIN_FNHADDD, df_ftype_df_df);
def_builtin_const ("__builtin_vis_umulxhi", CODE_FOR_umulxhi_vis,
- di_ftype_di_di);
+ SPARC_BUILTIN_UMULXHI, di_ftype_di_di);
def_builtin_const ("__builtin_vis_xmulx", CODE_FOR_xmulx_vis,
- di_ftype_di_di);
+ SPARC_BUILTIN_XMULX, di_ftype_di_di);
def_builtin_const ("__builtin_vis_xmulxhi", CODE_FOR_xmulxhi_vis,
- di_ftype_di_di);
+ SPARC_BUILTIN_XMULXHI, di_ftype_di_di);
}
}
-/* Handle TARGET_EXPAND_BUILTIN target hook.
- Expand builtin functions for sparc intrinsics. */
+/* Implement TARGET_BUILTIN_DECL hook. */
+
+static tree
+sparc_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
+{
+ if (code >= SPARC_BUILTIN_MAX)
+ return error_mark_node;
+
+ return sparc_builtins[code];
+}
+
+/* Implemented TARGET_EXPAND_BUILTIN hook. */
static rtx
sparc_expand_builtin (tree exp, rtx target,
@@ -10415,15 +10580,14 @@ sparc_expand_builtin (tree exp, rtx target,
enum machine_mode tmode ATTRIBUTE_UNUSED,
int ignore ATTRIBUTE_UNUSED)
{
- tree arg;
- call_expr_arg_iterator iter;
tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
- unsigned int icode = DECL_FUNCTION_CODE (fndecl);
- rtx pat, op[4];
+ enum sparc_builtins code = (enum sparc_builtins) DECL_FUNCTION_CODE (fndecl);
+ enum insn_code icode = sparc_builtins_icode[code];
+ bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
+ call_expr_arg_iterator iter;
int arg_count = 0;
- bool nonvoid;
-
- nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
+ rtx pat, op[4];
+ tree arg;
if (nonvoid)
{
@@ -10435,6 +10599,7 @@ sparc_expand_builtin (tree exp, rtx target,
else
op[0] = target;
}
+
FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
{
const struct insn_operand_data *insn_op;
@@ -10448,11 +10613,22 @@ sparc_expand_builtin (tree exp, rtx target,
insn_op = &insn_data[icode].operand[idx];
op[arg_count] = expand_normal (arg);
- if (insn_op->mode == V1DImode
- && GET_MODE (op[arg_count]) == DImode)
+ if (code == SPARC_BUILTIN_LDFSR || code == SPARC_BUILTIN_STFSR)
+ {
+ if (!address_operand (op[arg_count], SImode))
+ {
+ op[arg_count] = convert_memory_address (Pmode, op[arg_count]);
+ op[arg_count] = copy_addr_to_reg (op[arg_count]);
+ }
+ op[arg_count] = gen_rtx_MEM (SImode, op[arg_count]);
+ }
+
+ else if (insn_op->mode == V1DImode
+ && GET_MODE (op[arg_count]) == DImode)
op[arg_count] = gen_lowpart (V1DImode, op[arg_count]);
+
else if (insn_op->mode == V1SImode
- && GET_MODE (op[arg_count]) == SImode)
+ && GET_MODE (op[arg_count]) == SImode)
op[arg_count] = gen_lowpart (V1SImode, op[arg_count]);
if (! (*insn_data[icode].operand[idx].predicate) (op[arg_count],
@@ -10486,12 +10662,11 @@ sparc_expand_builtin (tree exp, rtx target,
emit_insn (pat);
- if (nonvoid)
- return op[0];
- else
- return const0_rtx;
+ return (nonvoid ? op[0] : const0_rtx);
}
+/* Return the upper 16 bits of the 8x16 multiplication. */
+
static int
sparc_vis_mul8x16 (int e8, int e16)
{
@@ -10502,15 +10677,15 @@ sparc_vis_mul8x16 (int e8, int e16)
the result into the array N_ELTS, whose elements are of INNER_TYPE. */
static void
-sparc_handle_vis_mul8x16 (tree *n_elts, int fncode, tree inner_type,
- tree cst0, tree cst1)
+sparc_handle_vis_mul8x16 (tree *n_elts, enum sparc_builtins fncode,
+ tree inner_type, tree cst0, tree cst1)
{
unsigned i, num = VECTOR_CST_NELTS (cst0);
int scale;
switch (fncode)
{
- case CODE_FOR_fmul8x16_vis:
+ case SPARC_BUILTIN_FMUL8X16:
for (i = 0; i < num; ++i)
{
int val
@@ -10520,7 +10695,7 @@ sparc_handle_vis_mul8x16 (tree *n_elts, int fncode, tree inner_type,
}
break;
- case CODE_FOR_fmul8x16au_vis:
+ case SPARC_BUILTIN_FMUL8X16AU:
scale = TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, 0));
for (i = 0; i < num; ++i)
@@ -10532,7 +10707,7 @@ sparc_handle_vis_mul8x16 (tree *n_elts, int fncode, tree inner_type,
}
break;
- case CODE_FOR_fmul8x16al_vis:
+ case SPARC_BUILTIN_FMUL8X16AL:
scale = TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, 1));
for (i = 0; i < num; ++i)
@@ -10549,7 +10724,8 @@ sparc_handle_vis_mul8x16 (tree *n_elts, int fncode, tree inner_type,
}
}
-/* Handle TARGET_FOLD_BUILTIN target hook.
+/* Implement TARGET_FOLD_BUILTIN hook.
+
Fold builtin functions for SPARC intrinsics. If IGNORE is true the
result of the function call is ignored. NULL_TREE is returned if the
function could not be folded. */
@@ -10558,34 +10734,30 @@ static tree
sparc_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
tree *args, bool ignore)
{
- tree arg0, arg1, arg2;
+ enum sparc_builtins code = (enum sparc_builtins) DECL_FUNCTION_CODE (fndecl);
tree rtype = TREE_TYPE (TREE_TYPE (fndecl));
- enum insn_code icode = (enum insn_code) DECL_FUNCTION_CODE (fndecl);
+ tree arg0, arg1, arg2;
if (ignore)
- {
- /* Note that a switch statement instead of the sequence of tests would
- be incorrect as many of the CODE_FOR values could be CODE_FOR_nothing
- and that would yield multiple alternatives with identical values. */
- if (icode == CODE_FOR_alignaddrsi_vis
- || icode == CODE_FOR_alignaddrdi_vis
- || icode == CODE_FOR_wrgsr_vis
- || icode == CODE_FOR_bmasksi_vis
- || icode == CODE_FOR_bmaskdi_vis
- || icode == CODE_FOR_cmask8si_vis
- || icode == CODE_FOR_cmask8di_vis
- || icode == CODE_FOR_cmask16si_vis
- || icode == CODE_FOR_cmask16di_vis
- || icode == CODE_FOR_cmask32si_vis
- || icode == CODE_FOR_cmask32di_vis)
- ;
- else
+ switch (code)
+ {
+ case SPARC_BUILTIN_LDFSR:
+ case SPARC_BUILTIN_STFSR:
+ case SPARC_BUILTIN_ALIGNADDR:
+ case SPARC_BUILTIN_WRGSR:
+ case SPARC_BUILTIN_BMASK:
+ case SPARC_BUILTIN_CMASK8:
+ case SPARC_BUILTIN_CMASK16:
+ case SPARC_BUILTIN_CMASK32:
+ break;
+
+ default:
return build_zero_cst (rtype);
- }
+ }
- switch (icode)
+ switch (code)
{
- case CODE_FOR_fexpand_vis:
+ case SPARC_BUILTIN_FEXPAND:
arg0 = args[0];
STRIP_NOPS (arg0);
@@ -10604,9 +10776,9 @@ sparc_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
}
break;
- case CODE_FOR_fmul8x16_vis:
- case CODE_FOR_fmul8x16au_vis:
- case CODE_FOR_fmul8x16al_vis:
+ case SPARC_BUILTIN_FMUL8X16:
+ case SPARC_BUILTIN_FMUL8X16AU:
+ case SPARC_BUILTIN_FMUL8X16AL:
arg0 = args[0];
arg1 = args[1];
STRIP_NOPS (arg0);
@@ -10616,12 +10788,12 @@ sparc_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
{
tree inner_type = TREE_TYPE (rtype);
tree *n_elts = XALLOCAVEC (tree, VECTOR_CST_NELTS (arg0));
- sparc_handle_vis_mul8x16 (n_elts, icode, inner_type, arg0, arg1);
+ sparc_handle_vis_mul8x16 (n_elts, code, inner_type, arg0, arg1);
return build_vector (rtype, n_elts);
}
break;
- case CODE_FOR_fpmerge_vis:
+ case SPARC_BUILTIN_FPMERGE:
arg0 = args[0];
arg1 = args[1];
STRIP_NOPS (arg0);
@@ -10641,13 +10813,19 @@ sparc_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
}
break;
- case CODE_FOR_pdist_vis:
+ case SPARC_BUILTIN_PDIST:
+ case SPARC_BUILTIN_PDISTN:
arg0 = args[0];
arg1 = args[1];
- arg2 = args[2];
STRIP_NOPS (arg0);
STRIP_NOPS (arg1);
- STRIP_NOPS (arg2);
+ if (code == SPARC_BUILTIN_PDIST)
+ {
+ arg2 = args[2];
+ STRIP_NOPS (arg2);
+ }
+ else
+ arg2 = integer_zero_node;
if (TREE_CODE (arg0) == VECTOR_CST
&& TREE_CODE (arg1) == VECTOR_CST
@@ -12326,9 +12504,106 @@ sparc_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
return true;
}
-static enum machine_mode sparc_cstore_mode (enum insn_code icode ATTRIBUTE_UNUSED)
+/* Implement TARGET_CSTORE_MODE. */
+
+static enum machine_mode
+sparc_cstore_mode (enum insn_code icode ATTRIBUTE_UNUSED)
{
return (TARGET_ARCH64 ? DImode : SImode);
}
+/* Return the compound expression made of T1 and T2. */
+
+static inline tree
+compound_expr (tree t1, tree t2)
+{
+ return build2 (COMPOUND_EXPR, void_type_node, t1, t2);
+}
+
+/* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
+
+static void
+sparc_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
+{
+ if (!TARGET_FPU)
+ return;
+
+ const unsigned HOST_WIDE_INT accrued_exception_mask = 0x1f << 5;
+ const unsigned HOST_WIDE_INT trap_enable_mask = 0x1f << 23;
+
+ /* We generate the equivalent of feholdexcept (&fenv_var):
+
+ unsigned int fenv_var;
+ __builtin_store_fsr (&fenv_var);
+
+ unsigned int tmp1_var;
+ tmp1_var = fenv_var & ~(accrued_exception_mask | trap_enable_mask);
+
+ __builtin_load_fsr (&tmp1_var); */
+
+ tree fenv_var = create_tmp_var (unsigned_type_node, NULL);
+ mark_addressable (fenv_var);
+ tree fenv_addr = build_fold_addr_expr (fenv_var);
+ tree stfsr = sparc_builtins[SPARC_BUILTIN_STFSR];
+ tree hold_stfsr = build_call_expr (stfsr, 1, fenv_addr);
+
+ tree tmp1_var = create_tmp_var (unsigned_type_node, NULL);
+ mark_addressable (tmp1_var);
+ tree masked_fenv_var
+ = build2 (BIT_AND_EXPR, unsigned_type_node, fenv_var,
+ build_int_cst (unsigned_type_node,
+ ~(accrued_exception_mask | trap_enable_mask)));
+ tree hold_mask
+ = build2 (MODIFY_EXPR, void_type_node, tmp1_var, masked_fenv_var);
+
+ tree tmp1_addr = build_fold_addr_expr (tmp1_var);
+ tree ldfsr = sparc_builtins[SPARC_BUILTIN_LDFSR];
+ tree hold_ldfsr = build_call_expr (ldfsr, 1, tmp1_addr);
+
+ *hold = compound_expr (compound_expr (hold_stfsr, hold_mask), hold_ldfsr);
+
+ /* We reload the value of tmp1_var to clear the exceptions:
+
+ __builtin_load_fsr (&tmp1_var); */
+
+ *clear = build_call_expr (ldfsr, 1, tmp1_addr);
+
+ /* We generate the equivalent of feupdateenv (&fenv_var):
+
+ unsigned int tmp2_var;
+ __builtin_store_fsr (&tmp2_var);
+
+ __builtin_load_fsr (&fenv_var);
+
+ if (SPARC_LOW_FE_EXCEPT_VALUES)
+ tmp2_var >>= 5;
+ __atomic_feraiseexcept ((int) tmp2_var); */
+
+ tree tmp2_var = create_tmp_var (unsigned_type_node, NULL);
+ mark_addressable (tmp2_var);
+ tree tmp3_addr = build_fold_addr_expr (tmp2_var);
+ tree update_stfsr = build_call_expr (stfsr, 1, tmp3_addr);
+
+ tree update_ldfsr = build_call_expr (ldfsr, 1, fenv_addr);
+
+ tree atomic_feraiseexcept
+ = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
+ tree update_call
+ = build_call_expr (atomic_feraiseexcept, 1,
+ fold_convert (integer_type_node, tmp2_var));
+
+ if (SPARC_LOW_FE_EXCEPT_VALUES)
+ {
+ tree shifted_tmp2_var
+ = build2 (RSHIFT_EXPR, unsigned_type_node, tmp2_var,
+ build_int_cst (unsigned_type_node, 5));
+ tree update_shift
+ = build2 (MODIFY_EXPR, void_type_node, tmp2_var, shifted_tmp2_var);
+ update_call = compound_expr (update_shift, update_call);
+ }
+
+ *update
+ = compound_expr (compound_expr (update_stfsr, update_ldfsr), update_call);
+}
+
#include "gt-sparc.h"
diff --git a/gcc/config/sparc/sparc.h b/gcc/config/sparc/sparc.h
index d96c1b6b422..7533e88491b 100644
--- a/gcc/config/sparc/sparc.h
+++ b/gcc/config/sparc/sparc.h
@@ -1777,3 +1777,6 @@ extern int sparc_indent_opcode;
#ifndef SUBTARGET_DEFAULT_MEMORY_MODEL
#define SUBTARGET_DEFAULT_MEMORY_MODEL SMM_DEFAULT
#endif
+
+/* Define this to 1 if the FE_EXCEPT values defined in fenv.h start at 1. */
+#define SPARC_LOW_FE_EXCEPT_VALUES 0
diff --git a/gcc/config/sparc/sparc.md b/gcc/config/sparc/sparc.md
index be7bbe977a2..b3fb2eb18fb 100644
--- a/gcc/config/sparc/sparc.md
+++ b/gcc/config/sparc/sparc.md
@@ -96,13 +96,19 @@
(define_c_enum "unspecv" [
UNSPECV_BLOCKAGE
+ UNSPECV_PROBE_STACK_RANGE
+
UNSPECV_FLUSHW
- UNSPECV_FLUSH
UNSPECV_SAVEW
- UNSPECV_CAS
- UNSPECV_SWAP
+
+ UNSPECV_FLUSH
+
UNSPECV_LDSTUB
- UNSPECV_PROBE_STACK_RANGE
+ UNSPECV_SWAP
+ UNSPECV_CAS
+
+ UNSPECV_LDFSR
+ UNSPECV_STFSR
])
(define_constants
@@ -6783,22 +6789,26 @@
;; Special pattern for the FLUSH instruction.
-; We do SImode and DImode versions of this to quiet down genrecog's complaints
-; of the define_insn otherwise missing a mode. We make "flush", aka
-; gen_flush, the default one since sparc_initialize_trampoline uses
-; it on SImode mem values.
-
-(define_insn "flush"
- [(unspec_volatile [(match_operand:SI 0 "memory_operand" "m")] UNSPECV_FLUSH)]
+(define_insn "flush<P:mode>"
+ [(unspec_volatile [(match_operand:P 0 "memory_operand" "m")] UNSPECV_FLUSH)]
""
{ return TARGET_V9 ? "flush\t%f0" : "iflush\t%f0"; }
[(set_attr "type" "iflush")])
-(define_insn "flushdi"
- [(unspec_volatile [(match_operand:DI 0 "memory_operand" "m")] UNSPECV_FLUSH)]
- ""
- { return TARGET_V9 ? "flush\t%f0" : "iflush\t%f0"; }
- [(set_attr "type" "iflush")])
+;; Special insns to load and store the 32-bit FP Status Register.
+
+(define_insn "ldfsr"
+ [(unspec_volatile [(match_operand:SI 0 "memory_operand" "m")] UNSPECV_LDFSR)]
+ "TARGET_FPU"
+ "ld\t%0, %%fsr"
+ [(set_attr "type" "load")])
+
+(define_insn "stfsr"
+ [(set (match_operand:SI 0 "memory_operand" "=m")
+ (unspec_volatile:SI [(const_int 0)] UNSPECV_STFSR))]
+ "TARGET_FPU"
+ "st\t%%fsr, %0"
+ [(set_attr "type" "store")])
;; Find first set instructions.
diff --git a/gcc/config/spu/spu.c b/gcc/config/spu/spu.c
index 5b8aef179c8..66209b675ad 100644
--- a/gcc/config/spu/spu.c
+++ b/gcc/config/spu/spu.c
@@ -2469,7 +2469,7 @@ spu_machine_dependent_reorg (void)
return;
}
- blocks = sbitmap_alloc (last_basic_block);
+ blocks = sbitmap_alloc (last_basic_block_for_fn (cfun));
bitmap_clear (blocks);
in_spu_reorg = 1;
@@ -2490,7 +2490,7 @@ spu_machine_dependent_reorg (void)
for (i = n_basic_blocks_for_fn (cfun) - 1; i >= 0; i--)
{
- bb = BASIC_BLOCK (i);
+ bb = BASIC_BLOCK_FOR_FN (cfun, i);
branch = 0;
if (spu_bb_info[i].prop_jump)
{
@@ -2645,7 +2645,7 @@ spu_machine_dependent_reorg (void)
find_many_sub_basic_blocks (blocks);
/* We have to schedule to make sure alignment is ok. */
- FOR_EACH_BB (bb) bb->flags &= ~BB_DISABLE_SCHEDULE;
+ FOR_EACH_BB_FN (bb, cfun) bb->flags &= ~BB_DISABLE_SCHEDULE;
/* The hints need to be scheduled, so call it again. */
schedule_insns ();
diff --git a/gcc/config/spu/spu.h b/gcc/config/spu/spu.h
index 64a2ba06b22..ad4405ae3d9 100644
--- a/gcc/config/spu/spu.h
+++ b/gcc/config/spu/spu.h
@@ -54,8 +54,6 @@ extern GTY(()) int spu_tune;
#define WORDS_BIG_ENDIAN 1
-#define BITS_PER_UNIT 8
-
/* GCC uses word_mode in many places, assuming that it is the fastest
integer mode. That is not the case for SPU though. We can't use
32 here because (of some reason I can't remember.) */
diff --git a/gcc/config/t-linux-android b/gcc/config/t-linux
index 75155f44d7c..7451baf1ddb 100644
--- a/gcc/config/t-linux-android
+++ b/gcc/config/t-linux
@@ -16,6 +16,6 @@
# along with GCC; see the file COPYING3. If not see
# <http://www.gnu.org/licenses/>.
-linux-android.o: $(srcdir)/config/linux-android.c
+linux.o: $(srcdir)/config/linux.c
$(COMPILE) $<
$(POSTCOMPILE)
diff --git a/gcc/config/tilegx/tilegx.c b/gcc/config/tilegx/tilegx.c
index c2f9e070a29..eecc9a926c3 100644
--- a/gcc/config/tilegx/tilegx.c
+++ b/gcc/config/tilegx/tilegx.c
@@ -4383,7 +4383,7 @@ static void
tilegx_gen_bundles (void)
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn, next;
rtx end = NEXT_INSN (BB_END (bb));
@@ -4709,7 +4709,7 @@ static void
reorder_var_tracking_notes (void)
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn, next;
rtx queue = NULL_RTX;
diff --git a/gcc/config/tilegx/tilegx.md b/gcc/config/tilegx/tilegx.md
index 30dc8e628ba..379b305a939 100644
--- a/gcc/config/tilegx/tilegx.md
+++ b/gcc/config/tilegx/tilegx.md
@@ -3284,9 +3284,9 @@
"")
(define_insn "insn_ld_add<bitsuffix>"
- [(set (match_operand:I48MODE 1 "pointer_operand" "=r")
- (plus:I48MODE (match_operand 3 "pointer_operand" "1")
- (match_operand 2 "s8bit_cint_operand" "i")))
+ [(set (match_operand:I48MODE 1 "register_operand" "=r")
+ (plus:I48MODE (match_operand:I48MODE 3 "register_operand" "1")
+ (match_operand:I48MODE 2 "s8bit_cint_operand" "i")))
(set (match_operand:DI 0 "register_operand" "=r")
(mem:DI (match_dup 3)))]
""
@@ -3302,9 +3302,9 @@
[(set_attr "type" "X1_2cycle")])
(define_insn "insn_ldna_add<bitsuffix>"
- [(set (match_operand:I48MODE 1 "pointer_operand" "=r")
- (plus:I48MODE (match_operand 3 "pointer_operand" "1")
- (match_operand 2 "s8bit_cint_operand" "i")))
+ [(set (match_operand:I48MODE 1 "register_operand" "=r")
+ (plus:I48MODE (match_operand:I48MODE 3 "register_operand" "1")
+ (match_operand:I48MODE 2 "s8bit_cint_operand" "i")))
(set (match_operand:DI 0 "register_operand" "=r")
(mem:DI (and:DI (match_dup 3) (const_int -8))))]
""
@@ -3318,9 +3318,9 @@
"")
(define_insn "insn_ld<I124MODE:n><s>_add<I48MODE:bitsuffix>"
- [(set (match_operand:I48MODE 1 "pointer_operand" "=r")
- (plus:I48MODE (match_operand 3 "pointer_operand" "1")
- (match_operand 2 "s8bit_cint_operand" "i")))
+ [(set (match_operand:I48MODE 1 "register_operand" "=r")
+ (plus:I48MODE (match_operand:I48MODE 3 "register_operand" "1")
+ (match_operand:I48MODE 2 "s8bit_cint_operand" "i")))
(set (match_operand:DI 0 "register_operand" "=r")
(any_extend:DI (mem:I124MODE (match_dup 3))))]
""
@@ -3338,9 +3338,9 @@
[(set_attr "type" "X1_2cycle")])
(define_insn "insn_ldnt_add<bitsuffix>"
- [(set (match_operand:I48MODE 1 "pointer_operand" "=r")
- (plus:I48MODE (match_operand 3 "pointer_operand" "1")
- (match_operand 2 "s8bit_cint_operand" "i")))
+ [(set (match_operand:I48MODE 1 "register_operand" "=r")
+ (plus:I48MODE (match_operand:I48MODE 3 "register_operand" "1")
+ (match_operand:I48MODE 2 "s8bit_cint_operand" "i")))
(set (match_operand:DI 0 "register_operand" "=r")
(unspec:DI [(mem:DI (match_dup 3))]
UNSPEC_NON_TEMPORAL))]
@@ -3359,9 +3359,9 @@
[(set_attr "type" "X1_2cycle")])
(define_insn "insn_ldnt<I124MODE:n><s>_add<I48MODE:bitsuffix>"
- [(set (match_operand:I48MODE 1 "pointer_operand" "=r")
- (plus:I48MODE (match_operand 3 "pointer_operand" "1")
- (match_operand 2 "s8bit_cint_operand" "i")))
+ [(set (match_operand:I48MODE 1 "register_operand" "=r")
+ (plus:I48MODE (match_operand:I48MODE 3 "register_operand" "1")
+ (match_operand:I48MODE 2 "s8bit_cint_operand" "i")))
(set (match_operand:DI 0 "register_operand" "=r")
(any_extend:DI (unspec:I124MODE [(mem:I124MODE (match_dup 3))]
UNSPEC_NON_TEMPORAL)))]
@@ -3380,9 +3380,9 @@
[(set_attr "type" "Y2_L2")])
(define_insn "insn_ld_add_L2<bitsuffix>"
- [(set (match_operand:I48MODE 1 "pointer_operand" "=r")
- (plus:I48MODE (match_operand 3 "pointer_operand" "1")
- (match_operand 2 "s8bit_cint_operand" "i")))
+ [(set (match_operand:I48MODE 1 "register_operand" "=r")
+ (plus:I48MODE (match_operand:I48MODE 3 "register_operand" "1")
+ (match_operand:I48MODE 2 "s8bit_cint_operand" "i")))
(set (match_operand:DI 0 "register_operand" "=r")
(unspec:DI [(mem:DI (match_dup 3))]
UNSPEC_LATENCY_L2))]
@@ -3400,9 +3400,9 @@
[(set_attr "type" "X1_L2")])
(define_insn "insn_ldna_add_L2<bitsuffix>"
- [(set (match_operand:I48MODE 1 "pointer_operand" "=r")
- (plus:I48MODE (match_operand 3 "pointer_operand" "1")
- (match_operand 2 "s8bit_cint_operand" "i")))
+ [(set (match_operand:I48MODE 1 "register_operand" "=r")
+ (plus:I48MODE (match_operand:I48MODE 3 "register_operand" "1")
+ (match_operand:I48MODE 2 "s8bit_cint_operand" "i")))
(set (match_operand:DI 0 "register_operand" "=r")
(unspec:DI [(mem:DI (and:DI (match_dup 3) (const_int -8)))]
UNSPEC_LATENCY_L2))]
@@ -3421,9 +3421,9 @@
[(set_attr "type" "Y2_L2")])
(define_insn "insn_ld<I124MODE:n><s>_add_L2<I48MODE:bitsuffix>"
- [(set (match_operand:I48MODE 1 "pointer_operand" "=r")
- (plus:I48MODE (match_operand 3 "pointer_operand" "1")
- (match_operand 2 "s8bit_cint_operand" "i")))
+ [(set (match_operand:I48MODE 1 "register_operand" "=r")
+ (plus:I48MODE (match_operand:I48MODE 3 "register_operand" "1")
+ (match_operand:I48MODE 2 "s8bit_cint_operand" "i")))
(set (match_operand:DI 0 "register_operand" "=r")
(any_extend:DI (unspec:I124MODE [(mem:I124MODE (match_dup 3))]
UNSPEC_LATENCY_L2)))]
@@ -3444,9 +3444,9 @@
[(set_attr "type" "X1_L2")])
(define_insn "insn_ldnt_add_L2<bitsuffix>"
- [(set (match_operand:I48MODE 1 "pointer_operand" "=r")
- (plus:I48MODE (match_operand 3 "pointer_operand" "1")
- (match_operand 2 "s8bit_cint_operand" "i")))
+ [(set (match_operand:I48MODE 1 "register_operand" "=r")
+ (plus:I48MODE (match_operand:I48MODE 3 "register_operand" "1")
+ (match_operand:I48MODE 2 "s8bit_cint_operand" "i")))
(set (match_operand:DI 0 "register_operand" "=r")
(unspec:DI [(unspec:DI
[(mem:DI (match_dup 3))]
@@ -3469,9 +3469,9 @@
[(set_attr "type" "X1_L2")])
(define_insn "insn_ldnt<I124MODE:n><s>_add_L2<I48MODE:bitsuffix>"
- [(set (match_operand:I48MODE 1 "pointer_operand" "=r")
- (plus:I48MODE (match_operand 3 "pointer_operand" "1")
- (match_operand 2 "s8bit_cint_operand" "i")))
+ [(set (match_operand:I48MODE 1 "register_operand" "=r")
+ (plus:I48MODE (match_operand:I48MODE 3 "register_operand" "1")
+ (match_operand:I48MODE 2 "s8bit_cint_operand" "i")))
(set (match_operand:DI 0 "register_operand" "=r")
(any_extend:DI
(unspec:I124MODE [(unspec:I124MODE
@@ -3493,9 +3493,9 @@
[(set_attr "type" "Y2_miss")])
(define_insn "insn_ld_add_miss<bitsuffix>"
- [(set (match_operand:I48MODE 1 "pointer_operand" "=r")
- (plus:I48MODE (match_operand 3 "pointer_operand" "1")
- (match_operand 2 "s8bit_cint_operand" "i")))
+ [(set (match_operand:I48MODE 1 "register_operand" "=r")
+ (plus:I48MODE (match_operand:I48MODE 3 "register_operand" "1")
+ (match_operand:I48MODE 2 "s8bit_cint_operand" "i")))
(set (match_operand:DI 0 "register_operand" "=r")
(unspec:DI [(mem:DI (match_dup 3))]
UNSPEC_LATENCY_MISS))]
@@ -3513,9 +3513,9 @@
[(set_attr "type" "X1_miss")])
(define_insn "insn_ldna_add_miss<bitsuffix>"
- [(set (match_operand:I48MODE 1 "pointer_operand" "=r")
- (plus:I48MODE (match_operand 3 "pointer_operand" "1")
- (match_operand 2 "s8bit_cint_operand" "i")))
+ [(set (match_operand:I48MODE 1 "register_operand" "=r")
+ (plus:I48MODE (match_operand:I48MODE 3 "register_operand" "1")
+ (match_operand:I48MODE 2 "s8bit_cint_operand" "i")))
(set (match_operand:DI 0 "register_operand" "=r")
(unspec:DI [(mem:DI (and:DI (match_dup 3) (const_int -8)))]
UNSPEC_LATENCY_MISS))]
@@ -3534,9 +3534,9 @@
[(set_attr "type" "Y2_miss")])
(define_insn "insn_ld<I124MODE:n><s>_add_miss<I48MODE:bitsuffix>"
- [(set (match_operand:I48MODE 1 "pointer_operand" "=r")
- (plus:I48MODE (match_operand 3 "pointer_operand" "1")
- (match_operand 2 "s8bit_cint_operand" "i")))
+ [(set (match_operand:I48MODE 1 "register_operand" "=r")
+ (plus:I48MODE (match_operand:I48MODE 3 "register_operand" "1")
+ (match_operand:I48MODE 2 "s8bit_cint_operand" "i")))
(set (match_operand:DI 0 "register_operand" "=r")
(any_extend:DI (unspec:I124MODE [(mem:I124MODE (match_dup 3))]
UNSPEC_LATENCY_MISS)))]
@@ -3557,9 +3557,9 @@
[(set_attr "type" "X1_miss")])
(define_insn "insn_ldnt_add_miss<bitsuffix>"
- [(set (match_operand:I48MODE 1 "pointer_operand" "=r")
- (plus:I48MODE (match_operand 3 "pointer_operand" "1")
- (match_operand 2 "s8bit_cint_operand" "i")))
+ [(set (match_operand:I48MODE 1 "register_operand" "=r")
+ (plus:I48MODE (match_operand:I48MODE 3 "register_operand" "1")
+ (match_operand:I48MODE 2 "s8bit_cint_operand" "i")))
(set (match_operand:DI 0 "register_operand" "=r")
(unspec:DI [(unspec:DI
[(mem:DI (match_dup 3))]
@@ -3582,9 +3582,9 @@
[(set_attr "type" "X1_miss")])
(define_insn "insn_ldnt<I124MODE:n><s>_add_miss<I48MODE:bitsuffix>"
- [(set (match_operand:I48MODE 1 "pointer_operand" "=r")
- (plus:I48MODE (match_operand 3 "pointer_operand" "1")
- (match_operand 2 "s8bit_cint_operand" "i")))
+ [(set (match_operand:I48MODE 1 "register_operand" "=r")
+ (plus:I48MODE (match_operand:I48MODE 3 "register_operand" "1")
+ (match_operand:I48MODE 2 "s8bit_cint_operand" "i")))
(set (match_operand:DI 0 "register_operand" "=r")
(any_extend:DI
(unspec:I124MODE [(unspec:I124MODE
@@ -3969,9 +3969,9 @@
"")
(define_insn "insn_st_add<bitsuffix>"
- [(set (match_operand:I48MODE 0 "pointer_operand" "=r")
- (plus:I48MODE (match_operand 3 "pointer_operand" "0")
- (match_operand 2 "s8bit_cint_operand" "i")))
+ [(set (match_operand:I48MODE 0 "register_operand" "=r")
+ (plus:I48MODE (match_operand:I48MODE 3 "register_operand" "0")
+ (match_operand:I48MODE 2 "s8bit_cint_operand" "i")))
(set (mem:DI (match_dup 3))
(match_operand:DI 1 "reg_or_0_operand" "rO"))]
""
@@ -3988,9 +3988,9 @@
(define_expand "insn_st<I124MODE:n>_add<I48MODE:bitsuffix>"
[(parallel
- [(set (match_operand:I48MODE 0 "pointer_operand" "")
- (plus:I48MODE (match_operand 3 "pointer_operand" "")
- (match_operand 2 "s8bit_cint_operand" "")))
+ [(set (match_operand:I48MODE 0 "register_operand" "")
+ (plus:I48MODE (match_operand:I48MODE 3 "register_operand" "")
+ (match_operand:I48MODE 2 "s8bit_cint_operand" "")))
(set (mem:I124MODE (match_dup 3))
(match_operand:DI 1 "reg_or_0_operand" ""))])]
""
@@ -4000,9 +4000,9 @@
})
(define_insn "*insn_st<I124MODE:n>_add<I48MODE:bitsuffix>"
- [(set (match_operand:I48MODE 0 "pointer_operand" "=r")
- (plus:I48MODE (match_operand 3 "pointer_operand" "0")
- (match_operand 2 "s8bit_cint_operand" "i")))
+ [(set (match_operand:I48MODE 0 "register_operand" "=r")
+ (plus:I48MODE (match_operand:I48MODE 3 "register_operand" "0")
+ (match_operand:I48MODE 2 "s8bit_cint_operand" "i")))
(set (mem:I124MODE (match_dup 3))
(match_operand:I124MODE 1 "reg_or_0_operand" "rO"))]
""
@@ -4020,9 +4020,9 @@
[(set_attr "type" "X1")])
(define_insn "insn_stnt_add<bitsuffix>"
- [(set (match_operand:I48MODE 0 "pointer_operand" "=r")
- (plus:I48MODE (match_operand 3 "pointer_operand" "0")
- (match_operand 2 "s8bit_cint_operand" "i")))
+ [(set (match_operand:I48MODE 0 "register_operand" "=r")
+ (plus:I48MODE (match_operand:I48MODE 3 "register_operand" "0")
+ (match_operand:I48MODE 2 "s8bit_cint_operand" "i")))
(set (mem:DI (unspec:I48MODE [(match_dup 3)] UNSPEC_NON_TEMPORAL))
(match_operand:DI 1 "reg_or_0_operand" "rO"))]
""
@@ -4048,9 +4048,9 @@
(define_expand "insn_stnt<I124MODE:n>_add<I48MODE:bitsuffix>"
[(parallel
- [(set (match_operand:I48MODE 0 "pointer_operand" "")
- (plus:I48MODE (match_operand 3 "pointer_operand" "")
- (match_operand 2 "s8bit_cint_operand" "")))
+ [(set (match_operand:I48MODE 0 "register_operand" "")
+ (plus:I48MODE (match_operand:I48MODE 3 "register_operand" "")
+ (match_operand:I48MODE 2 "s8bit_cint_operand" "")))
(set (mem:I124MODE (unspec:I48MODE [(match_dup 3)] UNSPEC_NON_TEMPORAL))
(match_operand:DI 1 "reg_or_0_operand" "rO"))])]
""
@@ -4060,9 +4060,9 @@
})
(define_insn "*insn_stnt<I124MODE:n>_add<I48MODE:bitsuffix>"
- [(set (match_operand:I48MODE 0 "pointer_operand" "=r")
- (plus:I48MODE (match_operand 3 "pointer_operand" "0")
- (match_operand 2 "s8bit_cint_operand" "i")))
+ [(set (match_operand:I48MODE 0 "register_operand" "=r")
+ (plus:I48MODE (match_operand:I48MODE 3 "register_operand" "0")
+ (match_operand:I48MODE 2 "s8bit_cint_operand" "i")))
(set (mem:I124MODE (unspec:I48MODE [(match_dup 3)] UNSPEC_NON_TEMPORAL))
(match_operand:I124MODE 1 "reg_or_0_operand" "rO"))]
""
@@ -4828,7 +4828,7 @@
;; {B3,B2,B1,B0} {A3,A2,A1,A0}
;; => {A3,A2,A1,A0,B3,B2,B1,B0}
(define_insn "vec_pack_<pack_optab>_v4hi"
- [(set (match_operand:V8QI 0 "reg_or_0_operand" "=r")
+ [(set (match_operand:V8QI 0 "register_operand" "=r")
(vec_concat:V8QI
(v2pack:V4QI (match_operand:V4HI 1 "reg_or_0_operand" "rO"))
(v2pack:V4QI (match_operand:V4HI 2 "reg_or_0_operand" "rO"))))]
@@ -4837,7 +4837,7 @@
[(set_attr "type" "X01")])
(define_expand "insn_v2<pack_insn>"
- [(set (match_operand:DI 0 "reg_or_0_operand" "")
+ [(set (match_operand:DI 0 "register_operand" "")
(vec_concat:V8QI
(v2pack:V4QI (match_operand:DI 2 "reg_or_0_operand" ""))
(v2pack:V4QI (match_operand:DI 1 "reg_or_0_operand" ""))))]
@@ -4855,7 +4855,7 @@
;; {B3,B2,B1,B0} {A3,A2,A1,A0}
;; => {A3_hi,A2_hi,A1_hi,A0_hi,B3_hi,B2_hi,B1_hi,B0_hi}
(define_insn "vec_pack_hipart_v4hi"
- [(set (match_operand:V8QI 0 "reg_or_0_operand" "=r")
+ [(set (match_operand:V8QI 0 "register_operand" "=r")
(vec_concat:V8QI
(truncate:V4QI
(ashiftrt:V4HI (match_operand:V4HI 1 "reg_or_0_operand" "rO")
@@ -4868,7 +4868,7 @@
[(set_attr "type" "X01")])
(define_expand "insn_v2packh"
- [(set (match_operand:DI 0 "reg_or_0_operand" "")
+ [(set (match_operand:DI 0 "register_operand" "")
(vec_concat:V8QI
(truncate:V4QI
(ashiftrt:V4HI (match_operand:DI 2 "reg_or_0_operand" "")
@@ -4890,7 +4890,7 @@
;; {B1,B0} {A1,A0}
;; => {A1,A0,B1,B0}
(define_insn "vec_pack_ssat_v2si"
- [(set (match_operand:V4HI 0 "reg_or_0_operand" "=r")
+ [(set (match_operand:V4HI 0 "register_operand" "=r")
(vec_concat:V4HI
(us_truncate:V2HI (match_operand:V2SI 1 "reg_or_0_operand" "rO"))
(us_truncate:V2HI (match_operand:V2SI 2 "reg_or_0_operand" "rO"))))]
@@ -4899,7 +4899,7 @@
[(set_attr "type" "X01")])
(define_expand "insn_v4packsc"
- [(set (match_operand:DI 0 "reg_or_0_operand" "")
+ [(set (match_operand:DI 0 "register_operand" "")
(vec_concat:V4HI
(us_truncate:V2HI (match_operand:DI 2 "reg_or_0_operand" ""))
(us_truncate:V2HI (match_operand:DI 1 "reg_or_0_operand" ""))))]
diff --git a/gcc/config/tilepro/tilepro.c b/gcc/config/tilepro/tilepro.c
index 31bc4908965..b2bafb4f300 100644
--- a/gcc/config/tilepro/tilepro.c
+++ b/gcc/config/tilepro/tilepro.c
@@ -3988,7 +3988,7 @@ static void
tilepro_gen_bundles (void)
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn, next;
rtx end = NEXT_INSN (BB_END (bb));
@@ -4259,7 +4259,7 @@ static void
reorder_var_tracking_notes (void)
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn, next;
rtx queue = NULL_RTX;
diff --git a/gcc/config/v850/v850.md b/gcc/config/v850/v850.md
index f56d54a91a0..213aedffa57 100644
--- a/gcc/config/v850/v850.md
+++ b/gcc/config/v850/v850.md
@@ -233,7 +233,7 @@
})
(define_insn "*movqi_internal"
- [(set (match_operand:QI 0 "general_operand" "=r,r,r,Q,r,m,m")
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,r,Q,r,m,m")
(match_operand:QI 1 "general_operand" "Jr,n,Q,Ir,m,r,I"))]
"register_operand (operands[0], QImode)
|| reg_or_0_operand (operands[1], QImode)"
@@ -258,7 +258,7 @@
})
(define_insn "*movhi_internal"
- [(set (match_operand:HI 0 "general_operand" "=r,r,r,Q,r,m,m")
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,r,Q,r,m,m")
(match_operand:HI 1 "general_operand" "Jr,n,Q,Ir,m,r,I"))]
"register_operand (operands[0], HImode)
|| reg_or_0_operand (operands[1], HImode)"
@@ -334,7 +334,7 @@
;; upper part with hi, and then put the lower part in the load/store insn.
(define_insn "*movsi_internal_v850e"
- [(set (match_operand:SI 0 "general_operand" "=r,r,r,r,Q,r,r,m,m,r")
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,r,Q,r,r,m,m,r")
(match_operand:SI 1 "general_operand" "Jr,K,L,Q,Ir,m,R,r,I,i"))]
"(TARGET_V850E_UP)
&& (register_operand (operands[0], SImode)
@@ -347,7 +347,7 @@
(set_attr "type" "other,other,other,load,other,load,other,store,store,other")])
(define_insn "*movsi_internal"
- [(set (match_operand:SI 0 "general_operand" "=r,r,r,r,Q,r,r,m,m")
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,r,Q,r,r,m,m")
(match_operand:SI 1 "movsi_source_operand" "Jr,K,L,Q,Ir,m,R,r,I"))]
"register_operand (operands[0], SImode)
|| reg_or_0_operand (operands[1], SImode)"
@@ -359,7 +359,7 @@
(set_attr "type" "other,other,other,load,other,load,store,store,other")])
(define_insn "*movsf_internal"
- [(set (match_operand:SF 0 "general_operand" "=r,r,r,r,r,Q,r,m,m,r")
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,r,r,r,Q,r,m,m,r")
(match_operand:SF 1 "general_operand" "Jr,K,L,n,Q,Ir,m,r,IG,iF"))]
"register_operand (operands[0], SFmode)
|| reg_or_0_operand (operands[1], SFmode)"
diff --git a/gcc/configure b/gcc/configure
index fdf0cd0819b..e4527fcec84 100755
--- a/gcc/configure
+++ b/gcc/configure
@@ -11287,13 +11287,11 @@ else
/* | A-Za-z:\\/* ) realsrcdir=${srcdir};;
*) realsrcdir=../${srcdir};;
esac
- saved_CFLAGS="${CFLAGS}"
CC="${CC_FOR_BUILD}" CFLAGS="${CFLAGS_FOR_BUILD}" \
- LDFLAGS="${LDFLAGS_FOR_BUILD}" \
+ LDFLAGS="${LDFLAGS_FOR_BUILD}" GMPINC="" \
${realsrcdir}/configure \
--enable-languages=${enable_languages-all} \
--target=$target_alias --host=$build_alias --build=$build_alias
- CFLAGS="${saved_CFLAGS}"
# We just finished tests for the build machine, so rename
# the file auto-build.h in the gcc directory.
@@ -11788,6 +11786,7 @@ STMP_FIXINC=stmp-fixinc
if test x$build != x$host || test "x$coverage_flags" != x
then
BUILD_CFLAGS='$(INTERNAL_CFLAGS) $(T_CFLAGS) $(CFLAGS_FOR_BUILD)'
+ BUILD_CXXFLAGS='$(INTERNAL_CFLAGS) $(T_CFLAGS) $(CXXFLAGS_FOR_BUILD)'
BUILD_LDFLAGS='$(LDFLAGS_FOR_BUILD)'
fi
@@ -17919,7 +17918,7 @@ else
lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
lt_status=$lt_dlunknown
cat > conftest.$ac_ext <<_LT_EOF
-#line 17922 "configure"
+#line 17921 "configure"
#include "confdefs.h"
#if HAVE_DLFCN_H
@@ -18025,7 +18024,7 @@ else
lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
lt_status=$lt_dlunknown
cat > conftest.$ac_ext <<_LT_EOF
-#line 18028 "configure"
+#line 18027 "configure"
#include "confdefs.h"
#if HAVE_DLFCN_H
@@ -23933,6 +23932,60 @@ _ACEOF
$as_echo "$gcc_cv_lto_plugin" >&6; }
case "$target" in
+ aarch64*-*-*)
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking assembler for -mabi option" >&5
+$as_echo_n "checking assembler for -mabi option... " >&6; }
+if test "${gcc_cv_as_aarch64_mabi+set}" = set; then :
+ $as_echo_n "(cached) " >&6
+else
+ gcc_cv_as_aarch64_mabi=no
+ if test x$gcc_cv_as != x; then
+ $as_echo '.text' > conftest.s
+ if { ac_try='$gcc_cv_as $gcc_cv_as_flags -mabi=lp64 -o conftest.o conftest.s >&5'
+ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_try\""; } >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; }
+ then
+ gcc_cv_as_aarch64_mabi=yes
+ else
+ echo "configure: failed program was" >&5
+ cat conftest.s >&5
+ fi
+ rm -f conftest.o conftest.s
+ fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gcc_cv_as_aarch64_mabi" >&5
+$as_echo "$gcc_cv_as_aarch64_mabi" >&6; }
+
+ if test x$gcc_cv_as_aarch64_mabi = xyes; then
+
+$as_echo "#define HAVE_AS_MABI_OPTION 1" >>confdefs.h
+
+ else
+ if test x$with_abi = xilp32; then
+ as_fn_error "Assembler does not support -mabi=ilp32.\
+ Upgrade the Assembler." "$LINENO" 5
+ fi
+ if test x"$with_multilib_list" = xdefault; then
+ TM_MULTILIB_CONFIG=lp64
+ else
+ aarch64_multilibs=`echo $with_multilib_list | sed -e 's/,/ /g'`
+ for aarch64_multilib in ${aarch64_multilibs}; do
+ case ${aarch64_multilib} in
+ ilp32)
+ as_fn_error "Assembler does not support -mabi=ilp32.\
+ Upgrade the Assembler." "$LINENO" 5
+ ;;
+ *)
+ ;;
+ esac
+ done
+ fi
+ fi
+ ;;
+
# All TARGET_ABI_OSF targets.
alpha*-*-linux* | alpha*-*-*bsd*)
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking assembler for explicit relocation support" >&5
@@ -27508,6 +27561,14 @@ fi
echo "source ${srcdir}/gdbinit.in" >> .gdbinit
echo "python import sys; sys.path.append('${srcdir}'); import gdbhooks" >> .gdbinit
+# Put a breakpoint on __asan_report_error to help with debugging buffer
+# overflow.
+case "$CFLAGS" in
+*-fsanitize=address*)
+ echo "source ${srcdir}/gdbasan.in" >> .gdbinit
+ ;;
+esac
+
gcc_tooldir='$(libsubdir)/$(libsubdir_to_prefix)$(target_noncanonical)'
diff --git a/gcc/configure.ac b/gcc/configure.ac
index 91a22d58cf7..59de08da86d 100644
--- a/gcc/configure.ac
+++ b/gcc/configure.ac
@@ -1529,13 +1529,11 @@ else
/* | [A-Za-z]:[\\/]* ) realsrcdir=${srcdir};;
*) realsrcdir=../${srcdir};;
esac
- saved_CFLAGS="${CFLAGS}"
CC="${CC_FOR_BUILD}" CFLAGS="${CFLAGS_FOR_BUILD}" \
- LDFLAGS="${LDFLAGS_FOR_BUILD}" \
+ LDFLAGS="${LDFLAGS_FOR_BUILD}" GMPINC="" \
${realsrcdir}/configure \
--enable-languages=${enable_languages-all} \
--target=$target_alias --host=$build_alias --build=$build_alias
- CFLAGS="${saved_CFLAGS}"
# We just finished tests for the build machine, so rename
# the file auto-build.h in the gcc directory.
@@ -1900,6 +1898,7 @@ STMP_FIXINC=stmp-fixinc AC_SUBST(STMP_FIXINC)
if test x$build != x$host || test "x$coverage_flags" != x
then
BUILD_CFLAGS='$(INTERNAL_CFLAGS) $(T_CFLAGS) $(CFLAGS_FOR_BUILD)'
+ BUILD_CXXFLAGS='$(INTERNAL_CFLAGS) $(T_CFLAGS) $(CXXFLAGS_FOR_BUILD)'
BUILD_LDFLAGS='$(LDFLAGS_FOR_BUILD)'
fi
@@ -3495,6 +3494,35 @@ AC_DEFINE_UNQUOTED(HAVE_LTO_PLUGIN, $gcc_cv_lto_plugin,
AC_MSG_RESULT($gcc_cv_lto_plugin)
case "$target" in
+ aarch64*-*-*)
+ gcc_GAS_CHECK_FEATURE([-mabi option], gcc_cv_as_aarch64_mabi,,
+ [-mabi=lp64], [.text],,,)
+ if test x$gcc_cv_as_aarch64_mabi = xyes; then
+ AC_DEFINE(HAVE_AS_MABI_OPTION, 1,
+ [Define if your assembler supports the -mabi option.])
+ else
+ if test x$with_abi = xilp32; then
+ AC_MSG_ERROR([Assembler does not support -mabi=ilp32.\
+ Upgrade the Assembler.])
+ fi
+ if test x"$with_multilib_list" = xdefault; then
+ TM_MULTILIB_CONFIG=lp64
+ else
+ aarch64_multilibs=`echo $with_multilib_list | sed -e 's/,/ /g'`
+ for aarch64_multilib in ${aarch64_multilibs}; do
+ case ${aarch64_multilib} in
+ ilp32)
+ AC_MSG_ERROR([Assembler does not support -mabi=ilp32.\
+ Upgrade the Assembler.])
+ ;;
+ *)
+ ;;
+ esac
+ done
+ fi
+ fi
+ ;;
+
# All TARGET_ABI_OSF targets.
alpha*-*-linux* | alpha*-*-*bsd*)
gcc_GAS_CHECK_FEATURE([explicit relocation support],
@@ -5245,6 +5273,14 @@ fi
echo "source ${srcdir}/gdbinit.in" >> .gdbinit
echo "python import sys; sys.path.append('${srcdir}'); import gdbhooks" >> .gdbinit
+# Put a breakpoint on __asan_report_error to help with debugging buffer
+# overflow.
+case "$CFLAGS" in
+*-fsanitize=address*)
+ echo "source ${srcdir}/gdbasan.in" >> .gdbinit
+ ;;
+esac
+
gcc_tooldir='$(libsubdir)/$(libsubdir_to_prefix)$(target_noncanonical)'
AC_SUBST(gcc_tooldir)
AC_SUBST(dollar)
diff --git a/gcc/coretypes.h b/gcc/coretypes.h
index 439b0cb013b..1453f293b2d 100644
--- a/gcc/coretypes.h
+++ b/gcc/coretypes.h
@@ -46,9 +46,9 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
typedef HOST_WIDEST_INT gcov_type;
typedef unsigned HOST_WIDEST_INT gcov_type_unsigned;
-struct bitmap_head_def;
-typedef struct bitmap_head_def *bitmap;
-typedef const struct bitmap_head_def *const_bitmap;
+struct bitmap_head;
+typedef struct bitmap_head *bitmap;
+typedef const struct bitmap_head *const_bitmap;
struct simple_bitmap_def;
typedef struct simple_bitmap_def *sbitmap;
typedef const struct simple_bitmap_def *const_sbitmap;
@@ -67,8 +67,7 @@ typedef const union tree_node *const_tree;
typedef struct gimple_statement_base *gimple;
typedef const struct gimple_statement_base *const_gimple;
typedef gimple gimple_seq;
-struct gimple_stmt_iterator_d;
-typedef struct gimple_stmt_iterator_d gimple_stmt_iterator;
+struct gimple_stmt_iterator;
union section;
typedef union section section;
struct gcc_options;
diff --git a/gcc/coverage.c b/gcc/coverage.c
index f2ac5fcaa46..f7a2924707a 100644
--- a/gcc/coverage.c
+++ b/gcc/coverage.c
@@ -588,7 +588,7 @@ coverage_compute_cfg_checksum (void)
basic_block bb;
unsigned chksum = n_basic_blocks_for_fn (cfun);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
edge e;
edge_iterator ei;
diff --git a/gcc/cp/ChangeLog b/gcc/cp/ChangeLog
index e2fc2a2d125..59c1d5322e7 100644
--- a/gcc/cp/ChangeLog
+++ b/gcc/cp/ChangeLog
@@ -1,3 +1,75 @@
+2013-12-12 Jason Merrill <jason@redhat.com>
+
+ PR c++/58954
+ * pt.c (resolve_overloaded_unification): Use instantiate_template.
+
+2013-12-12 Jakub Jelinek <jakub@redhat.com>
+
+ PR c++/58627
+ * call.c (add_template_candidate_real): Don't call ggc_free on targs.
+
+2013-12-11 Balaji V. Iyer <balaji.v.iyer@intel.com>
+
+ * cp-tree.h (cilk_valid_spawn): New prototype.
+ (gimplify_cilk_spawn): Likewise.
+ (create_try_catch_expr): Likewise.
+ * decl.c (finish_function): Insert Cilk function-calls when a
+ _Cilk_spawn is used in a function.
+ * parser.c (cp_parser_postfix_expression): Added RID_CILK_SPAWN and
+ RID_CILK_SYNC cases.
+ * cp-cilkplus.c (set_cilk_except_flag): New function.
+ (set_cilk_except_data): Likewise.
+ (cilk_install_body_with_frame_cleanup): Likewise.
+ * except.c (create_try_catch_expr): Likewise.
+ * parser.h (IN_CILK_SPAWN): New #define.
+ * pt.c (tsubst_expr): Added CILK_SPAWN_STMT and CILK_SYNC_STMT cases.
+ * semantics.c (potential_constant_expression_1): Likewise.
+ * typeck.c (cp_build_compound_expr): Reject a spawned function in a
+ compound expression.
+ (check_return_expr): Reject a spawned function in a return expression.
+ * cp-gimplify.c (cp_gimplify_expr): Added a CILK_SPAWN_STMT and
+ CALL_EXPR case. Added handling of spawned function in MODIFY_EXPR
+ and INIT_EXPR.
+
+2013-12-09 Paolo Carlini <paolo.carlini@oracle.com>
+
+ PR c++/59435
+ * parser.c (cp_parser_cache_defarg): sizeof ... ( p ) can
+ occur in a default argument too.
+
+2013-12-06 Caroline Tice <cmtice@google.com>
+
+ Submitting patch from Stephen Checkoway, s@cs.jhu.edu
+ * vtable-class-hierarchy.c (init_functions): Make the libvtv
+ function decls externally visible.
+
+2013-12-06 Oleg Endo <olegendo@gcc.gnu.org>
+
+ * decl2.c: Remove struct tags when referring to class varpool_node.
+
+2013-12-05 Jason Merrill <jason@redhat.com>
+
+ PR c++/59044
+ PR c++/59052
+ * pt.c (most_specialized_class): Use the partially instantiated
+ template for deduction. Drop the TMPL parameter.
+
+2013-12-05 Paolo Carlini <paolo.carlini@oracle.com>
+
+ * decl.c (duplicate_decls): Replace pairs of errors and permerrors
+ with error + inform (permerror + inform, respectively).
+
+2013-12-04 Joseph Myers <joseph@codesourcery.com>
+
+ PR c/52023
+ * typeck.c (cxx_sizeof_or_alignof_type): Update call to
+ c_sizeof_or_alignof_type.
+
+2013-12-04 Jakub Jelinek <jakub@redhat.com>
+
+ PR c++/59268
+ * pt.c (tsubst_copy_and_build): Handle POINTER_PLUS_EXPR.
+
2013-11-29 Marek Polacek <polacek@redhat.com>
PR sanitizer/59331
diff --git a/gcc/cp/class.c b/gcc/cp/class.c
index 7520f72f680..9062ed071d7 100644
--- a/gcc/cp/class.c
+++ b/gcc/cp/class.c
@@ -7476,8 +7476,6 @@ resolve_address_of_overloaded_function (tree target_type,
/* See if there's a match. */
if (same_type_p (target_fn_type, static_fn_type (instantiation)))
matches = tree_cons (instantiation, fn, matches);
-
- ggc_free (targs);
}
/* Now, remove all but the most specialized of the matches. */
diff --git a/gcc/cp/cp-cilkplus.c b/gcc/cp/cp-cilkplus.c
index 5c1090a097f..d3f3323721e 100644
--- a/gcc/cp/cp-cilkplus.c
+++ b/gcc/cp/cp-cilkplus.c
@@ -25,7 +25,10 @@
#include "coretypes.h"
#include "cp-tree.h"
#include "diagnostic-core.h"
-
+#include "tree-iterator.h"
+#include "tree-inline.h" /* for copy_tree_body_r. */
+#include "ggc.h"
+#include "cilk.h"
/* Callback for cp_walk_tree to validate the body of a pragma simd loop
or _cilk_for loop.
@@ -75,3 +78,68 @@ cpp_validate_cilk_plus_loop (tree body)
(void *) &valid, NULL);
return valid;
}
+
+/* Sets the EXCEPTION bit (0x10) in the FRAME.flags field. */
+
+static tree
+set_cilk_except_flag (tree frame)
+{
+ tree flags = cilk_dot (frame, CILK_TI_FRAME_FLAGS, 0);
+
+ flags = build2 (MODIFY_EXPR, void_type_node, flags,
+ build2 (BIT_IOR_EXPR, TREE_TYPE (flags), flags,
+ build_int_cst (TREE_TYPE (flags),
+ CILK_FRAME_EXCEPTING)));
+ return flags;
+}
+
+/* Sets the frame.EXCEPT_DATA field to the head of the exception pointer. */
+
+static tree
+set_cilk_except_data (tree frame)
+{
+ tree except_data = cilk_dot (frame, CILK_TI_FRAME_EXCEPTION, 0);
+ tree uresume_fn = builtin_decl_implicit (BUILT_IN_EH_POINTER);
+ tree ret_expr;
+ uresume_fn = build_call_expr (uresume_fn, 1,
+ build_int_cst (integer_type_node, 0));
+ ret_expr = build2 (MODIFY_EXPR, void_type_node, except_data, uresume_fn);
+ return ret_expr;
+}
+
+/* Installs BODY into function FNDECL with appropriate exception handling
+ code. WD holds information of wrapper function used to pass into the
+ outlining function, cilk_outline. */
+
+void
+cilk_install_body_with_frame_cleanup (tree fndecl, tree orig_body, void *wd)
+{
+ tree frame = make_cilk_frame (fndecl);
+ tree dtor = create_cilk_function_exit (frame, false, false);
+ add_local_decl (cfun, frame);
+
+ cfun->language = ggc_alloc_cleared_language_function ();
+
+ location_t loc = EXPR_LOCATION (orig_body);
+ tree list = alloc_stmt_list ();
+ DECL_SAVED_TREE (fndecl) = list;
+ tree fptr = build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (frame)), frame);
+ tree body = cilk_install_body_pedigree_operations (fptr);
+ gcc_assert (TREE_CODE (body) == STATEMENT_LIST);
+ tree detach_expr = build_call_expr (cilk_detach_fndecl, 1, fptr);
+ append_to_statement_list (detach_expr, &body);
+ cilk_outline (fndecl, &orig_body, (struct wrapper_data *) wd);
+ append_to_statement_list (orig_body, &body);
+ if (flag_exceptions)
+ {
+ tree except_flag = set_cilk_except_flag (frame);
+ tree except_data = set_cilk_except_data (frame);
+ tree catch_list = alloc_stmt_list ();
+ append_to_statement_list (except_flag, &catch_list);
+ append_to_statement_list (except_data, &catch_list);
+ body = create_try_catch_expr (body, catch_list);
+ }
+ append_to_statement_list (build_stmt (loc, TRY_FINALLY_EXPR, body, dtor),
+ &list);
+}
+
diff --git a/gcc/cp/cp-gimplify.c b/gcc/cp/cp-gimplify.c
index b1270a187f1..5fa564c57d1 100644
--- a/gcc/cp/cp-gimplify.c
+++ b/gcc/cp/cp-gimplify.c
@@ -41,6 +41,7 @@ along with GCC; see the file COPYING3. If not see
#include "splay-tree.h"
#include "target.h"
#include "c-family/c-ubsan.h"
+#include "cilk.h"
/* Forward declarations. */
@@ -584,12 +585,21 @@ cp_gimplify_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p)
LHS of an assignment might also be involved in the RHS, as in bug
25979. */
case INIT_EXPR:
+ if (fn_contains_cilk_spawn_p (cfun)
+ && cilk_detect_spawn_and_unwrap (expr_p)
+ && !seen_error ())
+ return (enum gimplify_status) gimplify_cilk_spawn (expr_p);
cp_gimplify_init_expr (expr_p);
if (TREE_CODE (*expr_p) != INIT_EXPR)
return GS_OK;
/* Otherwise fall through. */
case MODIFY_EXPR:
{
+ if (fn_contains_cilk_spawn_p (cfun)
+ && cilk_detect_spawn_and_unwrap (expr_p)
+ && !seen_error ())
+ return (enum gimplify_status) gimplify_cilk_spawn (expr_p);
+
/* If the back end isn't clever enough to know that the lhs and rhs
types are the same, add an explicit conversion. */
tree op0 = TREE_OPERAND (*expr_p, 0);
@@ -698,6 +708,21 @@ cp_gimplify_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p)
}
break;
+ case CILK_SPAWN_STMT:
+ gcc_assert
+ (fn_contains_cilk_spawn_p (cfun)
+ && cilk_detect_spawn_and_unwrap (expr_p));
+
+ /* If errors are seen, then just process it as a CALL_EXPR. */
+ if (!seen_error ())
+ return (enum gimplify_status) gimplify_cilk_spawn (expr_p);
+
+ case CALL_EXPR:
+ if (fn_contains_cilk_spawn_p (cfun)
+ && cilk_detect_spawn_and_unwrap (expr_p)
+ && !seen_error ())
+ return (enum gimplify_status) gimplify_cilk_spawn (expr_p);
+
default:
ret = (enum gimplify_status) c_gimplify_expr (expr_p, pre_p, post_p);
break;
diff --git a/gcc/cp/cp-objcp-common.h b/gcc/cp/cp-objcp-common.h
index ee22423ebd4..0a8fdeea2fc 100644
--- a/gcc/cp/cp-objcp-common.h
+++ b/gcc/cp/cp-objcp-common.h
@@ -153,5 +153,4 @@ extern void cp_common_init_ts (void);
#undef LANG_HOOKS_EH_PROTECT_CLEANUP_ACTIONS
#define LANG_HOOKS_EH_PROTECT_CLEANUP_ACTIONS cp_protect_cleanup_actions
-
#endif /* GCC_CP_OBJCP_COMMON */
diff --git a/gcc/cp/cp-tree.h b/gcc/cp/cp-tree.h
index 1fc4b59d846..06868250a95 100644
--- a/gcc/cp/cp-tree.h
+++ b/gcc/cp/cp-tree.h
@@ -5356,6 +5356,7 @@ extern tree begin_eh_spec_block (void);
extern void finish_eh_spec_block (tree, tree);
extern tree build_eh_type_type (tree);
extern tree cp_protect_cleanup_actions (void);
+extern tree create_try_catch_expr (tree, tree);
/* in expr.c */
extern tree cplus_expand_constant (tree);
@@ -6185,6 +6186,9 @@ extern bool cpp_validate_cilk_plus_loop (tree);
extern tree expand_array_notation_exprs (tree);
bool cilkplus_an_triplet_types_ok_p (location_t, tree, tree, tree,
tree);
+/* In c-family/cilk.c */
+extern bool cilk_valid_spawn (tree);
+
/* -- end of C++ */
#endif /* ! GCC_CP_TREE_H */
diff --git a/gcc/cp/decl.c b/gcc/cp/decl.c
index 1092a4354e4..b86433ceb32 100644
--- a/gcc/cp/decl.c
+++ b/gcc/cp/decl.c
@@ -59,6 +59,7 @@ along with GCC; see the file COPYING3. If not see
#include "splay-tree.h"
#include "plugin.h"
#include "cgraph.h"
+#include "cilk.h"
#include "wide-int.h"
/* Possible cases of bad specifiers type used by bad_specifiers. */
@@ -1300,8 +1301,9 @@ duplicate_decls (tree newdecl, tree olddecl, bool newdecl_is_friend)
{
if (warning (OPT_Wattributes, "function %q+D redeclared as inline",
newdecl))
- inform (input_location, "previous declaration of %q+D "
- "with attribute noinline", olddecl);
+ inform (DECL_SOURCE_LOCATION (olddecl),
+ "previous declaration of %qD with attribute noinline",
+ olddecl);
}
else if (DECL_DECLARED_INLINE_P (olddecl)
&& DECL_UNINLINABLE (newdecl)
@@ -1309,7 +1311,8 @@ duplicate_decls (tree newdecl, tree olddecl, bool newdecl_is_friend)
{
if (warning (OPT_Wattributes, "function %q+D redeclared with "
"attribute noinline", newdecl))
- inform (input_location, "previous declaration of %q+D was inline",
+ inform (DECL_SOURCE_LOCATION (olddecl),
+ "previous declaration of %qD was inline",
olddecl);
}
}
@@ -1344,11 +1347,8 @@ duplicate_decls (tree newdecl, tree olddecl, bool newdecl_is_friend)
warning (0, "library function %q#D redeclared as non-function %q#D",
olddecl, newdecl);
else
- {
- error ("declaration of %q#D", newdecl);
- error ("conflicts with built-in declaration %q#D",
- olddecl);
- }
+ error ("declaration of %q#D conflicts with built-in "
+ "declaration %q#D", newdecl, olddecl);
return NULL_TREE;
}
else if (DECL_OMP_DECLARE_REDUCTION_P (olddecl))
@@ -1356,8 +1356,8 @@ duplicate_decls (tree newdecl, tree olddecl, bool newdecl_is_friend)
gcc_assert (DECL_OMP_DECLARE_REDUCTION_P (newdecl));
error_at (DECL_SOURCE_LOCATION (newdecl),
"redeclaration of %<pragma omp declare reduction%>");
- error_at (DECL_SOURCE_LOCATION (olddecl),
- "previous %<pragma omp declare reduction%> declaration");
+ inform (DECL_SOURCE_LOCATION (olddecl),
+ "previous %<pragma omp declare reduction%> declaration");
return error_mark_node;
}
else if (!types_match)
@@ -1408,11 +1408,8 @@ duplicate_decls (tree newdecl, tree olddecl, bool newdecl_is_friend)
/* A near match; override the builtin. */
if (TREE_PUBLIC (newdecl))
- {
- warning (0, "new declaration %q#D", newdecl);
- warning (0, "ambiguates built-in declaration %q#D",
- olddecl);
- }
+ warning (0, "new declaration %q#D ambiguates built-in "
+ "declaration %q#D", newdecl, olddecl);
else
warning (OPT_Wshadow,
DECL_BUILT_IN (olddecl)
@@ -1505,7 +1502,8 @@ duplicate_decls (tree newdecl, tree olddecl, bool newdecl_is_friend)
error ("%q#D redeclared as different kind of symbol", newdecl);
if (TREE_CODE (olddecl) == TREE_LIST)
olddecl = TREE_VALUE (olddecl);
- inform (input_location, "previous declaration of %q+#D", olddecl);
+ inform (DECL_SOURCE_LOCATION (olddecl),
+ "previous declaration %q#D", olddecl);
return error_mark_node;
}
@@ -1524,8 +1522,9 @@ duplicate_decls (tree newdecl, tree olddecl, bool newdecl_is_friend)
if (TREE_CODE (DECL_TEMPLATE_RESULT (olddecl)) == TYPE_DECL
|| TREE_CODE (DECL_TEMPLATE_RESULT (newdecl)) == TYPE_DECL)
{
- error ("declaration of template %q#D", newdecl);
- error ("conflicts with previous declaration %q+#D", olddecl);
+ error ("conflicting declaration of template %q#D", newdecl);
+ inform (DECL_SOURCE_LOCATION (olddecl),
+ "previous declaration %q#D", olddecl);
return error_mark_node;
}
else if (TREE_CODE (DECL_TEMPLATE_RESULT (olddecl)) == FUNCTION_DECL
@@ -1539,8 +1538,9 @@ duplicate_decls (tree newdecl, tree olddecl, bool newdecl_is_friend)
&& same_type_p (TREE_TYPE (TREE_TYPE (newdecl)),
TREE_TYPE (TREE_TYPE (olddecl))))
{
- error ("new declaration %q#D", newdecl);
- error ("ambiguates old declaration %q+#D", olddecl);
+ error ("ambiguating new declaration %q#D", newdecl);
+ inform (DECL_SOURCE_LOCATION (olddecl),
+ "old declaration %q#D", olddecl);
}
return NULL_TREE;
}
@@ -1548,9 +1548,10 @@ duplicate_decls (tree newdecl, tree olddecl, bool newdecl_is_friend)
{
if (DECL_EXTERN_C_P (newdecl) && DECL_EXTERN_C_P (olddecl))
{
- error ("declaration of C function %q#D conflicts with",
+ error ("conflicting declaration of C function %q#D",
newdecl);
- error ("previous declaration %q+#D here", olddecl);
+ inform (DECL_SOURCE_LOCATION (olddecl),
+ "previous declaration %q#D", olddecl);
return NULL_TREE;
}
/* For function versions, params and types match, but they
@@ -1560,8 +1561,9 @@ duplicate_decls (tree newdecl, tree olddecl, bool newdecl_is_friend)
&& compparms (TYPE_ARG_TYPES (TREE_TYPE (newdecl)),
TYPE_ARG_TYPES (TREE_TYPE (olddecl))))
{
- error ("new declaration %q#D", newdecl);
- error ("ambiguates old declaration %q+#D", olddecl);
+ error ("ambiguating new declaration of %q#D", newdecl);
+ inform (DECL_SOURCE_LOCATION (olddecl),
+ "old declaration %q#D", olddecl);
return error_mark_node;
}
else
@@ -1570,8 +1572,8 @@ duplicate_decls (tree newdecl, tree olddecl, bool newdecl_is_friend)
else
{
error ("conflicting declaration %q#D", newdecl);
- inform (input_location,
- "%q+D has a previous declaration as %q#D", olddecl, olddecl);
+ inform (DECL_SOURCE_LOCATION (olddecl),
+ "previous declaration as %q#D", olddecl);
return error_mark_node;
}
}
@@ -1623,8 +1625,9 @@ duplicate_decls (tree newdecl, tree olddecl, bool newdecl_is_friend)
A namespace-name defined at global scope shall not be
declared as the name of any other entity in any global scope
of the program. */
- error ("declaration of namespace %qD conflicts with", newdecl);
- error ("previous declaration of namespace %q+D here", olddecl);
+ error ("conflicting declaration of namespace %qD", newdecl);
+ inform (DECL_SOURCE_LOCATION (olddecl),
+ "previous declaration of namespace %qD here", olddecl);
return error_mark_node;
}
else
@@ -1646,9 +1649,10 @@ duplicate_decls (tree newdecl, tree olddecl, bool newdecl_is_friend)
&& prototype_p (TREE_TYPE (newdecl)))
{
/* Prototype decl follows defn w/o prototype. */
- warning_at (input_location, 0, "prototype for %q+#D", newdecl);
- warning_at (DECL_SOURCE_LOCATION (olddecl), 0,
- "follows non-prototype definition here");
+ warning_at (DECL_SOURCE_LOCATION (newdecl), 0,
+ "prototype specified for %q#D", newdecl);
+ inform (DECL_SOURCE_LOCATION (olddecl),
+ "previous non-prototype definition here");
}
else if (VAR_OR_FUNCTION_DECL_P (olddecl)
&& DECL_LANGUAGE (newdecl) != DECL_LANGUAGE (olddecl))
@@ -1687,10 +1691,11 @@ duplicate_decls (tree newdecl, tree olddecl, bool newdecl_is_friend)
}
else
{
- error ("previous declaration of %q+#D with %qL linkage",
- olddecl, DECL_LANGUAGE (olddecl));
- error ("conflicts with new declaration with %qL linkage",
- DECL_LANGUAGE (newdecl));
+ error ("conflicting declaration of %q#D with %qL linkage",
+ newdecl, DECL_LANGUAGE (newdecl));
+ inform (DECL_SOURCE_LOCATION (olddecl),
+ "previous declaration with %qL linkage",
+ DECL_LANGUAGE (olddecl));
}
}
@@ -1730,19 +1735,20 @@ duplicate_decls (tree newdecl, tree olddecl, bool newdecl_is_friend)
if (1 == simple_cst_equal (TREE_PURPOSE (t1),
TREE_PURPOSE (t2)))
{
- permerror (input_location,
- "default argument given for parameter %d "
- "of %q#D", i, newdecl);
- permerror (input_location,
- "after previous specification in %q+#D",
- olddecl);
+ if (permerror (input_location,
+ "default argument given for parameter "
+ "%d of %q#D", i, newdecl))
+ permerror (DECL_SOURCE_LOCATION (olddecl),
+ "previous specification in %q#D here",
+ olddecl);
}
else
{
error ("default argument given for parameter %d "
"of %q#D", i, newdecl);
- error ("after previous specification in %q+#D",
- olddecl);
+ inform (DECL_SOURCE_LOCATION (olddecl),
+ "previous specification in %q#D here",
+ olddecl);
}
}
}
@@ -1806,7 +1812,8 @@ duplicate_decls (tree newdecl, tree olddecl, bool newdecl_is_friend)
if (warning (OPT_Wredundant_decls,
"redundant redeclaration of %qD in same scope",
newdecl))
- inform (input_location, "previous declaration of %q+D", olddecl);
+ inform (DECL_SOURCE_LOCATION (olddecl),
+ "previous declaration of %qD", olddecl);
}
if (!(DECL_TEMPLATE_INSTANTIATION (olddecl)
@@ -1815,7 +1822,8 @@ duplicate_decls (tree newdecl, tree olddecl, bool newdecl_is_friend)
if (DECL_DELETED_FN (newdecl))
{
error ("deleted definition of %qD", newdecl);
- error ("after previous declaration %q+D", olddecl);
+ inform (DECL_SOURCE_LOCATION (olddecl),
+ "previous declaration of %qD", olddecl);
}
DECL_DELETED_FN (newdecl) |= DECL_DELETED_FN (olddecl);
}
@@ -13882,6 +13890,9 @@ finish_function (int flags)
/* If we're saving up tree structure, tie off the function now. */
DECL_SAVED_TREE (fndecl) = pop_stmt_list (DECL_SAVED_TREE (fndecl));
+ if (fn_contains_cilk_spawn_p (cfun) && !processing_template_decl)
+ cfun->cilk_frame_decl = insert_cilk_frame (fndecl);
+
finish_fname_decls ();
/* If this function can't throw any exceptions, remember that. */
diff --git a/gcc/cp/decl2.c b/gcc/cp/decl2.c
index 0e37a5d246c..e5b98b13a36 100644
--- a/gcc/cp/decl2.c
+++ b/gcc/cp/decl2.c
@@ -1749,7 +1749,7 @@ maybe_make_one_only (tree decl)
if (VAR_P (decl))
{
- struct varpool_node *node = varpool_node_for_decl (decl);
+ varpool_node *node = varpool_node_for_decl (decl);
DECL_COMDAT (decl) = 1;
/* Mark it needed so we don't forget to emit it. */
node->forced_by_abi = true;
@@ -1871,7 +1871,7 @@ mark_needed (tree decl)
}
else if (TREE_CODE (decl) == VAR_DECL)
{
- struct varpool_node *node = varpool_node_for_decl (decl);
+ varpool_node *node = varpool_node_for_decl (decl);
/* C++ frontend use mark_decl_references to force COMDAT variables
to be output that might appear dead otherwise. */
node->forced_by_abi = true;
@@ -1920,7 +1920,7 @@ maybe_emit_vtables (tree ctype)
tree vtbl;
tree primary_vtbl;
int needed = 0;
- struct varpool_node *current = NULL, *last = NULL;
+ varpool_node *current = NULL, *last = NULL;
/* If the vtables for this class have already been emitted there is
nothing more to do. */
@@ -3466,7 +3466,7 @@ one_static_initialization_or_destruction (tree decl, tree init, bool initp)
finish_expr_stmt (init);
if (flag_sanitize & SANITIZE_ADDRESS)
{
- struct varpool_node *vnode = varpool_get_node (decl);
+ varpool_node *vnode = varpool_get_node (decl);
if (vnode)
vnode->dynamically_initialized = 1;
}
diff --git a/gcc/cp/except.c b/gcc/cp/except.c
index d7d009bdd69..be487cd8ae8 100644
--- a/gcc/cp/except.c
+++ b/gcc/cp/except.c
@@ -1342,4 +1342,22 @@ build_noexcept_spec (tree expr, int complain)
}
}
+/* Returns a TRY_CATCH_EXPR that will put TRY_LIST and CATCH_LIST in the
+ TRY and CATCH locations. CATCH_LIST must be a STATEMENT_LIST */
+
+tree
+create_try_catch_expr (tree try_expr, tree catch_list)
+{
+ location_t loc = EXPR_LOCATION (try_expr);
+
+ append_to_statement_list (do_begin_catch (), &catch_list);
+ append_to_statement_list (build_throw (NULL_TREE), &catch_list);
+ tree catch_tf_expr = build_stmt (loc, TRY_FINALLY_EXPR, catch_list,
+ do_end_catch (NULL_TREE));
+ catch_list = build2 (CATCH_EXPR, void_type_node, NULL_TREE,
+ catch_tf_expr);
+ tree try_catch_expr = build_stmt (loc, TRY_CATCH_EXPR, try_expr, catch_list);
+ return try_catch_expr;
+}
+
#include "gt-cp-except.h"
diff --git a/gcc/cp/parser.c b/gcc/cp/parser.c
index bd4ead74dc8..dd027342178 100644
--- a/gcc/cp/parser.c
+++ b/gcc/cp/parser.c
@@ -5627,6 +5627,7 @@ cp_parser_postfix_expression (cp_parser *parser, bool address_p, bool cast_p,
cp_id_kind idk = CP_ID_KIND_NONE;
tree postfix_expression = NULL_TREE;
bool is_member_access = false;
+ int saved_in_statement = -1;
/* Peek at the next token. */
token = cp_lexer_peek_token (parser->lexer);
@@ -5771,6 +5772,66 @@ cp_parser_postfix_expression (cp_parser *parser, bool address_p, bool cast_p,
}
break;
+ case RID_CILK_SPAWN:
+ {
+ cp_lexer_consume_token (parser->lexer);
+ token = cp_lexer_peek_token (parser->lexer);
+ if (token->type == CPP_SEMICOLON)
+ {
+ error_at (token->location, "%<_Cilk_spawn%> must be followed by "
+ "an expression");
+ postfix_expression = error_mark_node;
+ break;
+ }
+ else if (!current_function_decl)
+ {
+ error_at (token->location, "%<_Cilk_spawn%> may only be used "
+ "inside a function");
+ postfix_expression = error_mark_node;
+ break;
+ }
+ else
+ {
+ /* Consecutive _Cilk_spawns are not allowed in a statement. */
+ saved_in_statement = parser->in_statement;
+ parser->in_statement |= IN_CILK_SPAWN;
+ }
+ cfun->calls_cilk_spawn = 1;
+ postfix_expression =
+ cp_parser_postfix_expression (parser, false, false,
+ false, false, &idk);
+ if (saved_in_statement & IN_CILK_SPAWN)
+ {
+ error_at (token->location, "consecutive %<_Cilk_spawn%> keywords "
+ "are not permitted");
+ postfix_expression = error_mark_node;
+ cfun->calls_cilk_spawn = 0;
+ }
+ else
+ {
+ postfix_expression = build_cilk_spawn (token->location,
+ postfix_expression);
+ if (postfix_expression != error_mark_node)
+ SET_EXPR_LOCATION (postfix_expression, input_location);
+ parser->in_statement = parser->in_statement & ~IN_CILK_SPAWN;
+ }
+ break;
+ }
+
+ case RID_CILK_SYNC:
+ if (flag_enable_cilkplus)
+ {
+ tree sync_expr = build_cilk_sync ();
+ SET_EXPR_LOCATION (sync_expr,
+ cp_lexer_peek_token (parser->lexer)->location);
+ finish_expr_stmt (sync_expr);
+ }
+ else
+ error_at (input_location, "_Cilk_sync cannot be used without enabling "
+ "Cilk Plus");
+ cp_lexer_consume_token (parser->lexer);
+ break;
+
case RID_BUILTIN_SHUFFLE:
{
vec<tree, va_gc> *vec;
@@ -24513,7 +24574,7 @@ cp_parser_cache_defarg (cp_parser *parser, bool nsdmi)
case CPP_CLOSE_SQUARE:
if (depth == 0
/* Handle correctly int n = sizeof ... ( p ); */
- && !(nsdmi && token->type == CPP_ELLIPSIS))
+ && token->type != CPP_ELLIPSIS)
done = true;
/* Update DEPTH, if necessary. */
else if (token->type == CPP_CLOSE_PAREN
diff --git a/gcc/cp/parser.h b/gcc/cp/parser.h
index edd4e6e8b98..e26e350cd99 100644
--- a/gcc/cp/parser.h
+++ b/gcc/cp/parser.h
@@ -301,6 +301,7 @@ typedef struct GTY(()) cp_parser {
#define IN_OMP_FOR 8
#define IN_IF_STMT 16
#define IN_CILK_SIMD_FOR 32
+#define IN_CILK_SPAWN 64
unsigned char in_statement;
/* TRUE if we are presently parsing the body of a switch statement.
diff --git a/gcc/cp/pt.c b/gcc/cp/pt.c
index b58c7556e2e..61994787d2d 100644
--- a/gcc/cp/pt.c
+++ b/gcc/cp/pt.c
@@ -176,7 +176,7 @@ static tree tsubst_template_arg (tree, tree, tsubst_flags_t, tree);
static tree tsubst_template_args (tree, tree, tsubst_flags_t, tree);
static tree tsubst_template_parms (tree, tree, tsubst_flags_t);
static void regenerate_decl_from_template (tree, tree);
-static tree most_specialized_class (tree, tree, tsubst_flags_t);
+static tree most_specialized_class (tree, tsubst_flags_t);
static tree tsubst_aggr_type (tree, tree, tsubst_flags_t, tree, int);
static tree tsubst_arg_types (tree, tree, tree, tsubst_flags_t, tree);
static tree tsubst_function_type (tree, tree, tsubst_flags_t, tree);
@@ -4305,7 +4305,7 @@ process_partial_specialization (tree decl)
if (COMPLETE_TYPE_P (inst_type)
&& CLASSTYPE_IMPLICIT_INSTANTIATION (inst_type))
{
- tree spec = most_specialized_class (inst_type, maintmpl, tf_none);
+ tree spec = most_specialized_class (inst_type, tf_none);
if (spec && TREE_TYPE (spec) == type)
permerror (input_location,
"partial specialization of %qT after instantiation "
@@ -8716,7 +8716,7 @@ instantiate_class_template_1 (tree type)
/* Determine what specialization of the original template to
instantiate. */
- t = most_specialized_class (type, templ, tf_warning_or_error);
+ t = most_specialized_class (type, tf_warning_or_error);
if (t == error_mark_node)
{
TYPE_BEING_DEFINED (type) = 1;
@@ -13762,6 +13762,13 @@ tsubst_expr (tree t, tree args, tsubst_flags_t complain, tree in_decl,
error ("use %<...%> to expand argument pack");
RETURN (error_mark_node);
+ case CILK_SPAWN_STMT:
+ cfun->calls_cilk_spawn = 1;
+ RETURN (build_cilk_spawn (EXPR_LOCATION (t), RECUR (CILK_SPAWN_FN (t))));
+
+ case CILK_SYNC_STMT:
+ RETURN (build_cilk_sync ());
+
case COMPOUND_EXPR:
tmp = RECUR (TREE_OPERAND (t, 0));
if (tmp == NULL_TREE)
@@ -14159,6 +14166,10 @@ tsubst_copy_and_build (tree t,
RETURN (r);
}
+ case POINTER_PLUS_EXPR:
+ return fold_build_pointer_plus (RECUR (TREE_OPERAND (t, 0)),
+ RECUR (TREE_OPERAND (t, 1)));
+
case SCOPE_REF:
RETURN (tsubst_qualified_id (t, args, complain, in_decl, /*done=*/true,
/*address_p=*/false));
@@ -15411,9 +15422,9 @@ pack_deducible_p (tree parm, tree fn)
it. TARGS is a vector into which the deduced template arguments
are placed.
- Return zero for success, 2 for an incomplete match that doesn't resolve
- all the types, and 1 for complete failure. An error message will be
- printed only for an incomplete match.
+ Returns either a FUNCTION_DECL for the matching specialization of FN or
+ NULL_TREE if no suitable specialization can be found. If EXPLAIN_P is
+ true, diagnostics will be printed to explain why it failed.
If FN is a conversion operator, or we are trying to produce a specific
specialization, RETURN_TYPE is the return type desired.
@@ -16396,7 +16407,7 @@ resolve_overloaded_unification (tree tparms,
if (subargs != error_mark_node
&& !any_dependent_template_arguments_p (subargs))
{
- elem = tsubst (TREE_TYPE (fn), subargs, tf_none, NULL_TREE);
+ elem = TREE_TYPE (instantiate_template (fn, subargs, tf_none));
if (try_one_overload (tparms, targs, tempargs, parm,
elem, strict, sub_strict, addr_p, explain_p)
&& (!goodfn || !same_type_p (goodfn, elem)))
@@ -18238,7 +18249,7 @@ more_specialized_fn (tree pat1, tree pat2, int len)
return -1;
}
-/* Determine which of two partial specializations of MAIN_TMPL is more
+/* Determine which of two partial specializations of TMPL is more
specialized.
PAT1 is a TREE_LIST whose TREE_TYPE is the _TYPE node corresponding
@@ -18254,7 +18265,7 @@ more_specialized_fn (tree pat1, tree pat2, int len)
two templates is more specialized. */
static int
-more_specialized_class (tree main_tmpl, tree pat1, tree pat2)
+more_specialized_class (tree tmpl, tree pat1, tree pat2)
{
tree targs;
tree tmpl1, tmpl2;
@@ -18269,7 +18280,7 @@ more_specialized_class (tree main_tmpl, tree pat1, tree pat2)
types in the arguments, and we need our dependency check functions
to behave correctly. */
++processing_template_decl;
- targs = get_class_bindings (main_tmpl, TREE_VALUE (pat1),
+ targs = get_class_bindings (tmpl, TREE_VALUE (pat1),
CLASSTYPE_TI_ARGS (tmpl1),
CLASSTYPE_TI_ARGS (tmpl2));
if (targs)
@@ -18278,7 +18289,7 @@ more_specialized_class (tree main_tmpl, tree pat1, tree pat2)
any_deductions = true;
}
- targs = get_class_bindings (main_tmpl, TREE_VALUE (pat2),
+ targs = get_class_bindings (tmpl, TREE_VALUE (pat2),
CLASSTYPE_TI_ARGS (tmpl2),
CLASSTYPE_TI_ARGS (tmpl1));
if (targs)
@@ -18359,7 +18370,7 @@ get_bindings (tree fn, tree decl, tree explicit_args, bool check_rettype)
}
/* Return the innermost template arguments that, when applied to a partial
- specialization of MAIN_TMPL whose innermost template parameters are
+ specialization of TMPL whose innermost template parameters are
TPARMS, and whose specialization arguments are SPEC_ARGS, yield the
ARGS.
@@ -18374,7 +18385,7 @@ get_bindings (tree fn, tree decl, tree explicit_args, bool check_rettype)
is bound to `double'. */
static tree
-get_class_bindings (tree main_tmpl, tree tparms, tree spec_args, tree args)
+get_class_bindings (tree tmpl, tree tparms, tree spec_args, tree args)
{
int i, ntparms = TREE_VEC_LENGTH (tparms);
tree deduced_args;
@@ -18414,8 +18425,8 @@ get_class_bindings (tree main_tmpl, tree tparms, tree spec_args, tree args)
`T' is `A' but unify () does not check whether `typename T::X'
is `int'. */
spec_args = tsubst (spec_args, deduced_args, tf_none, NULL_TREE);
- spec_args = coerce_template_parms (DECL_INNERMOST_TEMPLATE_PARMS (main_tmpl),
- spec_args, main_tmpl,
+ spec_args = coerce_template_parms (DECL_INNERMOST_TEMPLATE_PARMS (tmpl),
+ spec_args, tmpl,
tf_none, false, false);
if (spec_args == error_mark_node
/* We only need to check the innermost arguments; the other
@@ -18563,30 +18574,30 @@ most_general_template (tree decl)
}
/* Return the most specialized of the class template partial
- specializations of TMPL which can produce TYPE, a specialization of
- TMPL. The value returned is actually a TREE_LIST; the TREE_TYPE is
+ specializations which can produce TYPE, a specialization of some class
+ template. The value returned is actually a TREE_LIST; the TREE_TYPE is
a _TYPE node corresponding to the partial specialization, while the
TREE_PURPOSE is the set of template arguments that must be
substituted into the TREE_TYPE in order to generate TYPE.
If the choice of partial specialization is ambiguous, a diagnostic
is issued, and the error_mark_node is returned. If there are no
- partial specializations of TMPL matching TYPE, then NULL_TREE is
- returned. */
+ partial specializations matching TYPE, then NULL_TREE is
+ returned, indicating that the primary template should be used. */
static tree
-most_specialized_class (tree type, tree tmpl, tsubst_flags_t complain)
+most_specialized_class (tree type, tsubst_flags_t complain)
{
tree list = NULL_TREE;
tree t;
tree champ;
int fate;
bool ambiguous_p;
- tree args;
tree outer_args = NULL_TREE;
- tmpl = most_general_template (tmpl);
- args = CLASSTYPE_TI_ARGS (type);
+ tree tmpl = CLASSTYPE_TI_TEMPLATE (type);
+ tree main_tmpl = most_general_template (tmpl);
+ tree args = CLASSTYPE_TI_ARGS (type);
/* For determining which partial specialization to use, only the
innermost args are interesting. */
@@ -18596,7 +18607,7 @@ most_specialized_class (tree type, tree tmpl, tsubst_flags_t complain)
args = INNERMOST_TEMPLATE_ARGS (args);
}
- for (t = DECL_TEMPLATE_SPECIALIZATIONS (tmpl); t; t = TREE_CHAIN (t))
+ for (t = DECL_TEMPLATE_SPECIALIZATIONS (main_tmpl); t; t = TREE_CHAIN (t))
{
tree partial_spec_args;
tree spec_args;
@@ -18621,8 +18632,7 @@ most_specialized_class (tree type, tree tmpl, tsubst_flags_t complain)
partial_spec_args =
coerce_template_parms (DECL_INNERMOST_TEMPLATE_PARMS (tmpl),
- add_to_template_args (outer_args,
- partial_spec_args),
+ partial_spec_args,
tmpl, tf_none,
/*require_all_args=*/true,
/*use_default_args=*/true);
diff --git a/gcc/cp/semantics.c b/gcc/cp/semantics.c
index 7c1b18e11f8..63f50fb4705 100644
--- a/gcc/cp/semantics.c
+++ b/gcc/cp/semantics.c
@@ -10429,6 +10429,8 @@ potential_constant_expression_1 (tree t, bool want_rval, tsubst_flags_t flags)
return false;
return true;
+ case CILK_SYNC_STMT:
+ case CILK_SPAWN_STMT:
case ARRAY_NOTATION_REF:
return false;
diff --git a/gcc/cp/typeck.c b/gcc/cp/typeck.c
index 9f9f7b6775b..01afbac7350 100644
--- a/gcc/cp/typeck.c
+++ b/gcc/cp/typeck.c
@@ -1562,7 +1562,7 @@ cxx_sizeof_or_alignof_type (tree type, enum tree_code op, bool complain)
}
return c_sizeof_or_alignof_type (input_location, complete_type (type),
- op == SIZEOF_EXPR,
+ op == SIZEOF_EXPR, false,
complain);
}
@@ -6166,6 +6166,17 @@ cp_build_compound_expr (tree lhs, tree rhs, tsubst_flags_t complain)
if (lhs == error_mark_node || rhs == error_mark_node)
return error_mark_node;
+ if (flag_enable_cilkplus
+ && (TREE_CODE (lhs) == CILK_SPAWN_STMT
+ || TREE_CODE (rhs) == CILK_SPAWN_STMT))
+ {
+ location_t loc = (EXPR_HAS_LOCATION (lhs) ? EXPR_LOCATION (lhs)
+ : EXPR_LOCATION (rhs));
+ error_at (loc,
+ "spawned function call cannot be part of a comma expression");
+ return error_mark_node;
+ }
+
if (TREE_CODE (rhs) == TARGET_EXPR)
{
/* If the rhs is a TARGET_EXPR, then build the compound
@@ -8290,6 +8301,13 @@ check_return_expr (tree retval, bool *no_warning)
*no_warning = false;
+ if (flag_enable_cilkplus && retval && TREE_CODE (retval) == CILK_SPAWN_STMT)
+ {
+ error_at (EXPR_LOCATION (retval), "use of %<_Cilk_spawn%> in a return "
+ "statement is not allowed");
+ return NULL_TREE;
+ }
+
/* A `volatile' function is one that isn't supposed to return, ever.
(This is a G++ extension, used to get better code for functions
that call the `volatile' function.) */
diff --git a/gcc/cp/vtable-class-hierarchy.c b/gcc/cp/vtable-class-hierarchy.c
index b6637248951..4eb78eee4be 100644
--- a/gcc/cp/vtable-class-hierarchy.c
+++ b/gcc/cp/vtable-class-hierarchy.c
@@ -258,6 +258,7 @@ init_functions (void)
DECL_ATTRIBUTES (vlt_register_set_fndecl) =
tree_cons (get_identifier ("leaf"), NULL,
DECL_ATTRIBUTES (vlt_register_set_fndecl));
+ DECL_EXTERNAL(vlt_register_set_fndecl) = 1;
TREE_PUBLIC (vlt_register_set_fndecl) = 1;
DECL_PRESERVE_P (vlt_register_set_fndecl) = 1;
SET_DECL_LANGUAGE (vlt_register_set_fndecl, lang_cplusplus);
@@ -301,6 +302,7 @@ init_functions (void)
DECL_ATTRIBUTES (vlt_register_pairs_fndecl) =
tree_cons (get_identifier ("leaf"), NULL,
DECL_ATTRIBUTES (vlt_register_pairs_fndecl));
+ DECL_EXTERNAL(vlt_register_pairs_fndecl) = 1;
TREE_PUBLIC (vlt_register_pairs_fndecl) = 1;
DECL_PRESERVE_P (vlt_register_pairs_fndecl) = 1;
SET_DECL_LANGUAGE (vlt_register_pairs_fndecl, lang_cplusplus);
diff --git a/gcc/cprop.c b/gcc/cprop.c
index 9b8bd1e0c4b..7d07246cd0d 100644
--- a/gcc/cprop.c
+++ b/gcc/cprop.c
@@ -400,7 +400,7 @@ compute_hash_table_work (struct hash_table_d *table)
/* Allocate vars to track sets of regs. */
reg_set_bitmap = ALLOC_REG_SET (NULL);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn;
@@ -595,8 +595,8 @@ compute_local_properties (sbitmap *kill, sbitmap *comp,
unsigned int i;
/* Initialize the bitmaps that were passed in. */
- bitmap_vector_clear (kill, last_basic_block);
- bitmap_vector_clear (comp, last_basic_block);
+ bitmap_vector_clear (kill, last_basic_block_for_fn (cfun));
+ bitmap_vector_clear (comp, last_basic_block_for_fn (cfun));
for (i = 0; i < table->size; i++)
{
@@ -649,7 +649,7 @@ compute_cprop_data (void)
aren't recorded for the local pass so they cannot be propagated within
their basic block by this pass and 2) the global pass would otherwise
propagate them only in the successors of their basic block. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
int index = implicit_set_indexes[bb->index];
if (index != -1)
@@ -1234,7 +1234,7 @@ local_cprop_pass (void)
unsigned i;
cselib_init (0);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
FOR_BB_INSNS (bb, insn)
{
@@ -1355,11 +1355,11 @@ find_implicit_sets (void)
rtx cond, new_rtx;
unsigned int count = 0;
bool edges_split = false;
- size_t implicit_sets_size = last_basic_block + 10;
+ size_t implicit_sets_size = last_basic_block_for_fn (cfun) + 10;
implicit_sets = XCNEWVEC (rtx, implicit_sets_size);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
/* Check for more than one successor. */
if (EDGE_COUNT (bb->succs) <= 1)
@@ -1667,7 +1667,7 @@ bypass_conditional_jumps (void)
if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
return 0;
- bypass_last_basic_block = last_basic_block;
+ bypass_last_basic_block = last_basic_block_for_fn (cfun);
mark_dfs_back_edges ();
changed = 0;
@@ -1809,8 +1809,8 @@ one_cprop_pass (void)
df_analyze ();
/* Initialize implicit_set_indexes array. */
- implicit_set_indexes = XNEWVEC (int, last_basic_block);
- for (i = 0; i < last_basic_block; i++)
+ implicit_set_indexes = XNEWVEC (int, last_basic_block_for_fn (cfun));
+ for (i = 0; i < last_basic_block_for_fn (cfun); i++)
implicit_set_indexes[i] = -1;
alloc_hash_table (&set_hash_table);
@@ -1827,7 +1827,8 @@ one_cprop_pass (void)
basic_block bb;
rtx insn;
- alloc_cprop_mem (last_basic_block, set_hash_table.n_elems);
+ alloc_cprop_mem (last_basic_block_for_fn (cfun),
+ set_hash_table.n_elems);
compute_cprop_data ();
free (implicit_set_indexes);
diff --git a/gcc/cse.c b/gcc/cse.c
index 15e582cd223..5c8bfd0e663 100644
--- a/gcc/cse.c
+++ b/gcc/cse.c
@@ -6531,7 +6531,7 @@ cse_main (rtx f ATTRIBUTE_UNUSED, int nregs)
{
struct cse_basic_block_data ebb_data;
basic_block bb;
- int *rc_order = XNEWVEC (int, last_basic_block);
+ int *rc_order = XNEWVEC (int, last_basic_block_for_fn (cfun));
int i, n_blocks;
df_set_flags (DF_LR_RUN_DCE);
@@ -6560,7 +6560,7 @@ cse_main (rtx f ATTRIBUTE_UNUSED, int nregs)
reg_eqv_table = XNEWVEC (struct reg_eqv_elem, nregs);
/* Set up the table of already visited basic blocks. */
- cse_visited_basic_blocks = sbitmap_alloc (last_basic_block);
+ cse_visited_basic_blocks = sbitmap_alloc (last_basic_block_for_fn (cfun));
bitmap_clear (cse_visited_basic_blocks);
/* Loop over basic blocks in reverse completion order (RPO),
@@ -6573,7 +6573,7 @@ cse_main (rtx f ATTRIBUTE_UNUSED, int nregs)
processed before. */
do
{
- bb = BASIC_BLOCK (rc_order[i++]);
+ bb = BASIC_BLOCK_FOR_FN (cfun, rc_order[i++]);
}
while (bitmap_bit_p (cse_visited_basic_blocks, bb->index)
&& i < n_blocks);
@@ -7344,7 +7344,7 @@ cse_condition_code_reg (void)
else
cc_reg_2 = NULL_RTX;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx last_insn;
rtx cc_reg;
diff --git a/gcc/cselib.h b/gcc/cselib.h
index e1224405d7f..541db8eaba4 100644
--- a/gcc/cselib.h
+++ b/gcc/cselib.h
@@ -18,7 +18,7 @@ along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
/* Describe a value. */
-typedef struct cselib_val_struct {
+struct cselib_val {
/* The hash value. */
unsigned int hash;
@@ -36,8 +36,8 @@ typedef struct cselib_val_struct {
use it as an address in a MEM. */
struct elt_list *addr_list;
- struct cselib_val_struct *next_containing_mem;
-} cselib_val;
+ struct cselib_val *next_containing_mem;
+};
/* A list of rtl expressions that hold the same value. */
struct elt_loc_list {
diff --git a/gcc/dbxout.c b/gcc/dbxout.c
index 5da1e0d0e8a..d80c5a41a3c 100644
--- a/gcc/dbxout.c
+++ b/gcc/dbxout.c
@@ -2429,7 +2429,7 @@ dbxout_expand_expr (tree expr)
/* If this is a var that might not be actually output,
return NULL, otherwise stabs might reference an undefined
symbol. */
- struct varpool_node *node = varpool_get_node (expr);
+ varpool_node *node = varpool_get_node (expr);
if (!node || !node->definition)
return NULL;
}
diff --git a/gcc/dce.c b/gcc/dce.c
index 5c11cbeef0d..7e8278faaf6 100644
--- a/gcc/dce.c
+++ b/gcc/dce.c
@@ -511,7 +511,7 @@ reset_unmarked_insns_debug_uses (void)
basic_block bb;
rtx insn, next;
- FOR_EACH_BB_REVERSE (bb)
+ FOR_EACH_BB_REVERSE_FN (bb, cfun)
FOR_BB_INSNS_REVERSE_SAFE (bb, insn, next)
if (DEBUG_INSN_P (insn))
{
@@ -550,7 +550,7 @@ delete_unmarked_insns (void)
rtx insn, next;
bool must_clean = false;
- FOR_EACH_BB_REVERSE (bb)
+ FOR_EACH_BB_REVERSE_FN (bb, cfun)
FOR_BB_INSNS_REVERSE_SAFE (bb, insn, next)
if (NONDEBUG_INSN_P (insn))
{
@@ -623,7 +623,7 @@ prescan_insns_for_dce (bool fast)
if (!df_in_progress && ACCUMULATE_OUTGOING_ARGS)
arg_stores = BITMAP_ALLOC (NULL);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
FOR_BB_INSNS_REVERSE_SAFE (bb, insn, prev)
if (NONDEBUG_INSN_P (insn))
@@ -663,7 +663,7 @@ mark_artificial_uses (void)
struct df_link *defs;
df_ref *use_rec;
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
{
for (use_rec = df_get_artificial_uses (bb->index);
*use_rec; use_rec++)
@@ -1065,7 +1065,7 @@ fast_dce (bool word_level)
for (i = 0; i < n_blocks; i++)
{
int index = postorder[i];
- basic_block bb = BASIC_BLOCK (index);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, index);
bool local_changed;
if (index < NUM_FIXED_BLOCKS)
diff --git a/gcc/ddg.h b/gcc/ddg.h
index e16c8c9d773..739ff030d27 100644
--- a/gcc/ddg.h
+++ b/gcc/ddg.h
@@ -33,9 +33,8 @@ typedef struct ddg *ddg_ptr;
typedef struct ddg_scc *ddg_scc_ptr;
typedef struct ddg_all_sccs *ddg_all_sccs_ptr;
-typedef enum {TRUE_DEP, OUTPUT_DEP, ANTI_DEP} dep_type;
-typedef enum {REG_OR_MEM_DEP, REG_DEP, MEM_DEP, REG_AND_MEM_DEP}
- dep_data_type;
+enum dep_type {TRUE_DEP, OUTPUT_DEP, ANTI_DEP};
+enum dep_data_type {REG_OR_MEM_DEP, REG_DEP, MEM_DEP, REG_AND_MEM_DEP};
/* The following two macros enables direct access to the successors and
predecessors bitmaps held in each ddg_node. Do not make changes to
diff --git a/gcc/defaults.h b/gcc/defaults.h
index 92da5a95bda..3d3b2066c9a 100644
--- a/gcc/defaults.h
+++ b/gcc/defaults.h
@@ -471,10 +471,6 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
your target, you should override these values by defining the
appropriate symbols in your tm.h file. */
-#ifndef BITS_PER_UNIT
-#define BITS_PER_UNIT 8
-#endif
-
#if BITS_PER_UNIT == 8
#define LOG2_BITS_PER_UNIT 3
#elif BITS_PER_UNIT == 16
diff --git a/gcc/df-core.c b/gcc/df-core.c
index 37876af7b01..045b54f4b82 100644
--- a/gcc/df-core.c
+++ b/gcc/df-core.c
@@ -520,7 +520,7 @@ df_set_blocks (bitmap blocks)
EXECUTE_IF_SET_IN_BITMAP (&diff, 0, bb_index, bi)
{
- basic_block bb = BASIC_BLOCK (bb_index);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
if (bb)
{
void *bb_info = df_get_bb_info (dflow, bb_index);
@@ -549,7 +549,7 @@ df_set_blocks (bitmap blocks)
{
basic_block bb;
bitmap_initialize (&blocks_to_reset, &df_bitmap_obstack);
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
{
bitmap_set_bit (&blocks_to_reset, bb->index);
}
@@ -721,8 +721,8 @@ rest_of_handle_df_initialize (void)
if (optimize > 1)
df_live_add_problem ();
- df->postorder = XNEWVEC (int, last_basic_block);
- df->postorder_inverted = XNEWVEC (int, last_basic_block);
+ df->postorder = XNEWVEC (int, last_basic_block_for_fn (cfun));
+ df->postorder_inverted = XNEWVEC (int, last_basic_block_for_fn (cfun));
df->n_blocks = post_order_compute (df->postorder, true, true);
df->n_blocks_inverted = inverted_post_order_compute (df->postorder_inverted);
gcc_assert (df->n_blocks == df->n_blocks_inverted);
@@ -933,7 +933,7 @@ df_worklist_propagate_forward (struct dataflow *dataflow,
{
edge e;
edge_iterator ei;
- basic_block bb = BASIC_BLOCK (bb_index);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
bool changed = !age;
/* Calculate <conf_op> of incoming edges. */
@@ -978,7 +978,7 @@ df_worklist_propagate_backward (struct dataflow *dataflow,
{
edge e;
edge_iterator ei;
- basic_block bb = BASIC_BLOCK (bb_index);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
bool changed = !age;
/* Calculate <conf_op> of incoming edges. */
@@ -1067,7 +1067,7 @@ df_worklist_dataflow_doublequeue (struct dataflow *dataflow,
bitmap_clear_bit (pending, index);
bb_index = blocks_in_postorder[index];
- bb = BASIC_BLOCK (bb_index);
+ bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
prev_age = last_visit_age[index];
if (dir == DF_FORWARD)
changed = df_worklist_propagate_forward (dataflow, bb_index,
@@ -1086,7 +1086,7 @@ df_worklist_dataflow_doublequeue (struct dataflow *dataflow,
bitmap_clear (worklist);
}
for (i = 0; i < n_blocks; i++)
- BASIC_BLOCK (blocks_in_postorder[i])->aux = NULL;
+ BASIC_BLOCK_FOR_FN (cfun, blocks_in_postorder[i])->aux = NULL;
BITMAP_FREE (worklist);
BITMAP_FREE (pending);
@@ -1115,7 +1115,7 @@ df_worklist_dataflow (struct dataflow *dataflow,
int n_blocks)
{
bitmap pending = BITMAP_ALLOC (&df_bitmap_obstack);
- sbitmap considered = sbitmap_alloc (last_basic_block);
+ sbitmap considered = sbitmap_alloc (last_basic_block_for_fn (cfun));
bitmap_iterator bi;
unsigned int *bbindex_to_postorder;
int i;
@@ -1125,11 +1125,12 @@ df_worklist_dataflow (struct dataflow *dataflow,
gcc_assert (dir != DF_NONE);
/* BBINDEX_TO_POSTORDER maps the bb->index to the reverse postorder. */
- bbindex_to_postorder = XNEWVEC (unsigned int, last_basic_block);
+ bbindex_to_postorder = XNEWVEC (unsigned int,
+ last_basic_block_for_fn (cfun));
/* Initialize the array to an out-of-bound value. */
- for (i = 0; i < last_basic_block; i++)
- bbindex_to_postorder[i] = last_basic_block;
+ for (i = 0; i < last_basic_block_for_fn (cfun); i++)
+ bbindex_to_postorder[i] = last_basic_block_for_fn (cfun);
/* Initialize the considered map. */
bitmap_clear (considered);
@@ -1236,8 +1237,8 @@ df_analyze (void)
free (df->postorder);
free (df->postorder_inverted);
- df->postorder = XNEWVEC (int, last_basic_block);
- df->postorder_inverted = XNEWVEC (int, last_basic_block);
+ df->postorder = XNEWVEC (int, last_basic_block_for_fn (cfun));
+ df->postorder_inverted = XNEWVEC (int, last_basic_block_for_fn (cfun));
df->n_blocks = post_order_compute (df->postorder, true, true);
df->n_blocks_inverted = inverted_post_order_compute (df->postorder_inverted);
@@ -1481,7 +1482,7 @@ df_set_bb_dirty (basic_block bb)
void
df_grow_bb_info (struct dataflow *dflow)
{
- unsigned int new_size = last_basic_block + 1;
+ unsigned int new_size = last_basic_block_for_fn (cfun) + 1;
if (dflow->block_info_size < new_size)
{
new_size += new_size / 4;
@@ -1542,7 +1543,7 @@ df_compact_blocks (void)
bitmap_set_bit (dflow->out_of_date_transfer_functions, EXIT_BLOCK);
i = NUM_FIXED_BLOCKS;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
if (bitmap_bit_p (&tmp, bb->index))
bitmap_set_bit (dflow->out_of_date_transfer_functions, i);
@@ -1553,7 +1554,8 @@ df_compact_blocks (void)
/* Now shuffle the block info for the problem. */
if (dflow->problem->free_bb_fun)
{
- int size = last_basic_block * dflow->problem->block_info_elt_size;
+ int size = (last_basic_block_for_fn (cfun)
+ * dflow->problem->block_info_elt_size);
problem_temps = XNEWVAR (char, size);
df_grow_bb_info (dflow);
memcpy (problem_temps, dflow->block_info, size);
@@ -1562,7 +1564,7 @@ df_compact_blocks (void)
place in the block_info vector. Null out the copied
item. The entry and exit blocks never move. */
i = NUM_FIXED_BLOCKS;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
df_set_bb_info (dflow, i,
(char *)problem_temps
@@ -1571,7 +1573,7 @@ df_compact_blocks (void)
}
memset ((char *)dflow->block_info
+ i * dflow->problem->block_info_elt_size, 0,
- (last_basic_block - i)
+ (last_basic_block_for_fn (cfun) - i)
* dflow->problem->block_info_elt_size);
free (problem_temps);
}
@@ -1588,7 +1590,7 @@ df_compact_blocks (void)
bitmap_copy (&tmp, df->blocks_to_analyze);
bitmap_clear (df->blocks_to_analyze);
i = NUM_FIXED_BLOCKS;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
if (bitmap_bit_p (&tmp, bb->index))
bitmap_set_bit (df->blocks_to_analyze, i);
@@ -1599,17 +1601,17 @@ df_compact_blocks (void)
bitmap_clear (&tmp);
i = NUM_FIXED_BLOCKS;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
- SET_BASIC_BLOCK (i, bb);
+ SET_BASIC_BLOCK_FOR_FN (cfun, i, bb);
bb->index = i;
i++;
}
gcc_assert (i == n_basic_blocks_for_fn (cfun));
- for (; i < last_basic_block; i++)
- SET_BASIC_BLOCK (i, NULL);
+ for (; i < last_basic_block_for_fn (cfun); i++)
+ SET_BASIC_BLOCK_FOR_FN (cfun, i, NULL);
#ifdef DF_DEBUG_CFG
if (!df_lr->solutions_dirty)
@@ -1631,7 +1633,7 @@ df_bb_replace (int old_index, basic_block new_block)
fprintf (dump_file, "shoving block %d into %d\n", new_block_index, old_index);
gcc_assert (df);
- gcc_assert (BASIC_BLOCK (old_index) == NULL);
+ gcc_assert (BASIC_BLOCK_FOR_FN (cfun, old_index) == NULL);
for (p = 0; p < df->num_problems_defined; p++)
{
@@ -1645,10 +1647,10 @@ df_bb_replace (int old_index, basic_block new_block)
}
df_clear_bb_dirty (new_block);
- SET_BASIC_BLOCK (old_index, new_block);
+ SET_BASIC_BLOCK_FOR_FN (cfun, old_index, new_block);
new_block->index = old_index;
- df_set_bb_dirty (BASIC_BLOCK (old_index));
- SET_BASIC_BLOCK (new_block_index, NULL);
+ df_set_bb_dirty (BASIC_BLOCK_FOR_FN (cfun, old_index));
+ SET_BASIC_BLOCK_FOR_FN (cfun, new_block_index, NULL);
}
@@ -1659,7 +1661,7 @@ df_bb_replace (int old_index, basic_block new_block)
void
df_bb_delete (int bb_index)
{
- basic_block bb = BASIC_BLOCK (bb_index);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
int i;
if (!df)
@@ -1718,7 +1720,7 @@ df_compute_cfg_image (void)
int i;
int * map;
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
{
size += EDGE_COUNT (bb->succs);
}
@@ -1726,7 +1728,7 @@ df_compute_cfg_image (void)
map = XNEWVEC (int, size);
map[0] = size;
i = 1;
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
{
edge_iterator ei;
edge e;
@@ -2019,7 +2021,7 @@ df_dump (FILE *file)
basic_block bb;
df_dump_start (file);
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
{
df_print_bb_index (bb, file);
df_dump_top (bb, file);
@@ -2045,7 +2047,7 @@ df_dump_region (FILE *file)
EXECUTE_IF_SET_IN_BITMAP (df->blocks_to_analyze, 0, bb_index, bi)
{
- basic_block bb = BASIC_BLOCK (bb_index);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
dump_bb (file, bb, 0, TDF_DETAILS);
}
fprintf (file, "\n");
diff --git a/gcc/df-problems.c b/gcc/df-problems.c
index c6349c8b0a5..4b926b6ee74 100644
--- a/gcc/df-problems.c
+++ b/gcc/df-problems.c
@@ -353,7 +353,7 @@ df_rd_bb_local_compute_process_def (struct df_rd_bb_info *bb_info,
static void
df_rd_bb_local_compute (unsigned int bb_index)
{
- basic_block bb = BASIC_BLOCK (bb_index);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
struct df_rd_bb_info *bb_info = df_rd_get_bb_info (bb_index);
rtx insn;
@@ -835,7 +835,7 @@ df_lr_reset (bitmap all_blocks)
static void
df_lr_bb_local_compute (unsigned int bb_index)
{
- basic_block bb = BASIC_BLOCK (bb_index);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
struct df_lr_bb_info *bb_info = df_lr_get_bb_info (bb_index);
rtx insn;
df_ref *def_rec;
@@ -1173,10 +1173,10 @@ df_lr_verify_solution_start (void)
df_lr->solutions_dirty = true;
problem_data = (struct df_lr_problem_data *)df_lr->problem_data;
- problem_data->in = XNEWVEC (bitmap_head, last_basic_block);
- problem_data->out = XNEWVEC (bitmap_head, last_basic_block);
+ problem_data->in = XNEWVEC (bitmap_head, last_basic_block_for_fn (cfun));
+ problem_data->out = XNEWVEC (bitmap_head, last_basic_block_for_fn (cfun));
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
{
bitmap_initialize (&problem_data->in[bb->index], &problem_data->lr_bitmaps);
bitmap_initialize (&problem_data->out[bb->index], &problem_data->lr_bitmaps);
@@ -1205,7 +1205,7 @@ df_lr_verify_solution_end (void)
in df_lr_finalize for details. */
df_lr->solutions_dirty = false;
else
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
{
if ((!bitmap_equal_p (&problem_data->in[bb->index], DF_LR_IN (bb)))
|| (!bitmap_equal_p (&problem_data->out[bb->index], DF_LR_OUT (bb))))
@@ -1217,7 +1217,7 @@ df_lr_verify_solution_end (void)
/* Cannot delete them immediately because you may want to dump them
if the comparison fails. */
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
{
bitmap_clear (&problem_data->in[bb->index]);
bitmap_clear (&problem_data->out[bb->index]);
@@ -1294,7 +1294,7 @@ df_lr_verify_transfer_functions (void)
bitmap_initialize (&saved_use, &bitmap_default_obstack);
bitmap_initialize (&all_blocks, &bitmap_default_obstack);
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
{
struct df_lr_bb_info *bb_info = df_lr_get_bb_info (bb->index);
bitmap_set_bit (&all_blocks, bb->index);
@@ -1462,7 +1462,7 @@ df_live_reset (bitmap all_blocks)
static void
df_live_bb_local_compute (unsigned int bb_index)
{
- basic_block bb = BASIC_BLOCK (bb_index);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
struct df_live_bb_info *bb_info = df_live_get_bb_info (bb_index);
rtx insn;
df_ref *def_rec;
@@ -1710,10 +1710,10 @@ df_live_verify_solution_start (void)
df_live->solutions_dirty = true;
problem_data = (struct df_live_problem_data *)df_live->problem_data;
- problem_data->in = XNEWVEC (bitmap_head, last_basic_block);
- problem_data->out = XNEWVEC (bitmap_head, last_basic_block);
+ problem_data->in = XNEWVEC (bitmap_head, last_basic_block_for_fn (cfun));
+ problem_data->out = XNEWVEC (bitmap_head, last_basic_block_for_fn (cfun));
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
{
bitmap_initialize (&problem_data->in[bb->index], &problem_data->live_bitmaps);
bitmap_initialize (&problem_data->out[bb->index], &problem_data->live_bitmaps);
@@ -1736,7 +1736,7 @@ df_live_verify_solution_end (void)
if (!problem_data->out)
return;
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
{
if ((!bitmap_equal_p (&problem_data->in[bb->index], DF_LIVE_IN (bb)))
|| (!bitmap_equal_p (&problem_data->out[bb->index], DF_LIVE_OUT (bb))))
@@ -1748,7 +1748,7 @@ df_live_verify_solution_end (void)
/* Cannot delete them immediately because you may want to dump them
if the comparison fails. */
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
{
bitmap_clear (&problem_data->in[bb->index]);
bitmap_clear (&problem_data->out[bb->index]);
@@ -1814,7 +1814,7 @@ void
df_live_set_all_dirty (void)
{
basic_block bb;
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
bitmap_set_bit (df_live->out_of_date_transfer_functions,
bb->index);
}
@@ -1840,7 +1840,7 @@ df_live_verify_transfer_functions (void)
df_grow_insn_info ();
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
{
struct df_live_bb_info *bb_info = df_live_get_bb_info (bb->index);
bitmap_set_bit (&all_blocks, bb->index);
@@ -1987,7 +1987,7 @@ df_chain_remove_problem (void)
rtx insn;
df_ref *def_rec;
df_ref *use_rec;
- basic_block bb = BASIC_BLOCK (bb_index);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
if (df_chain_problem_p (DF_DU_CHAIN))
for (def_rec = df_get_artificial_defs (bb->index); *def_rec; def_rec++)
@@ -2105,7 +2105,7 @@ df_chain_create_bb_process_use (bitmap local_rd,
static void
df_chain_create_bb (unsigned int bb_index)
{
- basic_block bb = BASIC_BLOCK (bb_index);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
struct df_rd_bb_info *bb_info = df_rd_get_bb_info (bb_index);
rtx insn;
bitmap_head cpy;
@@ -2427,7 +2427,7 @@ df_word_lr_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
bitmap_obstack_initialize (&problem_data->word_lr_bitmaps);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
bitmap_set_bit (df_word_lr->out_of_date_transfer_functions, bb->index);
bitmap_set_bit (df_word_lr->out_of_date_transfer_functions, ENTRY_BLOCK);
@@ -2531,7 +2531,7 @@ df_word_lr_mark_ref (df_ref ref, bool is_set, regset live)
static void
df_word_lr_bb_local_compute (unsigned int bb_index)
{
- basic_block bb = BASIC_BLOCK (bb_index);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
struct df_word_lr_bb_info *bb_info = df_word_lr_get_bb_info (bb_index);
rtx insn;
df_ref *def_rec;
@@ -3154,7 +3154,7 @@ static void
df_note_bb_compute (unsigned int bb_index,
bitmap live, bitmap do_not_gen, bitmap artificial_uses)
{
- basic_block bb = BASIC_BLOCK (bb_index);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
rtx insn;
df_ref *def_rec;
df_ref *use_rec;
@@ -4271,7 +4271,7 @@ df_md_bb_local_compute_process_def (struct df_md_bb_info *bb_info,
static void
df_md_bb_local_compute (unsigned int bb_index)
{
- basic_block bb = BASIC_BLOCK (bb_index);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
struct df_md_bb_info *bb_info = df_md_get_bb_info (bb_index);
rtx insn;
@@ -4315,8 +4315,8 @@ df_md_local_compute (bitmap all_blocks)
bitmap_clear (&seen_in_insn);
- frontiers = XNEWVEC (bitmap_head, last_basic_block);
- FOR_ALL_BB (bb)
+ frontiers = XNEWVEC (bitmap_head, last_basic_block_for_fn (cfun));
+ FOR_ALL_BB_FN (bb, cfun)
bitmap_initialize (&frontiers[bb->index], &bitmap_default_obstack);
compute_dominance_frontiers (frontiers);
@@ -4327,14 +4327,14 @@ df_md_local_compute (bitmap all_blocks)
bitmap kill = &df_md_get_bb_info (bb_index)->kill;
EXECUTE_IF_SET_IN_BITMAP (&frontiers[bb_index], 0, df_bb_index, bi2)
{
- basic_block bb = BASIC_BLOCK (df_bb_index);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, df_bb_index);
if (bitmap_bit_p (all_blocks, df_bb_index))
bitmap_ior_and_into (&df_md_get_bb_info (df_bb_index)->init, kill,
df_get_live_in (bb));
}
}
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
bitmap_clear (&frontiers[bb->index]);
free (frontiers);
}
@@ -4360,7 +4360,7 @@ df_md_reset (bitmap all_blocks)
static bool
df_md_transfer_function (int bb_index)
{
- basic_block bb = BASIC_BLOCK (bb_index);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
struct df_md_bb_info *bb_info = df_md_get_bb_info (bb_index);
bitmap in = &bb_info->in;
bitmap out = &bb_info->out;
diff --git a/gcc/df-scan.c b/gcc/df-scan.c
index eb7e4d47e0c..a35b12fbebb 100644
--- a/gcc/df-scan.c
+++ b/gcc/df-scan.c
@@ -213,7 +213,7 @@ df_scan_free_internal (void)
}
}
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
{
unsigned int bb_index = bb->index;
struct df_scan_bb_info *bb_info = df_scan_get_bb_info (bb_index);
@@ -355,7 +355,7 @@ df_scan_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
df_grow_insn_info ();
df_grow_bb_info (df_scan);
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
{
unsigned int bb_index = bb->index;
struct df_scan_bb_info *bb_info = df_scan_get_bb_info (bb_index);
@@ -449,7 +449,7 @@ df_scan_start_dump (FILE *file ATTRIBUTE_UNUSED)
fprintf (file, "} ");
}
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
FOR_BB_INSNS (bb, insn)
if (INSN_P (insn))
{
@@ -669,11 +669,11 @@ df_scan_blocks (void)
df_record_entry_block_defs (df->entry_block_defs);
df_get_exit_block_use_set (df->exit_block_uses);
df_record_exit_block_uses (df->exit_block_uses);
- df_set_bb_dirty (BASIC_BLOCK (ENTRY_BLOCK));
- df_set_bb_dirty (BASIC_BLOCK (EXIT_BLOCK));
+ df_set_bb_dirty (BASIC_BLOCK_FOR_FN (cfun, ENTRY_BLOCK));
+ df_set_bb_dirty (BASIC_BLOCK_FOR_FN (cfun, EXIT_BLOCK));
/* Regular blocks */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
unsigned int bb_index = bb->index;
df_bb_refs_record (bb_index, true);
@@ -1415,7 +1415,7 @@ df_insn_rescan_all (void)
bitmap_clear (&df->insns_to_rescan);
bitmap_clear (&df->insns_to_notes_rescan);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn;
FOR_BB_INSNS (bb, insn)
@@ -1637,7 +1637,7 @@ df_reorganize_refs_by_reg_by_insn (struct df_ref_info *ref_info,
EXECUTE_IF_SET_IN_BITMAP (df->blocks_to_analyze, 0, bb_index, bi)
{
- basic_block bb = BASIC_BLOCK (bb_index);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
rtx insn;
df_ref *ref_rec;
@@ -1691,7 +1691,7 @@ df_reorganize_refs_by_reg_by_insn (struct df_ref_info *ref_info,
EXECUTE_IF_SET_IN_BITMAP (df->blocks_to_analyze, 0, bb_index, bi)
{
- basic_block bb = BASIC_BLOCK (bb_index);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
rtx insn;
df_ref *ref_rec;
@@ -1876,7 +1876,9 @@ df_reorganize_refs_by_insn (struct df_ref_info *ref_info,
EXECUTE_IF_SET_IN_BITMAP (df->blocks_to_analyze, 0, index, bi)
{
- offset = df_reorganize_refs_by_insn_bb (BASIC_BLOCK (index), offset, ref_info,
+ offset = df_reorganize_refs_by_insn_bb (BASIC_BLOCK_FOR_FN (cfun,
+ index),
+ offset, ref_info,
include_defs, include_uses,
include_eq_uses);
}
@@ -1885,7 +1887,7 @@ df_reorganize_refs_by_insn (struct df_ref_info *ref_info,
}
else
{
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
offset = df_reorganize_refs_by_insn_bb (bb, offset, ref_info,
include_defs, include_uses,
include_eq_uses);
@@ -3616,7 +3618,7 @@ df_bb_refs_collect (struct df_collection_rec *collection_rec, basic_block bb)
void
df_bb_refs_record (int bb_index, bool scan_insns)
{
- basic_block bb = BASIC_BLOCK (bb_index);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
rtx insn;
int luid = 0;
@@ -3890,7 +3892,9 @@ df_record_entry_block_defs (bitmap entry_block_defs)
df_entry_block_defs_collect (&collection_rec, entry_block_defs);
/* Process bb_refs chain */
- df_refs_add_to_chains (&collection_rec, BASIC_BLOCK (ENTRY_BLOCK), NULL,
+ df_refs_add_to_chains (&collection_rec,
+ BASIC_BLOCK_FOR_FN (cfun, ENTRY_BLOCK),
+ NULL,
copy_defs);
}
@@ -3929,7 +3933,7 @@ df_update_entry_block_defs (void)
{
df_record_entry_block_defs (&refs);
bitmap_copy (df->entry_block_defs, &refs);
- df_set_bb_dirty (BASIC_BLOCK (ENTRY_BLOCK));
+ df_set_bb_dirty (BASIC_BLOCK_FOR_FN (cfun, ENTRY_BLOCK));
}
bitmap_clear (&refs);
}
@@ -4061,7 +4065,9 @@ df_record_exit_block_uses (bitmap exit_block_uses)
df_exit_block_uses_collect (&collection_rec, exit_block_uses);
/* Process bb_refs chain */
- df_refs_add_to_chains (&collection_rec, BASIC_BLOCK (EXIT_BLOCK), NULL,
+ df_refs_add_to_chains (&collection_rec,
+ BASIC_BLOCK_FOR_FN (cfun, EXIT_BLOCK),
+ NULL,
copy_uses);
}
@@ -4100,7 +4106,7 @@ df_update_exit_block_uses (void)
{
df_record_exit_block_uses (&refs);
bitmap_copy (df->exit_block_uses,& refs);
- df_set_bb_dirty (BASIC_BLOCK (EXIT_BLOCK));
+ df_set_bb_dirty (BASIC_BLOCK_FOR_FN (cfun, EXIT_BLOCK));
}
bitmap_clear (&refs);
}
@@ -4148,7 +4154,7 @@ df_update_entry_exit_and_calls (void)
/* The call insns need to be rescanned because there may be changes
in the set of registers clobbered across the call. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn;
FOR_BB_INSNS (bb, insn)
@@ -4563,7 +4569,7 @@ df_scan_verify (void)
clear a mark that has not been set as this means that the ref in
the block or insn was not in the reg chain. */
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
df_bb_verify (bb);
/* (4) See if all reg chains are traversed a second time. This time
diff --git a/gcc/df.h b/gcc/df.h
index e3ca67b6841..579712ca245 100644
--- a/gcc/df.h
+++ b/gcc/df.h
@@ -176,7 +176,7 @@ enum df_ref_order
DF_REF_ORDER_BY_REG_WITH_NOTES,
/* Organize the refs in insn order. The insns are ordered within a
- block, and the blocks are ordered by FOR_ALL_BB. */
+ block, and the blocks are ordered by FOR_ALL_BB_FN. */
DF_REF_ORDER_BY_INSN,
/* For uses, the refs within eq notes may be added for
diff --git a/gcc/diagnostic.h b/gcc/diagnostic.h
index cb38d370cee..49cb8c00b0e 100644
--- a/gcc/diagnostic.h
+++ b/gcc/diagnostic.h
@@ -27,7 +27,7 @@ along with GCC; see the file COPYING3. If not see
/* A diagnostic is described by the MESSAGE to send, the FILE and LINE of
its context and its KIND (ice, error, warning, note, ...) See complete
list in diagnostic.def. */
-typedef struct diagnostic_info
+struct diagnostic_info
{
text_info message;
location_t location;
@@ -38,17 +38,17 @@ typedef struct diagnostic_info
diagnostic_t kind;
/* Which OPT_* directly controls this diagnostic. */
int option_index;
-} diagnostic_info;
+};
/* Each time a diagnostic's classification is changed with a pragma,
we record the change and the location of the change in an array of
these structs. */
-typedef struct diagnostic_classification_change_t
+struct diagnostic_classification_change_t
{
location_t location;
int option;
diagnostic_t kind;
-} diagnostic_classification_change_t;
+};
/* Forward declarations. */
typedef void (*diagnostic_starter_fn) (diagnostic_context *,
diff --git a/gcc/doc/extend.texi b/gcc/doc/extend.texi
index da2c63ef00b..af258d72faf 100644
--- a/gcc/doc/extend.texi
+++ b/gcc/doc/extend.texi
@@ -2919,6 +2919,13 @@ upon exit. Reentrant functions cannot also have the @code{naked}
or @code{critical} attributes. They can have the @code{interrupt}
attribute.
+@item wakeup
+@cindex @code{wakeup} attribute
+This attribute only applies to interrupt functions. It is silently
+ignored if applied to a non-interrupt function. A wakeup interrupt
+function will rouse the processor from any low-power state that it
+might be in when the function exits.
+
@end table
On Epiphany targets one or more optional parameters can be added like this:
diff --git a/gcc/doc/install.texi b/gcc/doc/install.texi
index a8f9f8a7980..71aa7fc9866 100644
--- a/gcc/doc/install.texi
+++ b/gcc/doc/install.texi
@@ -255,6 +255,22 @@ may need to use @option{--disable-stage1-checking}, though
bootstrapping the compiler with such earlier compilers is strongly
discouraged.
+@item C standard library and headers
+
+In order to build GCC, the C standard library and headers must be present
+for all target variants for which target libraries will be built (and not
+only the variant of the host C++ compiler).
+
+This affects the popular @samp{x86_64-unknown-linux-gnu} platform (among
+other multilib targets), for which 64-bit (@samp{x86_64}) and 32-bit
+(@samp{i386}) libc headers are usually packaged separately. If you do a
+build of a native compiler on @samp{x86_64-unknown-linux-gnu}, make sure you
+either have the 32-bit libc developer package properly installed (the exact
+name of the package depends on your distro) or you must build GCC as a
+64-bit only compiler by configuring with the option
+@option{--disable-multilib}. Otherwise, you may encounter an error such as
+@samp{fatal error: gnu/stubs-32.h: No such file}
+
@item GNAT
In order to build the Ada compiler (GNAT) you must already have GNAT
@@ -3735,6 +3751,15 @@ removed and the system libunwind library will always be used.
@html
<hr />
+@end html
+@anchor{aarch64-x-x}
+@heading aarch64*-*-*
+Pre 2.24 binutils does not have support for selecting -mabi and does not
+support ILP32. If GCC 4.9 or later is built with pre 2.24, GCC will not
+support option -mabi=ilp32.
+
+@html
+<hr />
<!-- rs6000-ibm-aix*, powerpc-ibm-aix* -->
@end html
@anchor{x-ibm-aix}
diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
index b30e889764d..b655a6411b1 100644
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -378,7 +378,7 @@ Objective-C and Objective-C++ Dialects}.
-fira-region=@var{region} -fira-hoist-pressure @gol
-fira-loop-pressure -fno-ira-share-save-slots @gol
-fno-ira-share-spill-slots -fira-verbose=@var{n} @gol
--fisolate-erroneous-paths
+-fisolate-erroneous-paths-dereference -fisolate-erroneous-paths-attribute
-fivopts -fkeep-inline-functions -fkeep-static-consts -flive-range-shrinkage @gol
-floop-block -floop-interchange -floop-strip-mine -floop-nest-optimize @gol
-floop-parallelize-all -flto -flto-compression-level @gol
@@ -959,7 +959,7 @@ See RS/6000 and PowerPC Options.
-mindexed-addressing -mgettrcost=@var{number} -mpt-fixed @gol
-maccumulate-outgoing-args -minvalid-symbols @gol
-matomic-model=@var{atomic-model} @gol
--mbranch-cost=@var{num} -mzdcbranch -mno-zdcbranch -mcbranchdi -mcmpeqdi @gol
+-mbranch-cost=@var{num} -mzdcbranch -mno-zdcbranch @gol
-mfused-madd -mno-fused-madd -mfsca -mno-fsca -mfsrra -mno-fsrra @gol
-mpretend-cmove -mtas}
@@ -5363,6 +5363,19 @@ built with this option turned on will issue an error message
when the end of a non-void function is reached without actually
returning a value. This option works in C++ only.
+@item -fsanitize=signed-integer-overflow
+@opindex fsanitize=signed-integer-overflow
+
+This option enables signed integer overflow checking. We check that
+the result of @code{+}, @code{*}, and both unary and binary @code{-}
+does not overflow in the signed arithmetics. Note, integer promotion
+rules must be taken into account. That is, the following is not an
+overflow:
+@smallexample
+signed char a = SCHAR_MAX;
+a++;
+@end smallexample
+
@end table
While @option{-ftrapv} causes traps for signed overflows to be emitted,
@@ -6848,7 +6861,7 @@ also turns on the following optimization flags:
-finline-small-functions @gol
-findirect-inlining @gol
-fipa-sra @gol
--fisolate-erroneous-paths @gol
+-fisolate-erroneous-paths-dereference @gol
-foptimize-sibling-calls @gol
-fpartial-inlining @gol
-fpeephole2 @gol
@@ -7742,10 +7755,17 @@ it may significantly increase code size
(see @option{--param ipcp-unit-growth=@var{value}}).
This flag is enabled by default at @option{-O3}.
-@item -fisolate-erroneous-paths
-Detect paths which trigger erroneous or undefined behaviour. Isolate those
-paths from the main control flow and turn the statement with erroneous or
-undefined behaviour into a trap.
+@item -fisolate-erroneous-paths-dereference
+Detect paths which trigger erroneous or undefined behaviour due to
+dereferencing a NULL pointer. Isolate those paths from the main control
+flow and turn the statement with erroneous or undefined behaviour into a trap.
+
+@item -fisolate-erroneous-paths-attribute
+Detect paths which trigger erroneous or undefined behaviour due to a NULL value
+being used in a way which is forbidden by a @code{returns_nonnull} or @code{nonnull}
+attribute. Isolate those paths from the main control flow and turn the
+statement with erroneous or undefined behaviour into a trap. This is not
+currently enabled, but may be enabled by @code{-O2} in the future.
@item -ftree-sink
@opindex ftree-sink
@@ -12136,9 +12156,10 @@ assembly code. Permissible names are: @samp{arm2}, @samp{arm250},
@samp{arm10e}, @samp{arm1020e}, @samp{arm1022e},
@samp{arm1136j-s}, @samp{arm1136jf-s}, @samp{mpcore}, @samp{mpcorenovfp},
@samp{arm1156t2-s}, @samp{arm1156t2f-s}, @samp{arm1176jz-s}, @samp{arm1176jzf-s},
-@samp{cortex-a5}, @samp{cortex-a7}, @samp{cortex-a8}, @samp{cortex-a9},
-@samp{cortex-a15}, @samp{cortex-a53}, @samp{cortex-r4}, @samp{cortex-r4f},
-@samp{cortex-r5}, @samp{cortex-r7}, @samp{cortex-m4}, @samp{cortex-m3},
+@samp{cortex-a5}, @samp{cortex-a7}, @samp{cortex-a8}, @samp{cortex-a9},
+@samp{cortex-a12}, @samp{cortex-a15}, @samp{cortex-a53}, @samp{cortex-r4},
+@samp{cortex-r4f}, @samp{cortex-r5}, @samp{cortex-r7}, @samp{cortex-m4},
+@samp{cortex-m3},
@samp{cortex-m1},
@samp{cortex-m0},
@samp{cortex-m0plus},
@@ -14736,7 +14757,7 @@ then @option{-mtune=pentium4} generates code that is tuned for Pentium 4
but still runs on i686 machines.
The choices for @var{cpu-type} are the same as for @option{-march}.
-In addition, @option{-mtune} supports an extra choice for @var{cpu-type}:
+In addition, @option{-mtune} supports 2 extra choices for @var{cpu-type}:
@table @samp
@item generic
@@ -14757,6 +14778,26 @@ indicates the instruction set the compiler can use, and there is no
generic instruction set applicable to all processors. In contrast,
@option{-mtune} indicates the processor (or, in this case, collection of
processors) for which the code is optimized.
+
+@item intel
+Produce code optimized for the most current Intel processors, which are
+Haswell and Silvermont for this version of GCC. If you know the CPU
+on which your code will run, then you should use the corresponding
+@option{-mtune} or @option{-march} option instead of @option{-mtune=intel}.
+But, if you want your application to perform better on both Haswell and
+Silvermont, then you should use this option.
+
+As new Intel processors are deployed in the marketplace, the behavior of
+this option will change. Therefore, if you upgrade to a newer version of
+GCC, code generation controlled by this option will change to reflect
+the most current Intel processors at the time that version of GCC is
+released.
+
+There is no @option{-march=intel} option because @option{-march} indicates
+the instruction set the compiler can use, and there is no common
+instruction set applicable to all processors. In contrast,
+@option{-mtune} indicates the processor (or, in this case, collection of
+processors) for which the code is optimized.
@end table
@item -mcpu=@var{cpu-type}
@@ -20212,15 +20253,6 @@ compiler will try to prefer zero displacement branch code sequences. This is
enabled by default when generating code for SH4 and SH4A. It can be explicitly
disabled by specifying @option{-mno-zdcbranch}.
-@item -mcbranchdi
-@opindex mcbranchdi
-Enable the @code{cbranchdi4} instruction pattern.
-
-@item -mcmpeqdi
-@opindex mcmpeqdi
-Emit the @code{cmpeqdi_t} instruction pattern even when @option{-mcbranchdi}
-is in effect.
-
@item -mfused-madd
@itemx -mno-fused-madd
@opindex mfused-madd
@@ -21969,6 +22001,12 @@ instruction, even though that accesses bytes that do not contain
any portion of the bit-field, or memory-mapped registers unrelated to
the one being updated.
+In some cases, such as when the @code{packed} attribute is applied to a
+structure field, it may not be possible to access the field with a single
+read or write that is correctly aligned for the target machine. In this
+case GCC falls back to generating multiple accesses rather than code that
+will fault or truncate the result at run time.
+
The default value of this option is determined by the application binary
interface for the target processor.
diff --git a/gcc/doc/rtl.texi b/gcc/doc/rtl.texi
index a7e98af4a86..995f1086cf5 100644
--- a/gcc/doc/rtl.texi
+++ b/gcc/doc/rtl.texi
@@ -1462,14 +1462,25 @@ Returns the number of units contained in a mode, i.e.,
Returns the narrowest mode in mode class @var{c}.
@end table
-The following 4 variables are defined on every target. They can be
+The following 3 variables are defined on every target. They can be
used to allocate buffers that are guaranteed to be large enough to
-hold any value that can be represented on the target.
+hold any value that can be represented on the target. The first two
+can be overridden by defining them in the target's mode.def file;
+however, the value must be a constant that can be determined very early
+in the compilation process. The third symbol cannot be overridden.
@table @code
+@findex BITS_PER_UNIT
+@item BITS_PER_UNIT
+The number of bits in an addressable storage unit (byte). If you do
+not define this, the default is 8.
+
@findex MAX_BITSIZE_MODE_ANY_INT
@item MAX_BITSIZE_MODE_ANY_INT
-The maximum of MAX_BITSIZE_MODE_INT and MAX_BITSIZE_MODE_PARTIAL_INT.
+The maximum bitsize of any mode that is used in integer math. This
+should be overridden by the target if it uses large integers as
+containers for larger vectors but otherwise never uses the contents to
+compute integer values.
@findex MAX_BITSIZE_MODE_ANY_MODE
@item MAX_BITSIZE_MODE_ANY_MODE
diff --git a/gcc/doc/tm.texi b/gcc/doc/tm.texi
index f3775a95573..4579ad954b4 100644
--- a/gcc/doc/tm.texi
+++ b/gcc/doc/tm.texi
@@ -879,11 +879,6 @@ You need not define this macro if the ordering is the same as for
multi-word integers.
@end defmac
-@defmac BITS_PER_UNIT
-Define this macro to be the number of bits in an addressable storage
-unit (byte). If you do not define this macro the default is 8.
-@end defmac
-
@defmac BITS_PER_WORD
Number of bits in a word. If you do not define this macro, the default
is @code{BITS_PER_UNIT * UNITS_PER_WORD}.
@@ -4357,7 +4352,7 @@ with machine mode @var{mode}. The default version of this
hook returns true for both @code{ptr_mode} and @code{Pmode}.
@end deftypefn
-@deftypefn {Target Hook} bool TARGET_REF_MAY_ALIAS_ERRNO (struct ao_ref_s *@var{ref})
+@deftypefn {Target Hook} bool TARGET_REF_MAY_ALIAS_ERRNO (struct ao_ref *@var{ref})
Define this to return nonzero if the memory reference @var{ref} may alias with the system C library errno location. The default version of this hook assumes the system C library errno location is either a declaration of type int or accessed by dereferencing a pointer to int.
@end deftypefn
diff --git a/gcc/doc/tm.texi.in b/gcc/doc/tm.texi.in
index 3d6a9d03dc1..26223122b11 100644
--- a/gcc/doc/tm.texi.in
+++ b/gcc/doc/tm.texi.in
@@ -853,11 +853,6 @@ You need not define this macro if the ordering is the same as for
multi-word integers.
@end defmac
-@defmac BITS_PER_UNIT
-Define this macro to be the number of bits in an addressable storage
-unit (byte). If you do not define this macro the default is 8.
-@end defmac
-
@defmac BITS_PER_WORD
Number of bits in a word. If you do not define this macro, the default
is @code{BITS_PER_UNIT * UNITS_PER_WORD}.
diff --git a/gcc/dominance.c b/gcc/dominance.c
index 5ece3f68b94..77f94716cf6 100644
--- a/gcc/dominance.c
+++ b/gcc/dominance.c
@@ -159,7 +159,8 @@ init_dom_info (struct dom_info *di, enum cdi_direction dir)
init_ar (di->set_size, unsigned int, num, 1);
init_ar (di->set_child, TBB, num, 0);
- init_ar (di->dfs_order, TBB, (unsigned int) last_basic_block + 1, 0);
+ init_ar (di->dfs_order, TBB,
+ (unsigned int) last_basic_block_for_fn (cfun) + 1, 0);
init_ar (di->dfs_to_bb, basic_block, num, 0);
di->dfsnum = 1;
@@ -296,7 +297,7 @@ calc_dfs_tree_nonrec (struct dom_info *di, basic_block bb, bool reverse)
if (bb != en_block)
my_i = di->dfs_order[bb->index];
else
- my_i = di->dfs_order[last_basic_block];
+ my_i = di->dfs_order[last_basic_block_for_fn (cfun)];
child_i = di->dfs_order[bn->index] = di->dfsnum++;
di->dfs_to_bb[child_i] = bn;
di->dfs_parent[child_i] = my_i;
@@ -335,7 +336,7 @@ calc_dfs_tree (struct dom_info *di, bool reverse)
/* The first block is the ENTRY_BLOCK (or EXIT_BLOCK if REVERSE). */
basic_block begin = (reverse
? EXIT_BLOCK_PTR_FOR_FN (cfun) : ENTRY_BLOCK_PTR_FOR_FN (cfun));
- di->dfs_order[last_basic_block] = di->dfsnum;
+ di->dfs_order[last_basic_block_for_fn (cfun)] = di->dfsnum;
di->dfs_to_bb[di->dfsnum] = begin;
di->dfsnum++;
@@ -356,7 +357,7 @@ calc_dfs_tree (struct dom_info *di, bool reverse)
basic_block b;
bool saw_unconnected = false;
- FOR_EACH_BB_REVERSE (b)
+ FOR_EACH_BB_REVERSE_FN (b, cfun)
{
if (EDGE_COUNT (b->succs) > 0)
{
@@ -367,14 +368,15 @@ calc_dfs_tree (struct dom_info *di, bool reverse)
bitmap_set_bit (di->fake_exit_edge, b->index);
di->dfs_order[b->index] = di->dfsnum;
di->dfs_to_bb[di->dfsnum] = b;
- di->dfs_parent[di->dfsnum] = di->dfs_order[last_basic_block];
+ di->dfs_parent[di->dfsnum] =
+ di->dfs_order[last_basic_block_for_fn (cfun)];
di->dfsnum++;
calc_dfs_tree_nonrec (di, b, reverse);
}
if (saw_unconnected)
{
- FOR_EACH_BB_REVERSE (b)
+ FOR_EACH_BB_REVERSE_FN (b, cfun)
{
basic_block b2;
if (di->dfs_order[b->index])
@@ -384,7 +386,8 @@ calc_dfs_tree (struct dom_info *di, bool reverse)
bitmap_set_bit (di->fake_exit_edge, b2->index);
di->dfs_order[b2->index] = di->dfsnum;
di->dfs_to_bb[di->dfsnum] = b2;
- di->dfs_parent[di->dfsnum] = di->dfs_order[last_basic_block];
+ di->dfs_parent[di->dfsnum] =
+ di->dfs_order[last_basic_block_for_fn (cfun)];
di->dfsnum++;
calc_dfs_tree_nonrec (di, b2, reverse);
gcc_checking_assert (di->dfs_order[b->index]);
@@ -546,7 +549,7 @@ calc_idoms (struct dom_info *di, bool reverse)
if (b == en_block)
{
do_fake_exit_edge:
- k1 = di->dfs_order[last_basic_block];
+ k1 = di->dfs_order[last_basic_block_for_fn (cfun)];
}
else
k1 = di->dfs_order[b->index];
@@ -621,7 +624,7 @@ compute_dom_fast_query (enum cdi_direction dir)
if (dom_computed[dir_index] == DOM_OK)
return;
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
{
if (!bb->dom[dir_index]->father)
assign_dfs_numbers (bb->dom[dir_index], &num);
@@ -649,7 +652,7 @@ calculate_dominance_info (enum cdi_direction dir)
{
gcc_assert (!n_bbs_in_dom_tree[dir_index]);
- FOR_ALL_BB (b)
+ FOR_ALL_BB_FN (b, cfun)
{
b->dom[dir_index] = et_new_tree (b);
}
@@ -659,7 +662,7 @@ calculate_dominance_info (enum cdi_direction dir)
calc_dfs_tree (&di, reverse);
calc_idoms (&di, reverse);
- FOR_EACH_BB (b)
+ FOR_EACH_BB_FN (b, cfun)
{
TBB d = di.dom[di.dfs_order[b->index]];
@@ -686,7 +689,7 @@ free_dominance_info (enum cdi_direction dir)
if (!dom_info_available_p (dir))
return;
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
{
et_free_tree_force (bb->dom[dir_index]);
bb->dom[dir_index] = NULL;
@@ -884,10 +887,10 @@ nearest_common_dominator_for_set (enum cdi_direction dir, bitmap blocks)
basic_block dom;
first = bitmap_first_set_bit (blocks);
- dom = BASIC_BLOCK (first);
+ dom = BASIC_BLOCK_FOR_FN (cfun, first);
EXECUTE_IF_SET_IN_BITMAP (blocks, 0, i, bi)
- if (dom != BASIC_BLOCK (i))
- dom = nearest_common_dominator (dir, dom, BASIC_BLOCK (i));
+ if (dom != BASIC_BLOCK_FOR_FN (cfun, i))
+ dom = nearest_common_dominator (dir, dom, BASIC_BLOCK_FOR_FN (cfun, i));
return dom;
}
@@ -1022,7 +1025,7 @@ verify_dominators (enum cdi_direction dir)
calc_dfs_tree (&di, reverse);
calc_idoms (&di, reverse);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
imm_bb = get_immediate_dominator (dir, bb);
if (!imm_bb)
@@ -1489,7 +1492,7 @@ DEBUG_FUNCTION void
debug_dominance_info (enum cdi_direction dir)
{
basic_block bb, bb2;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if ((bb2 = get_immediate_dominator (dir, bb)))
fprintf (stderr, "%i %i\n", bb->index, bb2->index);
}
diff --git a/gcc/domwalk.c b/gcc/domwalk.c
index 3350e4bb510..e84c8f711a0 100644
--- a/gcc/domwalk.c
+++ b/gcc/domwalk.c
@@ -159,7 +159,7 @@ dom_walker::walk (basic_block bb)
{
postorder = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
postorder_num = inverted_post_order_compute (postorder);
- bb_postorder = XNEWVEC (int, last_basic_block);
+ bb_postorder = XNEWVEC (int, last_basic_block_for_fn (cfun));
for (int i = 0; i < postorder_num; ++i)
bb_postorder[postorder[i]] = i;
free (postorder);
diff --git a/gcc/dse.c b/gcc/dse.c
index 2d8ce1e4d78..958097d2d1c 100644
--- a/gcc/dse.c
+++ b/gcc/dse.c
@@ -772,7 +772,7 @@ dse_step0 (void)
rtx_group_table.create (11);
- bb_table = XNEWVEC (bb_info_t, last_basic_block);
+ bb_table = XNEWVEC (bb_info_t, last_basic_block_for_fn (cfun));
rtx_group_next_id = 0;
stores_off_frame_dead_at_return = !cfun->stdarg;
@@ -2708,7 +2708,7 @@ dse_step1 (void)
bitmap_set_bit (all_blocks, ENTRY_BLOCK);
bitmap_set_bit (all_blocks, EXIT_BLOCK);
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
{
insn_info_t ptr;
bb_info_t bb_info = (bb_info_t) pool_alloc (bb_info_pool);
@@ -3283,14 +3283,14 @@ static void
dse_step3 (bool for_spills)
{
basic_block bb;
- sbitmap unreachable_blocks = sbitmap_alloc (last_basic_block);
+ sbitmap unreachable_blocks = sbitmap_alloc (last_basic_block_for_fn (cfun));
sbitmap_iterator sbi;
bitmap all_ones = NULL;
unsigned int i;
bitmap_ones (unreachable_blocks);
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
{
bb_info_t bb_info = bb_table[bb->index];
if (bb_info->gen)
@@ -3469,7 +3469,7 @@ dse_step4 (void)
basic_block bb;
fprintf (dump_file, "\n\n*** Global dataflow info after analysis.\n");
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
{
bb_info_t bb_info = bb_table[bb->index];
@@ -3507,7 +3507,7 @@ static void
dse_step5_nospill (void)
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
bb_info_t bb_info = bb_table[bb->index];
insn_info_t insn_info = bb_info->last_insn;
@@ -3617,7 +3617,7 @@ dse_step6 (void)
{
basic_block bb;
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
{
bb_info_t bb_info = bb_table[bb->index];
insn_info_t insn_info = bb_info->last_insn;
diff --git a/gcc/dwarf2cfi.c b/gcc/dwarf2cfi.c
index b8e25bc9964..330836b66e8 100644
--- a/gcc/dwarf2cfi.c
+++ b/gcc/dwarf2cfi.c
@@ -460,9 +460,9 @@ update_row_reg_save (dw_cfi_row *row, unsigned column, dw_cfi_ref cfi)
descriptor sequence. */
static void
-get_cfa_from_loc_descr (dw_cfa_location *cfa, struct dw_loc_descr_struct *loc)
+get_cfa_from_loc_descr (dw_cfa_location *cfa, struct dw_loc_descr_node *loc)
{
- struct dw_loc_descr_struct *ptr;
+ struct dw_loc_descr_node *ptr;
cfa->offset = 0;
cfa->base_offset = 0;
cfa->indirect = 0;
@@ -755,7 +755,7 @@ def_cfa_0 (dw_cfa_location *old_cfa, dw_cfa_location *new_cfa)
/* Construct a DW_CFA_def_cfa_expression instruction to
calculate the CFA using a full location expression since no
register-offset pair is available. */
- struct dw_loc_descr_struct *loc_list;
+ struct dw_loc_descr_node *loc_list;
cfi->dw_cfi_opc = DW_CFA_def_cfa_expression;
loc_list = build_cfa_loc (new_cfa, 0);
diff --git a/gcc/dwarf2out.c b/gcc/dwarf2out.c
index e9588edd447..d968afb0bc0 100644
--- a/gcc/dwarf2out.c
+++ b/gcc/dwarf2out.c
@@ -2354,10 +2354,10 @@ output_loc_sequence_raw (dw_loc_descr_ref loc)
dw_cfa_location, adding the given OFFSET to the result of the
expression. */
-struct dw_loc_descr_struct *
+struct dw_loc_descr_node *
build_cfa_loc (dw_cfa_location *cfa, HOST_WIDE_INT offset)
{
- struct dw_loc_descr_struct *head, *tmp;
+ struct dw_loc_descr_node *head, *tmp;
offset += cfa->offset;
@@ -2384,11 +2384,11 @@ build_cfa_loc (dw_cfa_location *cfa, HOST_WIDE_INT offset)
the address at OFFSET from the CFA when stack is aligned to
ALIGNMENT byte. */
-struct dw_loc_descr_struct *
+struct dw_loc_descr_node *
build_cfa_aligned_loc (dw_cfa_location *cfa,
HOST_WIDE_INT offset, HOST_WIDE_INT alignment)
{
- struct dw_loc_descr_struct *head;
+ struct dw_loc_descr_node *head;
unsigned int dwarf_fp
= DWARF_FRAME_REGNUM (HARD_FRAME_POINTER_REGNUM);
@@ -3231,6 +3231,7 @@ static inline int is_redundant_typedef (const_tree);
static bool is_naming_typedef_decl (const_tree);
static inline dw_die_ref get_context_die (tree);
static void gen_namespace_die (tree, dw_die_ref);
+static dw_die_ref gen_namelist_decl (tree, dw_die_ref, tree);
static dw_die_ref gen_decl_die (tree, tree, dw_die_ref);
static dw_die_ref force_decl_die (tree);
static dw_die_ref force_type_die (tree);
@@ -15321,7 +15322,7 @@ reference_to_unused (tree * tp, int * walk_subtrees,
return *tp;
else if (TREE_CODE (*tp) == VAR_DECL)
{
- struct varpool_node *node = varpool_get_node (*tp);
+ varpool_node *node = varpool_get_node (*tp);
if (!node || !node->definition)
return *tp;
}
@@ -17998,7 +17999,7 @@ premark_types_used_by_global_vars_helper (void **slot,
{
/* Ask cgraph if the global variable really is to be emitted.
If yes, then we'll keep the DIE of ENTRY->TYPE. */
- struct varpool_node *node = varpool_get_node (entry->var_decl);
+ varpool_node *node = varpool_get_node (entry->var_decl);
if (node && node->definition)
{
die->die_perennial_p = 1;
@@ -20600,6 +20601,11 @@ gen_decl_die (tree decl, tree origin, dw_die_ref context_die)
gen_namespace_die (decl, context_die);
break;
+ case NAMELIST_DECL:
+ gen_namelist_decl (DECL_NAME (decl), context_die,
+ NAMELIST_DECL_ASSOCIATED_DECL (decl));
+ break;
+
default:
/* Probably some frontend-internal decl. Assume we don't care. */
gcc_assert ((int)TREE_CODE (decl) > NUM_TREE_CODES);
@@ -20689,7 +20695,12 @@ dwarf2out_imported_module_or_decl_1 (tree decl,
gen_type_die_for_member (type, decl,
get_context_die (TYPE_CONTEXT (type)));
}
- at_import_die = force_decl_die (decl);
+ if (TREE_CODE (decl) == NAMELIST_DECL)
+ at_import_die = gen_namelist_decl (DECL_NAME (decl),
+ get_context_die (DECL_CONTEXT (decl)),
+ NULL_TREE);
+ else
+ at_import_die = force_decl_die (decl);
}
}
@@ -20761,6 +20772,43 @@ dwarf2out_imported_module_or_decl (tree decl, tree name, tree context,
}
+/* Output debug information for namelists. */
+
+static dw_die_ref
+gen_namelist_decl (tree name, dw_die_ref scope_die, tree item_decls)
+{
+ dw_die_ref nml_die, nml_item_die, nml_item_ref_die;
+ tree value;
+ unsigned i;
+
+ if (debug_info_level <= DINFO_LEVEL_TERSE)
+ return NULL;
+
+ gcc_assert (scope_die != NULL);
+ nml_die = new_die (DW_TAG_namelist, scope_die, NULL);
+ add_AT_string (nml_die, DW_AT_name, IDENTIFIER_POINTER (name));
+
+ /* If there are no item_decls, we have a nondefining namelist, e.g.
+ with USE association; hence, set DW_AT_declaration. */
+ if (item_decls == NULL_TREE)
+ {
+ add_AT_flag (nml_die, DW_AT_declaration, 1);
+ return nml_die;
+ }
+
+ FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (item_decls), i, value)
+ {
+ nml_item_ref_die = lookup_decl_die (value);
+ if (!nml_item_ref_die)
+ nml_item_ref_die = force_decl_die (value);
+
+ nml_item_die = new_die (DW_TAG_namelist_item, nml_die, NULL);
+ add_AT_die_ref (nml_item_die, DW_AT_namelist_items, nml_item_ref_die);
+ }
+ return nml_die;
+}
+
+
/* Write the debugging output for DECL. */
void
@@ -20881,6 +20929,9 @@ dwarf2out_decl (tree decl)
break;
+ case NAMELIST_DECL:
+ break;
+
default:
return;
}
diff --git a/gcc/dwarf2out.h b/gcc/dwarf2out.h
index 78d8cc0a80c..53070760577 100644
--- a/gcc/dwarf2out.h
+++ b/gcc/dwarf2out.h
@@ -26,9 +26,9 @@ along with GCC; see the file COPYING3. If not see
typedef struct die_struct *dw_die_ref;
typedef const struct die_struct *const_dw_die_ref;
-typedef struct dw_val_struct *dw_val_ref;
-typedef struct dw_cfi_struct *dw_cfi_ref;
-typedef struct dw_loc_descr_struct *dw_loc_descr_ref;
+typedef struct dw_val_node *dw_val_ref;
+typedef struct dw_cfi_node *dw_cfi_ref;
+typedef struct dw_loc_descr_node *dw_loc_descr_ref;
typedef struct dw_loc_list_struct *dw_loc_list_ref;
typedef wide_int *wide_int_ptr;
@@ -46,27 +46,25 @@ enum dw_cfi_oprnd_type {
dw_cfi_oprnd_loc
};
-typedef union GTY(()) dw_cfi_oprnd_struct {
+typedef union GTY(()) {
unsigned int GTY ((tag ("dw_cfi_oprnd_reg_num"))) dw_cfi_reg_num;
HOST_WIDE_INT GTY ((tag ("dw_cfi_oprnd_offset"))) dw_cfi_offset;
const char * GTY ((tag ("dw_cfi_oprnd_addr"))) dw_cfi_addr;
- struct dw_loc_descr_struct * GTY ((tag ("dw_cfi_oprnd_loc"))) dw_cfi_loc;
-}
-dw_cfi_oprnd;
+ struct dw_loc_descr_node * GTY ((tag ("dw_cfi_oprnd_loc"))) dw_cfi_loc;
+} dw_cfi_oprnd;
-typedef struct GTY(()) dw_cfi_struct {
+struct GTY(()) dw_cfi_node {
enum dwarf_call_frame_info dw_cfi_opc;
dw_cfi_oprnd GTY ((desc ("dw_cfi_oprnd1_desc (%1.dw_cfi_opc)")))
dw_cfi_oprnd1;
dw_cfi_oprnd GTY ((desc ("dw_cfi_oprnd2_desc (%1.dw_cfi_opc)")))
dw_cfi_oprnd2;
-}
-dw_cfi_node;
+};
typedef vec<dw_cfi_ref, va_gc> *cfi_vec;
-typedef struct dw_fde_struct *dw_fde_ref;
+typedef struct dw_fde_node *dw_fde_ref;
/* All call frame descriptions (FDE's) in the GCC generated DWARF
refer to a single Common Information Entry (CIE), defined at
@@ -74,7 +72,7 @@ typedef struct dw_fde_struct *dw_fde_ref;
CIE obviates the need to keep track of multiple CIE's
in the DWARF generation routines below. */
-typedef struct GTY(()) dw_fde_struct {
+struct GTY(()) dw_fde_node {
tree decl;
const char *dw_fde_begin;
const char *dw_fde_current_label;
@@ -107,8 +105,7 @@ typedef struct GTY(()) dw_fde_struct {
/* True iff dw_fde_second_begin label is in text_section or
cold_text_section. */
unsigned second_in_std_section : 1;
-}
-dw_fde_node;
+};
/* This is how we define the location of the CFA. We use to handle it
@@ -116,14 +113,14 @@ dw_fde_node;
It can now be either REG + CFA_OFFSET or *(REG + BASE_OFFSET) + CFA_OFFSET.
Instead of passing around REG and OFFSET, we pass a copy
of this structure. */
-typedef struct GTY(()) cfa_loc {
+struct GTY(()) dw_cfa_location {
HOST_WIDE_INT offset;
HOST_WIDE_INT base_offset;
/* REG is in DWARF_FRAME_REGNUM space, *not* normal REGNO space. */
unsigned int reg;
BOOL_BITFIELD indirect : 1; /* 1 if CFA is accessed via a dereference. */
BOOL_BITFIELD in_use : 1; /* 1 if a saved cfa is stored here. */
-} dw_cfa_location;
+};
/* Each DIE may have a series of attribute/value pairs. Values
@@ -159,19 +156,18 @@ enum dw_val_class
/* Describe a floating point constant value, or a vector constant value. */
-typedef struct GTY(()) dw_vec_struct {
+struct GTY(()) dw_vec_const {
unsigned char * GTY((atomic)) array;
unsigned length;
unsigned elt_size;
-}
-dw_vec_const;
+};
struct addr_table_entry_struct;
/* The dw_val_node describes an attribute's value, as it is
represented internally. */
-typedef struct GTY(()) dw_val_struct {
+struct GTY(()) dw_val_node {
enum dw_val_class val_class;
struct addr_table_entry_struct * GTY(()) val_entry;
union dw_val_struct_union
@@ -204,13 +200,12 @@ typedef struct GTY(()) dw_val_struct {
} GTY ((tag ("dw_val_class_vms_delta"))) val_vms_delta;
}
GTY ((desc ("%1.val_class"))) v;
-}
-dw_val_node;
+};
/* Locations in memory are described using a sequence of stack machine
operations. */
-typedef struct GTY(()) dw_loc_descr_struct {
+struct GTY(()) dw_loc_descr_node {
dw_loc_descr_ref dw_loc_next;
ENUM_BITFIELD (dwarf_location_atom) dw_loc_opc : 8;
/* Used to distinguish DW_OP_addr with a direct symbol relocation
@@ -219,16 +214,15 @@ typedef struct GTY(()) dw_loc_descr_struct {
int dw_loc_addr;
dw_val_node dw_loc_oprnd1;
dw_val_node dw_loc_oprnd2;
-}
-dw_loc_descr_node;
+};
/* Interface from dwarf2out.c to dwarf2cfi.c. */
-extern struct dw_loc_descr_struct *build_cfa_loc
+extern struct dw_loc_descr_node *build_cfa_loc
(dw_cfa_location *, HOST_WIDE_INT);
-extern struct dw_loc_descr_struct *build_cfa_aligned_loc
+extern struct dw_loc_descr_node *build_cfa_aligned_loc
(dw_cfa_location *, HOST_WIDE_INT offset, HOST_WIDE_INT alignment);
-extern struct dw_loc_descr_struct *mem_loc_descriptor
+extern struct dw_loc_descr_node *mem_loc_descriptor
(rtx, enum machine_mode mode, enum machine_mode mem_mode,
enum var_init_status);
extern bool loc_descr_equal_p (dw_loc_descr_ref, dw_loc_descr_ref);
@@ -253,7 +247,7 @@ extern enum dw_cfi_oprnd_type dw_cfi_oprnd1_desc
extern enum dw_cfi_oprnd_type dw_cfi_oprnd2_desc
(enum dwarf_call_frame_info cfi);
-extern void output_cfi_directive (FILE *f, struct dw_cfi_struct *cfi);
+extern void output_cfi_directive (FILE *f, struct dw_cfi_node *cfi);
extern void dwarf2out_decl (tree);
extern void dwarf2out_emit_cfi (dw_cfi_ref cfi);
diff --git a/gcc/except.c b/gcc/except.c
index e4b8cad06af..cf4fd149dfc 100644
--- a/gcc/except.c
+++ b/gcc/except.c
@@ -1511,7 +1511,7 @@ finish_eh_generation (void)
commit_edge_insertions ();
/* Redirect all EH edges from the post_landing_pad to the landing pad. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
eh_landing_pad lp;
edge_iterator ei;
diff --git a/gcc/expmed.c b/gcc/expmed.c
index deb78962938..044ac2bcd07 100644
--- a/gcc/expmed.c
+++ b/gcc/expmed.c
@@ -48,6 +48,9 @@ static void store_fixed_bit_field (rtx, unsigned HOST_WIDE_INT,
unsigned HOST_WIDE_INT,
unsigned HOST_WIDE_INT,
rtx);
+static void store_fixed_bit_field_1 (rtx, unsigned HOST_WIDE_INT,
+ unsigned HOST_WIDE_INT,
+ rtx);
static void store_split_bit_field (rtx, unsigned HOST_WIDE_INT,
unsigned HOST_WIDE_INT,
unsigned HOST_WIDE_INT,
@@ -56,6 +59,9 @@ static void store_split_bit_field (rtx, unsigned HOST_WIDE_INT,
static rtx extract_fixed_bit_field (enum machine_mode, rtx,
unsigned HOST_WIDE_INT,
unsigned HOST_WIDE_INT, rtx, int);
+static rtx extract_fixed_bit_field_1 (enum machine_mode, rtx,
+ unsigned HOST_WIDE_INT,
+ unsigned HOST_WIDE_INT, rtx, int);
static rtx lshift_value (enum machine_mode, unsigned HOST_WIDE_INT, int);
static rtx extract_split_bit_field (rtx, unsigned HOST_WIDE_INT,
unsigned HOST_WIDE_INT, int);
@@ -428,6 +434,53 @@ lowpart_bit_field_p (unsigned HOST_WIDE_INT bitnum,
return bitnum % BITS_PER_WORD == 0;
}
+/* Return true if -fstrict-volatile-bitfields applies to an access of OP0
+ containing BITSIZE bits starting at BITNUM, with field mode FIELDMODE.
+ Return false if the access would touch memory outside the range
+ BITREGION_START to BITREGION_END for conformance to the C++ memory
+ model. */
+
+static bool
+strict_volatile_bitfield_p (rtx op0, unsigned HOST_WIDE_INT bitsize,
+ unsigned HOST_WIDE_INT bitnum,
+ enum machine_mode fieldmode,
+ unsigned HOST_WIDE_INT bitregion_start,
+ unsigned HOST_WIDE_INT bitregion_end)
+{
+ unsigned HOST_WIDE_INT modesize = GET_MODE_BITSIZE (fieldmode);
+
+ /* -fstrict-volatile-bitfields must be enabled and we must have a
+ volatile MEM. */
+ if (!MEM_P (op0)
+ || !MEM_VOLATILE_P (op0)
+ || flag_strict_volatile_bitfields <= 0)
+ return false;
+
+ /* Non-integral modes likely only happen with packed structures.
+ Punt. */
+ if (!SCALAR_INT_MODE_P (fieldmode))
+ return false;
+
+ /* The bit size must not be larger than the field mode, and
+ the field mode must not be larger than a word. */
+ if (bitsize > modesize || modesize > BITS_PER_WORD)
+ return false;
+
+ /* Check for cases of unaligned fields that must be split. */
+ if (bitnum % BITS_PER_UNIT + bitsize > modesize
+ || (STRICT_ALIGNMENT
+ && bitnum % GET_MODE_ALIGNMENT (fieldmode) + bitsize > modesize))
+ return false;
+
+ /* Check for cases where the C++ memory model applies. */
+ if (bitregion_end != 0
+ && (bitnum - bitnum % modesize < bitregion_start
+ || bitnum - bitnum % modesize + modesize > bitregion_end))
+ return false;
+
+ return true;
+}
+
/* Return true if OP is a memory and if a bitfield of size BITSIZE at
bit number BITNUM can be treated as a simple value of mode MODE. */
@@ -841,12 +894,8 @@ store_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
cheap register alternative is available. */
if (MEM_P (op0))
{
- /* Do not use unaligned memory insvs for volatile bitfields when
- -fstrict-volatile-bitfields is in effect. */
- if (!(MEM_VOLATILE_P (op0)
- && flag_strict_volatile_bitfields > 0)
- && get_best_mem_extraction_insn (&insv, EP_insv, bitsize, bitnum,
- fieldmode)
+ if (get_best_mem_extraction_insn (&insv, EP_insv, bitsize, bitnum,
+ fieldmode)
&& store_bit_field_using_insv (&insv, op0, bitsize, bitnum, value))
return true;
@@ -899,6 +948,34 @@ store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
enum machine_mode fieldmode,
rtx value)
{
+ /* Handle -fstrict-volatile-bitfields in the cases where it applies. */
+ if (strict_volatile_bitfield_p (str_rtx, bitsize, bitnum, fieldmode,
+ bitregion_start, bitregion_end))
+ {
+
+ /* Storing any naturally aligned field can be done with a simple
+ store. For targets that support fast unaligned memory, any
+ naturally sized, unit aligned field can be done directly. */
+ if (simple_mem_bitfield_p (str_rtx, bitsize, bitnum, fieldmode))
+ {
+ str_rtx = adjust_bitfield_address (str_rtx, fieldmode,
+ bitnum / BITS_PER_UNIT);
+ emit_move_insn (str_rtx, value);
+ }
+ else
+ {
+ str_rtx = narrow_bit_field_mem (str_rtx, fieldmode, bitsize, bitnum,
+ &bitnum);
+ /* Explicitly override the C/C++ memory model; ignore the
+ bit range so that we can do the access in the mode mandated
+ by -fstrict-volatile-bitfields instead. */
+ store_fixed_bit_field_1 (str_rtx, bitsize, bitnum,
+ value);
+ }
+
+ return;
+ }
+
/* Under the C++0x memory model, we must not touch bits outside the
bit region. Adjust the address to start at the beginning of the
bit region. */
@@ -938,9 +1015,6 @@ store_fixed_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
rtx value)
{
enum machine_mode mode;
- rtx temp;
- int all_zero = 0;
- int all_one = 0;
/* There is a case not handled here:
a structure with a known alignment of just a halfword
@@ -951,29 +1025,12 @@ store_fixed_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
if (MEM_P (op0))
{
- unsigned HOST_WIDE_INT maxbits = MAX_FIXED_MODE_SIZE;
-
- if (bitregion_end)
- maxbits = bitregion_end - bitregion_start + 1;
-
- /* Get the proper mode to use for this field. We want a mode that
- includes the entire field. If such a mode would be larger than
- a word, we won't be doing the extraction the normal way.
- We don't want a mode bigger than the destination. */
-
mode = GET_MODE (op0);
if (GET_MODE_BITSIZE (mode) == 0
|| GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
mode = word_mode;
-
- if (MEM_VOLATILE_P (op0)
- && GET_MODE_BITSIZE (GET_MODE (op0)) > 0
- && GET_MODE_BITSIZE (GET_MODE (op0)) <= maxbits
- && flag_strict_volatile_bitfields > 0)
- mode = GET_MODE (op0);
- else
- mode = get_best_mode (bitsize, bitnum, bitregion_start, bitregion_end,
- MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));
+ mode = get_best_mode (bitsize, bitnum, bitregion_start, bitregion_end,
+ MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));
if (mode == VOIDmode)
{
@@ -987,6 +1044,23 @@ store_fixed_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
op0 = narrow_bit_field_mem (op0, mode, bitsize, bitnum, &bitnum);
}
+ store_fixed_bit_field_1 (op0, bitsize, bitnum, value);
+ return;
+}
+
+/* Helper function for store_fixed_bit_field; it stores
+ the bit field always using the MODE of OP0. */
+
+static void
+store_fixed_bit_field_1 (rtx op0, unsigned HOST_WIDE_INT bitsize,
+ unsigned HOST_WIDE_INT bitnum,
+ rtx value)
+{
+ enum machine_mode mode;
+ rtx temp;
+ int all_zero = 0;
+ int all_one = 0;
+
mode = GET_MODE (op0);
gcc_assert (SCALAR_INT_MODE_P (mode));
@@ -1095,6 +1169,12 @@ store_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
else
unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
+ /* If OP0 is a memory with a mode, then UNIT must not be larger than
+ OP0's mode as well. Otherwise, store_fixed_bit_field will call us
+ again, and we will mutually recurse forever. */
+ if (MEM_P (op0) && GET_MODE_BITSIZE (GET_MODE (op0)) > 0)
+ unit = MIN (unit, GET_MODE_BITSIZE (GET_MODE (op0)));
+
/* If VALUE is a constant other than a CONST_INT, get it into a register in
WORD_MODE. If we can do this using gen_lowpart_common, do so. Note
that VALUE might be a floating-point constant. */
@@ -1457,19 +1537,8 @@ extract_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
If that's wrong, the solution is to test for it and set TARGET to 0
if needed. */
- /* If the bitfield is volatile, we need to make sure the access
- remains on a type-aligned boundary. */
- if (GET_CODE (op0) == MEM
- && MEM_VOLATILE_P (op0)
- && GET_MODE_BITSIZE (GET_MODE (op0)) > 0
- && flag_strict_volatile_bitfields > 0)
- goto no_subreg_mode_swap;
-
- /* Only scalar integer modes can be converted via subregs. There is an
- additional problem for FP modes here in that they can have a precision
- which is different from the size. mode_for_size uses precision, but
- we want a mode based on the size, so we must avoid calling it for FP
- modes. */
+ /* Get the mode of the field to use for atomic access or subreg
+ conversion. */
mode1 = mode;
if (SCALAR_INT_MODE_P (tmode))
{
@@ -1502,8 +1571,6 @@ extract_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
return convert_extracted_bit_field (op0, mode, tmode, unsignedp);
}
- no_subreg_mode_swap:
-
/* Handle fields bigger than a word. */
if (bitsize > BITS_PER_WORD)
@@ -1623,11 +1690,8 @@ extract_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
cheap register alternative is available. */
if (MEM_P (op0))
{
- /* Do not use extv/extzv for volatile bitfields when
- -fstrict-volatile-bitfields is in effect. */
- if (!(MEM_VOLATILE_P (op0) && flag_strict_volatile_bitfields > 0)
- && get_best_mem_extraction_insn (&extv, pattern, bitsize, bitnum,
- tmode))
+ if (get_best_mem_extraction_insn (&extv, pattern, bitsize, bitnum,
+ tmode))
{
rtx result = extract_bit_field_using_extv (&extv, op0, bitsize,
bitnum, unsignedp,
@@ -1693,6 +1757,36 @@ extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
enum machine_mode mode, enum machine_mode tmode)
{
+ enum machine_mode mode1;
+
+ /* Handle -fstrict-volatile-bitfields in the cases where it applies. */
+ if (GET_MODE_BITSIZE (GET_MODE (str_rtx)) > 0)
+ mode1 = GET_MODE (str_rtx);
+ else if (target && GET_MODE_BITSIZE (GET_MODE (target)) > 0)
+ mode1 = GET_MODE (target);
+ else
+ mode1 = tmode;
+
+ if (strict_volatile_bitfield_p (str_rtx, bitsize, bitnum, mode1, 0, 0))
+ {
+ rtx result;
+
+ /* Extraction of a full MODE1 value can be done with a load as long as
+ the field is on a byte boundary and is sufficiently aligned. */
+ if (simple_mem_bitfield_p (str_rtx, bitsize, bitnum, mode1))
+ result = adjust_bitfield_address (str_rtx, mode1,
+ bitnum / BITS_PER_UNIT);
+ else
+ {
+ str_rtx = narrow_bit_field_mem (str_rtx, mode1, bitsize, bitnum,
+ &bitnum);
+ result = extract_fixed_bit_field_1 (mode, str_rtx, bitsize, bitnum,
+ target, unsignedp);
+ }
+
+ return convert_extracted_bit_field (result, mode, tmode, unsignedp);
+ }
+
return extract_bit_field_1 (str_rtx, bitsize, bitnum, unsignedp,
target, mode, tmode, true);
}
@@ -1715,51 +1809,32 @@ extract_fixed_bit_field (enum machine_mode tmode, rtx op0,
if (MEM_P (op0))
{
- /* Get the proper mode to use for this field. We want a mode that
- includes the entire field. If such a mode would be larger than
- a word, we won't be doing the extraction the normal way. */
-
- if (MEM_VOLATILE_P (op0)
- && flag_strict_volatile_bitfields > 0)
- {
- if (GET_MODE_BITSIZE (GET_MODE (op0)) > 0)
- mode = GET_MODE (op0);
- else if (target && GET_MODE_BITSIZE (GET_MODE (target)) > 0)
- mode = GET_MODE (target);
- else
- mode = tmode;
- }
- else
- mode = get_best_mode (bitsize, bitnum, 0, 0,
- MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));
+ mode = get_best_mode (bitsize, bitnum, 0, 0,
+ MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));
if (mode == VOIDmode)
/* The only way this should occur is if the field spans word
boundaries. */
return extract_split_bit_field (op0, bitsize, bitnum, unsignedp);
- unsigned int total_bits = GET_MODE_BITSIZE (mode);
- HOST_WIDE_INT bit_offset = bitnum - bitnum % total_bits;
-
- /* If we're accessing a volatile MEM, we can't apply BIT_OFFSET
- if it results in a multi-word access where we otherwise wouldn't
- have one. So, check for that case here. */
- if (MEM_P (op0)
- && MEM_VOLATILE_P (op0)
- && flag_strict_volatile_bitfields > 0
- && bitnum % BITS_PER_UNIT + bitsize <= total_bits
- && bitnum % GET_MODE_BITSIZE (mode) + bitsize > total_bits)
- {
- /* If the target doesn't support unaligned access, give up and
- split the access into two. */
- if (STRICT_ALIGNMENT)
- return extract_split_bit_field (op0, bitsize, bitnum, unsignedp);
- bit_offset = bitnum - bitnum % BITS_PER_UNIT;
- }
- op0 = adjust_bitfield_address (op0, mode, bit_offset / BITS_PER_UNIT);
- bitnum -= bit_offset;
+ op0 = narrow_bit_field_mem (op0, mode, bitsize, bitnum, &bitnum);
}
+ return extract_fixed_bit_field_1 (tmode, op0, bitsize, bitnum,
+ target, unsignedp);
+}
+
+/* Helper function for extract_fixed_bit_field, extracts
+ the bit field always using the MODE of OP0. */
+
+static rtx
+extract_fixed_bit_field_1 (enum machine_mode tmode, rtx op0,
+ unsigned HOST_WIDE_INT bitsize,
+ unsigned HOST_WIDE_INT bitnum, rtx target,
+ int unsignedp)
+{
+ enum machine_mode mode;
+
mode = GET_MODE (op0);
gcc_assert (SCALAR_INT_MODE_P (mode));
diff --git a/gcc/expr.c b/gcc/expr.c
index cded0bd521a..6e23c883138 100644
--- a/gcc/expr.c
+++ b/gcc/expr.c
@@ -4808,13 +4808,13 @@ expand_assignment (tree to, tree from, bool nontemporal)
to_rtx = expand_expr (tem, NULL_RTX, VOIDmode, EXPAND_WRITE);
- /* If the bitfield is volatile, we want to access it in the
+ /* If the field has a mode, we want to access it in the
field's mode, not the computed mode.
If a MEM has VOIDmode (external with incomplete type),
use BLKmode for it instead. */
if (MEM_P (to_rtx))
{
- if (volatilep && flag_strict_volatile_bitfields > 0)
+ if (mode1 != VOIDmode)
to_rtx = adjust_address (to_rtx, mode1, 0);
else if (GET_MODE (to_rtx) == VOIDmode)
to_rtx = adjust_address (to_rtx, BLKmode, 0);
@@ -4839,8 +4839,8 @@ expand_assignment (tree to, tree from, bool nontemporal)
if (GET_MODE (offset_rtx) != address_mode)
offset_rtx = convert_to_mode (address_mode, offset_rtx, 0);
- /* A constant address in TO_RTX can have VOIDmode, we must not try
- to call force_reg for that case. Avoid that case. */
+ /* The check for a constant address in TO_RTX not having VOIDmode
+ is probably no longer necessary. */
if (MEM_P (to_rtx)
&& GET_MODE (to_rtx) == BLKmode
&& GET_MODE (XEXP (to_rtx, 0)) != VOIDmode
@@ -4850,6 +4850,9 @@ expand_assignment (tree to, tree from, bool nontemporal)
&& MEM_ALIGN (to_rtx) == GET_MODE_ALIGNMENT (mode1))
{
to_rtx = adjust_address (to_rtx, mode1, bitpos / BITS_PER_UNIT);
+ bitregion_start = 0;
+ if (bitregion_end >= (unsigned HOST_WIDE_INT) bitpos)
+ bitregion_end -= bitpos;
bitpos = 0;
}
@@ -9457,13 +9460,11 @@ expand_expr_real_1 (tree exp, rtx target, enum machine_mode tmode,
the same mode we got when the variable was declared. */
if (code == SSA_NAME
&& (g = SSA_NAME_DEF_STMT (ssa_name))
- && gimple_code (g) == GIMPLE_CALL)
- {
- gcc_assert (!gimple_call_internal_p (g));
- pmode = promote_function_mode (type, mode, &unsignedp,
- gimple_call_fntype (g),
- 2);
- }
+ && gimple_code (g) == GIMPLE_CALL
+ && !gimple_call_internal_p (g))
+ pmode = promote_function_mode (type, mode, &unsignedp,
+ gimple_call_fntype (g),
+ 2);
else
pmode = promote_decl_mode (exp, &unsignedp);
gcc_assert (GET_MODE (decl_rtl) == pmode);
@@ -9954,13 +9955,13 @@ expand_expr_real_1 (tree exp, rtx target, enum machine_mode tmode,
VOIDmode,
modifier == EXPAND_SUM ? EXPAND_NORMAL : modifier);
- /* If the bitfield is volatile, we want to access it in the
+ /* If the field has a mode, we want to access it in the
field's mode, not the computed mode.
If a MEM has VOIDmode (external with incomplete type),
use BLKmode for it instead. */
if (MEM_P (op0))
{
- if (volatilep && flag_strict_volatile_bitfields > 0)
+ if (mode1 != VOIDmode)
op0 = adjust_address (op0, mode1, 0);
else if (GET_MODE (op0) == VOIDmode)
op0 = adjust_address (op0, BLKmode, 0);
@@ -10047,8 +10048,8 @@ expand_expr_real_1 (tree exp, rtx target, enum machine_mode tmode,
offset_rtx = convert_to_mode (address_mode, offset_rtx, 0);
if (GET_MODE (op0) == BLKmode
- /* A constant address in OP0 can have VOIDmode, we must
- not try to call force_reg in that case. */
+ /* The check for a constant address in OP0 not having VOIDmode
+ is probably no longer necessary. */
&& GET_MODE (XEXP (op0, 0)) != VOIDmode
&& bitsize != 0
&& (bitpos % bitsize) == 0
@@ -10092,17 +10093,13 @@ expand_expr_real_1 (tree exp, rtx target, enum machine_mode tmode,
&& modifier != EXPAND_CONST_ADDRESS
&& modifier != EXPAND_INITIALIZER
&& modifier != EXPAND_MEMORY)
- /* If the field is volatile, we always want an aligned
- access. Do this in following two situations:
- 1. the access is not already naturally
- aligned, otherwise "normal" (non-bitfield) volatile fields
- become non-addressable.
- 2. the bitsize is narrower than the access size. Need
- to extract bitfields from the access. */
- || (volatilep && flag_strict_volatile_bitfields > 0
- && (bitpos % GET_MODE_ALIGNMENT (mode) != 0
- || (mode1 != BLKmode
- && bitsize < GET_MODE_SIZE (mode1) * BITS_PER_UNIT)))
+ /* If the bitfield is volatile and the bitsize
+ is narrower than the access size of the bitfield,
+ we need to extract bitfields from the access. */
+ || (volatilep && TREE_CODE (exp) == COMPONENT_REF
+ && DECL_BIT_FIELD_TYPE (TREE_OPERAND (exp, 1))
+ && mode1 != BLKmode
+ && bitsize < GET_MODE_SIZE (mode1) * BITS_PER_UNIT)
/* If the field isn't aligned enough to fetch as a memref,
fetch it as a bit field. */
|| (mode1 != BLKmode
@@ -10139,6 +10136,8 @@ expand_expr_real_1 (tree exp, rtx target, enum machine_mode tmode,
if (target == 0)
target = assign_temp (type, 1, 1);
+ /* ??? Unlike the similar test a few lines below, this one is
+ very likely obsolete. */
if (bitsize == 0)
return target;
@@ -10159,6 +10158,13 @@ expand_expr_real_1 (tree exp, rtx target, enum machine_mode tmode,
return target;
}
+ /* If we have nothing to extract, the result will be 0 for targets
+ with SHIFT_COUNT_TRUNCATED == 0 and garbage otherwise. Always
+ return 0 for the sake of consistency, as reading a zero-sized
+ bitfield is valid in Ada and the value is fully specified. */
+ if (bitsize == 0)
+ return const0_rtx;
+
op0 = validize_mem (op0);
if (MEM_P (op0) && REG_P (XEXP (op0, 0)))
diff --git a/gcc/final.c b/gcc/final.c
index 1128b5b68ea..9d201b8bf58 100644
--- a/gcc/final.c
+++ b/gcc/final.c
@@ -701,14 +701,14 @@ compute_alignments (void)
flow_loops_dump (dump_file, NULL, 1);
}
loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (bb->frequency > freq_max)
freq_max = bb->frequency;
freq_threshold = freq_max / PARAM_VALUE (PARAM_ALIGN_THRESHOLD);
if (dump_file)
fprintf (dump_file, "freq_max: %i\n",freq_max);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx label = BB_HEAD (bb);
int fallthru_frequency = 0, branch_frequency = 0, has_fallthru = 0;
@@ -1997,7 +1997,7 @@ final (rtx first, FILE *file, int optimize_p)
/* There is no cfg for a thunk. */
if (!cfun->is_thunk)
- FOR_EACH_BB_REVERSE (bb)
+ FOR_EACH_BB_REVERSE_FN (bb, cfun)
{
start_to_bb[INSN_UID (BB_HEAD (bb))] = bb;
end_to_bb[INSN_UID (BB_END (bb))] = bb;
diff --git a/gcc/flag-types.h b/gcc/flag-types.h
index 5ba909766fe..bea268f9aba 100644
--- a/gcc/flag-types.h
+++ b/gcc/flag-types.h
@@ -215,8 +215,10 @@ enum sanitize_code {
SANITIZE_VLA = 1 << 6,
SANITIZE_NULL = 1 << 7,
SANITIZE_RETURN = 1 << 8,
+ SANITIZE_SI_OVERFLOW = 1 << 9,
SANITIZE_UNDEFINED = SANITIZE_SHIFT | SANITIZE_DIVIDE | SANITIZE_UNREACHABLE
| SANITIZE_VLA | SANITIZE_NULL | SANITIZE_RETURN
+ | SANITIZE_SI_OVERFLOW
};
/* flag_vtable_verify initialization levels. */
diff --git a/gcc/fold-const.c b/gcc/fold-const.c
index a0d8e917d68..507d40f11e8 100644
--- a/gcc/fold-const.c
+++ b/gcc/fold-const.c
@@ -10261,14 +10261,16 @@ fold_binary_loc (location_t loc,
case PLUS_EXPR:
/* A + (-B) -> A - B */
- if (TREE_CODE (arg1) == NEGATE_EXPR)
+ if (TREE_CODE (arg1) == NEGATE_EXPR
+ && (flag_sanitize & SANITIZE_SI_OVERFLOW) == 0)
return fold_build2_loc (loc, MINUS_EXPR, type,
fold_convert_loc (loc, type, arg0),
fold_convert_loc (loc, type,
TREE_OPERAND (arg1, 0)));
/* (-A) + B -> B - A */
if (TREE_CODE (arg0) == NEGATE_EXPR
- && reorder_operands_p (TREE_OPERAND (arg0, 0), arg1))
+ && reorder_operands_p (TREE_OPERAND (arg0, 0), arg1)
+ && (flag_sanitize & SANITIZE_SI_OVERFLOW) == 0)
return fold_build2_loc (loc, MINUS_EXPR, type,
fold_convert_loc (loc, type, arg1),
fold_convert_loc (loc, type,
diff --git a/gcc/fortran/ChangeLog b/gcc/fortran/ChangeLog
index a92561cf565..9d8f1ba4127 100644
--- a/gcc/fortran/ChangeLog
+++ b/gcc/fortran/ChangeLog
@@ -1,3 +1,64 @@
+2013-12-12 Tobias Burnus <burnus@net-b.de>
+
+ PR fortran/59440
+ * trans-decl.c (generate_namelist_decl): Ensure debug DIE
+ is created by setting DECL_IGNORED_P to 0.
+
+2013-12-11 Janus Weil <janus@gcc.gnu.org>
+
+ PR fortran/58916
+ * resolve.c (conformable_arrays): Treat scalar 'e2'.
+ (resolve_allocate_expr): Check rank also for unlimited-polymorphic
+ variables.
+
+2013-12-10 Janus Weil <janus@gcc.gnu.org>
+
+ PR fortran/35831
+ * interface.c (check_dummy_characteristics): Add checks for several
+ attributes.
+
+2013-12-10 Janus Weil <janus@gcc.gnu.org>
+
+ * gfortran.texi: Add possible kind values (and default) for
+ DOUBLE PRECISION.
+ * invoke.texi: Correct documentation of -fdefault-integer-8,
+ -fdefault-real-8 and -fdefault-double-8.
+
+2013-12-10 Janus Weil <janus@gcc.gnu.org>
+
+ * gfortran.texi: Modify documentation of kind type parameters.
+ * invoke.texi: Extend documentation of -fdefault-integer-8 and
+ -fdefault-real-8.
+
+2013-12-10 Janus Weil <janus@gcc.gnu.org>
+
+ * invoke.texi: Add -freal-4-real-16. Rearrange kind promotion options.
+
+2013-12-08 Tobias Burnus <burnus@net-b.de>
+ Janus Weil <janus@gcc.gnu.org>
+
+ PR fortran/58099
+ PR fortran/58676
+ PR fortran/41724
+ * resolve.c (gfc_resolve_intrinsic): Set elemental/pure.
+ (resolve_fl_procedure): Reject pure dummy procedures/procedure
+ pointers.
+ (gfc_explicit_interface_required): Don't require a
+ match of ELEMENTAL for intrinsics.
+
+2013-12-07 Janus Weil <janus@gcc.gnu.org>
+
+ PR fortran/59414
+ * resolve.c (resolve_specific_f0): Handle CLASS-valued functions.
+
+2013-12-04 Tobias Burnus <burnus@net-b.de>
+
+ PR debug/37132
+ * trans-decl.c (generate_namelist_decl, create_module_nml_decl):
+ New static functions.
+ (gfc_generate_module_vars, generate_local_vars): Call them.
+ (gfc_trans_use_stmts): Handle namelists for debug generation.
+
2013-12-01 Paul Thomas <pault@gcc.gnu.org>
PR fortran/57354
diff --git a/gcc/fortran/gfortran.texi b/gcc/fortran/gfortran.texi
index 292569b5376..f2f2c80c29d 100644
--- a/gcc/fortran/gfortran.texi
+++ b/gcc/fortran/gfortran.texi
@@ -1134,16 +1134,19 @@ data types are:
@table @code
@item INTEGER
-1, 2, 4, 8*, 16*, default: 4 (1)
+1, 2, 4, 8*, 16*, default: 4**
@item LOGICAL
-1, 2, 4, 8*, 16*, default: 4 (1)
+1, 2, 4, 8*, 16*, default: 4**
@item REAL
-4, 8, 10*, 16*, default: 4 (2)
+4, 8, 10*, 16*, default: 4***
@item COMPLEX
-4, 8, 10*, 16*, default: 4 (2)
+4, 8, 10*, 16*, default: 4***
+
+@item DOUBLE PRECISION
+4, 8, 10*, 16*, default: 8***
@item CHARACTER
1, 4, default: 1
@@ -1151,23 +1154,22 @@ data types are:
@end table
@noindent
-* = not available on all systems @*
-(1) Unless -fdefault-integer-8 is used @*
-(2) Unless -fdefault-real-8 is used
+* not available on all systems @*
+** unless @option{-fdefault-integer-8} is used @*
+*** unless @option{-fdefault-real-8} is used (see @ref{Fortran Dialect Options})
@noindent
The @code{KIND} value matches the storage size in bytes, except for
@code{COMPLEX} where the storage size is twice as much (or both real and
imaginary part are a real value of the given size). It is recommended to use
-the @code{SELECTED_CHAR_KIND}, @code{SELECTED_INT_KIND} and
-@code{SELECTED_REAL_KIND} intrinsics or the @code{INT8}, @code{INT16},
+the @ref{SELECTED_CHAR_KIND}, @ref{SELECTED_INT_KIND} and
+@ref{SELECTED_REAL_KIND} intrinsics or the @code{INT8}, @code{INT16},
@code{INT32}, @code{INT64}, @code{REAL32}, @code{REAL64}, and @code{REAL128}
parameters of the @code{ISO_FORTRAN_ENV} module instead of the concrete values.
The available kind parameters can be found in the constant arrays
@code{CHARACTER_KINDS}, @code{INTEGER_KINDS}, @code{LOGICAL_KINDS} and
-@code{REAL_KINDS} in the @code{ISO_FORTRAN_ENV} module
-(see @ref{ISO_FORTRAN_ENV}). For C interoperability, the kind parameters of
-the @code{ISO_C_BINDING} module should be used (see @ref{ISO_C_BINDING}).
+@code{REAL_KINDS} in the @ref{ISO_FORTRAN_ENV} module. For C interoperability,
+the kind parameters of the @ref{ISO_C_BINDING} module should be used.
@node Internal representation of LOGICAL variables
diff --git a/gcc/fortran/interface.c b/gcc/fortran/interface.c
index da3db7e096c..1cd1c2b0e3a 100644
--- a/gcc/fortran/interface.c
+++ b/gcc/fortran/interface.c
@@ -1114,8 +1114,37 @@ check_dummy_characteristics (gfc_symbol *s1, gfc_symbol *s2,
return false;
}
- /* FIXME: Do more comprehensive testing of attributes, like e.g.
- ASYNCHRONOUS, CONTIGUOUS, VALUE, VOLATILE, etc. */
+ /* Check ASYNCHRONOUS attribute. */
+ if (s1->attr.asynchronous != s2->attr.asynchronous)
+ {
+ snprintf (errmsg, err_len, "ASYNCHRONOUS mismatch in argument '%s'",
+ s1->name);
+ return false;
+ }
+
+ /* Check CONTIGUOUS attribute. */
+ if (s1->attr.contiguous != s2->attr.contiguous)
+ {
+ snprintf (errmsg, err_len, "CONTIGUOUS mismatch in argument '%s'",
+ s1->name);
+ return false;
+ }
+
+ /* Check VALUE attribute. */
+ if (s1->attr.value != s2->attr.value)
+ {
+ snprintf (errmsg, err_len, "VALUE mismatch in argument '%s'",
+ s1->name);
+ return false;
+ }
+
+ /* Check VOLATILE attribute. */
+ if (s1->attr.volatile_ != s2->attr.volatile_)
+ {
+ snprintf (errmsg, err_len, "VOLATILE mismatch in argument '%s'",
+ s1->name);
+ return false;
+ }
/* Check interface of dummy procedures. */
if (s1->attr.flavor == FL_PROCEDURE)
diff --git a/gcc/fortran/invoke.texi b/gcc/fortran/invoke.texi
index eb678d1f043..6a5c8a14471 100644
--- a/gcc/fortran/invoke.texi
+++ b/gcc/fortran/invoke.texi
@@ -227,29 +227,6 @@ given they are treated as if the first column contained a blank. If the
@option{-fd-lines-as-comments} option is given, they are treated as
comment lines.
-@item -fdefault-double-8
-@opindex @code{fdefault-double-8}
-Set the @code{DOUBLE PRECISION} type to an 8 byte wide type. If
-@option{-fdefault-real-8} is given, @code{DOUBLE PRECISION} would
-instead be promoted to 16 bytes if possible, and @option{-fdefault-double-8}
-can be used to prevent this. The kind of real constants like @code{1.d0} will
-not be changed by @option{-fdefault-real-8} though, so also
-@option{-fdefault-double-8} does not affect it.
-
-@item -fdefault-integer-8
-@opindex @code{fdefault-integer-8}
-Set the default integer and logical types to an 8 byte wide type.
-Do nothing if this is already the default. This option also affects
-the kind of integer constants like @code{42}.
-
-@item -fdefault-real-8
-@opindex @code{fdefault-real-8}
-Set the default real type to an 8 byte wide type.
-Do nothing if this is already the default. This option also affects
-the kind of non-double real constants like @code{1.0}, and does promote
-the default width of @code{DOUBLE PRECISION} to 16 bytes if possible, unless
-@code{-fdefault-double-8} is given, too.
-
@item -fdollar-ok
@opindex @code{fdollar-ok}
@cindex @code{$}
@@ -320,17 +297,6 @@ Specify that no implicit typing is allowed, unless overridden by explicit
@code{IMPLICIT} statements. This is the equivalent of adding
@code{implicit none} to the start of every procedure.
-@item -finteger-4-integer-8
-@opindex @code{finteger-4-integer-8}
-Promote all @code{INTEGER(KIND=4)} entities to an @code{INTEGER(KIND=8)}
-entities. If @code{KIND=8} is unavailable, then an error will be issued.
-This option should be used with care and may not be suitable for your codes.
-Areas of possible concern include calls to external procedures,
-alignment in @code{EQUIVALENCE} and/or @code{COMMON}, generic interfaces,
-BOZ literal constant conversion, and I/O. Inspection of the intermediate
-representation of the translated Fortran code, produced by
-@option{-fdump-tree-original}, is suggested.
-
@item -fcray-pointer
@opindex @code{fcray-pointer}
Enable the Cray pointer extension, which provides C-like pointer
@@ -361,8 +327,44 @@ Similarly, @code{DATA i/Z'FFFFFFFF'/} will result in an integer overflow
on most systems, but with @option{-fno-range-check} the value will
``wrap around'' and @code{i} will be initialized to @math{-1} instead.
+@item -fdefault-integer-8
+@opindex @code{fdefault-integer-8}
+Set the default integer and logical types to an 8 byte wide type. This option
+also affects the kind of integer constants like @code{42}. Unlike
+@option{-finteger-4-integer-8}, it does not promote variables with explicit
+kind declaration.
+
+@item -fdefault-real-8
+@opindex @code{fdefault-real-8}
+Set the default real type to an 8 byte wide type. This option also affects
+the kind of non-double real constants like @code{1.0}, and does promote
+the default width of @code{DOUBLE PRECISION} to 16 bytes if possible, unless
+@code{-fdefault-double-8} is given, too. Unlike @option{-freal-4-real-8},
+it does not promote variables with explicit kind declaration.
+
+@item -fdefault-double-8
+@opindex @code{fdefault-double-8}
+Set the @code{DOUBLE PRECISION} type to an 8 byte wide type. Do nothing if this
+is already the default. If @option{-fdefault-real-8} is given,
+@code{DOUBLE PRECISION} would instead be promoted to 16 bytes if possible, and
+@option{-fdefault-double-8} can be used to prevent this. The kind of real
+constants like @code{1.d0} will not be changed by @option{-fdefault-real-8}
+though, so also @option{-fdefault-double-8} does not affect it.
+
+@item -finteger-4-integer-8
+@opindex @code{finteger-4-integer-8}
+Promote all @code{INTEGER(KIND=4)} entities to @code{INTEGER(KIND=8)}
+entities. If @code{KIND=8} is unavailable, then an error will be issued.
+This option should be used with care and may not be suitable for your codes.
+Areas of possible concern include calls to external procedures,
+alignment in @code{EQUIVALENCE} and/or @code{COMMON}, generic interfaces,
+BOZ literal constant conversion, and I/O. Inspection of the intermediate
+representation of the translated Fortran code, produced by
+@option{-fdump-tree-original}, is suggested.
+
@item -freal-4-real-8
@itemx -freal-4-real-10
+@itemx -freal-4-real-16
@itemx -freal-8-real-4
@itemx -freal-8-real-10
@itemx -freal-8-real-16
diff --git a/gcc/fortran/resolve.c b/gcc/fortran/resolve.c
index d16347d034e..db2f5eb705a 100644
--- a/gcc/fortran/resolve.c
+++ b/gcc/fortran/resolve.c
@@ -1679,6 +1679,9 @@ gfc_resolve_intrinsic (gfc_symbol *sym, locus *loc)
gfc_copy_formal_args_intr (sym, isym);
+ sym->attr.pure = isym->pure;
+ sym->attr.elemental = isym->elemental;
+
/* Check it is actually available in the standard settings. */
if (!gfc_check_intrinsic_standard (isym, &symstd, false, sym->declared_at))
{
@@ -2314,7 +2317,7 @@ gfc_explicit_interface_required (gfc_symbol *sym, char *errmsg, int err_len)
}
}
- if (sym->attr.elemental) /* (4) */
+ if (sym->attr.elemental && !sym->attr.intrinsic) /* (4) */
{
strncpy (errmsg, _("elemental procedure"), err_len);
return true;
@@ -2616,7 +2619,9 @@ found:
expr->ts = sym->ts;
expr->value.function.name = sym->name;
expr->value.function.esym = sym;
- if (sym->as != NULL)
+ if (sym->ts.type == BT_CLASS && CLASS_DATA (sym)->as)
+ expr->rank = CLASS_DATA (sym)->as->rank;
+ else if (sym->as != NULL)
expr->rank = sym->as->rank;
return MATCH_YES;
@@ -6592,7 +6597,8 @@ conformable_arrays (gfc_expr *e1, gfc_expr *e2)
for (tail = e2->ref; tail && tail->next; tail = tail->next);
/* First compare rank. */
- if (tail && e1->rank != tail->u.ar.as->rank)
+ if ((tail && e1->rank != tail->u.ar.as->rank)
+ || (!tail && e1->rank != e2->rank))
{
gfc_error ("Source-expr at %L must be scalar or have the "
"same rank as the allocate-object at %L",
@@ -6789,8 +6795,7 @@ resolve_allocate_expr (gfc_expr *e, gfc_code *code)
}
/* Check F03:C632 and restriction following Note 6.18. */
- if (code->expr3->rank > 0 && !unlimited
- && !conformable_arrays (code->expr3, e))
+ if (code->expr3->rank > 0 && !conformable_arrays (code->expr3, e))
goto failure;
/* Check F03:C633. */
@@ -11092,6 +11097,23 @@ resolve_fl_procedure (gfc_symbol *sym, int mp_flag)
sym->name, &sym->declared_at);
}
+ /* F2008, C1218. */
+ if (sym->attr.elemental)
+ {
+ if (sym->attr.proc_pointer)
+ {
+ gfc_error ("Procedure pointer '%s' at %L shall not be elemental",
+ sym->name, &sym->declared_at);
+ return false;
+ }
+ if (sym->attr.dummy)
+ {
+ gfc_error ("Dummy procedure '%s' at %L shall not be elemental",
+ sym->name, &sym->declared_at);
+ return false;
+ }
+ }
+
if (sym->attr.is_bind_c && sym->attr.is_c_interop != 1)
{
gfc_formal_arglist *curr_arg;
diff --git a/gcc/fortran/trans-decl.c b/gcc/fortran/trans-decl.c
index 6a29ba110b6..45f64bd69ed 100644
--- a/gcc/fortran/trans-decl.c
+++ b/gcc/fortran/trans-decl.c
@@ -4144,6 +4144,38 @@ gfc_module_add_decl (struct module_htab_entry *entry, tree decl)
static struct module_htab_entry *cur_module;
+
+/* Generate debugging symbols for namelists. This function must come after
+ generate_local_decl to ensure that the variables in the namelist are
+ already declared. */
+
+static tree
+generate_namelist_decl (gfc_symbol * sym)
+{
+ gfc_namelist *nml;
+ tree decl;
+ vec<constructor_elt, va_gc> *nml_decls = NULL;
+
+ gcc_assert (sym->attr.flavor == FL_NAMELIST);
+ for (nml = sym->namelist; nml; nml = nml->next)
+ {
+ if (nml->sym->backend_decl == NULL_TREE)
+ {
+ nml->sym->attr.referenced = 1;
+ nml->sym->backend_decl = gfc_get_symbol_decl (nml->sym);
+ }
+ DECL_IGNORED_P (nml->sym->backend_decl) = 0;
+ CONSTRUCTOR_APPEND_ELT (nml_decls, NULL_TREE, nml->sym->backend_decl);
+ }
+
+ decl = make_node (NAMELIST_DECL);
+ TREE_TYPE (decl) = void_type_node;
+ NAMELIST_DECL_ASSOCIATED_DECL (decl) = build_constructor (NULL_TREE, nml_decls);
+ DECL_NAME (decl) = get_identifier (sym->name);
+ return decl;
+}
+
+
/* Output an initialized decl for a module variable. */
static void
@@ -4333,6 +4365,18 @@ gfc_trans_use_stmts (gfc_namespace * ns)
DECL_IGNORED_P (decl) = 0;
DECL_INITIAL (decl) = NULL_TREE;
}
+ else if (st->n.sym->attr.flavor == FL_NAMELIST
+ && st->n.sym->attr.use_only
+ && st->n.sym->module
+ && strcmp (st->n.sym->module, use_stmt->module_name)
+ == 0)
+ {
+ decl = generate_namelist_decl (st->n.sym);
+ DECL_CONTEXT (decl) = entry->namespace_decl;
+ DECL_EXTERNAL (decl) = 1;
+ DECL_IGNORED_P (decl) = 0;
+ DECL_INITIAL (decl) = NULL_TREE;
+ }
else
{
*slot = error_mark_node;
@@ -4610,6 +4654,21 @@ generate_coarray_init (gfc_namespace * ns __attribute((unused)))
}
+static void
+create_module_nml_decl (gfc_symbol *sym)
+{
+ if (sym->attr.flavor == FL_NAMELIST)
+ {
+ tree decl = generate_namelist_decl (sym);
+ pushdecl (decl);
+ gcc_assert (sym->ns->proc_name->attr.flavor == FL_MODULE);
+ DECL_CONTEXT (decl) = sym->ns->proc_name->backend_decl;
+ rest_of_decl_compilation (decl, 1, 0);
+ gfc_module_add_decl (cur_module, decl);
+ }
+}
+
+
/* Generate all the required code for module variables. */
void
@@ -4628,6 +4687,7 @@ gfc_generate_module_vars (gfc_namespace * ns)
/* Create decls for all the module variables. */
gfc_traverse_ns (ns, gfc_create_module_variable);
+ gfc_traverse_ns (ns, create_module_nml_decl);
if (gfc_option.coarray == GFC_FCOARRAY_LIB && has_coarray_vars)
generate_coarray_init (ns);
@@ -4893,10 +4953,23 @@ generate_local_decl (gfc_symbol * sym)
sym->backend_decl = gfc_typenode_for_spec (&(sym->ts));
}
+
+static void
+generate_local_nml_decl (gfc_symbol * sym)
+{
+ if (sym->attr.flavor == FL_NAMELIST && !sym->attr.use_assoc)
+ {
+ tree decl = generate_namelist_decl (sym);
+ pushdecl (decl);
+ }
+}
+
+
static void
generate_local_vars (gfc_namespace * ns)
{
gfc_traverse_ns (ns, generate_local_decl);
+ gfc_traverse_ns (ns, generate_local_nml_decl);
}
diff --git a/gcc/function.c b/gcc/function.c
index 2c8d781e2c0..e2d0e233e80 100644
--- a/gcc/function.c
+++ b/gcc/function.c
@@ -6043,7 +6043,7 @@ thread_prologue_and_epilogue_insns (void)
max_grow_size = get_uncond_jump_length ();
max_grow_size *= PARAM_VALUE (PARAM_MAX_GROW_COPY_BB_INSNS);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn;
unsigned size = 0;
@@ -6120,7 +6120,7 @@ thread_prologue_and_epilogue_insns (void)
needing a prologue. */
bitmap_clear (&bb_on_list);
bitmap_and_compl (&bb_antic_flags, &bb_flags, &bb_tail);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
if (!bitmap_bit_p (&bb_antic_flags, bb->index))
continue;
@@ -6154,7 +6154,7 @@ thread_prologue_and_epilogue_insns (void)
/* Find exactly one edge that leads to a block in ANTIC from
a block that isn't. */
if (!bitmap_bit_p (&bb_antic_flags, entry_edge->dest->index))
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
if (!bitmap_bit_p (&bb_antic_flags, bb->index))
continue;
@@ -6202,7 +6202,7 @@ thread_prologue_and_epilogue_insns (void)
/* Find tail blocks reachable from both blocks needing a
prologue and blocks not needing a prologue. */
if (!bitmap_empty_p (&bb_tail))
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
bool some_pro, some_no_pro;
if (!bitmap_bit_p (&bb_tail, bb->index))
@@ -6236,7 +6236,7 @@ thread_prologue_and_epilogue_insns (void)
}
/* Now duplicate the tails. */
if (!bitmap_empty_p (&bb_tail))
- FOR_EACH_BB_REVERSE (bb)
+ FOR_EACH_BB_REVERSE_FN (bb, cfun)
{
basic_block copy_bb, tbb;
rtx insert_point;
@@ -6480,7 +6480,7 @@ thread_prologue_and_epilogue_insns (void)
we take advantage of cfg_layout_finalize using
fixup_fallthru_exit_predecessor. */
cfg_layout_initialize (0);
- FOR_EACH_BB (cur_bb)
+ FOR_EACH_BB_FN (cur_bb, cfun)
if (cur_bb->index >= NUM_FIXED_BLOCKS
&& cur_bb->next_bb->index >= NUM_FIXED_BLOCKS)
cur_bb->aux = cur_bb->next_bb;
@@ -6498,7 +6498,7 @@ epilogue_done:
commit_edge_insertions ();
/* Look for basic blocks within the prologue insns. */
- blocks = sbitmap_alloc (last_basic_block);
+ blocks = sbitmap_alloc (last_basic_block_for_fn (cfun));
bitmap_clear (blocks);
bitmap_set_bit (blocks, entry_edge->dest->index);
bitmap_set_bit (blocks, orig_entry_edge->dest->index);
@@ -7192,7 +7192,7 @@ rest_of_match_asm_constraints (void)
return 0;
df_set_flags (DF_DEFER_INSN_RESCAN);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
FOR_BB_INSNS (bb, insn)
{
diff --git a/gcc/function.h b/gcc/function.h
index c00bfe2aa37..c84285d75fa 100644
--- a/gcc/function.h
+++ b/gcc/function.h
@@ -165,10 +165,10 @@ struct gimple_df;
struct temp_slot;
typedef struct temp_slot *temp_slot_p;
struct call_site_record_d;
-struct dw_fde_struct;
+struct dw_fde_node;
-struct ipa_opt_pass_d;
-typedef struct ipa_opt_pass_d *ipa_opt_pass;
+class ipa_opt_pass_d;
+typedef ipa_opt_pass_d *ipa_opt_pass;
struct GTY(()) varasm_status {
@@ -569,7 +569,7 @@ struct GTY(()) function {
/* Dwarf2 Frame Description Entry, containing the Call Frame Instructions
used for unwinding. Only set when either dwarf2 unwinding or dwarf2
debugging is enabled. */
- struct dw_fde_struct *fde;
+ struct dw_fde_node *fde;
/* Last statement uid. */
int last_stmt_uid;
diff --git a/gcc/gcse.c b/gcc/gcse.c
index 2c1ca21586e..fdf0a572379 100644
--- a/gcc/gcse.c
+++ b/gcc/gcse.c
@@ -633,8 +633,9 @@ alloc_gcse_mem (void)
pre-processor limitation with template types in macro arguments. */
typedef vec<rtx> vec_rtx_heap;
typedef vec<modify_pair> vec_modify_pair_heap;
- modify_mem_list = GCNEWVEC (vec_rtx_heap, last_basic_block);
- canon_modify_mem_list = GCNEWVEC (vec_modify_pair_heap, last_basic_block);
+ modify_mem_list = GCNEWVEC (vec_rtx_heap, last_basic_block_for_fn (cfun));
+ canon_modify_mem_list = GCNEWVEC (vec_modify_pair_heap,
+ last_basic_block_for_fn (cfun));
modify_mem_list_set = BITMAP_ALLOC (NULL);
blocks_with_calls = BITMAP_ALLOC (NULL);
}
@@ -685,13 +686,13 @@ compute_local_properties (sbitmap *transp, sbitmap *comp, sbitmap *antloc,
/* Initialize any bitmaps that were passed in. */
if (transp)
{
- bitmap_vector_ones (transp, last_basic_block);
+ bitmap_vector_ones (transp, last_basic_block_for_fn (cfun));
}
if (comp)
- bitmap_vector_clear (comp, last_basic_block);
+ bitmap_vector_clear (comp, last_basic_block_for_fn (cfun));
if (antloc)
- bitmap_vector_clear (antloc, last_basic_block);
+ bitmap_vector_clear (antloc, last_basic_block_for_fn (cfun));
for (i = 0; i < table->size; i++)
{
@@ -1558,7 +1559,7 @@ compute_hash_table_work (struct hash_table_d *table)
for (i = 0; i < max_reg_num (); ++i)
reg_avail_info[i].last_bb = NULL;
- FOR_EACH_BB (current_bb)
+ FOR_EACH_BB_FN (current_bb, cfun)
{
rtx insn;
unsigned int regno;
@@ -1898,7 +1899,7 @@ prune_expressions (bool pre_p)
}
}
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
edge e;
edge_iterator ei;
@@ -1972,7 +1973,7 @@ prune_insertions_deletions (int n_elems)
/* Similarly for deletions, but those occur in blocks rather than on
edges. */
- for (i = 0; i < (unsigned) last_basic_block; i++)
+ for (i = 0; i < (unsigned) last_basic_block_for_fn (cfun); i++)
{
EXECUTE_IF_SET_IN_BITMAP (pre_delete_map[i], 0, j, sbi)
deletions[j]++;
@@ -1993,7 +1994,7 @@ prune_insertions_deletions (int n_elems)
for (i = 0; i < (unsigned) n_edges_for_fn (cfun); i++)
bitmap_clear_bit (pre_insert_map[i], j);
- for (i = 0; i < (unsigned) last_basic_block; i++)
+ for (i = 0; i < (unsigned) last_basic_block_for_fn (cfun); i++)
bitmap_clear_bit (pre_delete_map[i], j);
}
@@ -2012,14 +2013,14 @@ compute_pre_data (void)
compute_local_properties (transp, comp, antloc, &expr_hash_table);
prune_expressions (true);
- bitmap_vector_clear (ae_kill, last_basic_block);
+ bitmap_vector_clear (ae_kill, last_basic_block_for_fn (cfun));
/* Compute ae_kill for each basic block using:
~(TRANSP | COMP)
*/
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
bitmap_ior (ae_kill[bb->index], transp[bb->index], comp[bb->index]);
bitmap_not (ae_kill[bb->index], ae_kill[bb->index]);
@@ -2103,7 +2104,7 @@ static int
pre_expr_reaches_here_p (basic_block occr_bb, struct expr *expr, basic_block bb)
{
int rval;
- char *visited = XCNEWVEC (char, last_basic_block);
+ char *visited = XCNEWVEC (char, last_basic_block_for_fn (cfun));
rval = pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited);
@@ -2687,7 +2688,7 @@ one_pre_gcse_pass (void)
if (expr_hash_table.n_elems > 0)
{
struct edge_list *edge_list;
- alloc_pre_mem (last_basic_block, expr_hash_table.n_elems);
+ alloc_pre_mem (last_basic_block_for_fn (cfun), expr_hash_table.n_elems);
edge_list = compute_pre_data ();
changed |= pre_gcse (edge_list);
free_edge_list (edge_list);
@@ -2816,8 +2817,8 @@ compute_code_hoist_vbeinout (void)
int changed, passes;
basic_block bb;
- bitmap_vector_clear (hoist_vbeout, last_basic_block);
- bitmap_vector_clear (hoist_vbein, last_basic_block);
+ bitmap_vector_clear (hoist_vbeout, last_basic_block_for_fn (cfun));
+ bitmap_vector_clear (hoist_vbein, last_basic_block_for_fn (cfun));
passes = 0;
changed = 1;
@@ -2828,7 +2829,7 @@ compute_code_hoist_vbeinout (void)
/* We scan the blocks in the reverse order to speed up
the convergence. */
- FOR_EACH_BB_REVERSE (bb)
+ FOR_EACH_BB_REVERSE_FN (bb, cfun)
{
if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
@@ -2854,7 +2855,7 @@ compute_code_hoist_vbeinout (void)
{
fprintf (dump_file, "hoisting vbeinout computation: %d passes\n", passes);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
fprintf (dump_file, "vbein (%d): ", bb->index);
dump_bitmap_file (dump_file, hoist_vbein[bb->index]);
@@ -3033,7 +3034,7 @@ should_hoist_expr_to_dom (basic_block expr_bb, struct expr *expr,
if (visited == NULL)
{
visited_allocated_locally = 1;
- visited = sbitmap_alloc (last_basic_block);
+ visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
bitmap_clear (visited);
}
@@ -3166,9 +3167,9 @@ hoist_code (void)
data to restrict distance an expression can travel. */
to_bb_head = XCNEWVEC (int, get_max_uid ());
- bb_size = XCNEWVEC (int, last_basic_block);
+ bb_size = XCNEWVEC (int, last_basic_block_for_fn (cfun));
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn;
int to_head;
@@ -3337,7 +3338,7 @@ hoist_code (void)
data->max_reg_pressure[pressure_class] += nregs;
EXECUTE_IF_SET_IN_BITMAP (hoisted_bbs, 0, k, bi)
{
- data = BB_DATA (BASIC_BLOCK (k));
+ data = BB_DATA (BASIC_BLOCK_FOR_FN (cfun, k));
data->max_reg_pressure[pressure_class] += nregs;
}
}
@@ -3348,7 +3349,7 @@ hoist_code (void)
hoisted. */
EXECUTE_IF_SET_IN_BITMAP (hoisted_bbs, 0, k, bi)
{
- data = BB_DATA (BASIC_BLOCK (k));
+ data = BB_DATA (BASIC_BLOCK_FOR_FN (cfun, k));
bitmap_copy (data->live_in, data->backup);
data->max_reg_pressure[pressure_class]
= data->old_pressure;
@@ -3511,7 +3512,7 @@ calculate_bb_reg_pressure (void)
ira_setup_eliminable_regset ();
curr_regs_live = BITMAP_ALLOC (&reg_obstack);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
curr_bb = bb;
BB_DATA (bb)->live_in = BITMAP_ALLOC (NULL);
@@ -3561,7 +3562,7 @@ calculate_bb_reg_pressure (void)
return;
fprintf (dump_file, "\nRegister Pressure: \n");
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
fprintf (dump_file, " Basic block %d: \n", bb->index);
for (i = 0; (int) i < ira_pressure_classes_num; i++)
@@ -3622,7 +3623,8 @@ one_code_hoisting_pass (void)
if (expr_hash_table.n_elems > 0)
{
- alloc_code_hoist_mem (last_basic_block, expr_hash_table.n_elems);
+ alloc_code_hoist_mem (last_basic_block_for_fn (cfun),
+ expr_hash_table.n_elems);
compute_code_hoist_data ();
changed = hoist_code ();
free_code_hoist_mem ();
@@ -3886,7 +3888,7 @@ compute_ld_motion_mems (void)
pre_ldst_mems = NULL;
pre_ldst_table.create (13);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
FOR_BB_INSNS (bb, insn)
{
diff --git a/gcc/gdbasan.in b/gcc/gdbasan.in
new file mode 100644
index 00000000000..cf05825395b
--- /dev/null
+++ b/gcc/gdbasan.in
@@ -0,0 +1,3 @@
+# Put a breakpoint on __asan_report_error to help with debugging buffer
+# overflow.
+b __asan_report_error
diff --git a/gcc/genmodes.c b/gcc/genmodes.c
index 26386eace85..a76374adce7 100644
--- a/gcc/genmodes.c
+++ b/gcc/genmodes.c
@@ -711,10 +711,27 @@ make_vector_mode (enum mode_class bclass,
#define ADJUST_IBIT(M, X) _ADD_ADJUST (ibit, M, X, ACCUM, UACCUM)
#define ADJUST_FBIT(M, X) _ADD_ADJUST (fbit, M, X, FRACT, UACCUM)
+static int bits_per_unit;
+static int max_bitsize_mode_any_int;
+
static void
create_modes (void)
{
#include "machmode.def"
+
+ /* So put the default value unless the target needs a non standard
+ value. */
+#ifdef BITS_PER_UNIT
+ bits_per_unit = BITS_PER_UNIT;
+#else
+ bits_per_unit = 8;
+#endif
+
+#ifdef MAX_BITSIZE_MODE_ANY_INT
+ max_bitsize_mode_any_int = MAX_BITSIZE_MODE_ANY_INT;
+#else
+ max_bitsize_mode_any_int = 0;
+#endif
}
/* Processing. */
@@ -860,16 +877,24 @@ emit_max_int (void)
int j;
puts ("");
- for (max = 1, i = modes[MODE_INT]; i; i = i->next)
- if (max < i->bytesize)
- max = i->bytesize;
- mmax = max;
- for (max = 1, i = modes[MODE_PARTIAL_INT]; i; i = i->next)
- if (max < i->bytesize)
- max = i->bytesize;
- if (max > mmax)
- mmax = max;
- printf ("#define MAX_BITSIZE_MODE_ANY_INT %d\n", mmax * MAX_BITS_PER_UNIT);
+
+ printf ("#define BITS_PER_UNIT (%d)\n", bits_per_unit);
+
+ if (max_bitsize_mode_any_int == 0)
+ {
+ for (max = 1, i = modes[MODE_INT]; i; i = i->next)
+ if (max < i->bytesize)
+ max = i->bytesize;
+ mmax = max;
+ for (max = 1, i = modes[MODE_PARTIAL_INT]; i; i = i->next)
+ if (max < i->bytesize)
+ max = i->bytesize;
+ if (max > mmax)
+ mmax = max;
+ printf ("#define MAX_BITSIZE_MODE_ANY_INT %d*BITS_PER_UNIT\n", mmax);
+ }
+ else
+ printf ("#define MAX_BITSIZE_MODE_ANY_INT %d\n", max_bitsize_mode_any_int);
mmax = 0;
for (j = 0; j < MAX_MODE_CLASS; j++)
diff --git a/gcc/genrecog.c b/gcc/genrecog.c
index 14a7e1561ee..cdd036b092b 100644
--- a/gcc/genrecog.c
+++ b/gcc/genrecog.c
@@ -457,9 +457,8 @@ validate_pattern (rtx pattern, rtx insn, rtx set, int set_code)
{
pred = lookup_predicate (pred_name);
if (!pred)
- message_with_line (pattern_lineno,
- "warning: unknown predicate '%s'",
- pred_name);
+ error_with_line (pattern_lineno, "unknown predicate '%s'",
+ pred_name);
}
else
pred = 0;
@@ -477,9 +476,9 @@ validate_pattern (rtx pattern, rtx insn, rtx set, int set_code)
|| GET_CODE (insn) == DEFINE_PEEPHOLE2)
{
if (constraints0)
- message_with_line (pattern_lineno,
- "warning: constraints not supported in %s",
- rtx_name[GET_CODE (insn)]);
+ error_with_line (pattern_lineno,
+ "constraints not supported in %s",
+ rtx_name[GET_CODE (insn)]);
}
/* A MATCH_OPERAND that is a SET should have an output reload. */
@@ -510,10 +509,9 @@ validate_pattern (rtx pattern, rtx insn, rtx set, int set_code)
while not likely to occur at runtime, results in less efficient
code from insn-recog.c. */
if (set && pred && pred->allows_non_lvalue)
- message_with_line (pattern_lineno,
- "warning: destination operand %d "
- "allows non-lvalue",
- XINT (pattern, 0));
+ error_with_line (pattern_lineno,
+ "destination operand %d allows non-lvalue",
+ XINT (pattern, 0));
/* A modeless MATCH_OPERAND can be handy when we can check for
multiple modes in the c_test. In most other cases, it is a
@@ -783,16 +781,16 @@ add_to_sequence (rtx pattern, struct decision_head *last,
|| pred->codes[CONST_WIDE_INT]);
if (was_code == MATCH_PARALLEL
&& pred->singleton != PARALLEL)
- message_with_line (pattern_lineno,
- "predicate '%s' used in match_parallel "
- "does not allow only PARALLEL", pred->name);
+ error_with_line (pattern_lineno,
+ "predicate '%s' used in match_parallel "
+ "does not allow only PARALLEL", pred->name);
else
code = pred->singleton;
}
else
- message_with_line (pattern_lineno,
- "warning: unknown predicate '%s' in '%s' expression",
- pred_name, GET_RTX_NAME (was_code));
+ error_with_line (pattern_lineno,
+ "unknown predicate '%s' in '%s' expression",
+ pred_name, GET_RTX_NAME (was_code));
}
/* Can't enforce a mode if we allow const_int. */
diff --git a/gcc/ggc-internal.h b/gcc/ggc-internal.h
index 021961579f2..3d1f3ddd1d2 100644
--- a/gcc/ggc-internal.h
+++ b/gcc/ggc-internal.h
@@ -106,11 +106,11 @@ extern size_t ggc_get_size (const void *);
/* This structure contains the statistics common to all collectors.
Particular collectors can extend this structure. */
-typedef struct ggc_statistics
+struct ggc_statistics
{
/* At present, we don't really gather any interesting statistics. */
int unused;
-} ggc_statistics;
+};
/* Used by the various collectors to gather and print statistics that
do not depend on the collector in use. */
diff --git a/gcc/gimple-fold.c b/gcc/gimple-fold.c
index d7cd1a38964..90bc3f947ed 100644
--- a/gcc/gimple-fold.c
+++ b/gcc/gimple-fold.c
@@ -77,7 +77,7 @@ along with GCC; see the file COPYING3. If not see
static bool
can_refer_decl_in_current_unit_p (tree decl, tree from_decl)
{
- struct varpool_node *vnode;
+ varpool_node *vnode;
struct cgraph_node *node;
symtab_node *snode;
@@ -2660,8 +2660,37 @@ gimple_fold_stmt_to_constant_1 (gimple stmt, tree (*valueize) (tree))
tree fn;
if (gimple_call_internal_p (stmt))
- /* No folding yet for these functions. */
- return NULL_TREE;
+ {
+ enum tree_code subcode = ERROR_MARK;
+ switch (gimple_call_internal_fn (stmt))
+ {
+ case IFN_UBSAN_CHECK_ADD:
+ subcode = PLUS_EXPR;
+ break;
+ case IFN_UBSAN_CHECK_SUB:
+ subcode = MINUS_EXPR;
+ break;
+ case IFN_UBSAN_CHECK_MUL:
+ subcode = MULT_EXPR;
+ break;
+ default:
+ return NULL_TREE;
+ }
+ tree op0 = (*valueize) (gimple_call_arg (stmt, 0));
+ tree op1 = (*valueize) (gimple_call_arg (stmt, 1));
+
+ if (TREE_CODE (op0) != INTEGER_CST
+ || TREE_CODE (op1) != INTEGER_CST)
+ return NULL_TREE;
+ tree res = fold_binary_loc (loc, subcode,
+ TREE_TYPE (gimple_call_arg (stmt, 0)),
+ op0, op1);
+ if (res
+ && TREE_CODE (res) == INTEGER_CST
+ && !TREE_OVERFLOW (res))
+ return res;
+ return NULL_TREE;
+ }
fn = (*valueize) (gimple_call_fn (stmt));
if (TREE_CODE (fn) == ADDR_EXPR
diff --git a/gcc/gimple-iterator.c b/gcc/gimple-iterator.c
index 9f51e6cf07a..2460c616dac 100644
--- a/gcc/gimple-iterator.c
+++ b/gcc/gimple-iterator.c
@@ -839,7 +839,7 @@ gsi_commit_edge_inserts (void)
gsi_commit_one_edge_insert (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
NULL);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
FOR_EACH_EDGE (e, ei, bb->succs)
gsi_commit_one_edge_insert (e, NULL);
}
diff --git a/gcc/gimple-iterator.h b/gcc/gimple-iterator.h
index 11b12763505..7c801e32b4e 100644
--- a/gcc/gimple-iterator.h
+++ b/gcc/gimple-iterator.h
@@ -22,7 +22,7 @@ along with GCC; see the file COPYING3. If not see
/* Iterator object for GIMPLE statement sequences. */
-typedef struct gimple_stmt_iterator_d
+struct gimple_stmt_iterator
{
/* Sequence node holding the current statement. */
gimple_seq_node ptr;
@@ -33,7 +33,7 @@ typedef struct gimple_stmt_iterator_d
block/sequence is removed. */
gimple_seq *seq;
basic_block bb;
-} gimple_stmt_iterator;
+};
enum gsi_iterator_update
{
diff --git a/gcc/gimple-ssa-isolate-paths.c b/gcc/gimple-ssa-isolate-paths.c
index 440d2ed86d9..aaa75378b3e 100644
--- a/gcc/gimple-ssa-isolate-paths.c
+++ b/gcc/gimple-ssa-isolate-paths.c
@@ -48,7 +48,7 @@ along with GCC; see the file COPYING3. If not see
static bool cfg_altered;
/* Callback for walk_stmt_load_store_ops.
-
+
Return TRUE if OP will dereference the tree stored in DATA, FALSE
otherwise.
@@ -144,7 +144,6 @@ isolate_path (basic_block bb, basic_block duplicate,
gimple_stmt_iterator si, si2;
edge_iterator ei;
edge e2;
-
/* First duplicate BB if we have not done so already and remove all
the duplicate's outgoing edges as duplicate is going to unconditionally
@@ -171,7 +170,7 @@ isolate_path (basic_block bb, basic_block duplicate,
the statement which triggers undefined behaviour. If found, then
transform the statement into a trap and delete everything after the
statement. If not found, then this particular instance was subsumed by
- an earlier instance of undefined behaviour and there's nothing to do.
+ an earlier instance of undefined behaviour and there's nothing to do.
This is made more complicated by the fact that we have STMT, which is in
BB rather than in DUPLICATE. So we set up two iterators, one for each
@@ -180,7 +179,7 @@ isolate_path (basic_block bb, basic_block duplicate,
When we find STMT the second iterator should point to STMT's equivalent in
duplicate. If DUPLICATE ends before STMT is found in BB, then there's
- nothing to do.
+ nothing to do.
Ignore labels and debug statements. */
si = gsi_start_nondebug_after_labels_bb (bb);
@@ -217,7 +216,7 @@ find_implicit_erroneous_behaviour (void)
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator si;
@@ -247,7 +246,7 @@ find_implicit_erroneous_behaviour (void)
continue;
/* PHI produces a pointer result. See if any of the PHI's
- arguments are NULL.
+ arguments are NULL.
When we remove an edge, we want to reprocess the current
index, hence the ugly way we update I for each iteration. */
@@ -259,7 +258,7 @@ find_implicit_erroneous_behaviour (void)
tree op = gimple_phi_arg_def (phi, i);
next_i = i + 1;
-
+
if (!integer_zerop (op))
continue;
@@ -277,7 +276,10 @@ find_implicit_erroneous_behaviour (void)
if (gimple_bb (use_stmt) != bb)
continue;
- if (infer_nonnull_range (use_stmt, lhs))
+ if (infer_nonnull_range (use_stmt, lhs,
+ flag_isolate_erroneous_paths_dereference,
+ flag_isolate_erroneous_paths_attribute))
+
{
duplicate = isolate_path (bb, duplicate,
e, use_stmt, lhs);
@@ -294,7 +296,7 @@ find_implicit_erroneous_behaviour (void)
}
/* Look for statements which exhibit erroneous behaviour. For example
- a NULL pointer dereference.
+ a NULL pointer dereference.
When found, optimize the block containing the erroneous behaviour. */
static void
@@ -302,7 +304,7 @@ find_explicit_erroneous_behaviour (void)
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator si;
@@ -327,7 +329,9 @@ find_explicit_erroneous_behaviour (void)
/* By passing null_pointer_node, we can use infer_nonnull_range
to detect explicit NULL pointer dereferences and other uses
where a non-NULL value is required. */
- if (infer_nonnull_range (stmt, null_pointer_node))
+ if (infer_nonnull_range (stmt, null_pointer_node,
+ flag_isolate_erroneous_paths_dereference,
+ flag_isolate_erroneous_paths_attribute))
{
insert_trap_and_remove_trailing_statements (&si,
null_pointer_node);
@@ -361,7 +365,7 @@ find_explicit_erroneous_behaviour (void)
unconditional trap and eliminate the outgoing edges from the statement's
basic block. This may expose secondary optimization opportunities.
- In the latter case, we isolate the path(s) with the NULL PHI
+ In the latter case, we isolate the path(s) with the NULL PHI
feeding the dereference. We can then replace the offending statement
and eliminate the outgoing edges in the duplicate. Again, this may
expose secondary optimization opportunities.
@@ -398,7 +402,7 @@ gimple_ssa_isolate_erroneous_paths (void)
free_original_copy_tables ();
- /* We scramble the CFG and loop structures a bit, clean up
+ /* We scramble the CFG and loop structures a bit, clean up
appropriately. We really should incrementally update the
loop structures, in theory it shouldn't be that hard. */
if (cfg_altered)
@@ -416,7 +420,8 @@ gate_isolate_erroneous_paths (void)
{
/* If we do not have a suitable builtin function for the trap statement,
then do not perform the optimization. */
- return (flag_isolate_erroneous_paths != 0);
+ return (flag_isolate_erroneous_paths_dereference != 0
+ || flag_isolate_erroneous_paths_attribute != 0);
}
namespace {
diff --git a/gcc/gimple-ssa-strength-reduction.c b/gcc/gimple-ssa-strength-reduction.c
index 6dbb36c1ec0..9c704c4e203 100644
--- a/gcc/gimple-ssa-strength-reduction.c
+++ b/gcc/gimple-ssa-strength-reduction.c
@@ -438,7 +438,10 @@ static struct pointer_map_t *alt_base_map;
/* Given BASE, use the tree affine combiniation facilities to
find the underlying tree expression for BASE, with any
- immediate offset excluded. */
+ immediate offset excluded.
+
+ N.B. we should eliminate this backtracking with better forward
+ analysis in a future release. */
static tree
get_alternative_base (tree base)
@@ -566,7 +569,7 @@ find_basis_for_candidate (slsr_cand_t c)
}
}
- if (!basis && c->kind == CAND_REF)
+ if (flag_expensive_optimizations && !basis && c->kind == CAND_REF)
{
tree alt_base_expr = get_alternative_base (c->base_expr);
if (alt_base_expr)
@@ -651,7 +654,7 @@ alloc_cand_and_find_basis (enum cand_kind kind, gimple gs, tree base,
c->basis = find_basis_for_candidate (c);
record_potential_basis (c, base);
- if (kind == CAND_REF)
+ if (flag_expensive_optimizations && kind == CAND_REF)
{
tree alt_base = get_alternative_base (base);
if (alt_base)
diff --git a/gcc/gimple-streamer-in.c b/gcc/gimple-streamer-in.c
index 57b0d871a5a..bc85ae9a353 100644
--- a/gcc/gimple-streamer-in.c
+++ b/gcc/gimple-streamer-in.c
@@ -67,7 +67,7 @@ input_phi (struct lto_input_block *ib, basic_block bb, struct data_in *data_in,
int src_index = streamer_read_uhwi (ib);
bitpack_d bp = streamer_read_bitpack (ib);
location_t arg_loc = stream_input_location (&bp, data_in);
- basic_block sbb = BASIC_BLOCK_FOR_FUNCTION (fn, src_index);
+ basic_block sbb = BASIC_BLOCK_FOR_FN (fn, src_index);
edge e = NULL;
int j;
@@ -258,7 +258,7 @@ input_bb (struct lto_input_block *ib, enum LTO_tags tag,
gcc_assert (cfun == fn);
index = streamer_read_uhwi (ib);
- bb = BASIC_BLOCK_FOR_FUNCTION (fn, index);
+ bb = BASIC_BLOCK_FOR_FN (fn, index);
bb->count = apply_scale (streamer_read_gcov_count (ib),
count_materialization_scale);
diff --git a/gcc/gimple.c b/gcc/gimple.c
index e9e38785cc2..e2d49c4c420 100644
--- a/gcc/gimple.c
+++ b/gcc/gimple.c
@@ -1475,17 +1475,19 @@ gimple_set_bb (gimple stmt, basic_block bb)
uid = LABEL_DECL_UID (t);
if (uid == -1)
{
- unsigned old_len = vec_safe_length (label_to_block_map);
+ unsigned old_len =
+ vec_safe_length (label_to_block_map_for_fn (cfun));
LABEL_DECL_UID (t) = uid = cfun->cfg->last_label_uid++;
if (old_len <= (unsigned) uid)
{
unsigned new_len = 3 * uid / 2 + 1;
- vec_safe_grow_cleared (label_to_block_map, new_len);
+ vec_safe_grow_cleared (label_to_block_map_for_fn (cfun),
+ new_len);
}
}
- (*label_to_block_map)[uid] = bb;
+ (*label_to_block_map_for_fn (cfun))[uid] = bb;
}
}
@@ -2502,10 +2504,16 @@ check_loadstore (gimple stmt ATTRIBUTE_UNUSED, tree op, void *data)
return false;
}
-/* If OP can be inferred to be non-zero after STMT executes, return true. */
+/* If OP can be inferred to be non-NULL after STMT executes, return true.
+
+ DEREFERENCE is TRUE if we can use a pointer dereference to infer a
+ non-NULL range, FALSE otherwise.
+
+ ATTRIBUTE is TRUE if we can use attributes to infer a non-NULL range
+ for function arguments and return values. FALSE otherwise. */
bool
-infer_nonnull_range (gimple stmt, tree op)
+infer_nonnull_range (gimple stmt, tree op, bool dereference, bool attribute)
{
/* We can only assume that a pointer dereference will yield
non-NULL if -fdelete-null-pointer-checks is enabled. */
@@ -2514,11 +2522,13 @@ infer_nonnull_range (gimple stmt, tree op)
|| gimple_code (stmt) == GIMPLE_ASM)
return false;
- if (walk_stmt_load_store_ops (stmt, (void *)op,
- check_loadstore, check_loadstore))
+ if (dereference
+ && walk_stmt_load_store_ops (stmt, (void *)op,
+ check_loadstore, check_loadstore))
return true;
- if (is_gimple_call (stmt) && !gimple_call_internal_p (stmt))
+ if (attribute
+ && is_gimple_call (stmt) && !gimple_call_internal_p (stmt))
{
tree fntype = gimple_call_fntype (stmt);
tree attrs = TYPE_ATTRIBUTES (fntype);
@@ -2557,7 +2567,8 @@ infer_nonnull_range (gimple stmt, tree op)
/* If this function is marked as returning non-null, then we can
infer OP is non-null if it is used in the return statement. */
- if (gimple_code (stmt) == GIMPLE_RETURN
+ if (attribute
+ && gimple_code (stmt) == GIMPLE_RETURN
&& gimple_return_retval (stmt)
&& operand_equal_p (gimple_return_retval (stmt), op, 0)
&& lookup_attribute ("returns_nonnull",
diff --git a/gcc/gimple.h b/gcc/gimple.h
index a97a5e8b2d4..a49016fcc6c 100644
--- a/gcc/gimple.h
+++ b/gcc/gimple.h
@@ -1259,7 +1259,7 @@ extern bool gimple_call_builtin_p (gimple, enum built_in_function);
extern bool gimple_asm_clobbers_memory_p (const_gimple);
extern void dump_decl_set (FILE *, bitmap);
extern bool nonfreeing_call_p (gimple);
-extern bool infer_nonnull_range (gimple, tree);
+extern bool infer_nonnull_range (gimple, tree, bool, bool);
extern void sort_case_labels (vec<tree> );
extern void preprocess_case_label_vec_for_gimple (vec<tree> , tree, tree *);
extern void gimple_seq_set_location (gimple_seq , location_t);
@@ -5624,7 +5624,13 @@ gimple_expr_type (const_gimple stmt)
useless conversion involved. That means returning the
original RHS type as far as we can reconstruct it. */
if (code == GIMPLE_CALL)
- type = gimple_call_return_type (stmt);
+ {
+ if (gimple_call_internal_p (stmt)
+ && gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
+ type = TREE_TYPE (gimple_call_arg (stmt, 3));
+ else
+ type = gimple_call_return_type (stmt);
+ }
else
switch (gimple_assign_rhs_code (stmt))
{
diff --git a/gcc/gimplify.c b/gcc/gimplify.c
index 2e8c657acf7..1ca847ac759 100644
--- a/gcc/gimplify.c
+++ b/gcc/gimplify.c
@@ -2204,12 +2204,6 @@ gimplify_call_expr (tree *expr_p, gimple_seq *pre_p, bool want_value)
if (! EXPR_HAS_LOCATION (*expr_p))
SET_EXPR_LOCATION (*expr_p, input_location);
- if (fn_contains_cilk_spawn_p (cfun)
- && lang_hooks.cilkplus.cilk_detect_spawn_and_unwrap (expr_p)
- && !seen_error ())
- return (enum gimplify_status)
- lang_hooks.cilkplus.gimplify_cilk_spawn (expr_p, pre_p, NULL);
-
/* This may be a call to a builtin function.
Builtin function calls may be transformed into different
@@ -4427,12 +4421,6 @@ gimplify_modify_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
gcc_assert (TREE_CODE (*expr_p) == MODIFY_EXPR
|| TREE_CODE (*expr_p) == INIT_EXPR);
-
- if (fn_contains_cilk_spawn_p (cfun)
- && lang_hooks.cilkplus.cilk_detect_spawn_and_unwrap (expr_p)
- && !seen_error ())
- return (enum gimplify_status)
- lang_hooks.cilkplus.gimplify_cilk_spawn (expr_p, pre_p, post_p);
/* Trying to simplify a clobber using normal logic doesn't work,
so handle it here. */
@@ -5829,7 +5817,7 @@ omp_is_private (struct gimplify_omp_ctx *ctx, tree decl, bool simd)
region's REDUCTION clause. */
static bool
-omp_check_private (struct gimplify_omp_ctx *ctx, tree decl)
+omp_check_private (struct gimplify_omp_ctx *ctx, tree decl, bool copyprivate)
{
splay_tree_node n;
@@ -5838,8 +5826,11 @@ omp_check_private (struct gimplify_omp_ctx *ctx, tree decl)
ctx = ctx->outer_context;
if (ctx == NULL)
return !(is_global_var (decl)
- /* References might be private, but might be shared too. */
- || lang_hooks.decls.omp_privatize_by_reference (decl));
+ /* References might be private, but might be shared too,
+ when checking for copyprivate, assume they might be
+ private, otherwise assume they might be shared. */
+ || (!copyprivate
+ && lang_hooks.decls.omp_privatize_by_reference (decl)));
if ((ctx->region_type & (ORT_TARGET | ORT_TARGET_DATA)) != 0)
continue;
@@ -6049,12 +6040,36 @@ gimplify_scan_omp_clauses (tree *list_p, gimple_seq *pre_p,
remove = true;
break;
}
+ if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_COPYPRIVATE
+ && !remove
+ && !omp_check_private (ctx, decl, true))
+ {
+ remove = true;
+ if (is_global_var (decl))
+ {
+ if (DECL_THREAD_LOCAL_P (decl))
+ remove = false;
+ else if (DECL_HAS_VALUE_EXPR_P (decl))
+ {
+ tree value = get_base_address (DECL_VALUE_EXPR (decl));
+
+ if (value
+ && DECL_P (value)
+ && DECL_THREAD_LOCAL_P (value))
+ remove = false;
+ }
+ }
+ if (remove)
+ error_at (OMP_CLAUSE_LOCATION (c),
+ "copyprivate variable %qE is not threadprivate"
+ " or private in outer context", DECL_NAME (decl));
+ }
do_notice:
if (outer_ctx)
omp_notice_variable (outer_ctx, decl, true);
if (check_non_private
&& region_type == ORT_WORKSHARE
- && omp_check_private (ctx, decl))
+ && omp_check_private (ctx, decl, false))
{
error ("%s variable %qE is private in outer context",
check_non_private, DECL_NAME (decl));
@@ -7383,19 +7398,6 @@ gimplify_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
}
break;
- case CILK_SPAWN_STMT:
- gcc_assert
- (fn_contains_cilk_spawn_p (cfun)
- && lang_hooks.cilkplus.cilk_detect_spawn_and_unwrap (expr_p));
- if (!seen_error ())
- {
- ret = (enum gimplify_status)
- lang_hooks.cilkplus.gimplify_cilk_spawn (expr_p, pre_p,
- post_p);
- break;
- }
- /* If errors are seen, then just process it as a CALL_EXPR. */
-
case CALL_EXPR:
ret = gimplify_call_expr (expr_p, pre_p, fallback != fb_none);
diff --git a/gcc/go/ChangeLog b/gcc/go/ChangeLog
index a7dafac8e74..db0212cd37d 100644
--- a/gcc/go/ChangeLog
+++ b/gcc/go/ChangeLog
@@ -1,3 +1,22 @@
+2013-12-11 Ian Lance Taylor <iant@google.com>
+
+ * go-lang.c (go_langhook_post_options): Disable sibling calls by
+ default.
+
+2013-12-10 Ian Lance Taylor <iant@google.com>
+
+ * Make-lang.in (check_go_parallelize): Test go-test.exp r* tests
+ separately.
+
+2013-12-05 Ian Lance Taylor <iant@google.com>
+
+ Revert this change; no longer required.
+ 2013-11-06 Ian Lance Taylor <iant@google.com>
+
+ * go-lang.c (go_langhook_post_options): If
+ -fisolate-erroneous-paths was turned on by an optimization option,
+ turn it off.
+
2013-11-23 Ian Lance Taylor <iant@google.com>
* go-gcc.cc (Gcc_backend::function_type): Add result_struct
diff --git a/gcc/go/Make-lang.in b/gcc/go/Make-lang.in
index e4bceb21c08..dbb71f601a3 100644
--- a/gcc/go/Make-lang.in
+++ b/gcc/go/Make-lang.in
@@ -132,9 +132,10 @@ go.srcman: doc/gccgo.1
lang_checks += check-go
lang_checks_parallelized += check-go
-check_go_parallelize = go-test.exp=*/test/\[0-57-9a-bd-hj-zA-Z\]* \
+check_go_parallelize = go-test.exp=*/test/\[0-57-9a-bd-hj-qs-zA-Z\]* \
go-test.exp=*/test/c* \
go-test.exp=*/test/i* \
+ go-test.exp=*/test/r* \
go-test.exp=*/test/6*
# Install hooks.
diff --git a/gcc/go/go-lang.c b/gcc/go/go-lang.c
index 580b1b802dc..ae133f7ed2e 100644
--- a/gcc/go/go-lang.c
+++ b/gcc/go/go-lang.c
@@ -270,11 +270,9 @@ go_langhook_post_options (const char **pfilename ATTRIBUTE_UNUSED)
if (flag_excess_precision_cmdline == EXCESS_PRECISION_DEFAULT)
flag_excess_precision_cmdline = EXCESS_PRECISION_STANDARD;
- /* The isolate_erroneous_paths optimization can change a nil
- dereference from a panic to a trap, so we have to disable it for
- Go, even though it is normally enabled by -O2. */
- if (!global_options_set.x_flag_isolate_erroneous_paths)
- global_options.x_flag_isolate_erroneous_paths = 0;
+ /* Tail call optimizations can confuse uses of runtime.Callers. */
+ if (!global_options_set.x_flag_optimize_sibling_calls)
+ global_options.x_flag_optimize_sibling_calls = 0;
/* Returning false means that the backend should be used. */
return false;
diff --git a/gcc/go/gofrontend/expressions.cc b/gcc/go/gofrontend/expressions.cc
index 4f9368ed255..35bcdbb5145 100644
--- a/gcc/go/gofrontend/expressions.cc
+++ b/gcc/go/gofrontend/expressions.cc
@@ -7310,7 +7310,11 @@ Builtin_call_expression::do_lower(Gogo* gogo, Named_object* function,
Type* slice_type = args->front()->type();
if (!slice_type->is_slice_type())
{
- error_at(args->front()->location(), "argument 1 must be a slice");
+ if (slice_type->is_nil_type())
+ error_at(args->front()->location(), "use of untyped nil");
+ else
+ error_at(args->front()->location(),
+ "argument 1 must be a slice");
this->set_is_error();
return this;
}
@@ -8008,7 +8012,10 @@ Builtin_call_expression::do_type()
const Expression_list* args = this->args();
if (args == NULL || args->empty())
return Type::make_error_type();
- return args->front()->type();
+ Type *ret = args->front()->type();
+ if (!ret->is_slice_type())
+ return Type::make_error_type();
+ return ret;
}
case BUILTIN_REAL:
@@ -10252,6 +10259,14 @@ Index_expression::do_lower(Gogo*, Named_object*, Statement_inserter*, int)
{
Expression* deref = Expression::make_unary(OPERATOR_MULT, left,
location);
+
+ // For an ordinary index into the array, the pointer will be
+ // dereferenced. For a slice it will not--the resulting slice
+ // will simply reuse the pointer, which is incorrect if that
+ // pointer is nil.
+ if (end != NULL || cap != NULL)
+ deref->issue_nil_check();
+
return Expression::make_array_index(deref, start, end, cap, location);
}
else if (type->is_string_type())
diff --git a/gcc/go/gofrontend/gogo.cc b/gcc/go/gofrontend/gogo.cc
index eebb75377fa..045763c7bee 100644
--- a/gcc/go/gofrontend/gogo.cc
+++ b/gcc/go/gofrontend/gogo.cc
@@ -440,6 +440,9 @@ Gogo::import_package(const std::string& filename,
return;
}
+ if (local_name == "init")
+ error_at(location, "cannot import package as init");
+
if (filename == "unsafe")
{
this->import_unsafe(local_name, is_local_name_exported, location);
@@ -2822,7 +2825,10 @@ Build_recover_thunks::function(Named_object* orig_no)
if (orig_fntype->is_varargs())
new_fntype->set_is_varargs();
- std::string name = orig_no->name() + "$recover";
+ std::string name = orig_no->name();
+ if (orig_fntype->is_method())
+ name += "$" + orig_fntype->receiver()->type()->mangled_name(gogo);
+ name += "$recover";
Named_object *new_no = gogo->start_function(name, new_fntype, false,
location);
Function *new_func = new_no->func_value();
@@ -2916,7 +2922,25 @@ Build_recover_thunks::function(Named_object* orig_no)
&& !orig_rec_no->var_value()->is_receiver());
orig_rec_no->var_value()->set_is_receiver();
- const std::string& new_receiver_name(orig_fntype->receiver()->name());
+ std::string new_receiver_name(orig_fntype->receiver()->name());
+ if (new_receiver_name.empty())
+ {
+ // Find the receiver. It was named "r.NNN" in
+ // Gogo::start_function.
+ for (Bindings::const_definitions_iterator p =
+ new_bindings->begin_definitions();
+ p != new_bindings->end_definitions();
+ ++p)
+ {
+ const std::string& pname((*p)->name());
+ if (pname[0] == 'r' && pname[1] == '.')
+ {
+ new_receiver_name = pname;
+ break;
+ }
+ }
+ go_assert(!new_receiver_name.empty());
+ }
Named_object* new_rec_no = new_bindings->lookup_local(new_receiver_name);
if (new_rec_no == NULL)
go_assert(saw_errors());
diff --git a/gcc/go/gofrontend/parse.cc b/gcc/go/gofrontend/parse.cc
index 6e56f835699..7614e6fc795 100644
--- a/gcc/go/gofrontend/parse.cc
+++ b/gcc/go/gofrontend/parse.cc
@@ -4287,6 +4287,16 @@ Parse::if_stat()
cond = this->expression(PRECEDENCE_NORMAL, false, false, NULL, NULL);
}
+ // Check for the easy error of a newline before starting the block.
+ if (this->peek_token()->is_op(OPERATOR_SEMICOLON))
+ {
+ Location semi_loc = this->location();
+ if (this->advance_token()->is_op(OPERATOR_LCURLY))
+ error_at(semi_loc, "missing %<{%> after if clause");
+ // Otherwise we will get an error when we call this->block
+ // below.
+ }
+
this->gogo_->start_block(this->location());
Location end_loc = this->block();
Block* then_block = this->gogo_->finish_block(end_loc);
@@ -4431,7 +4441,7 @@ Parse::switch_stat(Label* label)
Location token_loc = this->location();
if (this->peek_token()->is_op(OPERATOR_SEMICOLON)
&& this->advance_token()->is_op(OPERATOR_LCURLY))
- error_at(token_loc, "unexpected semicolon or newline before %<{%>");
+ error_at(token_loc, "missing %<{%> after switch clause");
else if (this->peek_token()->is_op(OPERATOR_COLONEQ))
{
error_at(token_loc, "invalid variable name");
@@ -5158,6 +5168,16 @@ Parse::for_stat(Label* label)
}
}
+ // Check for the easy error of a newline before starting the block.
+ if (this->peek_token()->is_op(OPERATOR_SEMICOLON))
+ {
+ Location semi_loc = this->location();
+ if (this->advance_token()->is_op(OPERATOR_LCURLY))
+ error_at(semi_loc, "missing %<{%> after for clause");
+ // Otherwise we will get an error when we call this->block
+ // below.
+ }
+
// Build the For_statement and note that it is the current target
// for break and continue statements.
@@ -5224,8 +5244,7 @@ Parse::for_clause(Expression** cond, Block** post)
*cond = NULL;
else if (this->peek_token()->is_op(OPERATOR_LCURLY))
{
- error_at(this->location(),
- "unexpected semicolon or newline before %<{%>");
+ error_at(this->location(), "missing %<{%> after for clause");
*cond = NULL;
*post = NULL;
return;
diff --git a/gcc/go/gofrontend/types.cc b/gcc/go/gofrontend/types.cc
index 8c5d038a0b1..d079565d18b 100644
--- a/gcc/go/gofrontend/types.cc
+++ b/gcc/go/gofrontend/types.cc
@@ -575,9 +575,6 @@ Type::are_compatible_for_comparison(bool is_equality_op, const Type *t1,
p != fields->end();
++p)
{
- if (Gogo::is_sink_name(p->field_name()))
- continue;
-
if (!p->type()->is_comparable())
{
if (reason != NULL)
@@ -2261,26 +2258,9 @@ Type::method_constructor(Gogo*, Type* method_type,
++p;
go_assert(p->is_field_name("typ"));
- if (!only_value_methods && m->is_value_method())
- {
- // This is a value method on a pointer type. Change the type of
- // the method to use a pointer receiver. The implementation
- // always uses a pointer receiver anyhow.
- Type* rtype = mtype->receiver()->type();
- Type* prtype = Type::make_pointer_type(rtype);
- Typed_identifier* receiver =
- new Typed_identifier(mtype->receiver()->name(), prtype,
- mtype->receiver()->location());
- mtype = Type::make_function_type(receiver,
- (mtype->parameters() == NULL
- ? NULL
- : mtype->parameters()->copy()),
- (mtype->results() == NULL
- ? NULL
- : mtype->results()->copy()),
- mtype->location());
- }
- vals->push_back(Expression::make_type_descriptor(mtype, bloc));
+ bool want_pointer_receiver = !only_value_methods && m->is_value_method();
+ nonmethod_type = mtype->copy_with_receiver_as_param(want_pointer_receiver);
+ vals->push_back(Expression::make_type_descriptor(nonmethod_type, bloc));
++p;
go_assert(p->is_field_name("tfn"));
@@ -4008,6 +3988,32 @@ Function_type::copy_with_receiver(Type* receiver_type) const
return ret;
}
+// Make a copy of a function type with the receiver as the first
+// parameter.
+
+Function_type*
+Function_type::copy_with_receiver_as_param(bool want_pointer_receiver) const
+{
+ go_assert(this->is_method());
+ Typed_identifier_list* new_params = new Typed_identifier_list();
+ Type* rtype = this->receiver_->type();
+ if (want_pointer_receiver)
+ rtype = Type::make_pointer_type(rtype);
+ Typed_identifier receiver(this->receiver_->name(), rtype,
+ this->receiver_->location());
+ new_params->push_back(receiver);
+ const Typed_identifier_list* orig_params = this->parameters_;
+ if (orig_params != NULL && !orig_params->empty())
+ {
+ for (Typed_identifier_list::const_iterator p = orig_params->begin();
+ p != orig_params->end();
+ ++p)
+ new_params->push_back(*p);
+ }
+ return Type::make_function_type(NULL, new_params, this->results_,
+ this->location_);
+}
+
// Make a copy of a function type ignoring any receiver and adding a
// closure parameter.
diff --git a/gcc/go/gofrontend/types.h b/gcc/go/gofrontend/types.h
index 1bd8ce6cf8f..9f965916131 100644
--- a/gcc/go/gofrontend/types.h
+++ b/gcc/go/gofrontend/types.h
@@ -1797,6 +1797,12 @@ class Function_type : public Type
Function_type*
copy_with_receiver(Type*) const;
+ // Return a copy of this type with the receiver treated as the first
+ // parameter. If WANT_POINTER_RECEIVER is true, the receiver is
+ // forced to be a pointer.
+ Function_type*
+ copy_with_receiver_as_param(bool want_pointer_receiver) const;
+
// Return a copy of this type ignoring any receiver and using dummy
// names for all parameters. This is used for thunks for method
// values.
diff --git a/gcc/graph.c b/gcc/graph.c
index b75135af742..545de44a6a9 100644
--- a/gcc/graph.c
+++ b/gcc/graph.c
@@ -157,14 +157,14 @@ draw_cfg_nodes_no_loops (pretty_printer *pp, struct function *fun)
int i, n;
sbitmap visited;
- visited = sbitmap_alloc (last_basic_block);
+ visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
bitmap_clear (visited);
n = pre_and_rev_post_order_compute_fn (fun, NULL, rpo, true);
for (i = n_basic_blocks_for_fn (fun) - n;
i < n_basic_blocks_for_fn (fun); i++)
{
- basic_block bb = BASIC_BLOCK (rpo[i]);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
draw_cfg_node (pp, fun->funcdef_no, bb);
bitmap_set_bit (visited, bb->index);
}
@@ -255,7 +255,7 @@ draw_cfg_edges (pretty_printer *pp, struct function *fun)
{
basic_block bb;
mark_dfs_back_edges ();
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
draw_cfg_node_succ_edges (pp, fun->funcdef_no, bb);
/* Add an invisible edge from ENTRY to EXIT, to improve the graph layout. */
diff --git a/gcc/graphite-clast-to-gimple.h b/gcc/graphite-clast-to-gimple.h
index 78e60e2dafa..e3db1e89132 100644
--- a/gcc/graphite-clast-to-gimple.h
+++ b/gcc/graphite-clast-to-gimple.h
@@ -25,18 +25,18 @@ extern CloogState *cloog_state;
/* Data structure for CLooG program representation. */
-typedef struct cloog_prog_clast {
+struct cloog_prog_clast {
CloogProgram *prog;
struct clast_stmt *stmt;
-} cloog_prog_clast;
+};
/* Stores BB's related PBB. */
-typedef struct bb_pbb_def
+struct bb_pbb_def
{
basic_block bb;
poly_bb_p pbb;
-} bb_pbb_def;
+};
extern void debug_clast_stmt (struct clast_stmt *);
extern void print_clast_stmt (FILE *, struct clast_stmt *);
diff --git a/gcc/graphite-scop-detection.c b/gcc/graphite-scop-detection.c
index a8db98d2706..fea15e55abe 100644
--- a/gcc/graphite-scop-detection.c
+++ b/gcc/graphite-scop-detection.c
@@ -1114,7 +1114,7 @@ print_graphite_scop_statistics (FILE* file, scop_p scop)
basic_block bb;
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
{
gimple_stmt_iterator psi;
loop_p loop = bb->loop_father;
@@ -1450,7 +1450,7 @@ dot_all_scops_1 (FILE *file, vec<scop_p> scops)
fprintf (file, "digraph all {\n");
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
{
int part_of_scop = false;
@@ -1557,7 +1557,7 @@ dot_all_scops_1 (FILE *file, vec<scop_p> scops)
fprintf (file, " </TABLE>>, shape=box, style=\"setlinewidth(0)\"]\n");
}
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
{
FOR_EACH_EDGE (e, ei, bb->succs)
fprintf (file, "%d -> %d;\n", bb->index, e->dest->index);
diff --git a/gcc/graphite-sese-to-poly.c b/gcc/graphite-sese-to-poly.c
index 532af1f0771..fe5cd9951df 100644
--- a/gcc/graphite-sese-to-poly.c
+++ b/gcc/graphite-sese-to-poly.c
@@ -422,7 +422,7 @@ build_scop_bbs_1 (scop_p scop, sbitmap visited, basic_block bb)
static void
build_scop_bbs (scop_p scop)
{
- sbitmap visited = sbitmap_alloc (last_basic_block);
+ sbitmap visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
sese region = SCOP_REGION (scop);
bitmap_clear (visited);
@@ -2294,7 +2294,7 @@ rewrite_reductions_out_of_ssa (scop_p scop)
gimple_stmt_iterator psi;
sese region = SCOP_REGION (scop);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (bb_in_sese_p (bb, region))
for (psi = gsi_start_phis (bb); !gsi_end_p (psi);)
{
@@ -2488,7 +2488,7 @@ rewrite_cross_bb_scalar_deps_out_of_ssa (scop_p scop)
/* Create an extra empty BB after the scop. */
split_edge (SESE_EXIT (region));
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (bb_in_sese_p (bb, region))
for (psi = gsi_start_bb (bb); !gsi_end_p (psi); gsi_next (&psi))
changed |= rewrite_cross_bb_scalar_deps (scop, &psi);
diff --git a/gcc/graphite-sese-to-poly.h b/gcc/graphite-sese-to-poly.h
index 055ca825aef..008e86b9bd8 100644
--- a/gcc/graphite-sese-to-poly.h
+++ b/gcc/graphite-sese-to-poly.h
@@ -21,7 +21,6 @@ along with GCC; see the file COPYING3. If not see
#ifndef GCC_GRAPHITE_SESE_TO_POLY_H
#define GCC_GRAPHITE_SESE_TO_POLY_H
-typedef struct base_alias_pair base_alias_pair;
struct base_alias_pair
{
int base_obj_set;
diff --git a/gcc/graphite.c b/gcc/graphite.c
index e46710ca939..8af040257db 100644
--- a/gcc/graphite.c
+++ b/gcc/graphite.c
@@ -94,7 +94,7 @@ print_global_statistics (FILE* file)
basic_block bb;
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
{
gimple_stmt_iterator psi;
@@ -150,7 +150,7 @@ print_graphite_scop_statistics (FILE* file, scop_p scop)
basic_block bb;
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
{
gimple_stmt_iterator psi;
loop_p loop = bb->loop_father;
@@ -245,7 +245,7 @@ graphite_finalize (bool need_cfg_cleanup_p)
{
scev_reset ();
cleanup_tree_cfg ();
- profile_status = PROFILE_ABSENT;
+ profile_status_for_fn (cfun) = PROFILE_ABSENT;
release_recorded_exits ();
tree_estimate_probability ();
}
diff --git a/gcc/haifa-sched.c b/gcc/haifa-sched.c
index 8d47eb93d53..4f3b05477ce 100644
--- a/gcc/haifa-sched.c
+++ b/gcc/haifa-sched.c
@@ -6709,7 +6709,7 @@ haifa_sched_init (void)
sched_init_bbs ();
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
bbs.quick_push (bb);
sched_init_luids (bbs);
sched_deps_init (true);
@@ -8075,7 +8075,7 @@ unlink_bb_notes (basic_block first, basic_block last)
if (first == last)
return;
- bb_header = XNEWVEC (rtx, last_basic_block);
+ bb_header = XNEWVEC (rtx, last_basic_block_for_fn (cfun));
/* Make a sentinel. */
if (last->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
diff --git a/gcc/hard-reg-set.h b/gcc/hard-reg-set.h
index 09a09c550a6..ad987f9b354 100644
--- a/gcc/hard-reg-set.h
+++ b/gcc/hard-reg-set.h
@@ -488,7 +488,7 @@ hard_reg_set_empty_p (const HARD_REG_SET x)
/* Iterator for hard register sets. */
-typedef struct
+struct hard_reg_set_iterator
{
/* Pointer to the current element. */
HARD_REG_ELT_TYPE *pelt;
@@ -503,7 +503,7 @@ typedef struct
it is shifted right, so that the actual bit is always the least
significant bit of ACTUAL. */
HARD_REG_ELT_TYPE bits;
-} hard_reg_set_iterator;
+};
#define HARD_REG_ELT_BITS UHOST_BITS_PER_WIDE_INT
diff --git a/gcc/hw-doloop.c b/gcc/hw-doloop.c
index 77c8149f806..b6184a26d87 100644
--- a/gcc/hw-doloop.c
+++ b/gcc/hw-doloop.c
@@ -357,7 +357,7 @@ discover_loops (bitmap_obstack *loop_stack, struct hw_doloop_hooks *hooks)
/* Find all the possible loop tails. This means searching for every
loop_end instruction. For each one found, create a hwloop_info
structure and add the head block to the work list. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx tail = BB_END (bb);
rtx insn, reg;
@@ -480,7 +480,7 @@ set_bb_indices (void)
intptr_t index;
index = 0;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
bb->aux = (void *) index++;
}
@@ -537,7 +537,7 @@ reorder_loops (hwloop_info loops)
loops = loops->next;
}
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
bb->aux = bb->next_bb;
diff --git a/gcc/ifcvt.c b/gcc/ifcvt.c
index ac0276cea04..569b9bf46c4 100644
--- a/gcc/ifcvt.c
+++ b/gcc/ifcvt.c
@@ -91,16 +91,16 @@ static rtx last_active_insn (basic_block, int);
static rtx find_active_insn_before (basic_block, rtx);
static rtx find_active_insn_after (basic_block, rtx);
static basic_block block_fallthru (basic_block);
-static int cond_exec_process_insns (ce_if_block_t *, rtx, rtx, rtx, int, int);
+static int cond_exec_process_insns (ce_if_block *, rtx, rtx, rtx, int, int);
static rtx cond_exec_get_condition (rtx);
static rtx noce_get_condition (rtx, rtx *, bool);
static int noce_operand_ok (const_rtx);
-static void merge_if_block (ce_if_block_t *);
+static void merge_if_block (ce_if_block *);
static int find_cond_trap (basic_block, edge, edge);
static basic_block find_if_header (basic_block, int);
static int block_jumps_and_fallthru_p (basic_block, basic_block);
static int noce_find_if_block (basic_block, edge, edge, int);
-static int cond_exec_find_if_block (ce_if_block_t *);
+static int cond_exec_find_if_block (ce_if_block *);
static int find_if_case_1 (basic_block, edge, edge);
static int find_if_case_2 (basic_block, edge, edge);
static int dead_or_predicable (basic_block, basic_block, basic_block,
@@ -312,7 +312,7 @@ block_fallthru (basic_block bb)
insns were processed. */
static int
-cond_exec_process_insns (ce_if_block_t *ce_info ATTRIBUTE_UNUSED,
+cond_exec_process_insns (ce_if_block *ce_info ATTRIBUTE_UNUSED,
/* if block information */rtx start,
/* first insn to look at */rtx end,
/* last insn to look at */rtx test,
@@ -434,7 +434,7 @@ cond_exec_get_condition (rtx jump)
converting the block. */
static int
-cond_exec_process_if_block (ce_if_block_t * ce_info,
+cond_exec_process_if_block (ce_if_block * ce_info,
/* if block information */int do_multiple_p)
{
basic_block test_bb = ce_info->test_bb; /* last test block */
@@ -3232,7 +3232,7 @@ merge_if_block (struct ce_if_block * ce_info)
static basic_block
find_if_header (basic_block test_bb, int pass)
{
- ce_if_block_t ce_info;
+ ce_if_block ce_info;
edge then_edge;
edge else_edge;
@@ -4408,7 +4408,7 @@ if_convert (bool after_combine)
fprintf (dump_file, "\n\n========== Pass %d ==========\n", pass);
#endif
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
basic_block new_bb;
while (!df_get_bb_dirty (bb)
diff --git a/gcc/init-regs.c b/gcc/init-regs.c
index 2a15b3e0a6f..d26ee9bd71e 100644
--- a/gcc/init-regs.c
+++ b/gcc/init-regs.c
@@ -59,7 +59,7 @@ initialize_uninitialized_regs (void)
df_analyze ();
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn;
bitmap lr = DF_LR_IN (bb);
diff --git a/gcc/internal-fn.c b/gcc/internal-fn.c
index 9ae917ba0ac..ad9c9475265 100644
--- a/gcc/internal-fn.c
+++ b/gcc/internal-fn.c
@@ -31,6 +31,9 @@ along with GCC; see the file COPYING3. If not see
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
+#include "ubsan.h"
+#include "target.h"
+#include "predict.h"
/* The names of each internal function, indexed by function number. */
const char *const internal_fn_name_array[] = {
@@ -153,6 +156,365 @@ expand_UBSAN_NULL (gimple stmt ATTRIBUTE_UNUSED)
gcc_unreachable ();
}
+/* Add sub/add overflow checking to the statement STMT.
+ CODE says whether the operation is +, or -. */
+
+void
+ubsan_expand_si_overflow_addsub_check (tree_code code, gimple stmt)
+{
+ rtx res, op0, op1;
+ tree lhs, fn, arg0, arg1;
+ rtx done_label, do_error, target = NULL_RTX;
+
+ lhs = gimple_call_lhs (stmt);
+ arg0 = gimple_call_arg (stmt, 0);
+ arg1 = gimple_call_arg (stmt, 1);
+ done_label = gen_label_rtx ();
+ do_error = gen_label_rtx ();
+ do_pending_stack_adjust ();
+ op0 = expand_normal (arg0);
+ op1 = expand_normal (arg1);
+
+ enum machine_mode mode = TYPE_MODE (TREE_TYPE (arg0));
+ if (lhs)
+ target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
+
+ enum insn_code icode
+ = optab_handler (code == PLUS_EXPR ? addv4_optab : subv4_optab, mode);
+ if (icode != CODE_FOR_nothing)
+ {
+ struct expand_operand ops[4];
+ rtx last = get_last_insn ();
+
+ res = gen_reg_rtx (mode);
+ create_output_operand (&ops[0], res, mode);
+ create_input_operand (&ops[1], op0, mode);
+ create_input_operand (&ops[2], op1, mode);
+ create_fixed_operand (&ops[3], do_error);
+ if (maybe_expand_insn (icode, 4, ops))
+ {
+ last = get_last_insn ();
+ if (profile_status_for_fn (cfun) != PROFILE_ABSENT
+ && JUMP_P (last)
+ && any_condjump_p (last)
+ && !find_reg_note (last, REG_BR_PROB, 0))
+ add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
+ emit_jump (done_label);
+ }
+ else
+ {
+ delete_insns_since (last);
+ icode = CODE_FOR_nothing;
+ }
+ }
+
+ if (icode == CODE_FOR_nothing)
+ {
+ rtx sub_check = gen_label_rtx ();
+
+ /* Compute the operation. On RTL level, the addition is always
+ unsigned. */
+ res = expand_binop (mode, add_optab, op0, op1,
+ NULL_RTX, false, OPTAB_LIB_WIDEN);
+
+ /* If the op1 is negative, we have to use a different check. */
+ emit_cmp_and_jump_insns (op1, const0_rtx, LT, NULL_RTX, mode,
+ false, sub_check, PROB_EVEN);
+
+ /* Compare the result of the addition with one of the operands. */
+ emit_cmp_and_jump_insns (res, op0, code == PLUS_EXPR ? GE : LE,
+ NULL_RTX, mode, false, done_label,
+ PROB_VERY_LIKELY);
+ /* If we get here, we have to print the error. */
+ emit_jump (do_error);
+
+ emit_label (sub_check);
+ /* We have k = a + b for b < 0 here. k <= a must hold. */
+ emit_cmp_and_jump_insns (res, op0, code == PLUS_EXPR ? LE : GE,
+ NULL_RTX, mode, false, done_label,
+ PROB_VERY_LIKELY);
+ }
+
+ emit_label (do_error);
+ /* Expand the ubsan builtin call. */
+ push_temp_slots ();
+ fn = ubsan_build_overflow_builtin (code, gimple_location (stmt),
+ TREE_TYPE (arg0), arg0, arg1);
+ expand_normal (fn);
+ pop_temp_slots ();
+ do_pending_stack_adjust ();
+
+ /* We're done. */
+ emit_label (done_label);
+
+ if (lhs)
+ emit_move_insn (target, res);
+}
+
+/* Add negate overflow checking to the statement STMT. */
+
+void
+ubsan_expand_si_overflow_neg_check (gimple stmt)
+{
+ rtx res, op1;
+ tree lhs, fn, arg1;
+ rtx done_label, do_error, target = NULL_RTX;
+
+ lhs = gimple_call_lhs (stmt);
+ arg1 = gimple_call_arg (stmt, 1);
+ done_label = gen_label_rtx ();
+ do_error = gen_label_rtx ();
+
+ do_pending_stack_adjust ();
+ op1 = expand_normal (arg1);
+
+ enum machine_mode mode = TYPE_MODE (TREE_TYPE (arg1));
+ if (lhs)
+ target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
+
+ enum insn_code icode = optab_handler (negv3_optab, mode);
+ if (icode != CODE_FOR_nothing)
+ {
+ struct expand_operand ops[3];
+ rtx last = get_last_insn ();
+
+ res = gen_reg_rtx (mode);
+ create_output_operand (&ops[0], res, mode);
+ create_input_operand (&ops[1], op1, mode);
+ create_fixed_operand (&ops[2], do_error);
+ if (maybe_expand_insn (icode, 3, ops))
+ {
+ last = get_last_insn ();
+ if (profile_status_for_fn (cfun) != PROFILE_ABSENT
+ && JUMP_P (last)
+ && any_condjump_p (last)
+ && !find_reg_note (last, REG_BR_PROB, 0))
+ add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
+ emit_jump (done_label);
+ }
+ else
+ {
+ delete_insns_since (last);
+ icode = CODE_FOR_nothing;
+ }
+ }
+
+ if (icode == CODE_FOR_nothing)
+ {
+ /* Compute the operation. On RTL level, the addition is always
+ unsigned. */
+ res = expand_unop (mode, neg_optab, op1, NULL_RTX, false);
+
+ /* Compare the operand with the most negative value. */
+ rtx minv = expand_normal (TYPE_MIN_VALUE (TREE_TYPE (arg1)));
+ emit_cmp_and_jump_insns (op1, minv, NE, NULL_RTX, mode, false,
+ done_label, PROB_VERY_LIKELY);
+ }
+
+ emit_label (do_error);
+ /* Expand the ubsan builtin call. */
+ push_temp_slots ();
+ fn = ubsan_build_overflow_builtin (NEGATE_EXPR, gimple_location (stmt),
+ TREE_TYPE (arg1), arg1, NULL_TREE);
+ expand_normal (fn);
+ pop_temp_slots ();
+ do_pending_stack_adjust ();
+
+ /* We're done. */
+ emit_label (done_label);
+
+ if (lhs)
+ emit_move_insn (target, res);
+}
+
+/* Add mul overflow checking to the statement STMT. */
+
+void
+ubsan_expand_si_overflow_mul_check (gimple stmt)
+{
+ rtx res, op0, op1;
+ tree lhs, fn, arg0, arg1;
+ rtx done_label, do_error, target = NULL_RTX;
+
+ lhs = gimple_call_lhs (stmt);
+ arg0 = gimple_call_arg (stmt, 0);
+ arg1 = gimple_call_arg (stmt, 1);
+ done_label = gen_label_rtx ();
+ do_error = gen_label_rtx ();
+
+ do_pending_stack_adjust ();
+ op0 = expand_normal (arg0);
+ op1 = expand_normal (arg1);
+
+ enum machine_mode mode = TYPE_MODE (TREE_TYPE (arg0));
+ if (lhs)
+ target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
+
+ enum insn_code icode = optab_handler (mulv4_optab, mode);
+ if (icode != CODE_FOR_nothing)
+ {
+ struct expand_operand ops[4];
+ rtx last = get_last_insn ();
+
+ res = gen_reg_rtx (mode);
+ create_output_operand (&ops[0], res, mode);
+ create_input_operand (&ops[1], op0, mode);
+ create_input_operand (&ops[2], op1, mode);
+ create_fixed_operand (&ops[3], do_error);
+ if (maybe_expand_insn (icode, 4, ops))
+ {
+ last = get_last_insn ();
+ if (profile_status_for_fn (cfun) != PROFILE_ABSENT
+ && JUMP_P (last)
+ && any_condjump_p (last)
+ && !find_reg_note (last, REG_BR_PROB, 0))
+ add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
+ emit_jump (done_label);
+ }
+ else
+ {
+ delete_insns_since (last);
+ icode = CODE_FOR_nothing;
+ }
+ }
+
+ if (icode == CODE_FOR_nothing)
+ {
+ struct separate_ops ops;
+ ops.op0 = arg0;
+ ops.op1 = arg1;
+ ops.op2 = NULL_TREE;
+ ops.location = gimple_location (stmt);
+ if (GET_MODE_2XWIDER_MODE (mode) != VOIDmode
+ && targetm.scalar_mode_supported_p (GET_MODE_2XWIDER_MODE (mode)))
+ {
+ enum machine_mode wmode = GET_MODE_2XWIDER_MODE (mode);
+ ops.code = WIDEN_MULT_EXPR;
+ ops.type
+ = build_nonstandard_integer_type (GET_MODE_PRECISION (wmode), 0);
+
+ res = expand_expr_real_2 (&ops, NULL_RTX, wmode, EXPAND_NORMAL);
+ rtx hipart = expand_shift (RSHIFT_EXPR, wmode, res,
+ GET_MODE_PRECISION (mode), NULL_RTX, 0);
+ hipart = gen_lowpart (mode, hipart);
+ res = gen_lowpart (mode, res);
+ rtx signbit = expand_shift (RSHIFT_EXPR, mode, res,
+ GET_MODE_PRECISION (mode) - 1,
+ NULL_RTX, 0);
+ /* RES is low half of the double width result, HIPART
+ the high half. There was overflow if
+ HIPART is different from RES < 0 ? -1 : 0. */
+ emit_cmp_and_jump_insns (signbit, hipart, EQ, NULL_RTX, mode,
+ false, done_label, PROB_VERY_LIKELY);
+ }
+ else
+ {
+ /* For now we don't instrument this. See __mulvDI3 in libgcc2.c
+ for what could be done. */
+ ops.code = MULT_EXPR;
+ ops.type = TREE_TYPE (arg0);
+ res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
+ emit_jump (done_label);
+ }
+ }
+
+ emit_label (do_error);
+ /* Expand the ubsan builtin call. */
+ push_temp_slots ();
+ fn = ubsan_build_overflow_builtin (MULT_EXPR, gimple_location (stmt),
+ TREE_TYPE (arg0), arg0, arg1);
+ expand_normal (fn);
+ pop_temp_slots ();
+ do_pending_stack_adjust ();
+
+ /* We're done. */
+ emit_label (done_label);
+
+ if (lhs)
+ emit_move_insn (target, res);
+}
+
+/* Expand UBSAN_CHECK_ADD call STMT. */
+
+static void
+expand_UBSAN_CHECK_ADD (gimple stmt)
+{
+ ubsan_expand_si_overflow_addsub_check (PLUS_EXPR, stmt);
+}
+
+/* Expand UBSAN_CHECK_SUB call STMT. */
+
+static void
+expand_UBSAN_CHECK_SUB (gimple stmt)
+{
+ if (integer_zerop (gimple_call_arg (stmt, 0)))
+ ubsan_expand_si_overflow_neg_check (stmt);
+ else
+ ubsan_expand_si_overflow_addsub_check (MINUS_EXPR, stmt);
+}
+
+/* Expand UBSAN_CHECK_MUL call STMT. */
+
+static void
+expand_UBSAN_CHECK_MUL (gimple stmt)
+{
+ ubsan_expand_si_overflow_mul_check (stmt);
+}
+
+/* This should get folded in tree-vectorizer.c. */
+
+static void
+expand_LOOP_VECTORIZED (gimple stmt ATTRIBUTE_UNUSED)
+{
+ gcc_unreachable ();
+}
+
+static void
+expand_MASK_LOAD (gimple stmt)
+{
+ struct expand_operand ops[3];
+ tree type, lhs, rhs, maskt;
+ rtx mem, target, mask;
+
+ maskt = gimple_call_arg (stmt, 2);
+ lhs = gimple_call_lhs (stmt);
+ type = TREE_TYPE (lhs);
+ rhs = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0),
+ gimple_call_arg (stmt, 1));
+
+ mem = expand_expr (rhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
+ gcc_assert (MEM_P (mem));
+ mask = expand_normal (maskt);
+ target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
+ create_output_operand (&ops[0], target, TYPE_MODE (type));
+ create_fixed_operand (&ops[1], mem);
+ create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
+ expand_insn (optab_handler (maskload_optab, TYPE_MODE (type)), 3, ops);
+}
+
+static void
+expand_MASK_STORE (gimple stmt)
+{
+ struct expand_operand ops[3];
+ tree type, lhs, rhs, maskt;
+ rtx mem, reg, mask;
+
+ maskt = gimple_call_arg (stmt, 2);
+ rhs = gimple_call_arg (stmt, 3);
+ type = TREE_TYPE (rhs);
+ lhs = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0),
+ gimple_call_arg (stmt, 1));
+
+ mem = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
+ gcc_assert (MEM_P (mem));
+ mask = expand_normal (maskt);
+ reg = expand_normal (rhs);
+ create_fixed_operand (&ops[0], mem);
+ create_input_operand (&ops[1], reg, TYPE_MODE (type));
+ create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
+ expand_insn (optab_handler (maskstore_optab, TYPE_MODE (type)), 3, ops);
+}
+
/* Routines to expand each internal function, indexed by function number.
Each routine has the prototype:
diff --git a/gcc/internal-fn.def b/gcc/internal-fn.def
index 7193874c811..fdb1812e430 100644
--- a/gcc/internal-fn.def
+++ b/gcc/internal-fn.def
@@ -43,5 +43,11 @@ DEF_INTERNAL_FN (STORE_LANES, ECF_CONST | ECF_LEAF)
DEF_INTERNAL_FN (GOMP_SIMD_LANE, ECF_NOVOPS | ECF_LEAF | ECF_NOTHROW)
DEF_INTERNAL_FN (GOMP_SIMD_VF, ECF_CONST | ECF_LEAF | ECF_NOTHROW)
DEF_INTERNAL_FN (GOMP_SIMD_LAST_LANE, ECF_CONST | ECF_LEAF | ECF_NOTHROW)
+DEF_INTERNAL_FN (LOOP_VECTORIZED, ECF_NOVOPS | ECF_LEAF | ECF_NOTHROW)
+DEF_INTERNAL_FN (MASK_LOAD, ECF_PURE | ECF_LEAF)
+DEF_INTERNAL_FN (MASK_STORE, ECF_LEAF)
DEF_INTERNAL_FN (ANNOTATE, ECF_CONST | ECF_LEAF | ECF_NOTHROW)
DEF_INTERNAL_FN (UBSAN_NULL, ECF_LEAF | ECF_NOTHROW)
+DEF_INTERNAL_FN (UBSAN_CHECK_ADD, ECF_CONST | ECF_LEAF | ECF_NOTHROW)
+DEF_INTERNAL_FN (UBSAN_CHECK_SUB, ECF_CONST | ECF_LEAF | ECF_NOTHROW)
+DEF_INTERNAL_FN (UBSAN_CHECK_MUL, ECF_CONST | ECF_LEAF | ECF_NOTHROW)
diff --git a/gcc/ipa-cp.c b/gcc/ipa-cp.c
index 6fd7e9cfb95..305ad2d72db 100644
--- a/gcc/ipa-cp.c
+++ b/gcc/ipa-cp.c
@@ -1688,10 +1688,10 @@ good_cloning_opportunity_p (struct cgraph_node *node, int time_benefit,
/* Return all context independent values from aggregate lattices in PLATS in a
vector. Return NULL if there are none. */
-static vec<ipa_agg_jf_item_t, va_gc> *
+static vec<ipa_agg_jf_item, va_gc> *
context_independent_aggregate_values (struct ipcp_param_lattices *plats)
{
- vec<ipa_agg_jf_item_t, va_gc> *res = NULL;
+ vec<ipa_agg_jf_item, va_gc> *res = NULL;
if (plats->aggs_bottom
|| plats->aggs_contain_variable
@@ -1720,7 +1720,7 @@ static bool
gather_context_independent_values (struct ipa_node_params *info,
vec<tree> *known_csts,
vec<tree> *known_binfos,
- vec<ipa_agg_jump_function_t> *known_aggs,
+ vec<ipa_agg_jump_function> *known_aggs,
int *removable_params_cost)
{
int i, count = ipa_get_param_count (info);
@@ -1771,7 +1771,7 @@ gather_context_independent_values (struct ipa_node_params *info,
if (known_aggs)
{
- vec<ipa_agg_jf_item_t, va_gc> *agg_items;
+ vec<ipa_agg_jf_item, va_gc> *agg_items;
struct ipa_agg_jump_function *ajf;
agg_items = context_independent_aggregate_values (plats);
@@ -1793,7 +1793,7 @@ gather_context_independent_values (struct ipa_node_params *info,
issue. */
static vec<ipa_agg_jump_function_p>
-agg_jmp_p_vec_for_t_vec (vec<ipa_agg_jump_function_t> known_aggs)
+agg_jmp_p_vec_for_t_vec (vec<ipa_agg_jump_function> known_aggs)
{
vec<ipa_agg_jump_function_p> ret;
struct ipa_agg_jump_function *ajf;
@@ -1814,7 +1814,7 @@ estimate_local_effects (struct cgraph_node *node)
struct ipa_node_params *info = IPA_NODE_REF (node);
int i, count = ipa_get_param_count (info);
vec<tree> known_csts, known_binfos;
- vec<ipa_agg_jump_function_t> known_aggs;
+ vec<ipa_agg_jump_function> known_aggs;
vec<ipa_agg_jump_function_p> known_aggs_ptrs;
bool always_const;
int base_time = inline_summary (node)->time;
@@ -2772,10 +2772,10 @@ find_more_scalar_values_for_callers_subset (struct cgraph_node *node,
/* Go through PLATS and create a vector of values consisting of values and
offsets (minus OFFSET) of lattices that contain only a single value. */
-static vec<ipa_agg_jf_item_t>
+static vec<ipa_agg_jf_item>
copy_plats_to_inter (struct ipcp_param_lattices *plats, HOST_WIDE_INT offset)
{
- vec<ipa_agg_jf_item_t> res = vNULL;
+ vec<ipa_agg_jf_item> res = vNULL;
if (!plats->aggs || plats->aggs_contain_variable || plats->aggs_bottom)
return vNULL;
@@ -2796,7 +2796,7 @@ copy_plats_to_inter (struct ipcp_param_lattices *plats, HOST_WIDE_INT offset)
static void
intersect_with_plats (struct ipcp_param_lattices *plats,
- vec<ipa_agg_jf_item_t> *inter,
+ vec<ipa_agg_jf_item> *inter,
HOST_WIDE_INT offset)
{
struct ipcp_agg_lattice *aglat;
@@ -2836,12 +2836,12 @@ intersect_with_plats (struct ipcp_param_lattices *plats,
/* Copy agggregate replacement values of NODE (which is an IPA-CP clone) to the
vector result while subtracting OFFSET from the individual value offsets. */
-static vec<ipa_agg_jf_item_t>
+static vec<ipa_agg_jf_item>
agg_replacements_to_vector (struct cgraph_node *node, int index,
HOST_WIDE_INT offset)
{
struct ipa_agg_replacement_value *av;
- vec<ipa_agg_jf_item_t> res = vNULL;
+ vec<ipa_agg_jf_item> res = vNULL;
for (av = ipa_get_agg_replacements_for_node (node); av; av = av->next)
if (av->index == index
@@ -2863,7 +2863,7 @@ agg_replacements_to_vector (struct cgraph_node *node, int index,
static void
intersect_with_agg_replacements (struct cgraph_node *node, int index,
- vec<ipa_agg_jf_item_t> *inter,
+ vec<ipa_agg_jf_item> *inter,
HOST_WIDE_INT offset)
{
struct ipa_agg_replacement_value *srcvals;
@@ -2904,9 +2904,9 @@ intersect_with_agg_replacements (struct cgraph_node *node, int index,
copy all incoming values to it. If we determine we ended up with no values
whatsoever, return a released vector. */
-static vec<ipa_agg_jf_item_t>
+static vec<ipa_agg_jf_item>
intersect_aggregates_with_edge (struct cgraph_edge *cs, int index,
- vec<ipa_agg_jf_item_t> inter)
+ vec<ipa_agg_jf_item> inter)
{
struct ipa_jump_func *jfunc;
jfunc = ipa_get_ith_jump_func (IPA_EDGE_REF (cs), index);
@@ -3015,7 +3015,7 @@ intersect_aggregates_with_edge (struct cgraph_edge *cs, int index,
else
{
inter.release ();
- return vec<ipa_agg_jf_item_t>();
+ return vec<ipa_agg_jf_item>();
}
return inter;
}
@@ -3042,7 +3042,7 @@ find_aggregate_values_for_callers_subset (struct cgraph_node *node,
for (i = 0; i < count ; i++)
{
struct cgraph_edge *cs;
- vec<ipa_agg_jf_item_t> inter = vNULL;
+ vec<ipa_agg_jf_item> inter = vNULL;
struct ipa_agg_jf_item *item;
struct ipcp_param_lattices *plats = ipa_get_parm_lattices (dest_info, i);
int j;
@@ -3086,7 +3086,7 @@ find_aggregate_values_for_callers_subset (struct cgraph_node *node,
/* Turn KNOWN_AGGS into a list of aggreate replacement values. */
static struct ipa_agg_replacement_value *
-known_aggs_to_agg_replacement_list (vec<ipa_agg_jump_function_t> known_aggs)
+known_aggs_to_agg_replacement_list (vec<ipa_agg_jump_function> known_aggs)
{
struct ipa_agg_replacement_value *res = NULL;
struct ipa_agg_jump_function *aggjf;
@@ -3168,7 +3168,7 @@ cgraph_edge_brings_all_agg_vals_for_node (struct cgraph_edge *cs,
for (i = 0; i < count; i++)
{
- static vec<ipa_agg_jf_item_t> values = vec<ipa_agg_jf_item_t>();
+ static vec<ipa_agg_jf_item> values = vec<ipa_agg_jf_item>();
struct ipcp_param_lattices *plats;
bool interesting = false;
for (struct ipa_agg_replacement_value *av = aggval; av; av = av->next)
@@ -3379,7 +3379,7 @@ decide_whether_version_node (struct cgraph_node *node)
struct ipa_node_params *info = IPA_NODE_REF (node);
int i, count = ipa_get_param_count (info);
vec<tree> known_csts, known_binfos;
- vec<ipa_agg_jump_function_t> known_aggs = vNULL;
+ vec<ipa_agg_jump_function> known_aggs = vNULL;
bool ret = false;
if (count == 0)
diff --git a/gcc/ipa-devirt.c b/gcc/ipa-devirt.c
index f272ccca812..836eaea144f 100644
--- a/gcc/ipa-devirt.c
+++ b/gcc/ipa-devirt.c
@@ -653,7 +653,7 @@ record_target_from_binfo (vec <cgraph_node *> &nodes,
if (!flag_ltrans && anonymous)
{
tree vtable = BINFO_VTABLE (inner_binfo);
- struct varpool_node *vnode;
+ varpool_node *vnode;
if (TREE_CODE (vtable) == POINTER_PLUS_EXPR)
vtable = TREE_OPERAND (TREE_OPERAND (vtable, 0), 0);
@@ -1144,7 +1144,7 @@ record_targets_from_bases (tree otr_type,
/* When virtual table is removed, we may need to flush the cache. */
static void
-devirt_variable_node_removal_hook (struct varpool_node *n,
+devirt_variable_node_removal_hook (varpool_node *n,
void *d ATTRIBUTE_UNUSED)
{
if (cached_polymorphic_call_targets
diff --git a/gcc/ipa-inline-analysis.c b/gcc/ipa-inline-analysis.c
index ad6fe8febb6..9e9087f30db 100644
--- a/gcc/ipa-inline-analysis.c
+++ b/gcc/ipa-inline-analysis.c
@@ -494,7 +494,7 @@ evaluate_predicate (struct predicate *p, clause_t possible_truths)
static int
predicate_probability (conditions conds,
struct predicate *p, clause_t possible_truths,
- vec<inline_param_summary_t> inline_param_summary)
+ vec<inline_param_summary> inline_param_summary)
{
int i;
int combined_prob = REG_BR_PROB_BASE;
@@ -2152,7 +2152,7 @@ param_change_prob (gimple stmt, int i)
max = 1;
EXECUTE_IF_SET_IN_BITMAP (info.bb_set, 0, index, bi)
- max = MIN (max, BASIC_BLOCK (index)->frequency);
+ max = MIN (max, BASIC_BLOCK_FOR_FN (cfun, index)->frequency);
BITMAP_FREE (info.bb_set);
if (max < bb->frequency)
@@ -2408,7 +2408,7 @@ estimate_function_body_sizes (struct cgraph_node *node, bool early)
nblocks = pre_and_rev_post_order_compute (NULL, order, false);
for (n = 0; n < nblocks; n++)
{
- bb = BASIC_BLOCK (order[n]);
+ bb = BASIC_BLOCK_FOR_FN (cfun, order[n]);
freq = compute_call_stmt_bb_frequency (node->decl, bb);
/* TODO: Obviously predicates can be propagated down across CFG. */
@@ -2983,7 +2983,7 @@ estimate_node_size_and_time (struct cgraph_node *node,
vec<ipa_agg_jump_function_p> known_aggs,
int *ret_size, int *ret_time,
inline_hints *ret_hints,
- vec<inline_param_summary_t>
+ vec<inline_param_summary>
inline_param_summary)
{
struct inline_summary *info = inline_summary (node);
diff --git a/gcc/ipa-inline.h b/gcc/ipa-inline.h
index 000d1abc968..14ee4bf7335 100644
--- a/gcc/ipa-inline.h
+++ b/gcc/ipa-inline.h
@@ -27,21 +27,21 @@ along with GCC; see the file COPYING3. If not see
vector. They are of simple for function_param OP VAL, where VAL is
IPA invariant. The conditions are then referred by predicates. */
-typedef struct GTY(()) condition
- {
- /* If agg_contents is set, this is the offset from which the used data was
- loaded. */
- HOST_WIDE_INT offset;
- tree val;
- int operand_num;
- ENUM_BITFIELD(tree_code) code : 16;
- /* Set if the used data were loaded from an aggregate parameter or from
- data received by reference. */
- unsigned agg_contents : 1;
- /* If agg_contents is set, this differentiates between loads from data
- passed by reference and by value. */
- unsigned by_ref : 1;
- } condition;
+struct GTY(()) condition
+{
+ /* If agg_contents is set, this is the offset from which the used data was
+ loaded. */
+ HOST_WIDE_INT offset;
+ tree val;
+ int operand_num;
+ ENUM_BITFIELD(tree_code) code : 16;
+ /* Set if the used data were loaded from an aggregate parameter or from
+ data received by reference. */
+ unsigned agg_contents : 1;
+ /* If agg_contents is set, this differentiates between loads from data
+ passed by reference and by value. */
+ unsigned by_ref : 1;
+};
/* Inline hints are reasons why inline heuristics should preffer inlining given
function. They are represtented as bitmap of the following values. */
@@ -99,12 +99,12 @@ struct GTY(()) predicate
accounted. */
#define INLINE_SIZE_SCALE 2
#define INLINE_TIME_SCALE (CGRAPH_FREQ_BASE * 2)
-typedef struct GTY(()) size_time_entry
+struct GTY(()) size_time_entry
{
struct predicate predicate;
int size;
int time;
-} size_time_entry;
+};
/* Function inlining information. */
struct GTY(()) inline_summary
@@ -156,7 +156,8 @@ struct GTY(()) inline_summary
int scc_no;
};
-
+/* Need a typedef for inline_summary because of inline function
+ 'inline_summary' below. */
typedef struct inline_summary inline_summary_t;
extern GTY(()) vec<inline_summary_t, va_gc> *inline_summary_vec;
@@ -172,7 +173,6 @@ struct inline_param_summary
Value 0 is reserved for compile time invariants. */
int change_prob;
};
-typedef struct inline_param_summary inline_param_summary_t;
/* Information kept about callgraph edges. */
struct inline_edge_summary
@@ -186,17 +186,19 @@ struct inline_edge_summary
/* Array indexed by parameters.
0 means that parameter change all the time, REG_BR_PROB_BASE means
that parameter is constant. */
- vec<inline_param_summary_t> param;
+ vec<inline_param_summary> param;
};
+/* Need a typedef for inline_edge_summary because of inline function
+ 'inline_edge_summary' below. */
typedef struct inline_edge_summary inline_edge_summary_t;
extern vec<inline_edge_summary_t> inline_edge_summary_vec;
-typedef struct edge_growth_cache_entry
+struct edge_growth_cache_entry
{
int time, size;
inline_hints hints;
-} edge_growth_cache_entry;
+};
extern vec<int> node_growth_cache;
extern vec<edge_growth_cache_entry> edge_growth_cache;
diff --git a/gcc/ipa-prop.c b/gcc/ipa-prop.c
index acc01fcfbbb..a28ab55097b 100644
--- a/gcc/ipa-prop.c
+++ b/gcc/ipa-prop.c
@@ -68,11 +68,11 @@ struct param_analysis_info
};
/* Vector where the parameter infos are actually stored. */
-vec<ipa_node_params_t> ipa_node_params_vector;
+vec<ipa_node_params> ipa_node_params_vector;
/* Vector of known aggregate values in cloned nodes. */
vec<ipa_agg_replacement_value_p, va_gc> *ipa_node_agg_replacements;
/* Vector where the parameter infos are actually stored. */
-vec<ipa_edge_args_t, va_gc> *ipa_edge_args_vector;
+vec<ipa_edge_args, va_gc> *ipa_edge_args_vector;
/* Holders of ipa cgraph hooks: */
static struct cgraph_edge_hook_list *edge_removal_hook_holder;
@@ -116,7 +116,7 @@ ipa_func_spec_opts_forbid_analysis_p (struct cgraph_node *node)
to INFO. */
static int
-ipa_get_param_decl_index_1 (vec<ipa_param_descriptor_t> descriptors, tree ptree)
+ipa_get_param_decl_index_1 (vec<ipa_param_descriptor> descriptors, tree ptree)
{
int i, count;
@@ -142,7 +142,7 @@ ipa_get_param_decl_index (struct ipa_node_params *info, tree ptree)
static void
ipa_populate_param_decls (struct cgraph_node *node,
- vec<ipa_param_descriptor_t> &descriptors)
+ vec<ipa_param_descriptor> &descriptors)
{
tree fndecl;
tree fnargs;
@@ -775,7 +775,7 @@ parm_preserved_before_stmt_p (struct param_analysis_info *parm_ainfo,
modified. Otherwise return -1. */
static int
-load_from_unmodified_param (vec<ipa_param_descriptor_t> descriptors,
+load_from_unmodified_param (vec<ipa_param_descriptor> descriptors,
struct param_analysis_info *parms_ainfo,
gimple stmt)
{
@@ -863,7 +863,7 @@ parm_ref_data_pass_through_p (struct param_analysis_info *parm_ainfo,
reference respectively. */
static bool
-ipa_load_from_parm_agg_1 (vec<ipa_param_descriptor_t> descriptors,
+ipa_load_from_parm_agg_1 (vec<ipa_param_descriptor> descriptors,
struct param_analysis_info *parms_ainfo, gimple stmt,
tree op, int *index_p, HOST_WIDE_INT *offset_p,
HOST_WIDE_INT *size_p, bool *by_ref_p)
@@ -3444,7 +3444,15 @@ ipa_modify_formal_parameters (tree fndecl, ipa_parm_adjustment_vec adjustments)
if (adj->by_ref)
ptype = build_pointer_type (adj->type);
else
- ptype = adj->type;
+ {
+ ptype = adj->type;
+ if (is_gimple_reg_type (ptype))
+ {
+ unsigned malign = GET_MODE_ALIGNMENT (TYPE_MODE (ptype));
+ if (TYPE_ALIGN (ptype) < malign)
+ ptype = build_aligned_type (ptype, malign);
+ }
+ }
if (care_for_types)
new_arg_types = tree_cons (NULL_TREE, ptype, new_arg_types);
@@ -4688,7 +4696,7 @@ adjust_agg_replacement_values (struct cgraph_node *node,
unsigned int
ipcp_transform_function (struct cgraph_node *node)
{
- vec<ipa_param_descriptor_t> descriptors = vNULL;
+ vec<ipa_param_descriptor> descriptors = vNULL;
struct param_analysis_info *parms_ainfo;
struct ipa_agg_replacement_value *aggval;
gimple_stmt_iterator gsi;
@@ -4717,7 +4725,7 @@ ipcp_transform_function (struct cgraph_node *node)
descriptors.safe_grow_cleared (param_count);
ipa_populate_param_decls (node, descriptors);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
struct ipa_agg_replacement_value *v;
diff --git a/gcc/ipa-prop.h b/gcc/ipa-prop.h
index 2fb00afcac2..9a987d78a04 100644
--- a/gcc/ipa-prop.h
+++ b/gcc/ipa-prop.h
@@ -152,14 +152,14 @@ struct GTY(()) ipa_ancestor_jf_data
different, all unlisted parts are assumed to be unknown and all values must
fulfill is_gimple_ip_invariant. */
-typedef struct GTY(()) ipa_agg_jf_item
+struct GTY(()) ipa_agg_jf_item
{
/* The offset at which the known value is located within the aggregate. */
HOST_WIDE_INT offset;
/* The known constant or type if this is a clobber. */
tree value;
-} ipa_agg_jf_item_t;
+};
/* Aggregate jump function - i.e. description of contents of aggregates passed
@@ -168,18 +168,17 @@ typedef struct GTY(()) ipa_agg_jf_item
struct GTY(()) ipa_agg_jump_function
{
/* Description of the individual items. */
- vec<ipa_agg_jf_item_t, va_gc> *items;
+ vec<ipa_agg_jf_item, va_gc> *items;
/* True if the data was passed by reference (as opposed to by value). */
bool by_ref;
};
typedef struct ipa_agg_jump_function *ipa_agg_jump_function_p;
-typedef struct ipa_agg_jump_function ipa_agg_jump_function_t;
/* A jump function for a callsite represents the values passed as actual
arguments of the callsite. See enum jump_func_type for the various
types of jump functions supported. */
-typedef struct GTY (()) ipa_jump_func
+struct GTY (()) ipa_jump_func
{
/* Aggregate contants description. See struct ipa_agg_jump_function and its
description. */
@@ -196,7 +195,7 @@ typedef struct GTY (()) ipa_jump_func
struct ipa_pass_through_data GTY ((tag ("IPA_JF_PASS_THROUGH"))) pass_through;
struct ipa_ancestor_jf_data GTY ((tag ("IPA_JF_ANCESTOR"))) ancestor;
} GTY ((desc ("%1.type"))) value;
-} ipa_jump_func_t;
+};
/* Return the offset of the component that is described by a known type jump
@@ -350,7 +349,6 @@ struct ipa_param_descriptor
unsigned used : 1;
};
-typedef struct ipa_param_descriptor ipa_param_descriptor_t;
struct ipcp_lattice;
/* ipa_node_params stores information related to formal parameters of functions
@@ -361,7 +359,7 @@ struct ipa_node_params
{
/* Information about individual formal parameters that are gathered when
summaries are generated. */
- vec<ipa_param_descriptor_t> descriptors;
+ vec<ipa_param_descriptor> descriptors;
/* Pointer to an array of structures describing individual formal
parameters. */
struct ipcp_param_lattices *lattices;
@@ -474,11 +472,11 @@ void ipa_set_node_agg_value_chain (struct cgraph_node *node,
/* ipa_edge_args stores information related to a callsite and particularly its
arguments. It can be accessed by the IPA_EDGE_REF macro. */
-typedef struct GTY(()) ipa_edge_args
+struct GTY(()) ipa_edge_args
{
/* Vector of the callsite's jump function of each parameter. */
- vec<ipa_jump_func_t, va_gc> *jump_functions;
-} ipa_edge_args_t;
+ vec<ipa_jump_func, va_gc> *jump_functions;
+};
/* ipa_edge_args access functions. Please use these to access fields that
are or will be shared among various passes. */
@@ -501,17 +499,14 @@ ipa_get_ith_jump_func (struct ipa_edge_args *args, int i)
return &(*args->jump_functions)[i];
}
-/* Vectors need to have typedefs of structures. */
-typedef struct ipa_node_params ipa_node_params_t;
-
/* Types of vectors holding the infos. */
/* Vector where the parameter infos are actually stored. */
-extern vec<ipa_node_params_t> ipa_node_params_vector;
+extern vec<ipa_node_params> ipa_node_params_vector;
/* Vector of known aggregate values in cloned nodes. */
extern GTY(()) vec<ipa_agg_replacement_value_p, va_gc> *ipa_node_agg_replacements;
/* Vector where the parameter infos are actually stored. */
-extern GTY(()) vec<ipa_edge_args_t, va_gc> *ipa_edge_args_vector;
+extern GTY(()) vec<ipa_edge_args, va_gc> *ipa_edge_args_vector;
/* Return the associated parameter/argument info corresponding to the given
node/edge. */
@@ -685,9 +680,7 @@ struct ipa_parm_adjustment
unsigned by_ref : 1;
};
-typedef struct ipa_parm_adjustment ipa_parm_adjustment_t;
-
-typedef vec<ipa_parm_adjustment_t> ipa_parm_adjustment_vec;
+typedef vec<ipa_parm_adjustment> ipa_parm_adjustment_vec;
vec<tree> ipa_get_vector_of_formal_parms (tree fndecl);
vec<tree> ipa_get_vector_of_formal_parm_types (tree fntype);
diff --git a/gcc/ipa-pure-const.c b/gcc/ipa-pure-const.c
index d84b35fd716..a60e078c90c 100644
--- a/gcc/ipa-pure-const.c
+++ b/gcc/ipa-pure-const.c
@@ -754,7 +754,7 @@ analyze_function (struct cgraph_node *fn, bool ipa)
push_cfun (DECL_STRUCT_FUNCTION (decl));
- FOR_EACH_BB (this_block)
+ FOR_EACH_BB_FN (this_block, cfun)
{
gimple_stmt_iterator gsi;
struct walk_stmt_info wi;
diff --git a/gcc/ipa-ref-inline.h b/gcc/ipa-ref-inline.h
index 4bb43c2af1d..83901518e53 100644
--- a/gcc/ipa-ref-inline.h
+++ b/gcc/ipa-ref-inline.h
@@ -27,7 +27,7 @@ ipa_ref_node (struct ipa_ref *ref)
/* Return varpool node REF is referring. */
-static inline struct varpool_node *
+static inline varpool_node *
ipa_ref_varpool_node (struct ipa_ref *ref)
{
return varpool (ref->referred);
@@ -43,7 +43,7 @@ ipa_ref_referring_node (struct ipa_ref *ref)
/* Return varpool node REF is in. */
-static inline struct varpool_node *
+static inline varpool_node *
ipa_ref_referring_varpool_node (struct ipa_ref *ref)
{
return varpool (ref->referring);
diff --git a/gcc/ipa-ref.h b/gcc/ipa-ref.h
index 9f392b41588..3daede2d6b8 100644
--- a/gcc/ipa-ref.h
+++ b/gcc/ipa-ref.h
@@ -19,7 +19,7 @@ along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
struct cgraph_node;
-struct varpool_node;
+class varpool_node;
class symtab_node;
diff --git a/gcc/ipa-reference.c b/gcc/ipa-reference.c
index 9e4eb0022fc..9f3626a14c3 100644
--- a/gcc/ipa-reference.c
+++ b/gcc/ipa-reference.c
@@ -667,7 +667,7 @@ static unsigned int
propagate (void)
{
struct cgraph_node *node;
- struct varpool_node *vnode;
+ varpool_node *vnode;
struct cgraph_node **order =
XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
int order_pos;
diff --git a/gcc/ipa-split.c b/gcc/ipa-split.c
index d2e2d6f3d19..390adf1f798 100644
--- a/gcc/ipa-split.c
+++ b/gcc/ipa-split.c
@@ -362,7 +362,8 @@ dominated_by_forbidden (basic_block bb)
EXECUTE_IF_SET_IN_BITMAP (forbidden_dominators, 1, dom_bb, bi)
{
- if (dominated_by_p (CDI_DOMINATORS, bb, BASIC_BLOCK (dom_bb)))
+ if (dominated_by_p (CDI_DOMINATORS, bb,
+ BASIC_BLOCK_FOR_FN (cfun, dom_bb)))
return true;
}
@@ -410,7 +411,7 @@ consider_split (struct split_point *current, bitmap non_ssa_vars,
a loop, enable splitting since inlining code skipping the loop
is likely noticeable win. */
if (back_edge
- && profile_status != PROFILE_READ
+ && profile_status_for_fn (cfun) != PROFILE_READ
&& incoming_freq < ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency)
{
if (dump_file && (dump_flags & TDF_DETAILS))
@@ -1069,7 +1070,7 @@ find_split_points (int overall_time, int overall_size)
stack.pop ();
}
ENTRY_BLOCK_PTR_FOR_FN (cfun)->aux = NULL;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
bb->aux = NULL;
stack.release ();
BITMAP_FREE (current.ssa_names_to_pass);
@@ -1584,7 +1585,7 @@ execute_split_functions (void)
/* We enforce splitting after loop headers when profile info is not
available. */
- if (profile_status != PROFILE_READ)
+ if (profile_status_for_fn (cfun) != PROFILE_READ)
mark_dfs_back_edges ();
/* Initialize bitmap to track forbidden calls. */
@@ -1592,9 +1593,9 @@ execute_split_functions (void)
calculate_dominance_info (CDI_DOMINATORS);
/* Compute local info about basic blocks and determine function size/time. */
- bb_info_vec.safe_grow_cleared (last_basic_block + 1);
+ bb_info_vec.safe_grow_cleared (last_basic_block_for_fn (cfun) + 1);
memset (&best_split_point, 0, sizeof (best_split_point));
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
int time = 0;
int size = 0;
diff --git a/gcc/ipa-utils.c b/gcc/ipa-utils.c
index 312d75ddbfd..92972803ba0 100644
--- a/gcc/ipa-utils.c
+++ b/gcc/ipa-utils.c
@@ -527,7 +527,7 @@ varpool_node_set_new (void)
/* Add varpool_node NODE to varpool_node_set SET. */
void
-varpool_node_set_add (varpool_node_set set, struct varpool_node *node)
+varpool_node_set_add (varpool_node_set set, varpool_node *node)
{
void **slot;
@@ -551,11 +551,11 @@ varpool_node_set_add (varpool_node_set set, struct varpool_node *node)
/* Remove varpool_node NODE from varpool_node_set SET. */
void
-varpool_node_set_remove (varpool_node_set set, struct varpool_node *node)
+varpool_node_set_remove (varpool_node_set set, varpool_node *node)
{
void **slot, **last_slot;
int index;
- struct varpool_node *last_node;
+ varpool_node *last_node;
slot = pointer_map_contains (set->map, node);
if (slot == NULL || !*slot)
@@ -587,7 +587,7 @@ varpool_node_set_remove (varpool_node_set set, struct varpool_node *node)
is returned if NODE is not in SET. */
varpool_node_set_iterator
-varpool_node_set_find (varpool_node_set set, struct varpool_node *node)
+varpool_node_set_find (varpool_node_set set, varpool_node *node)
{
void **slot;
varpool_node_set_iterator vsi;
@@ -612,7 +612,7 @@ dump_varpool_node_set (FILE *f, varpool_node_set set)
for (iter = vsi_start (set); !vsi_end_p (iter); vsi_next (&iter))
{
- struct varpool_node *node = vsi_node (iter);
+ varpool_node *node = vsi_node (iter);
fprintf (f, " %s", node->name ());
}
fprintf (f, "\n");
@@ -711,8 +711,8 @@ ipa_merge_profiles (struct cgraph_node *dst,
"Giving up; number of basic block mismatch.\n");
match = false;
}
- else if (last_basic_block_for_function (srccfun)
- != last_basic_block_for_function (dstcfun))
+ else if (last_basic_block_for_fn (srccfun)
+ != last_basic_block_for_fn (dstcfun))
{
if (cgraph_dump_file)
fprintf (cgraph_dump_file,
@@ -727,7 +727,7 @@ ipa_merge_profiles (struct cgraph_node *dst,
{
unsigned int i;
- dstbb = BASIC_BLOCK_FOR_FUNCTION (dstcfun, srcbb->index);
+ dstbb = BASIC_BLOCK_FOR_FN (dstcfun, srcbb->index);
if (dstbb == NULL)
{
if (cgraph_dump_file)
@@ -772,7 +772,7 @@ ipa_merge_profiles (struct cgraph_node *dst,
{
unsigned int i;
- dstbb = BASIC_BLOCK_FOR_FUNCTION (dstcfun, srcbb->index);
+ dstbb = BASIC_BLOCK_FOR_FN (dstcfun, srcbb->index);
dstbb->count += srcbb->count;
for (i = 0; i < EDGE_COUNT (srcbb->succs); i++)
{
diff --git a/gcc/ipa.c b/gcc/ipa.c
index 520a5bbdaec..1ec4b5fc3b7 100644
--- a/gcc/ipa.c
+++ b/gcc/ipa.c
@@ -291,7 +291,7 @@ symtab_remove_unreachable_nodes (bool before_inlining_p, FILE *file)
{
symtab_node *first = (symtab_node *) (void *) 1;
struct cgraph_node *node, *next;
- struct varpool_node *vnode, *vnext;
+ varpool_node *vnode, *vnext;
bool changed = false;
struct pointer_set_t *reachable = pointer_set_create ();
struct pointer_set_t *body_needed_for_clonning = pointer_set_create ();
@@ -606,7 +606,7 @@ symtab_remove_unreachable_nodes (bool before_inlining_p, FILE *file)
void
ipa_discover_readonly_nonaddressable_vars (void)
{
- struct varpool_node *vnode;
+ varpool_node *vnode;
if (dump_file)
fprintf (dump_file, "Clearing variable flags:");
FOR_EACH_VARIABLE (vnode)
@@ -663,7 +663,7 @@ address_taken_from_non_vtable_p (symtab_node *node)
i, ref); i++)
if (ref->use == IPA_REF_ADDR)
{
- struct varpool_node *node;
+ varpool_node *node;
if (is_a <cgraph_node> (ref->referring))
return true;
node = ipa_ref_referring_varpool_node (ref);
@@ -801,7 +801,7 @@ cgraph_externally_visible_p (struct cgraph_node *node,
/* Return true when variable VNODE should be considered externally visible. */
bool
-varpool_externally_visible_p (struct varpool_node *vnode)
+varpool_externally_visible_p (varpool_node *vnode)
{
if (DECL_EXTERNAL (vnode->decl))
return true;
@@ -895,7 +895,7 @@ static unsigned int
function_and_variable_visibility (bool whole_program)
{
struct cgraph_node *node;
- struct varpool_node *vnode;
+ varpool_node *vnode;
/* All aliases should be procssed at this point. */
gcc_checking_assert (!alias_pairs || !alias_pairs->length ());
diff --git a/gcc/ira-build.c b/gcc/ira-build.c
index 09e22d74f6b..660fb0d6eb0 100644
--- a/gcc/ira-build.c
+++ b/gcc/ira-build.c
@@ -138,9 +138,10 @@ create_loop_tree_nodes (void)
ira_bb_nodes
= ((struct ira_loop_tree_node *)
- ira_allocate (sizeof (struct ira_loop_tree_node) * last_basic_block));
- last_basic_block_before_change = last_basic_block;
- for (i = 0; i < (unsigned int) last_basic_block; i++)
+ ira_allocate (sizeof (struct ira_loop_tree_node)
+ * last_basic_block_for_fn (cfun)));
+ last_basic_block_before_change = last_basic_block_for_fn (cfun);
+ for (i = 0; i < (unsigned int) last_basic_block_for_fn (cfun); i++)
{
ira_bb_nodes[i].regno_allocno_map = NULL;
memset (ira_bb_nodes[i].reg_pressure, 0,
@@ -340,7 +341,7 @@ form_loop_tree (void)
/* We can not use loop/bb node access macros because of potential
checking and because the nodes are not initialized enough
yet. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
bb_node = &ira_bb_nodes[bb->index];
bb_node->bb = bb;
@@ -2605,8 +2606,10 @@ remove_unnecessary_regions (bool all_p)
mark_all_loops_for_removal ();
else
mark_loops_for_removal ();
- children_vec.create (last_basic_block + number_of_loops (cfun));
- removed_loop_vec.create (last_basic_block + number_of_loops (cfun));
+ children_vec.create (last_basic_block_for_fn (cfun)
+ + number_of_loops (cfun));
+ removed_loop_vec.create (last_basic_block_for_fn (cfun)
+ + number_of_loops (cfun));
remove_uneccesary_loop_nodes_from_loop_tree (ira_loop_tree_root);
children_vec.release ();
if (all_p)
diff --git a/gcc/ira-costs.c b/gcc/ira-costs.c
index d7299e658d7..c8d64d5e50a 100644
--- a/gcc/ira-costs.c
+++ b/gcc/ira-costs.c
@@ -1585,7 +1585,7 @@ find_costs_and_classes (FILE *dump_file)
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
process_bb_for_costs (bb);
}
diff --git a/gcc/ira-emit.c b/gcc/ira-emit.c
index 198fa47b702..196efa02545 100644
--- a/gcc/ira-emit.c
+++ b/gcc/ira-emit.c
@@ -986,7 +986,7 @@ emit_moves (void)
edge e;
rtx insns, tmp;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
if (at_bb_start[bb->index] != NULL)
{
@@ -1203,7 +1203,7 @@ add_ranges_and_copies (void)
bitmap live_through;
live_through = ira_allocate_bitmap ();
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
/* It does not matter what loop_tree_node (of source or
destination block) to use for searching allocnos by their
@@ -1239,15 +1239,17 @@ ira_emit (bool loops_p)
edge e;
ira_allocno_t a;
ira_allocno_iterator ai;
+ size_t sz;
FOR_EACH_ALLOCNO (a, ai)
ALLOCNO_EMIT_DATA (a)->reg = regno_reg_rtx[ALLOCNO_REGNO (a)];
if (! loops_p)
return;
- at_bb_start = (move_t *) ira_allocate (sizeof (move_t) * last_basic_block);
- memset (at_bb_start, 0, sizeof (move_t) * last_basic_block);
- at_bb_end = (move_t *) ira_allocate (sizeof (move_t) * last_basic_block);
- memset (at_bb_end, 0, sizeof (move_t) * last_basic_block);
+ sz = sizeof (move_t) * last_basic_block_for_fn (cfun);
+ at_bb_start = (move_t *) ira_allocate (sz);
+ memset (at_bb_start, 0, sz);
+ at_bb_end = (move_t *) ira_allocate (sz);
+ memset (at_bb_end, 0, sz);
local_allocno_bitmap = ira_allocate_bitmap ();
used_regno_bitmap = ira_allocate_bitmap ();
renamed_regno_bitmap = ira_allocate_bitmap ();
@@ -1258,7 +1260,7 @@ ira_emit (bool loops_p)
ira_free_bitmap (renamed_regno_bitmap);
ira_free_bitmap (local_allocno_bitmap);
setup_entered_from_non_parent_p ();
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
at_bb_start[bb->index] = NULL;
at_bb_end[bb->index] = NULL;
@@ -1273,15 +1275,15 @@ ira_emit (bool loops_p)
memset (allocno_last_set_check, 0, sizeof (int) * max_reg_num ());
memset (hard_regno_last_set_check, 0, sizeof (hard_regno_last_set_check));
curr_tick = 0;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
unify_moves (bb, true);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
unify_moves (bb, false);
move_vec.create (ira_allocnos_num);
emit_moves ();
add_ranges_and_copies ();
/* Clean up: */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
free_move_list (at_bb_start[bb->index]);
free_move_list (at_bb_end[bb->index]);
@@ -1299,7 +1301,7 @@ ira_emit (bool loops_p)
reload assumes initial insn codes defined. The insn codes can be
invalidated by CFG infrastructure for example in jump
redirection. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
FOR_BB_INSNS_REVERSE (bb, insn)
if (INSN_P (insn))
recog_memoized (insn);
diff --git a/gcc/ira-int.h b/gcc/ira-int.h
index b46e7b00274..690763c9810 100644
--- a/gcc/ira-int.h
+++ b/gcc/ira-int.h
@@ -688,7 +688,7 @@ extern int ira_move_loops_num, ira_additional_jumps_num;
#endif
/* The iterator for min/max sets. */
-typedef struct {
+struct minmax_set_iterator {
/* Array containing the bit vector. */
IRA_INT_TYPE *vec;
@@ -707,7 +707,7 @@ typedef struct {
/* The word of the bit vector currently visited. */
unsigned IRA_INT_TYPE word;
-} minmax_set_iterator;
+};
/* Initialize the iterator I for bit vector VEC containing minimal and
maximal values MIN and MAX. */
@@ -1081,10 +1081,10 @@ ira_init_register_move_cost_if_necessary (enum machine_mode mode)
/* The iterator for all allocnos. */
-typedef struct {
+struct ira_allocno_iterator {
/* The number of the current element in IRA_ALLOCNOS. */
int n;
-} ira_allocno_iterator;
+};
/* Initialize the iterator I. */
static inline void
@@ -1118,10 +1118,10 @@ ira_allocno_iter_cond (ira_allocno_iterator *i, ira_allocno_t *a)
ira_allocno_iter_cond (&(ITER), &(A));)
/* The iterator for all objects. */
-typedef struct {
+struct ira_object_iterator {
/* The number of the current element in ira_object_id_map. */
int n;
-} ira_object_iterator;
+};
/* Initialize the iterator I. */
static inline void
@@ -1155,10 +1155,10 @@ ira_object_iter_cond (ira_object_iterator *i, ira_object_t *obj)
ira_object_iter_cond (&(ITER), &(OBJ));)
/* The iterator for objects associated with an allocno. */
-typedef struct {
+struct ira_allocno_object_iterator {
/* The number of the element the allocno's object array. */
int n;
-} ira_allocno_object_iterator;
+};
/* Initialize the iterator I. */
static inline void
@@ -1192,10 +1192,10 @@ ira_allocno_object_iter_cond (ira_allocno_object_iterator *i, ira_allocno_t a,
/* The iterator for prefs. */
-typedef struct {
+struct ira_pref_iterator {
/* The number of the current element in IRA_PREFS. */
int n;
-} ira_pref_iterator;
+};
/* Initialize the iterator I. */
static inline void
@@ -1230,10 +1230,10 @@ ira_pref_iter_cond (ira_pref_iterator *i, ira_pref_t *pref)
/* The iterator for copies. */
-typedef struct {
+struct ira_copy_iterator {
/* The number of the current element in IRA_COPIES. */
int n;
-} ira_copy_iterator;
+};
/* Initialize the iterator I. */
static inline void
@@ -1267,7 +1267,7 @@ ira_copy_iter_cond (ira_copy_iterator *i, ira_copy_t *cp)
ira_copy_iter_cond (&(ITER), &(C));)
/* The iterator for object conflicts. */
-typedef struct {
+struct ira_object_conflict_iterator {
/* TRUE if the conflicts are represented by vector of allocnos. */
bool conflict_vec_p;
@@ -1294,7 +1294,7 @@ typedef struct {
/* The word of bit vector currently visited. It is defined only if
OBJECT_CONFLICT_VEC_P is FALSE. */
unsigned IRA_INT_TYPE word;
-} ira_object_conflict_iterator;
+};
/* Initialize the iterator I with ALLOCNO conflicts. */
static inline void
diff --git a/gcc/ira.c b/gcc/ira.c
index b3477ae9162..d6462ca7203 100644
--- a/gcc/ira.c
+++ b/gcc/ira.c
@@ -2135,7 +2135,7 @@ decrease_live_ranges_number (void)
if (ira_dump_file)
fprintf (ira_dump_file, "Starting decreasing number of live ranges...\n");
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
FOR_BB_INSNS (bb, insn)
{
set = single_set (insn);
@@ -2358,7 +2358,7 @@ compute_regs_asm_clobbered (void)
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn;
FOR_BB_INSNS_REVERSE (bb, insn)
@@ -2951,7 +2951,7 @@ mark_elimination (int from, int to)
basic_block bb;
bitmap r;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
r = DF_LR_IN (bb);
if (bitmap_bit_p (r, from))
@@ -3473,7 +3473,7 @@ update_equiv_regs (void)
paradoxical subreg. Don't set such reg sequivalent to a mem,
because lra will not substitute such equiv memory in order to
prevent access beyond allocated memory for paradoxical memory subreg. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
FOR_BB_INSNS (bb, insn)
if (NONDEBUG_INSN_P (insn))
for_each_rtx (&insn, set_paradoxical_subreg, (void *) pdx_subregs);
@@ -3481,7 +3481,7 @@ update_equiv_regs (void)
/* Scan the insns and find which registers have equivalences. Do this
in a separate scan of the insns because (due to -fcse-follow-jumps)
a register can be set below its use. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
loop_depth = bb_loop_depth (bb);
@@ -3772,7 +3772,7 @@ update_equiv_regs (void)
within the same loop (or in an inner loop), then move the register
initialization just before the use, so that they are in the same
basic block. */
- FOR_EACH_BB_REVERSE (bb)
+ FOR_EACH_BB_REVERSE_FN (bb, cfun)
{
loop_depth = bb_loop_depth (bb);
for (insn = BB_END (bb);
@@ -3905,7 +3905,7 @@ update_equiv_regs (void)
if (!bitmap_empty_p (cleared_regs))
{
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
bitmap_and_compl_into (DF_LR_IN (bb), cleared_regs);
bitmap_and_compl_into (DF_LR_OUT (bb), cleared_regs);
@@ -4127,7 +4127,7 @@ build_insn_chain (void)
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
if (TEST_HARD_REG_BIT (eliminable_regset, i))
bitmap_set_bit (elim_regset, i);
- FOR_EACH_BB_REVERSE (bb)
+ FOR_EACH_BB_REVERSE_FN (bb, cfun)
{
bitmap_iterator bi;
rtx insn;
@@ -4507,12 +4507,15 @@ find_moveable_pseudos (void)
int *uid_luid = XNEWVEC (int, max_uid);
rtx *closest_uses = XNEWVEC (rtx, max_regs);
/* A set of registers which are live but not modified throughout a block. */
- bitmap_head *bb_transp_live = XNEWVEC (bitmap_head, last_basic_block);
+ bitmap_head *bb_transp_live = XNEWVEC (bitmap_head,
+ last_basic_block_for_fn (cfun));
/* A set of registers which only exist in a given basic block. */
- bitmap_head *bb_local = XNEWVEC (bitmap_head, last_basic_block);
+ bitmap_head *bb_local = XNEWVEC (bitmap_head,
+ last_basic_block_for_fn (cfun));
/* A set of registers which are set once, in an instruction that can be
moved freely downwards, but are otherwise transparent to a block. */
- bitmap_head *bb_moveable_reg_sets = XNEWVEC (bitmap_head, last_basic_block);
+ bitmap_head *bb_moveable_reg_sets = XNEWVEC (bitmap_head,
+ last_basic_block_for_fn (cfun));
bitmap_head live, used, set, interesting, unusable_as_input;
bitmap_iterator bi;
bitmap_initialize (&interesting, 0);
@@ -4529,7 +4532,7 @@ find_moveable_pseudos (void)
bitmap_initialize (&used, 0);
bitmap_initialize (&set, 0);
bitmap_initialize (&unusable_as_input, 0);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn;
bitmap transp = bb_transp_live + bb->index;
@@ -4592,7 +4595,7 @@ find_moveable_pseudos (void)
bitmap_clear (&used);
bitmap_clear (&set);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
bitmap local = bb_local + bb->index;
rtx insn;
@@ -4821,7 +4824,7 @@ find_moveable_pseudos (void)
}
}
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
bitmap_clear (bb_local + bb->index);
bitmap_clear (bb_transp_live + bb->index);
@@ -4918,7 +4921,7 @@ split_live_ranges_for_shrink_wrap (void)
bitmap_initialize (&reachable, 0);
queue.create (n_basic_blocks_for_fn (cfun));
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
FOR_BB_INSNS (bb, insn)
if (CALL_P (insn) && !SIBLING_CALL_P (insn))
{
@@ -5142,7 +5145,7 @@ allocate_initial_values (void)
fixed regs are accepted. */
SET_REGNO (preg, new_regno);
/* Update global register liveness information. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
if (REGNO_REG_SET_P (df_get_live_in (bb), regno))
SET_REGNO_REG_SET (df_get_live_in (bb), new_regno);
@@ -5187,7 +5190,8 @@ ira (FILE *f)
pseudos and 10K blocks or 100K pseudos and 1K blocks), we will
use simplified and faster algorithms in LRA. */
lra_simple_p
- = (ira_use_lra_p && max_reg_num () >= (1 << 26) / last_basic_block);
+ = (ira_use_lra_p
+ && max_reg_num () >= (1 << 26) / last_basic_block_for_fn (cfun));
if (lra_simple_p)
{
/* It permits to skip live range splitting in LRA. */
@@ -5439,7 +5443,7 @@ do_reload (void)
loop_optimizer_finalize ();
free_dominance_info (CDI_DOMINATORS);
}
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
bb->loop_father = NULL;
current_loops = NULL;
@@ -5488,7 +5492,7 @@ do_reload (void)
loop_optimizer_finalize ();
free_dominance_info (CDI_DOMINATORS);
}
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
bb->loop_father = NULL;
current_loops = NULL;
diff --git a/gcc/jump.c b/gcc/jump.c
index a27aaa94b8d..5eefeefbf63 100644
--- a/gcc/jump.c
+++ b/gcc/jump.c
@@ -275,7 +275,7 @@ mark_all_labels (rtx f)
if (current_ir_type () == IR_RTL_CFGLAYOUT)
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
/* In cfglayout mode, we don't bother with trivial next-insn
propagation of LABEL_REFs into JUMP_LABEL. This will be
diff --git a/gcc/langhooks-def.h b/gcc/langhooks-def.h
index 411cf74b666..b7be47200a8 100644
--- a/gcc/langhooks-def.h
+++ b/gcc/langhooks-def.h
@@ -214,18 +214,6 @@ extern tree lhd_make_node (enum tree_code);
#define LANG_HOOKS_OMP_CLAUSE_DTOR hook_tree_tree_tree_null
#define LANG_HOOKS_OMP_FINISH_CLAUSE hook_void_tree
-extern void lhd_install_body_with_frame_cleanup (tree, tree);
-extern bool lhd_cilk_detect_spawn (tree *);
-#define LANG_HOOKS_CILKPLUS_DETECT_SPAWN_AND_UNWRAP lhd_cilk_detect_spawn
-#define LANG_HOOKS_CILKPLUS_FRAME_CLEANUP lhd_install_body_with_frame_cleanup
-#define LANG_HOOKS_CILKPLUS_GIMPLIFY_SPAWN lhd_gimplify_expr
-
-#define LANG_HOOKS_CILKPLUS { \
- LANG_HOOKS_CILKPLUS_DETECT_SPAWN_AND_UNWRAP, \
- LANG_HOOKS_CILKPLUS_FRAME_CLEANUP, \
- LANG_HOOKS_CILKPLUS_GIMPLIFY_SPAWN \
-}
-
#define LANG_HOOKS_DECLS { \
LANG_HOOKS_GLOBAL_BINDINGS_P, \
LANG_HOOKS_PUSHDECL, \
@@ -303,7 +291,6 @@ extern void lhd_end_section (void);
LANG_HOOKS_TREE_DUMP_INITIALIZER, \
LANG_HOOKS_DECLS, \
LANG_HOOKS_FOR_TYPES_INITIALIZER, \
- LANG_HOOKS_CILKPLUS, \
LANG_HOOKS_LTO, \
LANG_HOOKS_GET_INNERMOST_GENERIC_PARMS, \
LANG_HOOKS_GET_INNERMOST_GENERIC_ARGS, \
diff --git a/gcc/langhooks.c b/gcc/langhooks.c
index 7fe349d4f2c..6766ee5c022 100644
--- a/gcc/langhooks.c
+++ b/gcc/langhooks.c
@@ -676,18 +676,3 @@ lhd_end_section (void)
saved_section = NULL;
}
}
-
-/* Empty function that is replaced with appropriate language dependent
- frame cleanup function for _Cilk_spawn. */
-
-void
-lhd_install_body_with_frame_cleanup (tree, tree)
-{
-}
-
-/* Empty function to handle cilk_valid_spawn. */
-bool
-lhd_cilk_detect_spawn (tree *)
-{
- return false;
-}
diff --git a/gcc/langhooks.h b/gcc/langhooks.h
index 9539e7d5b7a..5a5c8b6c3a7 100644
--- a/gcc/langhooks.h
+++ b/gcc/langhooks.h
@@ -139,23 +139,6 @@ struct lang_hooks_for_types
tree (*reconstruct_complex_type) (tree, tree);
};
-/* Language hooks related to Cilk Plus. */
-
-struct lang_hooks_for_cilkplus
-{
- /* Returns true if the expression passed in has a spawned function call. */
- bool (*cilk_detect_spawn_and_unwrap) (tree *);
-
- /* Function to add the clean up functions after spawn. The reason why it is
- language dependent is because in C++, it must handle exceptions. */
- void (*install_body_with_frame_cleanup) (tree, tree);
-
- /* Function to gimplify a spawned function call. Returns enum gimplify
- status, but as mentioned in a previous comment, we can't see that type
- here, so just return an int. */
- int (*gimplify_cilk_spawn) (tree *, gimple_seq *, gimple_seq *);
-};
-
/* Language hooks related to decls and the symbol table. */
struct lang_hooks_for_decls
@@ -424,8 +407,6 @@ struct lang_hooks
struct lang_hooks_for_decls decls;
struct lang_hooks_for_types types;
-
- struct lang_hooks_for_cilkplus cilkplus;
struct lang_hooks_for_lto lto;
diff --git a/gcc/lcm.c b/gcc/lcm.c
index aa63c7272f0..b5d56e05bf8 100644
--- a/gcc/lcm.c
+++ b/gcc/lcm.c
@@ -105,11 +105,11 @@ compute_antinout_edge (sbitmap *antloc, sbitmap *transp, sbitmap *antin,
/* We want a maximal solution, so make an optimistic initialization of
ANTIN. */
- bitmap_vector_ones (antin, last_basic_block);
+ bitmap_vector_ones (antin, last_basic_block_for_fn (cfun));
/* Put every block on the worklist; this is necessary because of the
optimistic initialization of ANTIN above. */
- FOR_EACH_BB_REVERSE (bb)
+ FOR_EACH_BB_REVERSE_FN (bb, cfun)
{
*qin++ = bb;
bb->aux = bb;
@@ -281,7 +281,7 @@ compute_laterin (struct edge_list *edge_list, sbitmap *earliest,
/* Add all the blocks to the worklist. This prevents an early exit from
the loop given our optimistic initialization of LATER above. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
*qin++ = bb;
bb->aux = bb;
@@ -330,10 +330,10 @@ compute_laterin (struct edge_list *edge_list, sbitmap *earliest,
/* Computation of insertion and deletion points requires computing LATERIN
for the EXIT block. We allocated an extra entry in the LATERIN array
for just this purpose. */
- bitmap_ones (laterin[last_basic_block]);
+ bitmap_ones (laterin[last_basic_block_for_fn (cfun)]);
FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
- bitmap_and (laterin[last_basic_block],
- laterin[last_basic_block],
+ bitmap_and (laterin[last_basic_block_for_fn (cfun)],
+ laterin[last_basic_block_for_fn (cfun)],
later[(size_t) e->aux]);
clear_aux_for_edges ();
@@ -350,7 +350,7 @@ compute_insert_delete (struct edge_list *edge_list, sbitmap *antloc,
int x;
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
bitmap_and_compl (del[bb->index], antloc[bb->index],
laterin[bb->index]);
@@ -359,7 +359,8 @@ compute_insert_delete (struct edge_list *edge_list, sbitmap *antloc,
basic_block b = INDEX_EDGE_SUCC_BB (edge_list, x);
if (b == EXIT_BLOCK_PTR_FOR_FN (cfun))
- bitmap_and_compl (insert[x], later[x], laterin[last_basic_block]);
+ bitmap_and_compl (insert[x], later[x],
+ laterin[last_basic_block_for_fn (cfun)]);
else
bitmap_and_compl (insert[x], later[x], laterin[b->index]);
}
@@ -389,29 +390,35 @@ pre_edge_lcm (int n_exprs, sbitmap *transp,
fprintf (dump_file, "Edge List:\n");
verify_edge_list (dump_file, edge_list);
print_edge_list (dump_file, edge_list);
- dump_bitmap_vector (dump_file, "transp", "", transp, last_basic_block);
- dump_bitmap_vector (dump_file, "antloc", "", antloc, last_basic_block);
- dump_bitmap_vector (dump_file, "avloc", "", avloc, last_basic_block);
- dump_bitmap_vector (dump_file, "kill", "", kill, last_basic_block);
+ dump_bitmap_vector (dump_file, "transp", "", transp,
+ last_basic_block_for_fn (cfun));
+ dump_bitmap_vector (dump_file, "antloc", "", antloc,
+ last_basic_block_for_fn (cfun));
+ dump_bitmap_vector (dump_file, "avloc", "", avloc,
+ last_basic_block_for_fn (cfun));
+ dump_bitmap_vector (dump_file, "kill", "", kill,
+ last_basic_block_for_fn (cfun));
}
#endif
/* Compute global availability. */
- avin = sbitmap_vector_alloc (last_basic_block, n_exprs);
- avout = sbitmap_vector_alloc (last_basic_block, n_exprs);
+ avin = sbitmap_vector_alloc (last_basic_block_for_fn (cfun), n_exprs);
+ avout = sbitmap_vector_alloc (last_basic_block_for_fn (cfun), n_exprs);
compute_available (avloc, kill, avout, avin);
sbitmap_vector_free (avin);
/* Compute global anticipatability. */
- antin = sbitmap_vector_alloc (last_basic_block, n_exprs);
- antout = sbitmap_vector_alloc (last_basic_block, n_exprs);
+ antin = sbitmap_vector_alloc (last_basic_block_for_fn (cfun), n_exprs);
+ antout = sbitmap_vector_alloc (last_basic_block_for_fn (cfun), n_exprs);
compute_antinout_edge (antloc, transp, antin, antout);
#ifdef LCM_DEBUG_INFO
if (dump_file)
{
- dump_bitmap_vector (dump_file, "antin", "", antin, last_basic_block);
- dump_bitmap_vector (dump_file, "antout", "", antout, last_basic_block);
+ dump_bitmap_vector (dump_file, "antin", "", antin,
+ last_basic_block_for_fn (cfun));
+ dump_bitmap_vector (dump_file, "antout", "", antout,
+ last_basic_block_for_fn (cfun));
}
#endif
@@ -431,13 +438,15 @@ pre_edge_lcm (int n_exprs, sbitmap *transp,
later = sbitmap_vector_alloc (num_edges, n_exprs);
/* Allocate an extra element for the exit block in the laterin vector. */
- laterin = sbitmap_vector_alloc (last_basic_block + 1, n_exprs);
+ laterin = sbitmap_vector_alloc (last_basic_block_for_fn (cfun) + 1,
+ n_exprs);
compute_laterin (edge_list, earliest, antloc, later, laterin);
#ifdef LCM_DEBUG_INFO
if (dump_file)
{
- dump_bitmap_vector (dump_file, "laterin", "", laterin, last_basic_block + 1);
+ dump_bitmap_vector (dump_file, "laterin", "", laterin,
+ last_basic_block_for_fn (cfun) + 1);
dump_bitmap_vector (dump_file, "later", "", later, num_edges);
}
#endif
@@ -445,9 +454,9 @@ pre_edge_lcm (int n_exprs, sbitmap *transp,
sbitmap_vector_free (earliest);
*insert = sbitmap_vector_alloc (num_edges, n_exprs);
- *del = sbitmap_vector_alloc (last_basic_block, n_exprs);
+ *del = sbitmap_vector_alloc (last_basic_block_for_fn (cfun), n_exprs);
bitmap_vector_clear (*insert, num_edges);
- bitmap_vector_clear (*del, last_basic_block);
+ bitmap_vector_clear (*del, last_basic_block_for_fn (cfun));
compute_insert_delete (edge_list, antloc, later, laterin, *insert, *del);
sbitmap_vector_free (laterin);
@@ -458,7 +467,7 @@ pre_edge_lcm (int n_exprs, sbitmap *transp,
{
dump_bitmap_vector (dump_file, "pre_insert_map", "", *insert, num_edges);
dump_bitmap_vector (dump_file, "pre_delete_map", "", *del,
- last_basic_block);
+ last_basic_block_for_fn (cfun));
}
#endif
@@ -484,11 +493,11 @@ compute_available (sbitmap *avloc, sbitmap *kill, sbitmap *avout,
XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS);
/* We want a maximal solution. */
- bitmap_vector_ones (avout, last_basic_block);
+ bitmap_vector_ones (avout, last_basic_block_for_fn (cfun));
/* Put every block on the worklist; this is necessary because of the
optimistic initialization of AVOUT above. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
*qin++ = bb;
bb->aux = bb;
@@ -629,7 +638,7 @@ compute_nearerout (struct edge_list *edge_list, sbitmap *farthest,
/* Add all the blocks to the worklist. This prevents an early exit
from the loop given our optimistic initialization of NEARER. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
*tos++ = bb;
bb->aux = bb;
@@ -666,10 +675,10 @@ compute_nearerout (struct edge_list *edge_list, sbitmap *farthest,
/* Computation of insertion and deletion points requires computing NEAREROUT
for the ENTRY block. We allocated an extra entry in the NEAREROUT array
for just this purpose. */
- bitmap_ones (nearerout[last_basic_block]);
+ bitmap_ones (nearerout[last_basic_block_for_fn (cfun)]);
FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs)
- bitmap_and (nearerout[last_basic_block],
- nearerout[last_basic_block],
+ bitmap_and (nearerout[last_basic_block_for_fn (cfun)],
+ nearerout[last_basic_block_for_fn (cfun)],
nearer[(size_t) e->aux]);
clear_aux_for_edges ();
@@ -686,7 +695,7 @@ compute_rev_insert_delete (struct edge_list *edge_list, sbitmap *st_avloc,
int x;
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
bitmap_and_compl (del[bb->index], st_avloc[bb->index],
nearerout[bb->index]);
@@ -694,7 +703,8 @@ compute_rev_insert_delete (struct edge_list *edge_list, sbitmap *st_avloc,
{
basic_block b = INDEX_EDGE_PRED_BB (edge_list, x);
if (b == ENTRY_BLOCK_PTR_FOR_FN (cfun))
- bitmap_and_compl (insert[x], nearer[x], nearerout[last_basic_block]);
+ bitmap_and_compl (insert[x], nearer[x],
+ nearerout[last_basic_block_for_fn (cfun)]);
else
bitmap_and_compl (insert[x], nearer[x], nearerout[b->index]);
}
@@ -719,15 +729,15 @@ pre_edge_rev_lcm (int n_exprs, sbitmap *transp,
edge_list = create_edge_list ();
num_edges = NUM_EDGES (edge_list);
- st_antin = sbitmap_vector_alloc (last_basic_block, n_exprs);
- st_antout = sbitmap_vector_alloc (last_basic_block, n_exprs);
- bitmap_vector_clear (st_antin, last_basic_block);
- bitmap_vector_clear (st_antout, last_basic_block);
+ st_antin = sbitmap_vector_alloc (last_basic_block_for_fn (cfun), n_exprs);
+ st_antout = sbitmap_vector_alloc (last_basic_block_for_fn (cfun), n_exprs);
+ bitmap_vector_clear (st_antin, last_basic_block_for_fn (cfun));
+ bitmap_vector_clear (st_antout, last_basic_block_for_fn (cfun));
compute_antinout_edge (st_antloc, transp, st_antin, st_antout);
/* Compute global anticipatability. */
- st_avout = sbitmap_vector_alloc (last_basic_block, n_exprs);
- st_avin = sbitmap_vector_alloc (last_basic_block, n_exprs);
+ st_avout = sbitmap_vector_alloc (last_basic_block_for_fn (cfun), n_exprs);
+ st_avin = sbitmap_vector_alloc (last_basic_block_for_fn (cfun), n_exprs);
compute_available (st_avloc, kill, st_avout, st_avin);
#ifdef LCM_DEBUG_INFO
@@ -736,20 +746,26 @@ pre_edge_rev_lcm (int n_exprs, sbitmap *transp,
fprintf (dump_file, "Edge List:\n");
verify_edge_list (dump_file, edge_list);
print_edge_list (dump_file, edge_list);
- dump_bitmap_vector (dump_file, "transp", "", transp, last_basic_block);
- dump_bitmap_vector (dump_file, "st_avloc", "", st_avloc, last_basic_block);
- dump_bitmap_vector (dump_file, "st_antloc", "", st_antloc, last_basic_block);
- dump_bitmap_vector (dump_file, "st_antin", "", st_antin, last_basic_block);
- dump_bitmap_vector (dump_file, "st_antout", "", st_antout, last_basic_block);
- dump_bitmap_vector (dump_file, "st_kill", "", kill, last_basic_block);
+ dump_bitmap_vector (dump_file, "transp", "", transp,
+ last_basic_block_for_fn (cfun));
+ dump_bitmap_vector (dump_file, "st_avloc", "", st_avloc,
+ last_basic_block_for_fn (cfun));
+ dump_bitmap_vector (dump_file, "st_antloc", "", st_antloc,
+ last_basic_block_for_fn (cfun));
+ dump_bitmap_vector (dump_file, "st_antin", "", st_antin,
+ last_basic_block_for_fn (cfun));
+ dump_bitmap_vector (dump_file, "st_antout", "", st_antout,
+ last_basic_block_for_fn (cfun));
+ dump_bitmap_vector (dump_file, "st_kill", "", kill,
+ last_basic_block_for_fn (cfun));
}
#endif
#ifdef LCM_DEBUG_INFO
if (dump_file)
{
- dump_bitmap_vector (dump_file, "st_avout", "", st_avout, last_basic_block);
- dump_bitmap_vector (dump_file, "st_avin", "", st_avin, last_basic_block);
+ dump_bitmap_vector (dump_file, "st_avout", "", st_avout, last_basic_block_for_fn (cfun));
+ dump_bitmap_vector (dump_file, "st_avin", "", st_avin, last_basic_block_for_fn (cfun));
}
#endif
@@ -772,14 +788,15 @@ pre_edge_rev_lcm (int n_exprs, sbitmap *transp,
nearer = sbitmap_vector_alloc (num_edges, n_exprs);
/* Allocate an extra element for the entry block. */
- nearerout = sbitmap_vector_alloc (last_basic_block + 1, n_exprs);
+ nearerout = sbitmap_vector_alloc (last_basic_block_for_fn (cfun) + 1,
+ n_exprs);
compute_nearerout (edge_list, farthest, st_avloc, nearer, nearerout);
#ifdef LCM_DEBUG_INFO
if (dump_file)
{
dump_bitmap_vector (dump_file, "nearerout", "", nearerout,
- last_basic_block + 1);
+ last_basic_block_for_fn (cfun) + 1);
dump_bitmap_vector (dump_file, "nearer", "", nearer, num_edges);
}
#endif
@@ -787,7 +804,7 @@ pre_edge_rev_lcm (int n_exprs, sbitmap *transp,
sbitmap_vector_free (farthest);
*insert = sbitmap_vector_alloc (num_edges, n_exprs);
- *del = sbitmap_vector_alloc (last_basic_block, n_exprs);
+ *del = sbitmap_vector_alloc (last_basic_block_for_fn (cfun), n_exprs);
compute_rev_insert_delete (edge_list, st_avloc, nearer, nearerout,
*insert, *del);
@@ -799,7 +816,7 @@ pre_edge_rev_lcm (int n_exprs, sbitmap *transp,
{
dump_bitmap_vector (dump_file, "pre_insert_map", "", *insert, num_edges);
dump_bitmap_vector (dump_file, "pre_delete_map", "", *del,
- last_basic_block);
+ last_basic_block_for_fn (cfun));
}
#endif
return edge_list;
diff --git a/gcc/loop-init.c b/gcc/loop-init.c
index 664b1ace427..8c5553b983f 100644
--- a/gcc/loop-init.c
+++ b/gcc/loop-init.c
@@ -169,7 +169,7 @@ loop_optimizer_finalize (void)
ggc_free (current_loops);
current_loops = NULL;
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
{
bb->loop_father = NULL;
}
@@ -213,7 +213,7 @@ fix_loop_structure (bitmap changed_bbs)
/* Remember the depth of the blocks in the loop hierarchy, so that we can
recognize blocks whose loop nesting relationship has changed. */
if (changed_bbs)
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
bb->aux = (void *) (size_t) loop_depth (bb->loop_father);
/* Remove the dead loops from structures. We start from the innermost
@@ -256,7 +256,7 @@ fix_loop_structure (bitmap changed_bbs)
/* Mark the blocks whose loop has changed. */
if (changed_bbs)
{
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
if ((void *) (size_t) loop_depth (bb->loop_father) != bb->aux)
bitmap_set_bit (changed_bbs, bb->index);
diff --git a/gcc/loop-invariant.c b/gcc/loop-invariant.c
index 9f1fc07900c..f47bd505922 100644
--- a/gcc/loop-invariant.c
+++ b/gcc/loop-invariant.c
@@ -1825,7 +1825,7 @@ calculate_loop_reg_pressure (void)
}
ira_setup_eliminable_regset ();
bitmap_initialize (&curr_regs_live, &reg_obstack);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
curr_loop = bb->loop_father;
if (curr_loop == current_loops->tree_root)
diff --git a/gcc/loop-unroll.c b/gcc/loop-unroll.c
index b8df05b390a..6d0e5e9699c 100644
--- a/gcc/loop-unroll.c
+++ b/gcc/loop-unroll.c
@@ -1361,7 +1361,7 @@ decide_peel_simple (struct loop *loop, int flags)
also branch from branch prediction POV (and probably better reason
to not unroll/peel). */
if (num_loop_branches (loop) > 1
- && profile_status != PROFILE_READ)
+ && profile_status_for_fn (cfun) != PROFILE_READ)
{
if (dump_file)
fprintf (dump_file, ";; Not peeling, contains branches\n");
@@ -1998,7 +1998,7 @@ static void
opt_info_start_duplication (struct opt_info *opt_info)
{
if (opt_info)
- opt_info->first_new_block = last_basic_block;
+ opt_info->first_new_block = last_basic_block_for_fn (cfun);
}
/* Determine the number of iterations between initialization of the base
@@ -2359,9 +2359,11 @@ apply_opt_in_copies (struct opt_info *opt_info,
for (ivts = opt_info->iv_to_split_head; ivts; ivts = ivts->next)
allocate_basic_variable (ivts);
- for (i = opt_info->first_new_block; i < (unsigned) last_basic_block; i++)
+ for (i = opt_info->first_new_block;
+ i < (unsigned) last_basic_block_for_fn (cfun);
+ i++)
{
- bb = BASIC_BLOCK (i);
+ bb = BASIC_BLOCK_FOR_FN (cfun, i);
orig_bb = get_bb_original (bb);
/* bb->aux holds position in copy sequence initialized by
@@ -2435,9 +2437,11 @@ apply_opt_in_copies (struct opt_info *opt_info,
/* Rewrite also the original loop body. Find them as originals of the blocks
in the last copied iteration, i.e. those that have
get_bb_copy (get_bb_original (bb)) == bb. */
- for (i = opt_info->first_new_block; i < (unsigned) last_basic_block; i++)
+ for (i = opt_info->first_new_block;
+ i < (unsigned) last_basic_block_for_fn (cfun);
+ i++)
{
- bb = BASIC_BLOCK (i);
+ bb = BASIC_BLOCK_FOR_FN (cfun, i);
orig_bb = get_bb_original (bb);
if (get_bb_copy (orig_bb) != bb)
continue;
diff --git a/gcc/lower-subreg.c b/gcc/lower-subreg.c
index e67bc35648b..0b0e397c61a 100644
--- a/gcc/lower-subreg.c
+++ b/gcc/lower-subreg.c
@@ -1463,7 +1463,7 @@ decompose_multiword_subregs (bool decompose_copies)
memset (reg_copy_graph.address (), 0, sizeof (bitmap) * max);
speed_p = optimize_function_for_speed_p (cfun);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn;
@@ -1537,13 +1537,13 @@ decompose_multiword_subregs (bool decompose_copies)
propagate_pseudo_copies ();
- sub_blocks = sbitmap_alloc (last_basic_block);
+ sub_blocks = sbitmap_alloc (last_basic_block_for_fn (cfun));
bitmap_clear (sub_blocks);
EXECUTE_IF_SET_IN_BITMAP (decomposable_context, 0, regno, iter)
decompose_register (regno);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn;
@@ -1647,7 +1647,7 @@ decompose_multiword_subregs (bool decompose_copies)
rtx insn, end;
edge fallthru;
- bb = BASIC_BLOCK (i);
+ bb = BASIC_BLOCK_FOR_FN (cfun, i);
insn = BB_HEAD (bb);
end = BB_END (bb);
diff --git a/gcc/lra-assigns.c b/gcc/lra-assigns.c
index 88fc693bf2d..41ee28648ae 100644
--- a/gcc/lra-assigns.c
+++ b/gcc/lra-assigns.c
@@ -1302,7 +1302,7 @@ assign_by_spills (void)
/* FIXME: Look up the changed insns in the cached LRA insn data using
an EXECUTE_IF_SET_IN_BITMAP over changed_insns. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
FOR_BB_INSNS (bb, insn)
if (bitmap_bit_p (&changed_insns, INSN_UID (insn)))
{
diff --git a/gcc/lra-coalesce.c b/gcc/lra-coalesce.c
index 859e02f0dba..db8409f02a9 100644
--- a/gcc/lra-coalesce.c
+++ b/gcc/lra-coalesce.c
@@ -221,9 +221,12 @@ lra_coalesce (void)
basic_block bb;
rtx mv, set, insn, next, *sorted_moves;
int i, mv_num, sregno, dregno;
+ unsigned int regno;
int coalesced_moves;
int max_regno = max_reg_num ();
bitmap_head involved_insns_bitmap;
+ bitmap_head result_pseudo_vals_bitmap;
+ bitmap_iterator bi;
timevar_push (TV_LRA_COALESCE);
@@ -239,7 +242,7 @@ lra_coalesce (void)
mv_num = 0;
/* Collect moves. */
coalesced_moves = 0;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
FOR_BB_INSNS_SAFE (bb, insn, next)
if (INSN_P (insn)
@@ -297,7 +300,7 @@ lra_coalesce (void)
}
}
bitmap_initialize (&used_pseudos_bitmap, &reg_obstack);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
update_live_info (df_get_live_in (bb));
update_live_info (df_get_live_out (bb));
@@ -318,6 +321,34 @@ lra_coalesce (void)
}
}
}
+ /* If we have situation after inheritance pass:
+
+ r1 <- ... insn originally setting p1
+ i1 <- r1 setting inheritance i1 from reload r1
+ ...
+ ... <- ... p2 ... dead p2
+ ..
+ p1 <- i1
+ r2 <- i1
+ ...<- ... r2 ...
+
+ And we are coalescing p1 and p2 using p1. In this case i1 and p1
+ should have different values, otherwise they can get the same
+ hard reg and this is wrong for insn using p2 before coalescing.
+ So invalidate such inheritance pseudo values. */
+ bitmap_initialize (&result_pseudo_vals_bitmap, &reg_obstack);
+ EXECUTE_IF_SET_IN_BITMAP (&coalesced_pseudos_bitmap, 0, regno, bi)
+ bitmap_set_bit (&result_pseudo_vals_bitmap,
+ lra_reg_info[first_coalesced_pseudo[regno]].val);
+ EXECUTE_IF_SET_IN_BITMAP (&lra_inheritance_pseudos, 0, regno, bi)
+ if (bitmap_bit_p (&result_pseudo_vals_bitmap, lra_reg_info[regno].val))
+ {
+ lra_set_regno_unique_value (regno);
+ if (lra_dump_file != NULL)
+ fprintf (lra_dump_file,
+ " Make unique value for inheritance r%d\n", regno);
+ }
+ bitmap_clear (&result_pseudo_vals_bitmap);
bitmap_clear (&used_pseudos_bitmap);
bitmap_clear (&involved_insns_bitmap);
bitmap_clear (&coalesced_pseudos_bitmap);
diff --git a/gcc/lra-constraints.c b/gcc/lra-constraints.c
index bb5242a962a..0c4eec39230 100644
--- a/gcc/lra-constraints.c
+++ b/gcc/lra-constraints.c
@@ -271,9 +271,11 @@ in_class_p (rtx reg, enum reg_class cl, enum reg_class *new_class)
where other reload pseudos are no longer allocatable. */
|| (INSN_UID (curr_insn) >= new_insn_uid_start
&& curr_insn_set != NULL
- && (OBJECT_P (SET_SRC (curr_insn_set))
+ && ((OBJECT_P (SET_SRC (curr_insn_set))
+ && ! CONSTANT_P (SET_SRC (curr_insn_set)))
|| (GET_CODE (SET_SRC (curr_insn_set)) == SUBREG
- && OBJECT_P (SUBREG_REG (SET_SRC (curr_insn_set)))))))
+ && OBJECT_P (SUBREG_REG (SET_SRC (curr_insn_set)))
+ && ! CONSTANT_P (SUBREG_REG (SET_SRC (curr_insn_set)))))))
/* When we don't know what class will be used finally for reload
pseudos, we use ALL_REGS. */
return ((regno >= new_regno_start && rclass == ALL_REGS)
@@ -5300,7 +5302,7 @@ lra_inheritance (void)
bitmap_initialize (&live_regs, &reg_obstack);
bitmap_initialize (&temp_bitmap, &reg_obstack);
bitmap_initialize (&ebb_global_regs, &reg_obstack);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
start_bb = bb;
if (lra_dump_file != NULL)
@@ -5401,7 +5403,7 @@ remove_inheritance_pseudos (bitmap remove_pseudos)
because we need to marks insns affected by previous
inheritance/split pass for processing by the subsequent
constraint pass. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
fix_bb_live_info (df_get_live_in (bb), remove_pseudos);
fix_bb_live_info (df_get_live_out (bb), remove_pseudos);
diff --git a/gcc/lra-eliminations.c b/gcc/lra-eliminations.c
index 915e3a0677f..6c52bb34251 100644
--- a/gcc/lra-eliminations.c
+++ b/gcc/lra-eliminations.c
@@ -1284,7 +1284,7 @@ init_elimination (void)
struct elim_table *ep;
init_elim_table ();
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
curr_sp_change = 0;
stop_to_sp_elimination_p = false;
diff --git a/gcc/lra-lives.c b/gcc/lra-lives.c
index efc19f20140..a677f86dcf5 100644
--- a/gcc/lra-lives.c
+++ b/gcc/lra-lives.c
@@ -996,12 +996,12 @@ lra_create_live_ranges (bool all_p)
curr_point = 0;
point_freq_vec.create (get_max_uid () * 2);
lra_point_freq = point_freq_vec.address ();
- int *post_order_rev_cfg = XNEWVEC (int, last_basic_block);
+ int *post_order_rev_cfg = XNEWVEC (int, last_basic_block_for_fn (cfun));
int n_blocks_inverted = inverted_post_order_compute (post_order_rev_cfg);
lra_assert (n_blocks_inverted == n_basic_blocks_for_fn (cfun));
for (i = n_blocks_inverted - 1; i >= 0; --i)
{
- bb = BASIC_BLOCK (post_order_rev_cfg[i]);
+ bb = BASIC_BLOCK_FOR_FN (cfun, post_order_rev_cfg[i]);
if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun) || bb
== ENTRY_BLOCK_PTR_FOR_FN (cfun))
continue;
diff --git a/gcc/lra-spills.c b/gcc/lra-spills.c
index 6bebb92fd83..1e5f52bd009 100644
--- a/gcc/lra-spills.c
+++ b/gcc/lra-spills.c
@@ -280,7 +280,7 @@ assign_spill_hard_regs (int *pseudo_regnos, int n)
add_to_hard_reg_set (&reserved_hard_regs[p],
lra_reg_info[i].biggest_mode, hard_regno);
bitmap_initialize (&ok_insn_bitmap, &reg_obstack);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
FOR_BB_INSNS (bb, insn)
if (DEBUG_INSN_P (insn)
|| ((set = single_set (insn)) != NULL_RTX
@@ -478,7 +478,7 @@ spill_pseudos (void)
bitmap_ior_into (&changed_insns, &lra_reg_info[i].insn_bitmap);
}
}
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
FOR_BB_INSNS (bb, insn)
if (bitmap_bit_p (&changed_insns, INSN_UID (insn)))
@@ -686,7 +686,7 @@ lra_final_code_change (void)
if (lra_reg_info[i].nrefs != 0
&& (hard_regno = lra_get_regno_hard_regno (i)) >= 0)
SET_REGNO (regno_reg_rtx[i], hard_regno);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
FOR_BB_INSNS_SAFE (bb, insn, curr)
if (INSN_P (insn))
{
diff --git a/gcc/lra.c b/gcc/lra.c
index d21d8646a61..21b8af17ace 100644
--- a/gcc/lra.c
+++ b/gcc/lra.c
@@ -1960,7 +1960,7 @@ remove_scratches (void)
scratches.create (get_max_uid ());
bitmap_initialize (&scratch_bitmap, &reg_obstack);
bitmap_initialize (&scratch_operand_bitmap, &reg_obstack);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
FOR_BB_INSNS (bb, insn)
if (INSN_P (insn))
{
@@ -2049,7 +2049,7 @@ check_rtl (bool final_p)
rtx insn;
lra_assert (! final_p || reload_completed);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
FOR_BB_INSNS (bb, insn)
if (NONDEBUG_INSN_P (insn)
&& GET_CODE (PATTERN (insn)) != USE
@@ -2090,7 +2090,7 @@ has_nonexceptional_receiver (void)
/* First determine which blocks can reach exit via normal paths. */
tos = worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) + 1);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
bb->flags &= ~BB_REACHABLE;
/* Place the exit block on our worklist. */
@@ -2165,7 +2165,7 @@ update_inc_notes (void)
basic_block bb;
rtx insn;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
FOR_BB_INSNS (bb, insn)
if (NONDEBUG_INSN_P (insn))
{
@@ -2422,7 +2422,7 @@ lra (FILE *f)
if (cfun->can_throw_non_call_exceptions)
{
sbitmap blocks;
- blocks = sbitmap_alloc (last_basic_block);
+ blocks = sbitmap_alloc (last_basic_block_for_fn (cfun));
bitmap_ones (blocks);
find_many_sub_basic_blocks (blocks);
sbitmap_free (blocks);
diff --git a/gcc/lto-cgraph.c b/gcc/lto-cgraph.c
index 6f2773c70ba..44cfa286fad 100644
--- a/gcc/lto-cgraph.c
+++ b/gcc/lto-cgraph.c
@@ -204,7 +204,7 @@ lto_set_symtab_encoder_encode_body (lto_symtab_encoder_t encoder,
bool
lto_symtab_encoder_encode_initializer_p (lto_symtab_encoder_t encoder,
- struct varpool_node *node)
+ varpool_node *node)
{
int index = lto_symtab_encoder_lookup (encoder, node);
if (index == LCC_NOT_FOUND)
@@ -216,7 +216,7 @@ lto_symtab_encoder_encode_initializer_p (lto_symtab_encoder_t encoder,
static void
lto_set_symtab_encoder_encode_initializer (lto_symtab_encoder_t encoder,
- struct varpool_node *node)
+ varpool_node *node)
{
int index = lto_symtab_encoder_lookup (encoder, node);
encoder->nodes[index].initializer = true;
@@ -277,7 +277,7 @@ lto_output_edge (struct lto_simple_output_block *ob, struct cgraph_edge *edge,
bp = bitpack_create (ob->main_stream);
uid = (!gimple_has_body_p (edge->caller->decl)
? edge->lto_stmt_uid : gimple_uid (edge->call_stmt) + 1);
- bp_pack_enum (&bp, cgraph_inline_failed_enum,
+ bp_pack_enum (&bp, cgraph_inline_failed_t,
CIF_N_REASONS, edge->inline_failed);
bp_pack_var_len_unsigned (&bp, uid);
bp_pack_var_len_unsigned (&bp, edge->frequency);
@@ -389,7 +389,7 @@ lto_output_node (struct lto_simple_output_block *ob, struct cgraph_node *node,
intptr_t ref;
bool in_other_partition = false;
struct cgraph_node *clone_of, *ultimate_clone_of;
- struct ipa_opt_pass_d *pass;
+ ipa_opt_pass_d *pass;
int i;
bool alias_p;
@@ -539,7 +539,7 @@ lto_output_node (struct lto_simple_output_block *ob, struct cgraph_node *node,
If NODE is not in SET, then NODE is a boundary. */
static void
-lto_output_varpool_node (struct lto_simple_output_block *ob, struct varpool_node *node,
+lto_output_varpool_node (struct lto_simple_output_block *ob, varpool_node *node,
lto_symtab_encoder_t encoder)
{
bool boundary_p = !lto_symtab_encoder_in_partition_p (encoder, node);
@@ -796,7 +796,7 @@ compute_ltrans_boundary (lto_symtab_encoder_t in_encoder)
for (lsei = lsei_start_variable_in_partition (in_encoder);
!lsei_end_p (lsei); lsei_next_variable_in_partition (&lsei))
{
- struct varpool_node *vnode = lsei_varpool_node (lsei);
+ varpool_node *vnode = lsei_varpool_node (lsei);
lto_set_symtab_encoder_in_partition (encoder, vnode);
lto_set_symtab_encoder_encode_initializer (encoder, vnode);
@@ -804,7 +804,7 @@ compute_ltrans_boundary (lto_symtab_encoder_t in_encoder)
/* For proper debug info, we need to ship the origins, too. */
if (DECL_ABSTRACT_ORIGIN (vnode->decl))
{
- struct varpool_node *origin_node
+ varpool_node *origin_node
= varpool_get_node (DECL_ABSTRACT_ORIGIN (node->decl));
lto_set_symtab_encoder_in_partition (encoder, origin_node);
}
@@ -1060,12 +1060,12 @@ input_node (struct lto_file_decl_data *file_data,
node->ipa_transforms_to_apply = vNULL;
for (i = 0; i < count; i++)
{
- struct opt_pass *pass;
+ opt_pass *pass;
int pid = streamer_read_hwi (ib);
gcc_assert (pid < passes->passes_by_id_size);
pass = passes->passes_by_id[pid];
- node->ipa_transforms_to_apply.safe_push ((struct ipa_opt_pass_d *) pass);
+ node->ipa_transforms_to_apply.safe_push ((ipa_opt_pass_d *) pass);
}
if (tag == LTO_symtab_analyzed_node)
@@ -1113,13 +1113,13 @@ input_node (struct lto_file_decl_data *file_data,
/* Read a node from input_block IB. TAG is the node's tag just read.
Return the node read or overwriten. */
-static struct varpool_node *
+static varpool_node *
input_varpool_node (struct lto_file_decl_data *file_data,
struct lto_input_block *ib)
{
int decl_index;
tree var_decl;
- struct varpool_node *node;
+ varpool_node *node;
struct bitpack_d bp;
int ref = LCC_NOT_FOUND;
int order;
@@ -1225,7 +1225,7 @@ input_edge (struct lto_input_block *ib, vec<symtab_node *> nodes,
count = streamer_read_gcov_count (ib);
bp = streamer_read_bitpack (ib);
- inline_failed = bp_unpack_enum (&bp, cgraph_inline_failed_enum, CIF_N_REASONS);
+ inline_failed = bp_unpack_enum (&bp, cgraph_inline_failed_t, CIF_N_REASONS);
stmt_id = bp_unpack_var_len_unsigned (&bp);
freq = (int) bp_unpack_var_len_unsigned (&bp);
diff --git a/gcc/lto-streamer-in.c b/gcc/lto-streamer-in.c
index 3fbe820d23e..78b51ea461f 100644
--- a/gcc/lto-streamer-in.c
+++ b/gcc/lto-streamer-in.c
@@ -204,7 +204,7 @@ lto_input_tree_ref (struct lto_input_block *ib, struct data_in *data_in,
unsigned HOST_WIDE_INT ix_u;
tree result = NULL_TREE;
- lto_tag_check_range (tag, LTO_field_decl_ref, LTO_global_decl_ref);
+ lto_tag_check_range (tag, LTO_field_decl_ref, LTO_namelist_decl_ref);
switch (tag)
{
@@ -248,6 +248,28 @@ lto_input_tree_ref (struct lto_input_block *ib, struct data_in *data_in,
result = lto_file_decl_data_get_var_decl (data_in->file_data, ix_u);
break;
+ case LTO_namelist_decl_ref:
+ {
+ tree tmp;
+ vec<constructor_elt, va_gc> *nml_decls = NULL;
+ unsigned i, n;
+
+ result = make_node (NAMELIST_DECL);
+ TREE_TYPE (result) = void_type_node;
+ DECL_NAME (result) = stream_read_tree (ib, data_in);
+ n = streamer_read_uhwi (ib);
+ for (i = 0; i < n; i++)
+ {
+ ix_u = streamer_read_uhwi (ib);
+ tmp = lto_file_decl_data_get_var_decl (data_in->file_data, ix_u);
+ gcc_assert (tmp != NULL_TREE);
+ CONSTRUCTOR_APPEND_ELT (nml_decls, NULL_TREE, tmp);
+ }
+ NAMELIST_DECL_ASSOCIATED_DECL (result) = build_constructor (NULL_TREE,
+ nml_decls);
+ break;
+ }
+
default:
gcc_unreachable ();
}
@@ -589,7 +611,7 @@ make_new_block (struct function *fn, unsigned int index)
{
basic_block bb = alloc_block ();
bb->index = index;
- SET_BASIC_BLOCK_FOR_FUNCTION (fn, index, bb);
+ SET_BASIC_BLOCK_FOR_FN (fn, index, bb);
n_basic_blocks_for_fn (fn)++;
return bb;
}
@@ -610,22 +632,22 @@ input_cfg (struct lto_input_block *ib, struct data_in *data_in,
init_empty_tree_cfg_for_function (fn);
init_ssa_operands (fn);
- profile_status_for_function (fn) = streamer_read_enum (ib, profile_status_d,
- PROFILE_LAST);
+ profile_status_for_fn (fn) = streamer_read_enum (ib, profile_status_d,
+ PROFILE_LAST);
bb_count = streamer_read_uhwi (ib);
- last_basic_block_for_function (fn) = bb_count;
- if (bb_count > basic_block_info_for_function (fn)->length ())
- vec_safe_grow_cleared (basic_block_info_for_function (fn), bb_count);
+ last_basic_block_for_fn (fn) = bb_count;
+ if (bb_count > basic_block_info_for_fn (fn)->length ())
+ vec_safe_grow_cleared (basic_block_info_for_fn (fn), bb_count);
- if (bb_count > label_to_block_map_for_function (fn)->length ())
- vec_safe_grow_cleared (label_to_block_map_for_function (fn), bb_count);
+ if (bb_count > label_to_block_map_for_fn (fn)->length ())
+ vec_safe_grow_cleared (label_to_block_map_for_fn (fn), bb_count);
index = streamer_read_hwi (ib);
while (index != -1)
{
- basic_block bb = BASIC_BLOCK_FOR_FUNCTION (fn, index);
+ basic_block bb = BASIC_BLOCK_FOR_FN (fn, index);
unsigned int edge_count;
if (bb == NULL)
@@ -649,7 +671,7 @@ input_cfg (struct lto_input_block *ib, struct data_in *data_in,
count_materialization_scale);
edge_flags = streamer_read_uhwi (ib);
- dest = BASIC_BLOCK_FOR_FUNCTION (fn, dest_index);
+ dest = BASIC_BLOCK_FOR_FN (fn, dest_index);
if (dest == NULL)
dest = make_new_block (fn, dest_index);
@@ -666,7 +688,7 @@ input_cfg (struct lto_input_block *ib, struct data_in *data_in,
index = streamer_read_hwi (ib);
while (index != -1)
{
- basic_block bb = BASIC_BLOCK_FOR_FUNCTION (fn, index);
+ basic_block bb = BASIC_BLOCK_FOR_FN (fn, index);
bb->prev_bb = p_bb;
p_bb->next_bb = bb;
p_bb = bb;
@@ -697,7 +719,7 @@ input_cfg (struct lto_input_block *ib, struct data_in *data_in,
}
struct loop *loop = alloc_loop ();
- loop->header = BASIC_BLOCK_FOR_FUNCTION (fn, header_index);
+ loop->header = BASIC_BLOCK_FOR_FN (fn, header_index);
loop->header->loop_father = loop;
/* Read everything copy_loop_info copies. */
@@ -966,7 +988,7 @@ input_function (tree fn_decl, struct data_in *data_in,
/* Fix up the call statements that are mentioned in the callgraph
edges. */
set_gimple_stmt_max_uid (cfun, 0);
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
@@ -981,7 +1003,7 @@ input_function (tree fn_decl, struct data_in *data_in,
}
}
stmts = (gimple *) xcalloc (gimple_stmt_max_uid (fn), sizeof (gimple));
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
{
gimple_stmt_iterator bsi = gsi_start_phis (bb);
while (!gsi_end_p (bsi))
@@ -1260,7 +1282,7 @@ lto_input_tree_1 (struct lto_input_block *ib, struct data_in *data_in,
if (tag == LTO_null)
result = NULL_TREE;
- else if (tag >= LTO_field_decl_ref && tag <= LTO_global_decl_ref)
+ else if (tag >= LTO_field_decl_ref && tag <= LTO_namelist_decl_ref)
{
/* If TAG is a reference to an indexable tree, the next value
in IB is the index into the table where we expect to find
diff --git a/gcc/lto-streamer-out.c b/gcc/lto-streamer-out.c
index 6b78f4b02a3..d03f91cf9d9 100644
--- a/gcc/lto-streamer-out.c
+++ b/gcc/lto-streamer-out.c
@@ -54,6 +54,8 @@ along with GCC; see the file COPYING3. If not see
#include "cfgloop.h"
+static void lto_write_tree (struct output_block*, tree, bool);
+
/* Clear the line info stored in DATA_IN. */
static void
@@ -252,6 +254,21 @@ lto_output_tree_ref (struct output_block *ob, tree expr)
lto_output_type_decl_index (ob->decl_state, ob->main_stream, expr);
break;
+ case NAMELIST_DECL:
+ {
+ unsigned i;
+ tree value, tmp;
+
+ streamer_write_record_start (ob, LTO_namelist_decl_ref);
+ stream_write_tree (ob, DECL_NAME (expr), true);
+ tmp = NAMELIST_DECL_ASSOCIATED_DECL (expr);
+ gcc_assert (tmp != NULL_TREE);
+ streamer_write_uhwi (ob, CONSTRUCTOR_ELTS (tmp)->length());
+ FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (tmp), i, value)
+ lto_output_var_decl_index (ob->decl_state, ob->main_stream, value);
+ break;
+ }
+
case NAMESPACE_DECL:
streamer_write_record_start (ob, LTO_namespace_decl_ref);
lto_output_namespace_decl_index (ob->decl_state, ob->main_stream, expr);
@@ -322,7 +339,7 @@ get_symbol_initial_value (struct output_block *ob, tree expr)
&& initial)
{
lto_symtab_encoder_t encoder;
- struct varpool_node *vnode;
+ varpool_node *vnode;
encoder = ob->decl_state->symtab_node_encoder;
vnode = varpool_get_node (expr);
@@ -1616,10 +1633,10 @@ output_cfg (struct output_block *ob, struct function *fn)
ob->main_stream = ob->cfg_stream;
streamer_write_enum (ob->main_stream, profile_status_d, PROFILE_LAST,
- profile_status_for_function (fn));
+ profile_status_for_fn (fn));
/* Output the number of the highest basic block. */
- streamer_write_uhwi (ob, last_basic_block_for_function (fn));
+ streamer_write_uhwi (ob, last_basic_block_for_fn (fn));
FOR_ALL_BB_FN (bb, fn)
{
@@ -1864,7 +1881,7 @@ output_function (struct cgraph_node *node)
virtual PHIs get re-computed on-the-fly which would make numbers
inconsistent. */
set_gimple_stmt_max_uid (cfun, 0);
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
@@ -1883,7 +1900,7 @@ output_function (struct cgraph_node *node)
}
/* To avoid keeping duplicate gimple IDs in the statements, renumber
virtual phis now. */
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
diff --git a/gcc/lto-streamer.h b/gcc/lto-streamer.h
index 9dac7c9e846..701748ce4d4 100644
--- a/gcc/lto-streamer.h
+++ b/gcc/lto-streamer.h
@@ -222,7 +222,8 @@ enum LTO_tags
LTO_const_decl_ref,
LTO_imported_decl_ref,
LTO_translation_unit_decl_ref,
- LTO_global_decl_ref, /* Do not change. */
+ LTO_global_decl_ref,
+ LTO_namelist_decl_ref, /* Do not change. */
/* This tag must always be last. */
LTO_NUM_TAGS
@@ -256,7 +257,7 @@ enum lto_section_type
};
/* Indices to the various function, type and symbol streams. */
-typedef enum
+enum lto_decl_stream_e_t
{
LTO_DECL_STREAM_TYPE = 0, /* Must be first. */
LTO_DECL_STREAM_FIELD_DECL,
@@ -266,7 +267,7 @@ typedef enum
LTO_DECL_STREAM_NAMESPACE_DECL,
LTO_DECL_STREAM_LABEL_DECL,
LTO_N_DECL_STREAMS
-} lto_decl_stream_e_t;
+};
typedef enum ld_plugin_symbol_resolution ld_plugin_symbol_resolution_t;
@@ -428,7 +429,7 @@ struct lto_stats_d
};
/* Entry of LTO symtab encoder. */
-typedef struct
+struct lto_encoder_entry
{
symtab_node *node;
/* Is the node in this partition (i.e. ltrans of this partition will
@@ -440,7 +441,7 @@ typedef struct
For example the readonly variable initializers are encoded to aid
constant folding even if they are not in the partition. */
unsigned int initializer:1;
-} lto_encoder_entry;
+};
/* Encoder data structure used to stream callgraph nodes. */
@@ -453,11 +454,11 @@ struct lto_symtab_encoder_d
typedef struct lto_symtab_encoder_d *lto_symtab_encoder_t;
/* Iterator structure for cgraph node sets. */
-typedef struct
+struct lto_symtab_encoder_iterator
{
lto_symtab_encoder_t encoder;
unsigned index;
-} lto_symtab_encoder_iterator;
+};
@@ -522,7 +523,6 @@ struct res_pair
ld_plugin_symbol_resolution_t res;
unsigned index;
};
-typedef struct res_pair res_pair;
/* One of these is allocated for each object file that being compiled
@@ -885,7 +885,7 @@ void lto_set_symtab_encoder_in_partition (lto_symtab_encoder_t,
symtab_node *);
bool lto_symtab_encoder_encode_initializer_p (lto_symtab_encoder_t,
- struct varpool_node *);
+ varpool_node *);
void output_symtab (void);
void input_symtab (void);
bool referenced_from_other_partition_p (struct ipa_ref_list *,
@@ -1080,7 +1080,7 @@ lsei_cgraph_node (lto_symtab_encoder_iterator lsei)
}
/* Return the node pointed to by LSI. */
-static inline struct varpool_node *
+static inline varpool_node *
lsei_varpool_node (lto_symtab_encoder_iterator lsei)
{
return varpool (lsei.encoder->nodes[lsei.index].node);
diff --git a/gcc/lto/ChangeLog b/gcc/lto/ChangeLog
index 6d6ebf6717d..a191fec01c2 100644
--- a/gcc/lto/ChangeLog
+++ b/gcc/lto/ChangeLog
@@ -1,3 +1,9 @@
+2013-12-06 Oleg Endo <olegendo@gcc.gnu.org>
+
+ * lto.c: Remove struct tags when referring to class varpool_node.
+ * lto-partition.c: Likewise.
+ * lto-symtab.c: Likewise.
+
2013-11-29 Jakub Jelinek <jakub@redhat.com>
Richard Biener <rguenther@suse.de>
diff --git a/gcc/lto/lto-partition.c b/gcc/lto/lto-partition.c
index 95ec7fabf86..5b46af9d907 100644
--- a/gcc/lto/lto-partition.c
+++ b/gcc/lto/lto-partition.c
@@ -408,8 +408,8 @@ node_cmp (const void *pa, const void *pb)
static int
varpool_node_cmp (const void *pa, const void *pb)
{
- const struct varpool_node *a = *(const struct varpool_node * const *) pa;
- const struct varpool_node *b = *(const struct varpool_node * const *) pb;
+ const varpool_node *a = *(const varpool_node * const *) pa;
+ const varpool_node *b = *(const varpool_node * const *) pb;
return b->order - a->order;
}
@@ -457,14 +457,14 @@ lto_balanced_map (void)
int n_nodes = 0;
int n_varpool_nodes = 0, varpool_pos = 0, best_varpool_pos = 0;
struct cgraph_node **order = XNEWVEC (struct cgraph_node *, cgraph_max_uid);
- struct varpool_node **varpool_order = NULL;
+ varpool_node **varpool_order = NULL;
int i;
struct cgraph_node *node;
int total_size = 0, best_total_size = 0;
int partition_size;
ltrans_partition partition;
int last_visited_node = 0;
- struct varpool_node *vnode;
+ varpool_node *vnode;
int cost = 0, internal = 0;
int best_n_nodes = 0, best_i = 0, best_cost =
INT_MAX, best_internal = 0;
@@ -494,13 +494,13 @@ lto_balanced_map (void)
FOR_EACH_VARIABLE (vnode)
if (get_symbol_class (vnode) == SYMBOL_PARTITION)
n_varpool_nodes++;
- varpool_order = XNEWVEC (struct varpool_node *, n_varpool_nodes);
+ varpool_order = XNEWVEC (varpool_node *, n_varpool_nodes);
n_varpool_nodes = 0;
FOR_EACH_VARIABLE (vnode)
if (get_symbol_class (vnode) == SYMBOL_PARTITION)
varpool_order[n_varpool_nodes++] = vnode;
- qsort (varpool_order, n_varpool_nodes, sizeof (struct varpool_node *),
+ qsort (varpool_order, n_varpool_nodes, sizeof (varpool_node *),
varpool_node_cmp);
}
diff --git a/gcc/lto/lto-symtab.c b/gcc/lto/lto-symtab.c
index 87d6332ab2b..ad0a37ca75c 100644
--- a/gcc/lto/lto-symtab.c
+++ b/gcc/lto/lto-symtab.c
@@ -103,8 +103,8 @@ lto_cgraph_replace_node (struct cgraph_node *node,
all edges and removing the old node. */
static void
-lto_varpool_replace_node (struct varpool_node *vnode,
- struct varpool_node *prevailing_node)
+lto_varpool_replace_node (varpool_node *vnode,
+ varpool_node *prevailing_node)
{
gcc_assert (!vnode->definition || prevailing_node->definition);
gcc_assert (!vnode->analyzed || prevailing_node->analyzed);
diff --git a/gcc/lto/lto.c b/gcc/lto/lto.c
index e529fdbe2bf..60b00d835ec 100644
--- a/gcc/lto/lto.c
+++ b/gcc/lto/lto.c
@@ -3314,7 +3314,7 @@ lto_main (void)
do_whole_program_analysis ();
else
{
- struct varpool_node *vnode;
+ varpool_node *vnode;
timevar_start (TV_PHASE_OPT_GEN);
diff --git a/gcc/mcf.c b/gcc/mcf.c
index e709f2ac2c6..146b43c1377 100644
--- a/gcc/mcf.c
+++ b/gcc/mcf.c
@@ -1245,7 +1245,7 @@ adjust_cfg_counts (fixup_graph_type *fixup_graph)
sum_edge_counts (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds);
/* Compute edge probabilities. */
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
{
if (bb->count)
{
@@ -1281,7 +1281,7 @@ adjust_cfg_counts (fixup_graph_type *fixup_graph)
{
fprintf (dump_file, "\nCheck %s() CFG flow conservation:\n",
current_function_name ());
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
if ((bb->count != sum_edge_counts (bb->preds))
|| (bb->count != sum_edge_counts (bb->succs)))
diff --git a/gcc/mkconfig.sh b/gcc/mkconfig.sh
index 29fdfc7a294..a5b116d4483 100644
--- a/gcc/mkconfig.sh
+++ b/gcc/mkconfig.sh
@@ -97,6 +97,9 @@ case $output in
#if defined IN_GCC && !defined GENERATOR_FILE && !defined USED_FOR_TARGET
# include "insn-flags.h"
#endif
+#if defined IN_GCC && !defined GENERATOR_FILE
+# include "insn-modes.h"
+#endif
EOF
;;
esac
diff --git a/gcc/mode-switching.c b/gcc/mode-switching.c
index ed45094c395..4f68536d622 100644
--- a/gcc/mode-switching.c
+++ b/gcc/mode-switching.c
@@ -480,7 +480,8 @@ optimize_mode_switching (void)
entry_exit_extra = 3;
#endif
bb_info[n_entities]
- = XCNEWVEC (struct bb_info, last_basic_block + entry_exit_extra);
+ = XCNEWVEC (struct bb_info,
+ last_basic_block_for_fn (cfun) + entry_exit_extra);
entity_map[n_entities++] = e;
if (num_modes[e] > max_num_modes)
max_num_modes = num_modes[e];
@@ -500,11 +501,11 @@ optimize_mode_switching (void)
/* Create the bitmap vectors. */
- antic = sbitmap_vector_alloc (last_basic_block, n_entities);
- transp = sbitmap_vector_alloc (last_basic_block, n_entities);
- comp = sbitmap_vector_alloc (last_basic_block, n_entities);
+ antic = sbitmap_vector_alloc (last_basic_block_for_fn (cfun), n_entities);
+ transp = sbitmap_vector_alloc (last_basic_block_for_fn (cfun), n_entities);
+ comp = sbitmap_vector_alloc (last_basic_block_for_fn (cfun), n_entities);
- bitmap_vector_ones (transp, last_basic_block);
+ bitmap_vector_ones (transp, last_basic_block_for_fn (cfun));
for (j = n_entities - 1; j >= 0; j--)
{
@@ -515,7 +516,7 @@ optimize_mode_switching (void)
/* Determine what the first use (if any) need for a mode of entity E is.
This will be the mode that is anticipatable for this block.
Also compute the initial transparency settings. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
struct seginfo *ptr;
int last_mode = no_mode;
@@ -608,7 +609,7 @@ optimize_mode_switching (void)
#endif /* NORMAL_MODE */
}
- kill = sbitmap_vector_alloc (last_basic_block, n_entities);
+ kill = sbitmap_vector_alloc (last_basic_block_for_fn (cfun), n_entities);
for (i = 0; i < max_num_modes; i++)
{
int current_mode[N_ENTITIES];
@@ -616,14 +617,14 @@ optimize_mode_switching (void)
sbitmap *insert;
/* Set the anticipatable and computing arrays. */
- bitmap_vector_clear (antic, last_basic_block);
- bitmap_vector_clear (comp, last_basic_block);
+ bitmap_vector_clear (antic, last_basic_block_for_fn (cfun));
+ bitmap_vector_clear (comp, last_basic_block_for_fn (cfun));
for (j = n_entities - 1; j >= 0; j--)
{
int m = current_mode[j] = MODE_PRIORITY_TO_MODE (entity_map[j], i);
struct bb_info *info = bb_info[j];
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
if (info[bb->index].seginfo->mode == m)
bitmap_set_bit (antic[bb->index], j);
@@ -636,7 +637,7 @@ optimize_mode_switching (void)
/* Calculate the optimal locations for the
placement mode switches to modes with priority I. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
bitmap_not (kill[bb->index], transp[bb->index]);
edge_list = pre_edge_lcm (n_entities, transp, comp, antic,
kill, &insert, &del);
@@ -691,7 +692,7 @@ optimize_mode_switching (void)
insert_insn_on_edge (mode_set, eg);
}
- FOR_EACH_BB_REVERSE (bb)
+ FOR_EACH_BB_REVERSE_FN (bb, cfun)
if (bitmap_bit_p (del[bb->index], j))
{
make_preds_opaque (bb, j);
@@ -711,7 +712,7 @@ optimize_mode_switching (void)
{
int no_mode = num_modes[entity_map[j]];
- FOR_EACH_BB_REVERSE (bb)
+ FOR_EACH_BB_REVERSE_FN (bb, cfun)
{
struct seginfo *ptr, *next;
for (ptr = bb_info[j][bb->index].seginfo; ptr; ptr = next)
diff --git a/gcc/modulo-sched.c b/gcc/modulo-sched.c
index f3130449909..ba8d02096ef 100644
--- a/gcc/modulo-sched.c
+++ b/gcc/modulo-sched.c
@@ -3343,7 +3343,7 @@ rest_of_handle_sms (void)
max_regno = max_reg_num ();
/* Finalize layout changes. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
bb->aux = bb->next_bb;
free_dominance_info (CDI_DOMINATORS);
diff --git a/gcc/objc/ChangeLog b/gcc/objc/ChangeLog
index 6ec2ef5fa70..366bc3f5402 100644
--- a/gcc/objc/ChangeLog
+++ b/gcc/objc/ChangeLog
@@ -1,3 +1,9 @@
+2013-12-04 Joseph Myers <joseph@codesourcery.com>
+
+ PR c/52023
+ * objc-act.c (objc_synthesize_getter): Update calls to
+ c_sizeof_or_alignof_type.
+
2013-11-22 Andrew MacLeod <amacleod@redhat.com>
* objc/objc-act.c: Add required include files from gimple.h.
diff --git a/gcc/objc/objc-act.c b/gcc/objc/objc-act.c
index 0cf93d4002c..5cdec349d27 100644
--- a/gcc/objc/objc-act.c
+++ b/gcc/objc/objc-act.c
@@ -7261,6 +7261,7 @@ objc_synthesize_getter (tree klass, tree class_methods ATTRIBUTE_UNUSED, tree pr
the same type, there is no need to lookup the ivar. */
size_of = c_sizeof_or_alignof_type (location, TREE_TYPE (property),
true /* is_sizeof */,
+ false /* min_alignof */,
false /* complain */);
if (PROPERTY_NONATOMIC (property))
@@ -7462,6 +7463,7 @@ objc_synthesize_setter (tree klass, tree class_methods ATTRIBUTE_UNUSED, tree pr
the same type, there is no need to lookup the ivar. */
size_of = c_sizeof_or_alignof_type (location, TREE_TYPE (property),
true /* is_sizeof */,
+ false /* min_alignof */,
false /* complain */);
if (PROPERTY_NONATOMIC (property))
diff --git a/gcc/omp-low.c b/gcc/omp-low.c
index ad13532f932..7c07ae6757d 100644
--- a/gcc/omp-low.c
+++ b/gcc/omp-low.c
@@ -4543,7 +4543,7 @@ optimize_omp_library_calls (gimple entry_stmt)
&& find_omp_clause (gimple_omp_task_clauses (entry_stmt),
OMP_CLAUSE_UNTIED) != NULL);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
gimple call = gsi_stmt (gsi);
@@ -4847,7 +4847,7 @@ expand_omp_taskreg (struct omp_region *region)
basic_block bb;
bool changed = false;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
changed |= gimple_purge_dead_eh_edges (bb);
if (changed)
cleanup_tree_cfg ();
@@ -7937,7 +7937,7 @@ expand_omp_target (struct omp_region *region)
basic_block bb;
bool changed = false;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
changed |= gimple_purge_dead_eh_edges (bb);
if (changed)
cleanup_tree_cfg ();
diff --git a/gcc/optabs.c b/gcc/optabs.c
index 074149ab61f..24e75d5f97d 100644
--- a/gcc/optabs.c
+++ b/gcc/optabs.c
@@ -4283,7 +4283,7 @@ emit_cmp_and_jump_insn_1 (rtx test, enum machine_mode mode, rtx label, int prob)
insn = emit_jump_insn (GEN_FCN (icode) (test, XEXP (test, 0),
XEXP (test, 1), label));
if (prob != -1
- && profile_status != PROFILE_ABSENT
+ && profile_status_for_fn (cfun) != PROFILE_ABSENT
&& insn
&& JUMP_P (insn)
&& any_condjump_p (insn)
@@ -5503,7 +5503,8 @@ gen_int_libfunc (optab optable, const char *opname, char suffix,
if (maxsize < LONG_LONG_TYPE_SIZE)
maxsize = LONG_LONG_TYPE_SIZE;
if (GET_MODE_CLASS (mode) != MODE_INT
- || mode < word_mode || GET_MODE_BITSIZE (mode) > maxsize)
+ || GET_MODE_BITSIZE (mode) < BITS_PER_WORD
+ || GET_MODE_BITSIZE (mode) > maxsize)
return;
gen_libfunc (optable, opname, suffix, mode);
}
@@ -6912,6 +6913,45 @@ expand_mult_highpart (enum machine_mode mode, rtx op0, rtx op1,
return expand_vec_perm (mode, m1, m2, perm, target);
}
+
+/* Return true if target supports vector masked load/store for mode. */
+bool
+can_vec_mask_load_store_p (enum machine_mode mode, bool is_load)
+{
+ optab op = is_load ? maskload_optab : maskstore_optab;
+ enum machine_mode vmode;
+ unsigned int vector_sizes;
+
+ /* If mode is vector mode, check it directly. */
+ if (VECTOR_MODE_P (mode))
+ return optab_handler (op, mode) != CODE_FOR_nothing;
+
+ /* Otherwise, return true if there is some vector mode with
+ the mask load/store supported. */
+
+ /* See if there is any chance the mask load or store might be
+ vectorized. If not, punt. */
+ vmode = targetm.vectorize.preferred_simd_mode (mode);
+ if (!VECTOR_MODE_P (vmode))
+ return false;
+
+ if (optab_handler (op, vmode) != CODE_FOR_nothing)
+ return true;
+
+ vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();
+ while (vector_sizes != 0)
+ {
+ unsigned int cur = 1 << floor_log2 (vector_sizes);
+ vector_sizes &= ~cur;
+ if (cur <= GET_MODE_SIZE (mode))
+ continue;
+ vmode = mode_for_vector (mode, cur / GET_MODE_SIZE (mode));
+ if (VECTOR_MODE_P (vmode)
+ && optab_handler (op, vmode) != CODE_FOR_nothing)
+ return true;
+ }
+ return false;
+}
/* Return true if there is a compare_and_swap pattern. */
diff --git a/gcc/optabs.def b/gcc/optabs.def
index 6b924acf8cd..f19ceba4746 100644
--- a/gcc/optabs.def
+++ b/gcc/optabs.def
@@ -187,6 +187,10 @@ OPTAB_D (movcc_optab, "mov$acc")
OPTAB_D (cmov_optab, "cmov$a6")
OPTAB_D (cstore_optab, "cstore$a4")
OPTAB_D (ctrap_optab, "ctrap$a4")
+OPTAB_D (addv4_optab, "addv$I$a4")
+OPTAB_D (subv4_optab, "subv$I$a4")
+OPTAB_D (mulv4_optab, "mulv$I$a4")
+OPTAB_D (negv3_optab, "negv$I$a3")
OPTAB_D (smul_highpart_optab, "smul$a3_highpart")
OPTAB_D (umul_highpart_optab, "umul$a3_highpart")
@@ -248,6 +252,8 @@ OPTAB_D (sdot_prod_optab, "sdot_prod$I$a")
OPTAB_D (ssum_widen_optab, "widen_ssum$I$a3")
OPTAB_D (udot_prod_optab, "udot_prod$I$a")
OPTAB_D (usum_widen_optab, "widen_usum$I$a3")
+OPTAB_D (maskload_optab, "maskload$a")
+OPTAB_D (maskstore_optab, "maskstore$a")
OPTAB_D (vec_extract_optab, "vec_extract$a")
OPTAB_D (vec_init_optab, "vec_init$a")
OPTAB_D (vec_pack_sfix_trunc_optab, "vec_pack_sfix_trunc_$a")
diff --git a/gcc/optabs.h b/gcc/optabs.h
index 6a5ec19a539..3c40b4a0e94 100644
--- a/gcc/optabs.h
+++ b/gcc/optabs.h
@@ -236,7 +236,7 @@ extern rtx expand_vec_cond_expr (tree, tree, tree, tree, rtx);
/* Generate code for VEC_LSHIFT_EXPR and VEC_RSHIFT_EXPR. */
extern rtx expand_vec_shift_expr (sepops, rtx);
-/* Return tree if target supports vector operations for VEC_PERM_EXPR. */
+/* Return true if target supports vector operations for VEC_PERM_EXPR. */
extern bool can_vec_perm_p (enum machine_mode, bool, const unsigned char *);
/* Generate code for VEC_PERM_EXPR. */
@@ -248,6 +248,9 @@ extern int can_mult_highpart_p (enum machine_mode, bool);
/* Generate code for MULT_HIGHPART_EXPR. */
extern rtx expand_mult_highpart (enum machine_mode, rtx, rtx, rtx, bool);
+/* Return true if target supports vector masked load/store for mode. */
+extern bool can_vec_mask_load_store_p (enum machine_mode, bool);
+
/* Return the insn used to implement mode MODE of OP, or CODE_FOR_nothing
if the target does not have such an insn. */
diff --git a/gcc/opts.c b/gcc/opts.c
index a0a6c53128c..4cb2cdf4eff 100644
--- a/gcc/opts.c
+++ b/gcc/opts.c
@@ -494,7 +494,7 @@ static const struct default_options default_options_table[] =
{ OPT_LEVELS_2_PLUS_SPEED_ONLY, OPT_foptimize_strlen, NULL, 1 },
{ OPT_LEVELS_2_PLUS, OPT_fhoist_adjacent_loads, NULL, 1 },
{ OPT_LEVELS_2_PLUS, OPT_fipa_sem_equality, NULL, 1 },
- { OPT_LEVELS_2_PLUS, OPT_fisolate_erroneous_paths, NULL, 1 },
+ { OPT_LEVELS_2_PLUS, OPT_fisolate_erroneous_paths_dereference, NULL, 1 },
/* -O3 optimizations. */
{ OPT_LEVELS_3_PLUS, OPT_ftree_loop_distribute_patterns, NULL, 1 },
@@ -1460,6 +1460,8 @@ common_handle_option (struct gcc_options *opts,
{ "vla-bound", SANITIZE_VLA, sizeof "vla-bound" - 1 },
{ "return", SANITIZE_RETURN, sizeof "return" - 1 },
{ "null", SANITIZE_NULL, sizeof "null" - 1 },
+ { "signed-integer-overflow", SANITIZE_SI_OVERFLOW,
+ sizeof "signed-integer-overflow" -1 },
{ NULL, 0, 0 }
};
const char *comma;
diff --git a/gcc/opts.h b/gcc/opts.h
index 264f4de7daa..7477647bf67 100644
--- a/gcc/opts.h
+++ b/gcc/opts.h
@@ -249,14 +249,14 @@ struct cl_decoded_option
/* Structure describing an option deferred for handling after the main
option handlers. */
-typedef struct
+struct cl_deferred_option
{
/* Elements from struct cl_decoded_option used for deferred
options. */
size_t opt_index;
const char *arg;
int value;
-} cl_deferred_option;
+};
/* Structure describing a single option-handling callback. */
diff --git a/gcc/params.h b/gcc/params.h
index 65802245924..f137e9eb5e6 100644
--- a/gcc/params.h
+++ b/gcc/params.h
@@ -38,7 +38,7 @@ along with GCC; see the file COPYING3. If not see
/* The information associated with each parameter. */
-typedef struct param_info
+struct param_info
{
/* The name used with the `--param <name>=<value>' switch to set this
value. */
@@ -55,7 +55,7 @@ typedef struct param_info
/* A short description of the option. */
const char *const help;
-} param_info;
+};
/* An array containing the compiler parameters and their current
values. */
@@ -79,14 +79,14 @@ extern void set_param_value (const char *name, int value,
/* The parameters in use by language-independent code. */
-typedef enum compiler_param
+enum compiler_param
{
#define DEFPARAM(enumerator, option, msgid, default, min, max) \
enumerator,
#include "params.def"
#undef DEFPARAM
LAST_PARAM
-} compiler_param;
+};
/* The value of the parameter given by ENUM. Not an lvalue. */
#define PARAM_VALUE(ENUM) \
diff --git a/gcc/pass_manager.h b/gcc/pass_manager.h
index 9a71e9c7cbf..b5e10c373cf 100644
--- a/gcc/pass_manager.h
+++ b/gcc/pass_manager.h
@@ -51,7 +51,7 @@ public:
pass_manager (context *ctxt);
void register_pass (struct register_pass_info *pass_info);
- void register_one_dump_file (struct opt_pass *pass);
+ void register_one_dump_file (opt_pass *pass);
opt_pass *get_pass_for_id (int id) const;
@@ -91,8 +91,8 @@ public:
private:
void set_pass_for_id (int id, opt_pass *pass);
- int register_dump_files_1 (struct opt_pass *pass, int properties);
- void register_dump_files (struct opt_pass *pass, int properties);
+ int register_dump_files_1 (opt_pass *pass, int properties);
+ void register_dump_files (opt_pass *pass, int properties);
private:
context *m_ctxt;
diff --git a/gcc/passes.c b/gcc/passes.c
index ea89d7a2cfb..f30f159813e 100644
--- a/gcc/passes.c
+++ b/gcc/passes.c
@@ -90,9 +90,9 @@ using namespace gcc;
/* This is used for debugging. It allows the current pass to printed
from anywhere in compilation.
The variable current_pass is also used for statistics and plugins. */
-struct opt_pass *current_pass;
+opt_pass *current_pass;
-static void register_pass_name (struct opt_pass *, const char *);
+static void register_pass_name (opt_pass *, const char *);
/* Most passes are single-instance (within their context) and thus don't
need to implement cloning, but passes that support multiple instances
@@ -613,12 +613,12 @@ make_pass_postreload (gcc::context *ctxt)
void
pass_manager::
-set_pass_for_id (int id, struct opt_pass *pass)
+set_pass_for_id (int id, opt_pass *pass)
{
pass->static_pass_number = id;
if (passes_by_id_size <= id)
{
- passes_by_id = XRESIZEVEC (struct opt_pass *, passes_by_id, id + 1);
+ passes_by_id = XRESIZEVEC (opt_pass *, passes_by_id, id + 1);
memset (passes_by_id + passes_by_id_size, 0,
(id + 1 - passes_by_id_size) * sizeof (void *));
passes_by_id_size = id + 1;
@@ -628,7 +628,7 @@ set_pass_for_id (int id, struct opt_pass *pass)
/* Return the pass with the static pass number ID. */
-struct opt_pass *
+opt_pass *
pass_manager::get_pass_for_id (int id) const
{
if (id >= passes_by_id_size)
@@ -641,13 +641,13 @@ pass_manager::get_pass_for_id (int id) const
enabled or not. */
void
-register_one_dump_file (struct opt_pass *pass)
+register_one_dump_file (opt_pass *pass)
{
g->get_passes ()->register_one_dump_file (pass);
}
void
-pass_manager::register_one_dump_file (struct opt_pass *pass)
+pass_manager::register_one_dump_file (opt_pass *pass)
{
char *dot_name, *flag_name, *glob_name;
const char *name, *full_name, *prefix;
@@ -707,7 +707,7 @@ pass_manager::register_one_dump_file (struct opt_pass *pass)
int
pass_manager::
-register_dump_files_1 (struct opt_pass *pass, int properties)
+register_dump_files_1 (opt_pass *pass, int properties)
{
do
{
@@ -740,7 +740,7 @@ register_dump_files_1 (struct opt_pass *pass, int properties)
void
pass_manager::
-register_dump_files (struct opt_pass *pass,int properties)
+register_dump_files (opt_pass *pass,int properties)
{
pass->properties_required |= properties;
register_dump_files_1 (pass, properties);
@@ -749,7 +749,7 @@ register_dump_files (struct opt_pass *pass,int properties)
struct pass_registry
{
const char* unique_name;
- struct opt_pass *pass;
+ opt_pass *pass;
};
/* Helper for pass_registry hash table. */
@@ -783,7 +783,7 @@ static hash_table <pass_registry_hasher> name_to_pass_map;
/* Register PASS with NAME. */
static void
-register_pass_name (struct opt_pass *pass, const char *name)
+register_pass_name (opt_pass *pass, const char *name)
{
struct pass_registry **slot;
struct pass_registry pr;
@@ -816,7 +816,7 @@ static vec<char_ptr> pass_tab = vNULL;
int
passes_pass_traverse (pass_registry **p, void *data ATTRIBUTE_UNUSED)
{
- struct opt_pass *pass = (*p)->pass;
+ opt_pass *pass = (*p)->pass;
gcc_assert (pass->static_pass_number > 0);
gcc_assert (pass_tab.exists ());
@@ -839,13 +839,13 @@ create_pass_tab (void)
name_to_pass_map.traverse <void *, passes_pass_traverse> (NULL);
}
-static bool override_gate_status (struct opt_pass *, tree, bool);
+static bool override_gate_status (opt_pass *, tree, bool);
/* Dump the instantiated name for PASS. IS_ON indicates if PASS
is turned on or not. */
static void
-dump_one_pass (struct opt_pass *pass, int pass_indent)
+dump_one_pass (opt_pass *pass, int pass_indent)
{
int indent = 3 * pass_indent;
const char *pn;
@@ -869,7 +869,7 @@ dump_one_pass (struct opt_pass *pass, int pass_indent)
/* Dump pass list PASS with indentation INDENT. */
static void
-dump_pass_list (struct opt_pass *pass, int indent)
+dump_pass_list (opt_pass *pass, int indent)
{
do
{
@@ -920,7 +920,7 @@ pass_manager::dump_passes () const
/* Returns the pass with NAME. */
-static struct opt_pass *
+static opt_pass *
get_pass_by_name (const char *name)
{
struct pass_registry **slot, pr;
@@ -967,7 +967,7 @@ static vec<uid_range_p>
static void
enable_disable_pass (const char *arg, bool is_enable)
{
- struct opt_pass *pass;
+ opt_pass *pass;
char *range_str, *phase_name;
char *argstr = xstrdup (arg);
vec<uid_range_p> *tab = 0;
@@ -1150,7 +1150,7 @@ disable_pass (const char *arg)
/* Returns true if PASS is explicitly enabled/disabled for FUNC. */
static bool
-is_pass_explicitly_enabled_or_disabled (struct opt_pass *pass,
+is_pass_explicitly_enabled_or_disabled (opt_pass *pass,
tree func,
vec<uid_range_p> tab)
{
@@ -1216,7 +1216,7 @@ is_pass_explicitly_enabled_or_disabled (struct opt_pass *pass,
(TDI_end + current value of extra_dump_files_in_use) ) */
static void
-add_pass_instance (struct opt_pass *new_pass, bool track_duplicates,
+add_pass_instance (opt_pass *new_pass, bool track_duplicates,
opt_pass *initial_pass)
{
/* Are we dealing with the first pass of its kind, or a clone? */
@@ -1248,9 +1248,8 @@ add_pass_instance (struct opt_pass *new_pass, bool track_duplicates,
/* Add a pass to the pass list. Duplicate the pass if it's already
in the list. */
-static struct opt_pass **
-next_pass_1 (struct opt_pass **list, struct opt_pass *pass,
- struct opt_pass *initial_pass)
+static opt_pass **
+next_pass_1 (opt_pass **list, opt_pass *pass, opt_pass *initial_pass)
{
/* Every pass should have a name so that plugins can refer to them. */
gcc_assert (pass->name != NULL);
@@ -1270,7 +1269,7 @@ next_pass_1 (struct opt_pass **list, struct opt_pass *pass,
struct pass_list_node
{
- struct opt_pass *pass;
+ opt_pass *pass;
struct pass_list_node *next;
};
@@ -1284,10 +1283,9 @@ static struct pass_list_node *prev_added_pass_node;
PASS_LIST - root of the pass list to insert the new pass to */
static bool
-position_pass (struct register_pass_info *new_pass_info,
- struct opt_pass **pass_list)
+position_pass (struct register_pass_info *new_pass_info, opt_pass **pass_list)
{
- struct opt_pass *pass = *pass_list, *prev_pass = NULL;
+ opt_pass *pass = *pass_list, *prev_pass = NULL;
bool success = false;
for ( ; pass; prev_pass = pass, pass = pass->next)
@@ -1303,7 +1301,7 @@ position_pass (struct register_pass_info *new_pass_info,
|| (new_pass_info->ref_pass_instance_number == 1
&& pass->todo_flags_start & TODO_mark_first_instance)))
{
- struct opt_pass *new_pass;
+ opt_pass *new_pass;
struct pass_list_node *new_pass_node;
if (new_pass_info->ref_pass_instance_number == 0)
@@ -1503,7 +1501,7 @@ pass_manager::pass_manager (context *ctxt)
all_late_ipa_passes (NULL), passes_by_id (NULL), passes_by_id_size (0),
m_ctxt (ctxt)
{
- struct opt_pass **p;
+ opt_pass **p;
/* Initialize the pass_lists array. */
#define DEF_PASS_LIST(LIST) pass_lists[PASS_LIST_NO_##LIST] = &LIST;
@@ -1517,7 +1515,7 @@ pass_manager::pass_manager (context *ctxt)
#define PUSH_INSERT_PASSES_WITHIN(PASS) \
{ \
- struct opt_pass **p = &(PASS ## _1)->sub;
+ opt_pass **p = &(PASS ## _1)->sub;
#define POP_INSERT_PASSES() \
}
@@ -1936,7 +1934,7 @@ verify_curr_properties (void *data)
/* This is non-static so that the plugins can use it. */
bool
-pass_init_dump_file (struct opt_pass *pass)
+pass_init_dump_file (opt_pass *pass)
{
/* If a dump file name is present, open it if enabled. */
if (pass->static_pass_number != -1)
@@ -1964,7 +1962,7 @@ pass_init_dump_file (struct opt_pass *pass)
/* This is non-static so that plugins can use it. */
void
-pass_fini_dump_file (struct opt_pass *pass)
+pass_fini_dump_file (opt_pass *pass)
{
timevar_push (TV_DUMP);
@@ -1985,7 +1983,7 @@ pass_fini_dump_file (struct opt_pass *pass)
static void
update_properties_after_pass (void *data)
{
- struct opt_pass *pass = (struct opt_pass *) data;
+ opt_pass *pass = (opt_pass *) data;
cfun->curr_properties = (cfun->curr_properties | pass->properties_provided)
& ~pass->properties_destroyed;
}
@@ -1993,11 +1991,11 @@ update_properties_after_pass (void *data)
/* Execute summary generation for all of the passes in IPA_PASS. */
void
-execute_ipa_summary_passes (struct ipa_opt_pass_d *ipa_pass)
+execute_ipa_summary_passes (ipa_opt_pass_d *ipa_pass)
{
while (ipa_pass)
{
- struct opt_pass *pass = ipa_pass;
+ opt_pass *pass = ipa_pass;
/* Execute all of the IPA_PASSes in the list. */
if (ipa_pass->type == IPA_PASS
@@ -2018,7 +2016,7 @@ execute_ipa_summary_passes (struct ipa_opt_pass_d *ipa_pass)
pass_fini_dump_file (pass);
}
- ipa_pass = (struct ipa_opt_pass_d *)ipa_pass->next;
+ ipa_pass = (ipa_opt_pass_d *)ipa_pass->next;
}
}
@@ -2026,9 +2024,9 @@ execute_ipa_summary_passes (struct ipa_opt_pass_d *ipa_pass)
static void
execute_one_ipa_transform_pass (struct cgraph_node *node,
- struct ipa_opt_pass_d *ipa_pass)
+ ipa_opt_pass_d *ipa_pass)
{
- struct opt_pass *pass = ipa_pass;
+ opt_pass *pass = ipa_pass;
unsigned int todo_after = 0;
current_pass = pass;
@@ -2115,7 +2113,7 @@ apply_ipa_transforms (void *data)
default. */
static bool
-override_gate_status (struct opt_pass *pass, tree func, bool gate_status)
+override_gate_status (opt_pass *pass, tree func, bool gate_status)
{
bool explicitly_enabled = false;
bool explicitly_disabled = false;
@@ -2136,7 +2134,7 @@ override_gate_status (struct opt_pass *pass, tree func, bool gate_status)
/* Execute PASS. */
bool
-execute_one_pass (struct opt_pass *pass)
+execute_one_pass (opt_pass *pass)
{
unsigned int todo_after = 0;
@@ -2237,7 +2235,7 @@ execute_one_pass (struct opt_pass *pass)
{
struct cgraph_node *node;
FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
- node->ipa_transforms_to_apply.safe_push ((struct ipa_opt_pass_d *)pass);
+ node->ipa_transforms_to_apply.safe_push ((ipa_opt_pass_d *)pass);
}
if (!current_function_decl)
@@ -2259,7 +2257,7 @@ execute_one_pass (struct opt_pass *pass)
}
void
-execute_pass_list (struct opt_pass *pass)
+execute_pass_list (opt_pass *pass)
{
do
{
@@ -2289,11 +2287,11 @@ write_lto (void)
those node in SET. */
static void
-ipa_write_summaries_2 (struct opt_pass *pass, struct lto_out_decl_state *state)
+ipa_write_summaries_2 (opt_pass *pass, struct lto_out_decl_state *state)
{
while (pass)
{
- struct ipa_opt_pass_d *ipa_pass = (struct ipa_opt_pass_d *)pass;
+ ipa_opt_pass_d *ipa_pass = (ipa_opt_pass_d *)pass;
gcc_assert (!current_function_decl);
gcc_assert (!cfun);
gcc_assert (pass->type == SIMPLE_IPA_PASS || pass->type == IPA_PASS);
@@ -2353,7 +2351,7 @@ ipa_write_summaries (void)
{
lto_symtab_encoder_t encoder;
int i, order_pos;
- struct varpool_node *vnode;
+ varpool_node *vnode;
struct cgraph_node *node;
struct cgraph_node **order;
@@ -2406,11 +2404,12 @@ ipa_write_summaries (void)
only those node in SET. */
static void
-ipa_write_optimization_summaries_1 (struct opt_pass *pass, struct lto_out_decl_state *state)
+ipa_write_optimization_summaries_1 (opt_pass *pass,
+ struct lto_out_decl_state *state)
{
while (pass)
{
- struct ipa_opt_pass_d *ipa_pass = (struct ipa_opt_pass_d *)pass;
+ ipa_opt_pass_d *ipa_pass = (ipa_opt_pass_d *)pass;
gcc_assert (!current_function_decl);
gcc_assert (!cfun);
gcc_assert (pass->type == SIMPLE_IPA_PASS || pass->type == IPA_PASS);
@@ -2484,11 +2483,11 @@ ipa_write_optimization_summaries (lto_symtab_encoder_t encoder)
are local passes. */
static void
-ipa_read_summaries_1 (struct opt_pass *pass)
+ipa_read_summaries_1 (opt_pass *pass)
{
while (pass)
{
- struct ipa_opt_pass_d *ipa_pass = (struct ipa_opt_pass_d *) pass;
+ ipa_opt_pass_d *ipa_pass = (ipa_opt_pass_d *) pass;
gcc_assert (!current_function_decl);
gcc_assert (!cfun);
@@ -2534,11 +2533,11 @@ ipa_read_summaries (void)
are local passes. */
static void
-ipa_read_optimization_summaries_1 (struct opt_pass *pass)
+ipa_read_optimization_summaries_1 (opt_pass *pass)
{
while (pass)
{
- struct ipa_opt_pass_d *ipa_pass = (struct ipa_opt_pass_d *) pass;
+ ipa_opt_pass_d *ipa_pass = (ipa_opt_pass_d *) pass;
gcc_assert (!current_function_decl);
gcc_assert (!cfun);
@@ -2582,7 +2581,7 @@ ipa_read_optimization_summaries (void)
/* Same as execute_pass_list but assume that subpasses of IPA passes
are local passes. */
void
-execute_ipa_pass_list (struct opt_pass *pass)
+execute_ipa_pass_list (opt_pass *pass)
{
do
{
@@ -2614,8 +2613,8 @@ execute_ipa_pass_list (struct opt_pass *pass)
/* Execute stmt fixup hooks of all passes in PASS for NODE and STMTS. */
static void
-execute_ipa_stmt_fixups (struct opt_pass *pass,
- struct cgraph_node *node, gimple *stmts)
+execute_ipa_stmt_fixups (opt_pass *pass,
+ struct cgraph_node *node, gimple *stmts)
{
while (pass)
{
@@ -2623,7 +2622,7 @@ execute_ipa_stmt_fixups (struct opt_pass *pass,
if (pass->type == IPA_PASS
&& ((!pass->has_gate) || pass->gate ()))
{
- struct ipa_opt_pass_d *ipa_pass = (struct ipa_opt_pass_d *) pass;
+ ipa_opt_pass_d *ipa_pass = (ipa_opt_pass_d *) pass;
if (ipa_pass->stmt_fixup)
{
diff --git a/gcc/passes.def b/gcc/passes.def
index 65c00bbbb41..1fe2003cbca 100644
--- a/gcc/passes.def
+++ b/gcc/passes.def
@@ -217,6 +217,8 @@ along with GCC; see the file COPYING3. If not see
NEXT_PASS (pass_iv_canon);
NEXT_PASS (pass_parallelize_loops);
NEXT_PASS (pass_if_conversion);
+ /* pass_vectorize must immediately follow pass_if_conversion.
+ Please do not add any other passes in between. */
NEXT_PASS (pass_vectorize);
PUSH_INSERT_PASSES_WITHIN (pass_vectorize)
NEXT_PASS (pass_dce_loop);
diff --git a/gcc/postreload-gcse.c b/gcc/postreload-gcse.c
index 9ce17e50793..a1204f9016a 100644
--- a/gcc/postreload-gcse.c
+++ b/gcc/postreload-gcse.c
@@ -266,7 +266,7 @@ alloc_mem (void)
/* Find the largest UID and create a mapping from UIDs to CUIDs. */
uid_cuid = XCNEWVEC (int, get_max_uid () + 1);
i = 1;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
FOR_BB_INSNS (bb, insn)
{
if (INSN_P (insn))
@@ -828,7 +828,7 @@ compute_hash_table (void)
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn;
diff --git a/gcc/postreload.c b/gcc/postreload.c
index 7803b33024f..a50fc878045 100644
--- a/gcc/postreload.c
+++ b/gcc/postreload.c
@@ -213,7 +213,7 @@ reload_cse_regs_1 (void)
cselib_init (CSELIB_RECORD_MEMORY);
init_alias_analysis ();
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
FOR_BB_INSNS (bb, insn)
{
if (INSN_P (insn))
@@ -1281,7 +1281,7 @@ reload_combine (void)
label_live = XNEWVEC (HARD_REG_SET, n_labels);
CLEAR_HARD_REG_SET (ever_live_at_start);
- FOR_EACH_BB_REVERSE (bb)
+ FOR_EACH_BB_REVERSE_FN (bb, cfun)
{
insn = BB_HEAD (bb);
if (LABEL_P (insn))
diff --git a/gcc/predict.c b/gcc/predict.c
index df97eb06731..d1bf2afe155 100644
--- a/gcc/predict.c
+++ b/gcc/predict.c
@@ -74,14 +74,6 @@ along with GCC; see the file COPYING3. If not see
static sreal real_zero, real_one, real_almost_one, real_br_prob_base,
real_inv_br_prob_base, real_one_half, real_bb_freq_max;
-/* Random guesstimation given names.
- PROV_VERY_UNLIKELY should be small enough so basic block predicted
- by it gets below HOT_BB_FREQUENCY_FRACTION. */
-#define PROB_VERY_UNLIKELY (REG_BR_PROB_BASE / 2000 - 1)
-#define PROB_EVEN (REG_BR_PROB_BASE / 2)
-#define PROB_VERY_LIKELY (REG_BR_PROB_BASE - PROB_VERY_UNLIKELY)
-#define PROB_ALWAYS (REG_BR_PROB_BASE)
-
static void combine_predictions_for_insn (rtx, basic_block);
static void dump_prediction (FILE *, enum br_predictor, int, basic_block, int);
static void predict_paths_leading_to (basic_block, enum br_predictor, enum prediction);
@@ -129,7 +121,7 @@ maybe_hot_frequency_p (struct function *fun, int freq)
if (node->frequency == NODE_FREQUENCY_HOT)
return true;
}
- if (profile_status_for_function (fun) == PROFILE_ABSENT)
+ if (profile_status_for_fn (fun) == PROFILE_ABSENT)
return true;
if (node->frequency == NODE_FREQUENCY_EXECUTED_ONCE
&& freq < (ENTRY_BLOCK_PTR_FOR_FN (fun)->frequency * 2 / 3))
@@ -172,7 +164,7 @@ set_hot_bb_threshold (gcov_type min)
static inline bool
maybe_hot_count_p (struct function *fun, gcov_type count)
{
- if (fun && profile_status_for_function (fun) != PROFILE_READ)
+ if (fun && profile_status_for_fn (fun) != PROFILE_READ)
return true;
/* Code executed at most once is not hot. */
if (profile_info->runs >= count)
@@ -187,7 +179,7 @@ bool
maybe_hot_bb_p (struct function *fun, const_basic_block bb)
{
gcc_checking_assert (fun);
- if (profile_status_for_function (fun) == PROFILE_READ)
+ if (profile_status_for_fn (fun) == PROFILE_READ)
return maybe_hot_count_p (fun, bb->count);
return maybe_hot_frequency_p (fun, bb->frequency);
}
@@ -232,7 +224,7 @@ cgraph_maybe_hot_edge_p (struct cgraph_edge *edge)
bool
maybe_hot_edge_p (edge e)
{
- if (profile_status == PROFILE_READ)
+ if (profile_status_for_fn (cfun) == PROFILE_READ)
return maybe_hot_count_p (cfun, e->count);
return maybe_hot_frequency_p (cfun, EDGE_FREQUENCY (e));
}
@@ -247,7 +239,7 @@ probably_never_executed (struct function *fun,
gcov_type count, int frequency)
{
gcc_checking_assert (fun);
- if (profile_status_for_function (fun) == PROFILE_READ)
+ if (profile_status_for_fn (cfun) == PROFILE_READ)
{
int unlikely_count_fraction = PARAM_VALUE (UNLIKELY_BB_COUNT_FRACTION);
if (count * unlikely_count_fraction >= profile_info->runs)
@@ -446,7 +438,7 @@ optimize_loop_nest_for_size_p (struct loop *loop)
bool
predictable_edge_p (edge e)
{
- if (profile_status == PROFILE_ABSENT)
+ if (profile_status_for_fn (cfun) == PROFILE_ABSENT)
return false;
if ((e->probability
<= PARAM_VALUE (PARAM_PREDICTABLE_BRANCH_OUTCOME) * REG_BR_PROB_BASE / 100)
@@ -547,8 +539,8 @@ gimple_predicted_by_p (const_basic_block bb, enum br_predictor predictor)
static bool
probability_reliable_p (int prob)
{
- return (profile_status == PROFILE_READ
- || (profile_status == PROFILE_GUESSED
+ return (profile_status_for_fn (cfun) == PROFILE_READ
+ || (profile_status_for_fn (cfun) == PROFILE_GUESSED
&& (prob <= HITRATE (1) || prob >= HITRATE (99))));
}
@@ -618,7 +610,7 @@ rtl_predict_edge (edge e, enum br_predictor predictor, int probability)
void
gimple_predict_edge (edge e, enum br_predictor predictor, int probability)
{
- gcc_assert (profile_status != PROFILE_GUESSED);
+ gcc_assert (profile_status_for_fn (cfun) != PROFILE_GUESSED);
if ((e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun) && EDGE_COUNT (e->src->succs) >
1)
&& flag_guess_branch_prob && optimize)
@@ -1945,7 +1937,7 @@ strip_predict_hints (void)
gimple ass_stmt;
tree var;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator bi;
for (bi = gsi_start_bb (bb); !gsi_end_p (bi);)
@@ -2216,7 +2208,7 @@ tree_bb_level_predictions (void)
apply_return_prediction ();
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
@@ -2390,10 +2382,10 @@ tree_estimate_probability (void)
if (number_of_loops (cfun) > 1)
predict_loops ();
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
tree_estimate_probability_bb (bb);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
combine_predictions_for_bb (bb);
#ifdef ENABLE_CHECKING
@@ -2433,8 +2425,8 @@ tree_estimate_probability_driver (void)
loop_optimizer_finalize ();
if (dump_file && (dump_flags & TDF_DETAILS))
gimple_dump_cfg (dump_file, dump_flags);
- if (profile_status == PROFILE_ABSENT)
- profile_status = PROFILE_GUESSED;
+ if (profile_status_for_fn (cfun) == PROFILE_ABSENT)
+ profile_status_for_fn (cfun) = PROFILE_GUESSED;
return 0;
}
@@ -2586,7 +2578,7 @@ propagate_freq (basic_block head, bitmap tovisit)
edge_iterator ei;
int count = 0;
- bb = BASIC_BLOCK (i);
+ bb = BASIC_BLOCK_FOR_FN (cfun, i);
FOR_EACH_EDGE (e, ei, bb->preds)
{
@@ -2747,7 +2739,7 @@ estimate_loops (void)
estimate_loops_at_level (current_loops->tree_root->inner);
/* Now propagate the frequencies through all the blocks. */
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
{
bitmap_set_bit (tovisit, bb->index);
}
@@ -2796,7 +2788,7 @@ drop_profile (struct cgraph_node *node, gcov_type call_count)
node->name (), node->order);
}
- profile_status_for_function (fn)
+ profile_status_for_fn (fn)
= (flag_guess_branch_prob ? PROFILE_GUESSED : PROFILE_ABSENT);
node->frequency
= hot ? NODE_FREQUENCY_HOT : NODE_FREQUENCY_NORMAL;
@@ -2859,7 +2851,7 @@ handle_missing_profiles (void)
if (callee->count > 0)
continue;
if (DECL_COMDAT (callee->decl) && fn && fn->cfg
- && profile_status_for_function (fn) == PROFILE_READ)
+ && profile_status_for_fn (fn) == PROFILE_READ)
{
drop_profile (node, 0);
worklist.safe_push (callee);
@@ -2918,7 +2910,7 @@ expensive_function_p (int threshold)
/* Maximally BB_FREQ_MAX^2 so overflow won't happen. */
limit = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency * threshold;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn;
@@ -2944,7 +2936,7 @@ estimate_bb_frequencies (bool force)
basic_block bb;
sreal freq_max;
- if (force || profile_status != PROFILE_READ || !counts_to_freqs ())
+ if (force || profile_status_for_fn (cfun) != PROFILE_READ || !counts_to_freqs ())
{
static int real_values_initialized = 0;
@@ -2987,7 +2979,7 @@ estimate_bb_frequencies (bool force)
estimate_loops ();
memcpy (&freq_max, &real_zero, sizeof (real_zero));
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (sreal_compare (&freq_max, &BLOCK_INFO (bb)->frequency) < 0)
memcpy (&freq_max, &BLOCK_INFO (bb)->frequency, sizeof (freq_max));
@@ -3020,7 +3012,7 @@ compute_function_frequency (void)
if (DECL_STATIC_DESTRUCTOR (current_function_decl))
node->only_called_at_exit = true;
- if (profile_status != PROFILE_READ)
+ if (profile_status_for_fn (cfun) != PROFILE_READ)
{
int flags = flags_from_decl_or_type (current_function_decl);
if (lookup_attribute ("cold", DECL_ATTRIBUTES (current_function_decl))
@@ -3045,7 +3037,7 @@ compute_function_frequency (void)
functions to unlikely and that is most of what we care about. */
if (!cfun->after_inlining)
node->frequency = NODE_FREQUENCY_UNLIKELY_EXECUTED;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
if (maybe_hot_bb_p (cfun, bb))
{
@@ -3179,8 +3171,8 @@ rebuild_frequencies (void)
FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
count_max = MAX (bb->count, count_max);
- if (profile_status == PROFILE_GUESSED
- || (profile_status == PROFILE_READ && count_max < REG_BR_PROB_BASE/10))
+ if (profile_status_for_fn (cfun) == PROFILE_GUESSED
+ || (profile_status_for_fn (cfun) == PROFILE_READ && count_max < REG_BR_PROB_BASE/10))
{
loop_optimizer_init (0);
add_noreturn_fake_exit_edges ();
@@ -3190,7 +3182,7 @@ rebuild_frequencies (void)
remove_fake_exit_edges ();
loop_optimizer_finalize ();
}
- else if (profile_status == PROFILE_READ)
+ else if (profile_status_for_fn (cfun) == PROFILE_READ)
counts_to_freqs ();
else
gcc_unreachable ();
diff --git a/gcc/predict.h b/gcc/predict.h
index 83b1695c65e..23435dc2c99 100644
--- a/gcc/predict.h
+++ b/gcc/predict.h
@@ -20,6 +20,16 @@ along with GCC; see the file COPYING3. If not see
#ifndef GCC_PREDICT_H
#define GCC_PREDICT_H
+/* Random guesstimation given names.
+ PROB_VERY_UNLIKELY should be small enough so basic block predicted
+ by it gets below HOT_BB_FREQUENCY_FRACTION. */
+#define PROB_VERY_UNLIKELY (REG_BR_PROB_BASE / 2000 - 1)
+#define PROB_EVEN (REG_BR_PROB_BASE / 2)
+#define PROB_VERY_LIKELY (REG_BR_PROB_BASE - PROB_VERY_UNLIKELY)
+#define PROB_ALWAYS (REG_BR_PROB_BASE)
+#define PROB_UNLIKELY (REG_BR_PROB_BASE / 5 - 1)
+#define PROB_LIKELY (PROB_ALWAYS - PROB_VERY_LIKELY)
+
#define DEF_PREDICTOR(ENUM, NAME, HITRATE, FLAGS) ENUM,
enum br_predictor
{
diff --git a/gcc/profile.c b/gcc/profile.c
index 9aec3cb06b3..62b126c4a81 100644
--- a/gcc/profile.c
+++ b/gcc/profile.c
@@ -354,7 +354,7 @@ is_inconsistent (void)
{
basic_block bb;
bool inconsistent = false;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
inconsistent |= is_edge_inconsistent (bb->preds);
if (!dump_file && inconsistent)
@@ -692,7 +692,7 @@ compute_branch_probabilities (unsigned cfg_checksum, unsigned lineno_checksum)
/* If the graph has been correctly solved, every block will have a
succ and pred count of zero. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gcc_assert (!BB_INFO (bb)->succ_count && !BB_INFO (bb)->pred_count);
}
@@ -797,7 +797,7 @@ compute_branch_probabilities (unsigned cfg_checksum, unsigned lineno_checksum)
give all abnormals frequency of 0, otherwise distribute the
frequency over abnormals (this is the case of noreturn
calls). */
- else if (profile_status == PROFILE_ABSENT)
+ else if (profile_status_for_fn (cfun) == PROFILE_ABSENT)
{
int total = 0;
@@ -825,7 +825,7 @@ compute_branch_probabilities (unsigned cfg_checksum, unsigned lineno_checksum)
}
}
counts_to_freqs ();
- profile_status = PROFILE_READ;
+ profile_status_for_fn (cfun) = PROFILE_READ;
compute_function_frequency ();
if (dump_file)
@@ -1021,7 +1021,7 @@ branch_prob (void)
We also add fake exit edges for each call and asm statement in the
basic, since it may not return. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
int need_exit_edge = 0, need_entry_edge = 0;
int have_exit_edge = 0, have_entry_edge = 0;
@@ -1260,7 +1260,7 @@ branch_prob (void)
/* Initialize the output. */
output_location (NULL, 0, NULL, NULL);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
gcov_position_t offset = 0;
diff --git a/gcc/recog.c b/gcc/recog.c
index 30d5eb6fd43..5671805feb0 100644
--- a/gcc/recog.c
+++ b/gcc/recog.c
@@ -2942,11 +2942,11 @@ split_all_insns (void)
bool changed;
basic_block bb;
- blocks = sbitmap_alloc (last_basic_block);
+ blocks = sbitmap_alloc (last_basic_block_for_fn (cfun));
bitmap_clear (blocks);
changed = false;
- FOR_EACH_BB_REVERSE (bb)
+ FOR_EACH_BB_REVERSE_FN (bb, cfun)
{
rtx insn, next;
bool finish = false;
@@ -3600,7 +3600,7 @@ peephole2_optimize (void)
search_ofs = 0;
live = BITMAP_ALLOC (&reg_obstack);
- FOR_EACH_BB_REVERSE (bb)
+ FOR_EACH_BB_REVERSE_FN (bb, cfun)
{
bool past_end = false;
int pos;
diff --git a/gcc/ree.c b/gcc/ree.c
index 87427fdb00e..9938e98b4dc 100644
--- a/gcc/ree.c
+++ b/gcc/ree.c
@@ -835,7 +835,7 @@ find_removable_extensions (void)
rtx insn, set;
unsigned *def_map = XCNEWVEC (unsigned, max_insn_uid);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
FOR_BB_INSNS (bb, insn)
{
if (!NONDEBUG_INSN_P (insn))
diff --git a/gcc/reg-stack.c b/gcc/reg-stack.c
index 6aad46684d6..87b9821fbeb 100644
--- a/gcc/reg-stack.c
+++ b/gcc/reg-stack.c
@@ -2846,7 +2846,7 @@ compensate_edges (void)
starting_stack_p = false;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun))
{
edge e;
@@ -3153,7 +3153,7 @@ convert_regs (void)
/* ??? Process all unreachable blocks. Though there's no excuse
for keeping these even when not optimizing. */
- FOR_EACH_BB (b)
+ FOR_EACH_BB_FN (b, cfun)
{
block_info bi = BLOCK_INFO (b);
@@ -3212,7 +3212,7 @@ reg_to_stack (void)
/* Set up block info for each basic block. */
alloc_aux_for_blocks (sizeof (struct block_info_def));
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
block_info bi = BLOCK_INFO (bb);
edge_iterator ei;
diff --git a/gcc/regcprop.c b/gcc/regcprop.c
index 9b52a6301f7..3c9ef3d3380 100644
--- a/gcc/regcprop.c
+++ b/gcc/regcprop.c
@@ -1066,9 +1066,9 @@ copyprop_hardreg_forward (void)
sbitmap visited;
bool analyze_called = false;
- all_vd = XNEWVEC (struct value_data, last_basic_block);
+ all_vd = XNEWVEC (struct value_data, last_basic_block_for_fn (cfun));
- visited = sbitmap_alloc (last_basic_block);
+ visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
bitmap_clear (visited);
if (MAY_HAVE_DEBUG_INSNS)
@@ -1076,7 +1076,7 @@ copyprop_hardreg_forward (void)
= create_alloc_pool ("debug insn changes pool",
sizeof (struct queued_debug_insn_change), 256);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
bitmap_set_bit (visited, bb->index);
@@ -1112,7 +1112,7 @@ copyprop_hardreg_forward (void)
if (MAY_HAVE_DEBUG_INSNS)
{
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (bitmap_bit_p (visited, bb->index)
&& all_vd[bb->index].n_debug_insn_changes)
{
diff --git a/gcc/reginfo.c b/gcc/reginfo.c
index db66a095765..46288ebd181 100644
--- a/gcc/reginfo.c
+++ b/gcc/reginfo.c
@@ -1266,7 +1266,7 @@ init_subregs_of_mode (void)
bitmap_obstack_initialize (&srom_obstack);
subregs_of_mode = BITMAP_ALLOC (&srom_obstack);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
FOR_BB_INSNS (bb, insn)
if (NONDEBUG_INSN_P (insn))
find_subregs_of_mode (PATTERN (insn), subregs_of_mode);
diff --git a/gcc/regrename.c b/gcc/regrename.c
index 5e86fa5a61a..9ff94d0c0e8 100644
--- a/gcc/regrename.c
+++ b/gcc/regrename.c
@@ -668,13 +668,13 @@ regrename_analyze (bitmap bb_mask)
int n_bbs;
int *inverse_postorder;
- inverse_postorder = XNEWVEC (int, last_basic_block);
+ inverse_postorder = XNEWVEC (int, last_basic_block_for_fn (cfun));
n_bbs = pre_and_rev_post_order_compute (NULL, inverse_postorder, false);
/* Gather some information about the blocks in this function. */
rename_info = XCNEWVEC (struct bb_rename_info, n_basic_blocks_for_fn (cfun));
i = 0;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
struct bb_rename_info *ri = rename_info + i;
ri->bb = bb;
@@ -696,7 +696,7 @@ regrename_analyze (bitmap bb_mask)
for (i = 0; i < n_bbs; i++)
{
- basic_block bb1 = BASIC_BLOCK (inverse_postorder[i]);
+ basic_block bb1 = BASIC_BLOCK_FOR_FN (cfun, inverse_postorder[i]);
struct bb_rename_info *this_info;
bool success;
edge e;
@@ -778,7 +778,7 @@ regrename_analyze (bitmap bb_mask)
We perform the analysis for both incoming and outgoing edges, but we
only need to merge once (in the second part, after verifying outgoing
edges). */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
struct bb_rename_info *bb_ri = (struct bb_rename_info *) bb->aux;
unsigned j;
@@ -843,7 +843,7 @@ regrename_analyze (bitmap bb_mask)
}
}
}
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
struct bb_rename_info *bb_ri = (struct bb_rename_info *) bb->aux;
unsigned j;
@@ -920,7 +920,7 @@ regrename_analyze (bitmap bb_mask)
free (rename_info);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
bb->aux = NULL;
}
diff --git a/gcc/regrename.h b/gcc/regrename.h
index f3a0df0bba0..f2ceccf2b8e 100644
--- a/gcc/regrename.h
+++ b/gcc/regrename.h
@@ -65,7 +65,7 @@ struct du_chain
/* This struct describes data gathered during regrename_analyze about
a single operand of an insn. */
-typedef struct
+struct operand_rr_info
{
/* The number of chains recorded for this operand. */
int n_chains;
@@ -73,14 +73,14 @@ typedef struct
a memory operand. */
struct du_chain *chains[MAX_REGS_PER_ADDRESS];
struct du_head *heads[MAX_REGS_PER_ADDRESS];
-} operand_rr_info;
+};
/* A struct to hold a vector of operand_rr_info structures describing the
operands of an insn. */
-typedef struct
+struct insn_rr_info
{
operand_rr_info *op_info;
-} insn_rr_info;
+};
extern vec<insn_rr_info> insn_rr;
diff --git a/gcc/regstat.c b/gcc/regstat.c
index 85678a70f68..6a191d8ceab 100644
--- a/gcc/regstat.c
+++ b/gcc/regstat.c
@@ -120,7 +120,7 @@ regstat_bb_compute_ri (unsigned int bb_index,
bitmap local_live, bitmap local_processed,
int *local_live_last_luid)
{
- basic_block bb = BASIC_BLOCK (bb_index);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
rtx insn;
df_ref *def_rec;
df_ref *use_rec;
@@ -375,7 +375,7 @@ regstat_compute_ri (void)
reg_info_p = XCNEWVEC (struct reg_info_t, max_regno);
local_live_last_luid = XNEWVEC (int, max_regno);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
regstat_bb_compute_ri (bb->index, live, artificial_uses,
local_live, local_processed,
@@ -440,7 +440,7 @@ regstat_get_setjmp_crosses (void)
static void
regstat_bb_compute_calls_crossed (unsigned int bb_index, bitmap live)
{
- basic_block bb = BASIC_BLOCK (bb_index);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
rtx insn;
df_ref *def_rec;
df_ref *use_rec;
@@ -522,7 +522,7 @@ regstat_compute_calls_crossed (void)
reg_info_p_size = max_regno;
reg_info_p = XCNEWVEC (struct reg_info_t, max_regno);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
regstat_bb_compute_calls_crossed (bb->index, live);
}
diff --git a/gcc/reload.h b/gcc/reload.h
index f68c345482a..d7b28422f64 100644
--- a/gcc/reload.h
+++ b/gcc/reload.h
@@ -203,7 +203,7 @@ extern struct target_reload *this_target_reload;
(this_target_reload->x_caller_save_initialized_p)
/* Register equivalences. Indexed by register number. */
-typedef struct reg_equivs_s
+struct reg_equivs_t
{
/* The constant value to which pseudo reg N is equivalent,
or zero if pseudo reg N is not equivalent to a constant.
@@ -238,7 +238,7 @@ typedef struct reg_equivs_s
/* The list of insns that initialized reg N from its equivalent
constant or memory slot. */
rtx init;
-} reg_equivs_t;
+};
#define reg_equiv_constant(ELT) \
(*reg_equivs)[(ELT)].constant
diff --git a/gcc/reload1.c b/gcc/reload1.c
index 6864ec1667f..47439ce6ec9 100644
--- a/gcc/reload1.c
+++ b/gcc/reload1.c
@@ -613,7 +613,7 @@ has_nonexceptional_receiver (void)
/* First determine which blocks can reach exit via normal paths. */
tos = worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) + 1);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
bb->flags &= ~BB_REACHABLE;
/* Place the exit block on our worklist. */
@@ -641,7 +641,7 @@ has_nonexceptional_receiver (void)
/* Now see if there's a reachable block with an exceptional incoming
edge. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (bb->flags & BB_REACHABLE && bb_has_abnormal_pred (bb))
return true;
@@ -1048,7 +1048,7 @@ reload (rtx first, int global)
pseudo. */
if (! frame_pointer_needed)
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
bitmap_clear_bit (df_get_live_in (bb), HARD_FRAME_POINTER_REGNUM);
/* Come here (with failure set nonzero) if we can't get enough spill
@@ -1283,7 +1283,7 @@ reload (rtx first, int global)
if (cfun->can_throw_non_call_exceptions)
{
sbitmap blocks;
- blocks = sbitmap_alloc (last_basic_block);
+ blocks = sbitmap_alloc (last_basic_block_for_fn (cfun));
bitmap_ones (blocks);
find_many_sub_basic_blocks (blocks);
sbitmap_free (blocks);
@@ -1592,7 +1592,7 @@ calculate_elim_costs_all_insns (void)
set_initial_elim_offsets ();
set_initial_label_offsets ();
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn;
elim_bb = bb;
diff --git a/gcc/resource.c b/gcc/resource.c
index 4609c3ad963..442c8523cb8 100644
--- a/gcc/resource.c
+++ b/gcc/resource.c
@@ -918,7 +918,8 @@ mark_target_live_regs (rtx insns, rtx target, struct resources *res)
information, we can get it from there unless the insn at the
start of the basic block has been deleted. */
if (tinfo && tinfo->block != -1
- && ! INSN_DELETED_P (BB_HEAD (BASIC_BLOCK (tinfo->block))))
+ && ! INSN_DELETED_P (BB_HEAD (BASIC_BLOCK_FOR_FN (cfun,
+ tinfo->block))))
b = tinfo->block;
}
@@ -958,7 +959,7 @@ mark_target_live_regs (rtx insns, rtx target, struct resources *res)
to use the LR problem. Otherwise, we must assume everything is live. */
if (b != -1)
{
- regset regs_live = DF_LR_IN (BASIC_BLOCK (b));
+ regset regs_live = DF_LR_IN (BASIC_BLOCK_FOR_FN (cfun, b));
rtx start_insn, stop_insn;
/* Compute hard regs live at start of block. */
@@ -967,7 +968,7 @@ mark_target_live_regs (rtx insns, rtx target, struct resources *res)
/* Get starting and ending insn, handling the case where each might
be a SEQUENCE. */
start_insn = (b == ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb->index ?
- insns : BB_HEAD (BASIC_BLOCK (b)));
+ insns : BB_HEAD (BASIC_BLOCK_FOR_FN (cfun, b)));
stop_insn = target;
if (NONJUMP_INSN_P (start_insn)
@@ -1215,10 +1216,10 @@ init_resource_info (rtx epilogue_insn)
/* Allocate and initialize the tables used by mark_target_live_regs. */
target_hash_table = XCNEWVEC (struct target_info *, TARGET_HASH_PRIME);
- bb_ticks = XCNEWVEC (int, last_basic_block);
+ bb_ticks = XCNEWVEC (int, last_basic_block_for_fn (cfun));
/* Set the BLOCK_FOR_INSN of each label that starts a basic block. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (LABEL_P (BB_HEAD (bb)))
BLOCK_FOR_INSN (BB_HEAD (bb)) = bb;
}
@@ -1257,7 +1258,7 @@ free_resource_info (void)
bb_ticks = NULL;
}
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (LABEL_P (BB_HEAD (bb)))
BLOCK_FOR_INSN (BB_HEAD (bb)) = NULL;
}
diff --git a/gcc/rtl.def b/gcc/rtl.def
index a76b28b66b3..1f069624e4e 100644
--- a/gcc/rtl.def
+++ b/gcc/rtl.def
@@ -82,7 +82,7 @@ DEF_RTL_EXPR(UNKNOWN, "UnKnown", "*", RTX_EXTRA)
/* Used in the cselib routines to describe a value. Objects of this
kind are only allocated in cselib.c, in an alloc pool instead of in
- GC memory. The only operand of a VALUE is a cselib_val_struct.
+ GC memory. The only operand of a VALUE is a cselib_val.
var-tracking requires this to have a distinct integral value from
DECL codes in trees. */
DEF_RTL_EXPR(VALUE, "value", "0", RTX_OBJ)
diff --git a/gcc/rtl.h b/gcc/rtl.h
index 3b8cdacbd8e..39f121be426 100644
--- a/gcc/rtl.h
+++ b/gcc/rtl.h
@@ -114,7 +114,7 @@ extern const unsigned char rtx_next[NUM_RTX_CODE];
/* The flags and bitfields of an ADDR_DIFF_VEC. BASE is the base label
relative to which the offsets are calculated, as explained in rtl.def. */
-typedef struct
+struct addr_diff_vec_flags
{
/* Set at the start of shorten_branches - ONLY WHEN OPTIMIZING - : */
unsigned min_align: 8;
@@ -132,12 +132,12 @@ typedef struct
unsigned offset_unsigned: 1; /* offsets have to be treated as unsigned. */
unsigned : 2;
unsigned scale : 8;
-} addr_diff_vec_flags;
+};
/* Structure used to describe the attributes of a MEM. These are hashed
so MEMs that the same attributes share a data structure. This means
they cannot be modified in place. */
-typedef struct GTY(()) mem_attrs
+struct GTY(()) mem_attrs
{
/* The expression that the MEM accesses, or null if not known.
This expression might be larger than the memory reference itself.
@@ -168,7 +168,7 @@ typedef struct GTY(()) mem_attrs
/* True if SIZE is known. */
bool size_known_p;
-} mem_attrs;
+};
/* Structure used to describe the attributes of a REG in similar way as
mem_attrs does for MEM above. Note that the OFFSET field is calculated
@@ -177,14 +177,14 @@ typedef struct GTY(()) mem_attrs
object in the low part of a 4-byte register, the OFFSET field
will be -3 rather than 0. */
-typedef struct GTY(()) reg_attrs {
+struct GTY(()) reg_attrs {
tree decl; /* decl corresponding to REG. */
HOST_WIDE_INT offset; /* Offset from start of DECL. */
-} reg_attrs;
+};
/* Common union for an element of an rtx. */
-union rtunion_def
+union rtunion
{
int rt_int;
unsigned int rt_uint;
@@ -193,15 +193,14 @@ union rtunion_def
rtvec rt_rtvec;
enum machine_mode rt_type;
addr_diff_vec_flags rt_addr_diff_vec_flags;
- struct cselib_val_struct *rt_cselib;
+ struct cselib_val *rt_cselib;
tree rt_tree;
basic_block rt_bb;
mem_attrs *rt_mem;
reg_attrs *rt_reg;
struct constant_descriptor_rtx *rt_constant;
- struct dw_cfi_struct *rt_cfi;
+ struct dw_cfi_node *rt_cfi;
};
-typedef union rtunion_def rtunion;
/* This structure remembers the position of a SYMBOL_REF within an
object_block structure. A SYMBOL_REF only provides this information
@@ -968,7 +967,7 @@ extern void rtl_check_failed_flag (const char *, const_rtx, const char *,
#define ADDR_DIFF_VEC_FLAGS(RTX) X0ADVFLAGS (RTX, 4)
/* In a VALUE, the value cselib has assigned to RTX.
- This is a "struct cselib_val_struct", see cselib.h. */
+ This is a "struct cselib_val", see cselib.h. */
#define CSELIB_VAL_PTR(RTX) X0CSELIB (RTX, 0)
/* Holds a list of notes on what this insn does to various REGs.
@@ -2188,12 +2187,12 @@ extern void set_insn_deleted (rtx);
#define single_set_1(I) single_set_2 (I, PATTERN (I))
/* Structure used for passing data to REPLACE_LABEL. */
-typedef struct replace_label_data
+struct replace_label_data
{
rtx r1;
rtx r2;
bool update_label_nuses;
-} replace_label_data;
+};
extern enum machine_mode get_address_mode (rtx mem);
extern int rtx_addr_can_trap_p (const_rtx);
diff --git a/gcc/rtlanal.c b/gcc/rtlanal.c
index 4885bd4aa10..a51c32e7cd3 100644
--- a/gcc/rtlanal.c
+++ b/gcc/rtlanal.c
@@ -1180,6 +1180,27 @@ set_noop_p (const_rtx set)
dst = SUBREG_REG (dst);
}
+ /* It is a NOOP if destination overlaps with selected src vector
+ elements. */
+ if (GET_CODE (src) == VEC_SELECT
+ && REG_P (XEXP (src, 0)) && REG_P (dst)
+ && HARD_REGISTER_P (XEXP (src, 0))
+ && HARD_REGISTER_P (dst))
+ {
+ int i;
+ rtx par = XEXP (src, 1);
+ rtx src0 = XEXP (src, 0);
+ int c0 = INTVAL (XVECEXP (par, 0, 0));
+ HOST_WIDE_INT offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;
+
+ for (i = 1; i < XVECLEN (par, 0); i++)
+ if (INTVAL (XVECEXP (par, 0, i)) != c0 + i)
+ return 0;
+ return
+ simplify_subreg_regno (REGNO (src0), GET_MODE (src0),
+ offset, GET_MODE (dst)) == (int) REGNO (dst);
+ }
+
return (REG_P (src) && REG_P (dst)
&& REGNO (src) == REGNO (dst));
}
diff --git a/gcc/sanitizer.def b/gcc/sanitizer.def
index 5bf1e3cebe6..9c94650321e 100644
--- a/gcc/sanitizer.def
+++ b/gcc/sanitizer.def
@@ -315,3 +315,19 @@ DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_TYPE_MISMATCH,
"__ubsan_handle_type_mismatch",
BT_FN_VOID_PTR_PTR,
ATTR_COLD_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_ADD_OVERFLOW,
+ "__ubsan_handle_add_overflow",
+ BT_FN_VOID_PTR_PTR_PTR,
+ ATTR_COLD_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_SUB_OVERFLOW,
+ "__ubsan_handle_sub_overflow",
+ BT_FN_VOID_PTR_PTR_PTR,
+ ATTR_COLD_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_MUL_OVERFLOW,
+ "__ubsan_handle_mul_overflow",
+ BT_FN_VOID_PTR_PTR_PTR,
+ ATTR_COLD_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_NEGATE_OVERFLOW,
+ "__ubsan_handle_negate_overflow",
+ BT_FN_VOID_PTR_PTR,
+ ATTR_COLD_NOTHROW_LEAF_LIST)
diff --git a/gcc/sbitmap.h b/gcc/sbitmap.h
index 9a0852a670b..0dc9567f580 100644
--- a/gcc/sbitmap.h
+++ b/gcc/sbitmap.h
@@ -126,7 +126,7 @@ bitmap_clear_bit (sbitmap map, int bitno)
}
/* The iterator for sbitmap. */
-typedef struct {
+struct sbitmap_iterator {
/* The pointer to the first word of the bitmap. */
const SBITMAP_ELT_TYPE *ptr;
@@ -141,7 +141,7 @@ typedef struct {
/* The words currently visited. */
SBITMAP_ELT_TYPE word;
-} sbitmap_iterator;
+};
/* Initialize the iterator I with sbitmap BMP and the initial index
MIN. */
diff --git a/gcc/sched-ebb.c b/gcc/sched-ebb.c
index 955501a9547..d4baec5a534 100644
--- a/gcc/sched-ebb.c
+++ b/gcc/sched-ebb.c
@@ -637,7 +637,7 @@ schedule_ebbs (void)
schedule_ebbs_init ();
/* Schedule every region in the subroutine. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx head = BB_HEAD (bb);
@@ -737,7 +737,7 @@ ebb_fix_recovery_cfg (int bbi ATTRIBUTE_UNUSED, int jump_bbi,
gcc_assert (last_bb->index != bbi);
if (jump_bb_nexti == last_bb->index)
- last_bb = BASIC_BLOCK (jump_bbi);
+ last_bb = BASIC_BLOCK_FOR_FN (cfun, jump_bbi);
}
#endif /* INSN_SCHEDULING */
diff --git a/gcc/sched-int.h b/gcc/sched-int.h
index 84b5cb58c96..b2c77240298 100644
--- a/gcc/sched-int.h
+++ b/gcc/sched-int.h
@@ -1381,7 +1381,7 @@ extern void schedule_ebbs_finish (void);
/* A region is the main entity for interblock scheduling: insns
are allowed to move between blocks in the same region, along
control flow graph edges, in the 'up' direction. */
-typedef struct
+struct region
{
/* Number of extended basic blocks in region. */
int rgn_nr_blocks;
@@ -1392,8 +1392,7 @@ typedef struct
unsigned int dont_calc_deps : 1;
/* This region has at least one non-trivial ebb. */
unsigned int has_real_ebb : 1;
-}
-region;
+};
extern int nr_regions;
extern region *rgn_table;
@@ -1416,8 +1415,9 @@ extern int *containing_rgn;
/* The mapping from ebb to block. */
extern int *ebb_head;
#define BB_TO_BLOCK(ebb) (rgn_bb_table[ebb_head[ebb]])
-#define EBB_FIRST_BB(ebb) BASIC_BLOCK (BB_TO_BLOCK (ebb))
-#define EBB_LAST_BB(ebb) BASIC_BLOCK (rgn_bb_table[ebb_head[ebb + 1] - 1])
+#define EBB_FIRST_BB(ebb) BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (ebb))
+#define EBB_LAST_BB(ebb) \
+ BASIC_BLOCK_FOR_FN (cfun, rgn_bb_table[ebb_head[ebb + 1] - 1])
#define INSN_BB(INSN) (BLOCK_TO_BB (BLOCK_NUM (INSN)))
extern int current_nr_blocks;
diff --git a/gcc/sched-rgn.c b/gcc/sched-rgn.c
index 1663e2fd95d..863cd1de2d0 100644
--- a/gcc/sched-rgn.c
+++ b/gcc/sched-rgn.c
@@ -272,7 +272,7 @@ is_cfg_nonregular (void)
/* If we have insns which refer to labels as non-jumped-to operands,
then we consider the cfg not well structured. */
- FOR_EACH_BB (b)
+ FOR_EACH_BB_FN (b, cfun)
FOR_BB_INSNS (b, insn)
{
rtx note, next, set, dest;
@@ -317,7 +317,7 @@ is_cfg_nonregular (void)
Unreachable loops with a single block are detected here. This
test is redundant with the one in find_rgns, but it's much
cheaper to go ahead and catch the trivial case here. */
- FOR_EACH_BB (b)
+ FOR_EACH_BB_FN (b, cfun)
{
if (EDGE_COUNT (b->preds) == 0
|| (single_pred_p (b)
@@ -401,7 +401,8 @@ debug_region (int rgn)
for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++)
{
- dump_bb (stderr, BASIC_BLOCK (rgn_bb_table[current_blocks + bb]),
+ dump_bb (stderr,
+ BASIC_BLOCK_FOR_FN (cfun, rgn_bb_table[current_blocks + bb]),
0, TDF_SLIM | TDF_BLOCKS);
fprintf (stderr, "\n");
}
@@ -440,7 +441,7 @@ dump_region_dot (FILE *f, int rgn)
edge e;
edge_iterator ei;
int src_bb_num = rgn_bb_table[current_blocks + i];
- basic_block bb = BASIC_BLOCK (src_bb_num);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, src_bb_num);
FOR_EACH_EDGE (e, ei, bb->succs)
if (bb_in_region_p (e->dest->index, rgn))
@@ -478,7 +479,7 @@ find_single_block_region (bool ebbs_p)
probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;
- FOR_EACH_BB (ebb_start)
+ FOR_EACH_BB_FN (ebb_start, cfun)
{
RGN_NR_BLOCKS (nr_regions) = 0;
RGN_BLOCKS (nr_regions) = i;
@@ -511,7 +512,7 @@ find_single_block_region (bool ebbs_p)
}
}
else
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rgn_bb_table[nr_regions] = bb->index;
RGN_NR_BLOCKS (nr_regions) = 1;
@@ -554,7 +555,7 @@ too_large (int block, int *num_bbs, int *num_insns)
{
(*num_bbs)++;
(*num_insns) += (common_sched_info->estimate_number_of_insns
- (BASIC_BLOCK (block)));
+ (BASIC_BLOCK_FOR_FN (cfun, block)));
return ((*num_bbs > PARAM_VALUE (PARAM_MAX_SCHED_REGION_BLOCKS))
|| (*num_insns > PARAM_VALUE (PARAM_MAX_SCHED_REGION_INSNS)));
@@ -641,23 +642,23 @@ haifa_find_rgns (void)
STACK, SP and DFS_NR are only used during the first traversal. */
/* Allocate and initialize variables for the first traversal. */
- max_hdr = XNEWVEC (int, last_basic_block);
- dfs_nr = XCNEWVEC (int, last_basic_block);
+ max_hdr = XNEWVEC (int, last_basic_block_for_fn (cfun));
+ dfs_nr = XCNEWVEC (int, last_basic_block_for_fn (cfun));
stack = XNEWVEC (edge_iterator, n_edges_for_fn (cfun));
- inner = sbitmap_alloc (last_basic_block);
+ inner = sbitmap_alloc (last_basic_block_for_fn (cfun));
bitmap_ones (inner);
- header = sbitmap_alloc (last_basic_block);
+ header = sbitmap_alloc (last_basic_block_for_fn (cfun));
bitmap_clear (header);
- in_queue = sbitmap_alloc (last_basic_block);
+ in_queue = sbitmap_alloc (last_basic_block_for_fn (cfun));
bitmap_clear (in_queue);
- in_stack = sbitmap_alloc (last_basic_block);
+ in_stack = sbitmap_alloc (last_basic_block_for_fn (cfun));
bitmap_clear (in_stack);
- for (i = 0; i < last_basic_block; i++)
+ for (i = 0; i < last_basic_block_for_fn (cfun); i++)
max_hdr[i] = -1;
#define EDGE_PASSED(E) (ei_end_p ((E)) || ei_edge ((E))->aux)
@@ -744,7 +745,7 @@ haifa_find_rgns (void)
}
/* Reset ->aux field used by EDGE_PASSED. */
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
{
edge_iterator ei;
edge e;
@@ -761,7 +762,7 @@ haifa_find_rgns (void)
the entry node by placing a nonzero value in dfs_nr. Thus if
dfs_nr is zero for any block, then it must be unreachable. */
unreachable = 0;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (dfs_nr[bb->index] == 0)
{
unreachable = 1;
@@ -772,7 +773,7 @@ haifa_find_rgns (void)
to hold degree counts. */
degree = dfs_nr;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
degree[bb->index] = EDGE_COUNT (bb->preds);
/* Do not perform region scheduling if there are any unreachable
@@ -798,14 +799,15 @@ haifa_find_rgns (void)
extend_regions_p = PARAM_VALUE (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS) > 0;
if (extend_regions_p)
{
- degree1 = XNEWVEC (int, last_basic_block);
- extended_rgn_header = sbitmap_alloc (last_basic_block);
+ degree1 = XNEWVEC (int, last_basic_block_for_fn (cfun));
+ extended_rgn_header =
+ sbitmap_alloc (last_basic_block_for_fn (cfun));
bitmap_clear (extended_rgn_header);
}
/* Find blocks which are inner loop headers. We still have non-reducible
loops to consider at this point. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
if (bitmap_bit_p (header, bb->index) && bitmap_bit_p (inner, bb->index))
{
@@ -824,7 +826,7 @@ haifa_find_rgns (void)
If there exists a block that is not dominated by the loop
header, then the block is reachable from outside the loop
and thus the loop is not a natural loop. */
- FOR_EACH_BB (jbb)
+ FOR_EACH_BB_FN (jbb, cfun)
{
/* First identify blocks in the loop, except for the loop
entry block. */
@@ -853,7 +855,8 @@ haifa_find_rgns (void)
/* We save degree in case when we meet a too_large region
and cancel it. We need a correct degree later when
calling extend_rgns. */
- memcpy (degree1, degree, last_basic_block * sizeof (int));
+ memcpy (degree1, degree,
+ last_basic_block_for_fn (cfun) * sizeof (int));
/* Decrease degree of all I's successors for topological
ordering. */
@@ -871,7 +874,7 @@ haifa_find_rgns (void)
Place those blocks into the queue. */
if (no_loops)
{
- FOR_EACH_BB (jbb)
+ FOR_EACH_BB_FN (jbb, cfun)
/* Leaf nodes have only a single successor which must
be EXIT_BLOCK. */
if (single_succ_p (jbb)
@@ -948,7 +951,8 @@ haifa_find_rgns (void)
edge e;
child = queue[++head];
- FOR_EACH_EDGE (e, ei, BASIC_BLOCK (child)->preds)
+ FOR_EACH_EDGE (e, ei,
+ BASIC_BLOCK_FOR_FN (cfun, child)->preds)
{
node = e->src->index;
@@ -1005,7 +1009,9 @@ haifa_find_rgns (void)
CONTAINING_RGN (child) = nr_regions;
queue[head] = queue[tail--];
- FOR_EACH_EDGE (e, ei, BASIC_BLOCK (child)->succs)
+ FOR_EACH_EDGE (e, ei,
+ BASIC_BLOCK_FOR_FN (cfun,
+ child)->succs)
if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
--degree[e->dest->index];
}
@@ -1046,7 +1052,7 @@ haifa_find_rgns (void)
/* Any block that did not end up in a region is placed into a region
by itself. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (degree[bb->index] >= 0)
{
rgn_bb_table[idx] = bb->index;
@@ -1157,9 +1163,9 @@ extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr)
max_iter = PARAM_VALUE (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS);
- max_hdr = XNEWVEC (int, last_basic_block);
+ max_hdr = XNEWVEC (int, last_basic_block_for_fn (cfun));
- order = XNEWVEC (int, last_basic_block);
+ order = XNEWVEC (int, last_basic_block_for_fn (cfun));
post_order_compute (order, false, false);
for (i = nblocks - 1; i >= 0; i--)
@@ -1200,7 +1206,7 @@ extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr)
{
int hdr = -1;
- FOR_EACH_EDGE (e, ei, BASIC_BLOCK (bbn)->preds)
+ FOR_EACH_EDGE (e, ei, BASIC_BLOCK_FOR_FN (cfun, bbn)->preds)
{
int predn = e->src->index;
@@ -1304,7 +1310,7 @@ extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr)
CONTAINING_RGN (bbn) = nr_regions;
BLOCK_TO_BB (bbn) = 0;
- FOR_EACH_EDGE (e, ei, BASIC_BLOCK (bbn)->succs)
+ FOR_EACH_EDGE (e, ei, BASIC_BLOCK_FOR_FN (cfun, bbn)->succs)
if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
degree[e->dest->index]--;
@@ -1361,7 +1367,8 @@ extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr)
idx++;
- FOR_EACH_EDGE (e, ei, BASIC_BLOCK (succn)->succs)
+ FOR_EACH_EDGE (e, ei,
+ BASIC_BLOCK_FOR_FN (cfun, succn)->succs)
if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
degree[e->dest->index]--;
}
@@ -1420,7 +1427,8 @@ compute_dom_prob_ps (int bb)
/* Initialize dom[bb] to '111..1'. */
bitmap_ones (dom[bb]);
- FOR_EACH_EDGE (in_edge, in_ei, BASIC_BLOCK (BB_TO_BLOCK (bb))->preds)
+ FOR_EACH_EDGE (in_edge, in_ei,
+ BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (bb))->preds)
{
int pred_bb;
edge out_edge;
@@ -1508,7 +1516,7 @@ compute_trg_info (int trg)
sp->is_speculative = 0;
sp->src_prob = REG_BR_PROB_BASE;
- visited = sbitmap_alloc (last_basic_block);
+ visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
for (i = trg + 1; i < current_nr_blocks; i++)
{
@@ -1838,7 +1846,8 @@ update_live (rtx insn, int src)
(bb_from == bb_to \
|| IS_RGN_ENTRY (bb_from) \
|| (bitmap_bit_p (ancestor_edges[bb_to], \
- EDGE_TO_BIT (single_pred_edge (BASIC_BLOCK (BB_TO_BLOCK (bb_from)))))))
+ EDGE_TO_BIT (single_pred_edge (BASIC_BLOCK_FOR_FN (cfun, \
+ BB_TO_BLOCK (bb_from)))))))
/* Turns on the fed_by_spec_load flag for insns fed by load_insn. */
@@ -2655,7 +2664,7 @@ deps_join (struct deps_desc *succ_deps, struct deps_desc *pred_deps)
static void
propagate_deps (int bb, struct deps_desc *pred_deps)
{
- basic_block block = BASIC_BLOCK (BB_TO_BLOCK (bb));
+ basic_block block = BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (bb));
edge_iterator ei;
edge e;
@@ -2864,7 +2873,8 @@ sched_is_disabled_for_current_region_p (void)
int bb;
for (bb = 0; bb < current_nr_blocks; bb++)
- if (!(BASIC_BLOCK (BB_TO_BLOCK (bb))->flags & BB_DISABLE_SCHEDULE))
+ if (!(BASIC_BLOCK_FOR_FN (cfun,
+ BB_TO_BLOCK (bb))->flags & BB_DISABLE_SCHEDULE))
return false;
return true;
@@ -2928,11 +2938,11 @@ static void
realloc_bb_state_array (int saved_last_basic_block)
{
char *old_bb_state_array = bb_state_array;
- size_t lbb = (size_t) last_basic_block;
+ size_t lbb = (size_t) last_basic_block_for_fn (cfun);
size_t slbb = (size_t) saved_last_basic_block;
/* Nothing to do if nothing changed since the last time this was called. */
- if (saved_last_basic_block == last_basic_block)
+ if (saved_last_basic_block == last_basic_block_for_fn (cfun))
return;
/* The selective scheduler doesn't use the state arrays. */
@@ -3052,7 +3062,7 @@ schedule_region (int rgn)
if (dbg_cnt (sched_block))
{
edge f;
- int saved_last_basic_block = last_basic_block;
+ int saved_last_basic_block = last_basic_block_for_fn (cfun);
schedule_block (&curr_bb, bb_state[first_bb->index]);
gcc_assert (EBB_FIRST_BB (bb) == first_bb);
@@ -3271,7 +3281,7 @@ sched_rgn_local_init (int rgn)
/* Use ->aux to implement EDGE_TO_BIT mapping. */
rgn_nr_edges = 0;
- FOR_EACH_BB (block)
+ FOR_EACH_BB_FN (block, cfun)
{
if (CONTAINING_RGN (block->index) != rgn)
continue;
@@ -3281,7 +3291,7 @@ sched_rgn_local_init (int rgn)
rgn_edges = XNEWVEC (edge, rgn_nr_edges);
rgn_nr_edges = 0;
- FOR_EACH_BB (block)
+ FOR_EACH_BB_FN (block, cfun)
{
if (CONTAINING_RGN (block->index) != rgn)
continue;
@@ -3302,7 +3312,7 @@ sched_rgn_local_init (int rgn)
/* Cleanup ->aux used for EDGE_TO_BIT mapping. */
/* We don't need them anymore. But we want to avoid duplication of
aux fields in the newly created edges. */
- FOR_EACH_BB (block)
+ FOR_EACH_BB_FN (block, cfun)
{
if (CONTAINING_RGN (block->index) != rgn)
continue;
@@ -3422,9 +3432,12 @@ void
extend_regions (void)
{
rgn_table = XRESIZEVEC (region, rgn_table, n_basic_blocks_for_fn (cfun));
- rgn_bb_table = XRESIZEVEC (int, rgn_bb_table, n_basic_blocks_for_fn (cfun));
- block_to_bb = XRESIZEVEC (int, block_to_bb, last_basic_block);
- containing_rgn = XRESIZEVEC (int, containing_rgn, last_basic_block);
+ rgn_bb_table = XRESIZEVEC (int, rgn_bb_table,
+ n_basic_blocks_for_fn (cfun));
+ block_to_bb = XRESIZEVEC (int, block_to_bb,
+ last_basic_block_for_fn (cfun));
+ containing_rgn = XRESIZEVEC (int, containing_rgn,
+ last_basic_block_for_fn (cfun));
}
void
diff --git a/gcc/sched-vis.c b/gcc/sched-vis.c
index 8fa29bfa046..6816cd6139d 100644
--- a/gcc/sched-vis.c
+++ b/gcc/sched-vis.c
@@ -890,7 +890,7 @@ extern void debug_bb_n_slim (int);
DEBUG_FUNCTION void
debug_bb_n_slim (int n)
{
- basic_block bb = BASIC_BLOCK (n);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, n);
debug_bb_slim (bb);
}
diff --git a/gcc/sel-sched-dump.c b/gcc/sel-sched-dump.c
index 347b5eb41e7..2e4677071ce 100644
--- a/gcc/sel-sched-dump.c
+++ b/gcc/sel-sched-dump.c
@@ -750,7 +750,7 @@ sel_dump_cfg_2 (FILE *f, int flags)
if (flags & SEL_DUMP_CFG_FUNCTION_NAME)
fprintf (f, "function [label = \"%s\"];\n", current_function_name ());
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
insn_t insn = BB_HEAD (bb);
insn_t next_tail = NEXT_INSN (BB_END (bb));
diff --git a/gcc/sel-sched-ir.c b/gcc/sel-sched-ir.c
index 0db84e64d20..32feaadb136 100644
--- a/gcc/sel-sched-ir.c
+++ b/gcc/sel-sched-ir.c
@@ -3075,7 +3075,7 @@ sel_finish_global_and_expr (void)
bbs.create (current_nr_blocks);
for (i = 0; i < current_nr_blocks; i++)
- bbs.quick_push (BASIC_BLOCK (BB_TO_BLOCK (i)));
+ bbs.quick_push (BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i)));
/* Clear AV_SETs and INSN_EXPRs. */
{
@@ -3627,7 +3627,7 @@ verify_backedges (void)
edge_iterator ei;
for (i = 0; i < current_nr_blocks; i++)
- FOR_EACH_EDGE (e, ei, BASIC_BLOCK (BB_TO_BLOCK (i))->succs)
+ FOR_EACH_EDGE (e, ei, BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i))->succs)
if (in_current_region_p (e->dest)
&& BLOCK_TO_BB (e->dest->index) < i)
n++;
@@ -3897,7 +3897,7 @@ purge_empty_blocks (void)
/* Do not attempt to delete the first basic block in the region. */
for (i = 1; i < current_nr_blocks; )
{
- basic_block b = BASIC_BLOCK (BB_TO_BLOCK (i));
+ basic_block b = BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i));
if (maybe_tidy_empty_bb (b))
continue;
@@ -4095,14 +4095,14 @@ get_seqno_by_preds (rtx insn)
void
sel_extend_global_bb_info (void)
{
- sel_global_bb_info.safe_grow_cleared (last_basic_block);
+ sel_global_bb_info.safe_grow_cleared (last_basic_block_for_fn (cfun));
}
/* Extend region-scope data structures for basic blocks. */
static void
extend_region_bb_info (void)
{
- sel_region_bb_info.safe_grow_cleared (last_basic_block);
+ sel_region_bb_info.safe_grow_cleared (last_basic_block_for_fn (cfun));
}
/* Extend all data structures to fit for all basic blocks. */
@@ -4321,7 +4321,7 @@ init_lv_sets (void)
basic_block bb;
/* Initialize of LV sets. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
init_lv_set (bb);
/* Don't forget EXIT_BLOCK. */
@@ -4349,7 +4349,7 @@ free_lv_sets (void)
free_lv_set (EXIT_BLOCK_PTR_FOR_FN (cfun));
/* Free LV sets. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (BB_LV_SET (bb))
free_lv_set (bb);
}
@@ -4905,9 +4905,10 @@ recompute_rev_top_order (void)
int *postorder;
int n_blocks, i;
- if (!rev_top_order_index || rev_top_order_index_len < last_basic_block)
+ if (!rev_top_order_index
+ || rev_top_order_index_len < last_basic_block_for_fn (cfun))
{
- rev_top_order_index_len = last_basic_block;
+ rev_top_order_index_len = last_basic_block_for_fn (cfun);
rev_top_order_index = XRESIZEVEC (int, rev_top_order_index,
rev_top_order_index_len);
}
@@ -6079,7 +6080,7 @@ sel_init_pipelining (void)
| LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS);
current_loop_nest = NULL;
- bbs_in_loop_rgns = sbitmap_alloc (last_basic_block);
+ bbs_in_loop_rgns = sbitmap_alloc (last_basic_block_for_fn (cfun));
bitmap_clear (bbs_in_loop_rgns);
recompute_rev_top_order ();
@@ -6145,16 +6146,16 @@ make_regions_from_the_rest (void)
/* LOOP_HDR[I] == -1 if I-th bb doesn't belong to any loop,
LOOP_HDR[I] == LOOP_HDR[J] iff basic blocks I and J reside within the same
loop. */
- loop_hdr = XNEWVEC (int, last_basic_block);
- degree = XCNEWVEC (int, last_basic_block);
+ loop_hdr = XNEWVEC (int, last_basic_block_for_fn (cfun));
+ degree = XCNEWVEC (int, last_basic_block_for_fn (cfun));
/* For each basic block that belongs to some loop assign the number
of innermost loop it belongs to. */
- for (i = 0; i < last_basic_block; i++)
+ for (i = 0; i < last_basic_block_for_fn (cfun); i++)
loop_hdr[i] = -1;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
if (bb->loop_father && !bb->loop_father->num == 0
&& !(bb->flags & BB_IRREDUCIBLE_LOOP))
@@ -6164,7 +6165,7 @@ make_regions_from_the_rest (void)
/* For each basic block degree is calculated as the number of incoming
edges, that are going out of bbs that are not yet scheduled.
The basic blocks that are scheduled have degree value of zero. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
degree[bb->index] = 0;
@@ -6182,7 +6183,7 @@ make_regions_from_the_rest (void)
/* Any block that did not end up in a region is placed into a region
by itself. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (degree[bb->index] >= 0)
{
rgn_bb_table[cur_rgn_blocks] = bb->index;
@@ -6346,7 +6347,7 @@ sel_remove_loop_preheader (void)
/* Add blocks that aren't within the current loop to PREHEADER_BLOCKS. */
for (i = 0; i < RGN_NR_BLOCKS (cur_rgn); i++)
{
- bb = BASIC_BLOCK (BB_TO_BLOCK (i));
+ bb = BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i));
/* If the basic block belongs to region, but doesn't belong to
corresponding loop, then it should be a preheader. */
diff --git a/gcc/sel-sched-ir.h b/gcc/sel-sched-ir.h
index ff99e519cf9..d194740b4f2 100644
--- a/gcc/sel-sched-ir.h
+++ b/gcc/sel-sched-ir.h
@@ -407,7 +407,7 @@ _list_clear (_list_t *l)
/* List iterator backend. */
-typedef struct
+struct _list_iterator
{
/* The list we're iterating. */
_list_t *lp;
@@ -417,7 +417,7 @@ typedef struct
/* True when we've actually removed something. */
bool removed_p;
-} _list_iterator;
+};
static inline void
_list_iter_start (_list_iterator *ip, _list_t *lp, bool can_remove_p)
@@ -850,18 +850,17 @@ extern bitmap blocks_to_reschedule;
/* A variable to track which part of rtx we are scanning in
sched-deps.c: sched_analyze_insn (). */
-enum deps_where_def
- {
- DEPS_IN_INSN,
- DEPS_IN_LHS,
- DEPS_IN_RHS,
- DEPS_IN_NOWHERE
- };
-typedef enum deps_where_def deps_where_t;
+enum deps_where_t
+{
+ DEPS_IN_INSN,
+ DEPS_IN_LHS,
+ DEPS_IN_RHS,
+ DEPS_IN_NOWHERE
+};
/* Per basic block data for the whole CFG. */
-typedef struct
+struct sel_global_bb_info_def
{
/* For each bb header this field contains a set of live registers.
For all other insns this field has a NULL.
@@ -873,7 +872,7 @@ typedef struct
true - block has usable LV_SET.
false - block's LV_SET should be recomputed. */
bool lv_set_valid_p;
-} sel_global_bb_info_def;
+};
typedef sel_global_bb_info_def *sel_global_bb_info_t;
@@ -893,7 +892,7 @@ extern void sel_finish_global_bb_info (void);
#define BB_LV_SET_VALID_P(BB) (SEL_GLOBAL_BB_INFO (BB)->lv_set_valid_p)
/* Per basic block data for the region. */
-typedef struct
+struct sel_region_bb_info_def
{
/* This insn stream is constructed in such a way that it should be
traversed by PREV_INSN field - (*not* NEXT_INSN). */
@@ -905,7 +904,7 @@ typedef struct
/* If (AV_LEVEL == GLOBAL_LEVEL) then AV is valid. */
int av_level;
-} sel_region_bb_info_def;
+};
typedef sel_region_bb_info_def *sel_region_bb_info_t;
@@ -951,7 +950,7 @@ extern regset sel_all_regs;
/* Successor iterator backend. */
-typedef struct
+struct succ_iterator
{
/* True if we're at BB end. */
bool bb_end;
@@ -979,7 +978,7 @@ typedef struct
/* If skip to loop exits, save here information about loop exits. */
int current_exit;
vec<edge> loop_exits;
-} succ_iterator;
+};
/* A structure returning all successor's information. */
struct succs_info
diff --git a/gcc/sel-sched.c b/gcc/sel-sched.c
index 1e3fcf0da5a..3e1fd96840d 100644
--- a/gcc/sel-sched.c
+++ b/gcc/sel-sched.c
@@ -4663,8 +4663,8 @@ create_block_for_bookkeeping (edge e1, edge e2)
new_bb->index = succ->index;
succ->index = i;
- SET_BASIC_BLOCK (new_bb->index, new_bb);
- SET_BASIC_BLOCK (succ->index, succ);
+ SET_BASIC_BLOCK_FOR_FN (cfun, new_bb->index, new_bb);
+ SET_BASIC_BLOCK_FOR_FN (cfun, succ->index, succ);
memcpy (&gbi, SEL_GLOBAL_BB_INFO (new_bb), sizeof (gbi));
memcpy (SEL_GLOBAL_BB_INFO (new_bb), SEL_GLOBAL_BB_INFO (succ),
@@ -4903,7 +4903,8 @@ remove_insns_that_need_bookkeeping (fence_t fence, av_set_t *av_ptr)
&& (EXPR_SPEC (expr)
|| !EXPR_ORIG_BB_INDEX (expr)
|| !dominated_by_p (CDI_DOMINATORS,
- BASIC_BLOCK (EXPR_ORIG_BB_INDEX (expr)),
+ BASIC_BLOCK_FOR_FN (cfun,
+ EXPR_ORIG_BB_INDEX (expr)),
BLOCK_FOR_INSN (FENCE_INSN (fence)))))
{
if (sched_verbose >= 4)
@@ -6886,7 +6887,7 @@ current_region_empty_p (void)
{
int i;
for (i = 0; i < current_nr_blocks; i++)
- if (! sel_bb_empty_p (BASIC_BLOCK (BB_TO_BLOCK (i))))
+ if (! sel_bb_empty_p (BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i))))
return false;
return true;
@@ -6945,7 +6946,7 @@ sel_region_init (int rgn)
bbs.create (current_nr_blocks);
for (i = 0; i < current_nr_blocks; i++)
- bbs.quick_push (BASIC_BLOCK (BB_TO_BLOCK (i)));
+ bbs.quick_push (BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i)));
sel_init_bbs (bbs);
@@ -6980,13 +6981,14 @@ sel_region_init (int rgn)
compute_live for the first insn of the loop. */
if (current_loop_nest)
{
- int header = (sel_is_loop_preheader_p (BASIC_BLOCK (BB_TO_BLOCK (0)))
- ? 1
- : 0);
+ int header =
+ (sel_is_loop_preheader_p (BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (0)))
+ ? 1
+ : 0);
if (current_nr_blocks == header + 1)
update_liveness_on_insn
- (sel_bb_head (BASIC_BLOCK (BB_TO_BLOCK (header))));
+ (sel_bb_head (BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (header))));
}
/* Set hooks so that no newly generated insn will go out unnoticed. */
@@ -7024,7 +7026,7 @@ simplify_changed_insns (void)
for (i = 0; i < current_nr_blocks; i++)
{
- basic_block bb = BASIC_BLOCK (BB_TO_BLOCK (i));
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i));
rtx insn;
FOR_BB_INSNS (bb, insn)
diff --git a/gcc/sese.c b/gcc/sese.c
index 7e59ac8d909..5e47ef77d9a 100644
--- a/gcc/sese.c
+++ b/gcc/sese.c
@@ -156,7 +156,7 @@ build_sese_loop_nests (sese region)
basic_block bb;
struct loop *loop0, *loop1;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (bb_in_sese_p (bb, region))
{
struct loop *loop = bb->loop_father;
@@ -303,10 +303,10 @@ sese_build_liveouts (sese region, bitmap liveouts)
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
sese_build_liveouts_bb (region, liveouts, bb);
if (MAY_HAVE_DEBUG_STMTS)
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
sese_reset_debug_liveouts_bb (region, liveouts, bb);
}
diff --git a/gcc/simplify-rtx.c b/gcc/simplify-rtx.c
index 97e760dfe25..cfe1c005836 100644
--- a/gcc/simplify-rtx.c
+++ b/gcc/simplify-rtx.c
@@ -657,11 +657,16 @@ simplify_truncation (enum machine_mode mode, rtx op,
XEXP (op, 0), origmode);
}
- /* Simplify (truncate:SI (op:DI (x:DI) (y:DI)))
- to (op:SI (truncate:SI (x:DI)) (truncate:SI (x:DI))). */
- if (GET_CODE (op) == PLUS
- || GET_CODE (op) == MINUS
- || GET_CODE (op) == MULT)
+ /* If the machine can perform operations in the truncated mode, distribute
+ the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
+ (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
+ if (1
+#ifdef WORD_REGISTER_OPERATIONS
+ && precision >= BITS_PER_WORD
+#endif
+ && (GET_CODE (op) == PLUS
+ || GET_CODE (op) == MINUS
+ || GET_CODE (op) == MULT))
{
rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
if (op0)
diff --git a/gcc/sreal.h b/gcc/sreal.h
index 71cb91ea3e5..ac7de573b9d 100644
--- a/gcc/sreal.h
+++ b/gcc/sreal.h
@@ -41,7 +41,7 @@ along with GCC; see the file COPYING3. If not see
#endif
/* Structure for holding a simple real number. */
-typedef struct sreal
+struct sreal
{
#if SREAL_PART_BITS < 32
unsigned HOST_WIDE_INT sig_lo; /* Significant (lower part). */
@@ -50,7 +50,7 @@ typedef struct sreal
unsigned HOST_WIDE_INT sig; /* Significant. */
#endif
signed int exp; /* Exponent. */
-} sreal;
+};
extern void dump_sreal (FILE *, sreal *);
extern void debug (sreal &ref);
diff --git a/gcc/ssa-iterators.h b/gcc/ssa-iterators.h
index af486b6118d..eceddbce214 100644
--- a/gcc/ssa-iterators.h
+++ b/gcc/ssa-iterators.h
@@ -23,7 +23,7 @@ along with GCC; see the file COPYING3. If not see
/* Immediate use lists are used to directly access all uses for an SSA
name and get pointers to the statement for each use.
- The structure ssa_use_operand_d consists of PREV and NEXT pointers
+ The structure ssa_use_operand_t consists of PREV and NEXT pointers
to maintain the list. A USE pointer, which points to address where
the use is located and a LOC pointer which can point to the
statement where the use is located, or, in the case of the root
@@ -55,7 +55,7 @@ along with GCC; see the file COPYING3. If not see
If iteration is halted early, the marker node must be removed from
the list before continuing. */
-typedef struct immediate_use_iterator_d
+struct imm_use_iterator
{
/* This is the current use the iterator is processing. */
ssa_use_operand_t *imm_use;
@@ -66,7 +66,7 @@ typedef struct immediate_use_iterator_d
/* This is the next ssa_name to visit. IMM_USE may get removed before
the next one is traversed to, so it must be cached early. */
ssa_use_operand_t *next_imm_name;
-} imm_use_iterator;
+};
/* Use this iterator when simply looking at stmts. Adding, deleting or
@@ -131,7 +131,7 @@ enum ssa_op_iter_type {
optimization, this structure is scalarized, and any unused fields are
optimized away, resulting in little overhead. */
-typedef struct ssa_operand_iterator_d
+struct ssa_op_iter
{
enum ssa_op_iter_type iter_type;
bool done;
@@ -140,7 +140,7 @@ typedef struct ssa_operand_iterator_d
unsigned numops;
use_optype_p uses;
gimple stmt;
-} ssa_op_iter;
+};
/* These flags are used to determine which operands are returned during
execution of the loop. */
diff --git a/gcc/stack-ptr-mod.c b/gcc/stack-ptr-mod.c
index 68ccd1619ed..acca80127e1 100644
--- a/gcc/stack-ptr-mod.c
+++ b/gcc/stack-ptr-mod.c
@@ -58,7 +58,7 @@ notice_stack_pointer_modification (void)
been used. */
crtl->sp_is_unchanging = !cfun->calls_alloca;
if (crtl->sp_is_unchanging)
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
FOR_BB_INSNS (bb, insn)
{
if (INSN_P (insn))
diff --git a/gcc/store-motion.c b/gcc/store-motion.c
index 378d6c7e8ba..57c991aacf3 100644
--- a/gcc/store-motion.c
+++ b/gcc/store-motion.c
@@ -656,7 +656,7 @@ compute_store_table (void)
already_set = XNEWVEC (int, max_gcse_regno);
/* Find all the stores we care about. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
/* First compute the registers set in this block. */
FOR_BB_INSNS (bb, insn)
@@ -844,7 +844,7 @@ remove_reachable_equiv_notes (basic_block bb, struct st_expr *smexpr)
edge_iterator *stack, ei;
int sp;
edge act;
- sbitmap visited = sbitmap_alloc (last_basic_block);
+ sbitmap visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
rtx last, insn, note;
rtx mem = smexpr->pattern;
@@ -1016,11 +1016,13 @@ build_store_vectors (void)
/* Build the gen_vector. This is any store in the table which is not killed
by aliasing later in its block. */
- st_avloc = sbitmap_vector_alloc (last_basic_block, num_stores);
- bitmap_vector_clear (st_avloc, last_basic_block);
+ st_avloc = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
+ num_stores);
+ bitmap_vector_clear (st_avloc, last_basic_block_for_fn (cfun));
- st_antloc = sbitmap_vector_alloc (last_basic_block, num_stores);
- bitmap_vector_clear (st_antloc, last_basic_block);
+ st_antloc = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
+ num_stores);
+ bitmap_vector_clear (st_antloc, last_basic_block_for_fn (cfun));
for (ptr = first_st_expr (); ptr != NULL; ptr = next_st_expr (ptr))
{
@@ -1052,14 +1054,14 @@ build_store_vectors (void)
}
}
- st_kill = sbitmap_vector_alloc (last_basic_block, num_stores);
- bitmap_vector_clear (st_kill, last_basic_block);
+ st_kill = sbitmap_vector_alloc (last_basic_block_for_fn (cfun), num_stores);
+ bitmap_vector_clear (st_kill, last_basic_block_for_fn (cfun));
- st_transp = sbitmap_vector_alloc (last_basic_block, num_stores);
- bitmap_vector_clear (st_transp, last_basic_block);
+ st_transp = sbitmap_vector_alloc (last_basic_block_for_fn (cfun), num_stores);
+ bitmap_vector_clear (st_transp, last_basic_block_for_fn (cfun));
regs_set_in_block = XNEWVEC (int, max_gcse_regno);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
memset (regs_set_in_block, 0, sizeof (int) * max_gcse_regno);
@@ -1095,10 +1097,14 @@ build_store_vectors (void)
if (dump_file)
{
- dump_bitmap_vector (dump_file, "st_antloc", "", st_antloc, last_basic_block);
- dump_bitmap_vector (dump_file, "st_kill", "", st_kill, last_basic_block);
- dump_bitmap_vector (dump_file, "st_transp", "", st_transp, last_basic_block);
- dump_bitmap_vector (dump_file, "st_avloc", "", st_avloc, last_basic_block);
+ dump_bitmap_vector (dump_file, "st_antloc", "", st_antloc,
+ last_basic_block_for_fn (cfun));
+ dump_bitmap_vector (dump_file, "st_kill", "", st_kill,
+ last_basic_block_for_fn (cfun));
+ dump_bitmap_vector (dump_file, "st_transp", "", st_transp,
+ last_basic_block_for_fn (cfun));
+ dump_bitmap_vector (dump_file, "st_avloc", "", st_avloc,
+ last_basic_block_for_fn (cfun));
}
}
@@ -1182,7 +1188,7 @@ one_store_motion_pass (void)
/* Now we want to insert the new stores which are going to be needed. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (bitmap_bit_p (st_delete_map[bb->index], ptr->index))
{
delete_store (ptr, bb);
diff --git a/gcc/target.def b/gcc/target.def
index 6f282326ea3..629c3c2efb1 100644
--- a/gcc/target.def
+++ b/gcc/target.def
@@ -2819,7 +2819,7 @@ DEFHOOK
version of this hook assumes the system C library errno location\
is either a declaration of type int or accessed by dereferencing\
a pointer to int.",
- bool, (struct ao_ref_s *ref),
+ bool, (struct ao_ref *ref),
default_ref_may_alias_errno)
/* Support for named address spaces. */
diff --git a/gcc/target.h b/gcc/target.h
index 68873bd1385..ac5318de625 100644
--- a/gcc/target.h
+++ b/gcc/target.h
@@ -54,7 +54,7 @@
#ifdef ENABLE_CHECKING
-typedef struct { void *magic; void *p; } cumulative_args_t;
+struct cumulative_args_t { void *magic; void *p; };
#else /* !ENABLE_CHECKING */
@@ -64,20 +64,19 @@ typedef struct { void *magic; void *p; } cumulative_args_t;
efficient way of argument passing otherwise. However, that would come
at the cost of less type-safe !ENABLE_CHECKING compilation. */
-typedef union { void *p; } cumulative_args_t;
+union cumulative_args_t { void *p; };
#endif /* !ENABLE_CHECKING */
/* Types used by the record_gcc_switches() target function. */
-typedef enum
+enum print_switch_type
{
SWITCH_TYPE_PASSED, /* A switch passed on the command line. */
SWITCH_TYPE_ENABLED, /* An option that is currently enabled. */
SWITCH_TYPE_DESCRIPTIVE, /* Descriptive text, not a switch or option. */
SWITCH_TYPE_LINE_START, /* Please emit any necessary text at the start of a line. */
SWITCH_TYPE_LINE_END /* Please emit a line terminator. */
-}
-print_switch_type;
+};
typedef int (* print_switch_fn_type) (print_switch_type, const char *);
@@ -97,7 +96,7 @@ struct cgraph_node;
struct cgraph_simd_clone;
/* The struct used by the secondary_reload target hook. */
-typedef struct secondary_reload_info
+struct secondary_reload_info
{
/* icode is actually an enum insn_code, but we don't want to force every
file that includes target.h to include optabs.h . */
@@ -108,7 +107,7 @@ typedef struct secondary_reload_info
compatibility hook. */
struct secondary_reload_info *prev_sri;
int t_icode; /* Actually an enum insn_code - see above. */
-} secondary_reload_info;
+};
/* This is defined in sched-int.h . */
struct _dep;
@@ -120,7 +119,7 @@ struct ddg;
struct loop;
/* This is defined in tree-ssa-alias.h. */
-struct ao_ref_s;
+struct ao_ref;
/* This is defined in tree-vectorizer.h. */
struct _stmt_vec_info;
diff --git a/gcc/targhooks.h b/gcc/targhooks.h
index 0d8a30e43c4..2b531608a92 100644
--- a/gcc/targhooks.h
+++ b/gcc/targhooks.h
@@ -153,7 +153,7 @@ extern bool default_target_option_valid_attribute_p (tree, tree, tree, int);
extern bool default_target_option_pragma_parse (tree, tree);
extern bool default_target_can_inline_p (tree, tree);
extern bool default_valid_pointer_mode (enum machine_mode);
-extern bool default_ref_may_alias_errno (struct ao_ref_s *);
+extern bool default_ref_may_alias_errno (struct ao_ref *);
extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
extern enum machine_mode default_addr_space_address_mode (addr_space_t);
extern bool default_addr_space_valid_pointer_mode (enum machine_mode,
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index e35854179d2..70b1bc475b5 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,5 +1,439 @@
+2013-12-03 Jeff Law <law@redhat.com>
+
+ PR tree-optimization/45685
+ * gcc.dg/tree-ssa/pr45685.c: New test.
+
+2013-12-13 Bin Cheng <bin.cheng@arm.com>
+
+ PR tree-optimization/58296
+ PR tree-optimization/41488
+ * gcc.dg/tree-ssa/scev-7.c: New test.
+ * gcc.dg/pr41488.c: New test.
+ * g++.dg/pr59445.C: New test.
+
+2013-12-12 Tobias Burnus <burnus@net-b.de>
+
+ PR fortran/59440
+ * gfortran.dg/namelist_83.f90: New.
+ * gfortran.dg/namelist_83_2.f90: New.
+
+2013-12-12 Jakub Jelinek <jakub@redhat.com>
+
+ PR middle-end/59470
+ * g++.dg/opt/pr59470.C: New test.
+
+2013-12-12 Max Ostapenko <m.ostapenko@partner.samsung.com>
+
+ * c-c++-common/tsan/free_race2.c: New file.
+ * c-c++-common/tsan/race_on_barrier2.c: Likewise.
+ * c-c++-common/tsan/race_on_mutex.c: Likewise.
+ * c-c++-common/tsan/race_on_mutex2.c: Likewise.
+ * c-c++-common/tsan/simple_race.c: Likewise.
+ * c-c++-common/tsan/simple_stack.c: Likewise.
+ * g++.dg/tsan/aligned_vs_unaligned_race.C: Likewise.
+ * g++.dg/tsan/atomic_free.C: Likewise.
+ * g++.dg/tsan/atomic_free2.C: Likewise.
+ * g++.dg/tsan/benign_race.C: Likewise.
+ * g++.dg/tsan/cond_race.C: Likewise.
+ * g++.dg/tsan/default_options.C: Likewise.
+ * g++.dg/tsan/fd_close_norace.C: Likewise.
+ * g++.dg/tsan/fd_close_norace2.C: Likewise.
+ * g++-dg/tsan/tsan.exp: Modified to run additional C++ tests.
+
+2013-12-12 Jakub Jelinek <jakub@redhat.com>
+
+ PR libgomp/59467
+ * gfortran.dg/gomp/pr59467.f90: New test.
+ * c-c++-common/gomp/pr59467.c: New test.
+
+2013-12-12 Ryan Mansfield <rmansfield@qnx.com>
+
+ PR testsuite/59442
+ * gcc.target/i386/sse2-movapd-1.c: Fix alignment attributes.
+ * gcc.target/i386/sse2-movapd-2.c: Likewise.
+ * gcc.target/i386/avx-vmovapd-256-1.c: Likewise.
+ * gcc.target/i386/avx-vmovapd-256-2.c: Likewise.
+
+2013-12-11 Sriraman Tallam <tmsriram@google.com>
+
+ PR target/59390
+ * gcc.target/i386/pr59390.c: New test.
+ * gcc.target/i386/pr59390_1.c: New test.
+ * gcc.target/i386/pr59390_2.c: New test.
+
+2013-12-11 Balaji V. Iyer <balaji.v.iyer@intel.com>
+
+ * g++.dg/cilk-plus/CK/catch_exc.cc: New test case.
+ * g++.dg/cilk-plus/CK/const_spawn.cc: Likewise.
+ * g++.dg/cilk-plus/CK/fib-opr-overload.cc: Likewise.
+ * g++.dg/cilk-plus/CK/fib-tplt.cc: Likewise.
+ * g++.dg/cilk-plus/CK/lambda_spawns.cc: Likewise.
+ * g++.dg/cilk-plus/CK/lambda_spawns_tplt.cc: Likewise.
+ * g++.dg/cilk-plus/cilk-plus.exp: Added support to run Cilk Keywords
+ test stored in c-c++-common. Also, added the Cilk runtime's library
+ to the ld_library_path.
+
+2013-12-11 Bernd Edlinger <bernd.edlinger@hotmail.de>
+
+ PR middle-end/59134
+ * gcc.c-torture/compile/pr59134.c: New test.
+ * gnat.dg/misaligned_volatile.adb: New test.
+
+2013-12-11 Bernd Edlinger <bernd.edlinger@hotmail.de>
+ Sandra Loosemore <sandra@codesourcery.com>
+
+ * gcc.dg/pr23623.c: Update to test interaction with C++ memory model.
+
+2013-12-11 Sandra Loosemore <sandra@codesourcery.com>
+
+ PR middle-end/23623
+ PR middle-end/48784
+ PR middle-end/56341
+ PR middle-end/56997
+ * gcc.dg/pr23623.c: New test.
+ * gcc.dg/pr48784-1.c: New test.
+ * gcc.dg/pr48784-2.c: New test.
+ * gcc.dg/pr56341-1.c: New test.
+ * gcc.dg/pr56341-2.c: New test.
+ * gcc.dg/pr56997-1.c: New test.
+ * gcc.dg/pr56997-2.c: New test.
+ * gcc.dg/pr56997-3.c: New test.
+
+2013-12-11 Janus Weil <janus@gcc.gnu.org>
+
+ PR fortran/58916
+ * gfortran.dg/allocate_with_source_4.f90: New.
+
+2013-12-11 Jakub Jelinek <jakub@redhat.com>
+
+ PR tree-optimization/59417
+ * gcc.c-torture/compile/pr59417.c: New test.
+
+ PR tree-optimization/59386
+ * gcc.c-torture/compile/pr59386.c: New test.
+
+2013-12-11 Bin Cheng <bin.cheng@arm.com>
+
+ Reverted:
+ 2013-12-10 Bin Cheng <bin.cheng@arm.com>
+ PR tree-optimization/41488
+ * gcc.dg/tree-ssa/scev-7.c: New test.
+ * gcc.dg/pr41488.c: New test.
+
+2013-12-10 Janus Weil <janus@gcc.gnu.org>
+
+ PR fortran/35831
+ * gfortran.dg/c_by_val_5.f90: Modified.
+ * gfortran.dg/dummy_procedure_10.f90: New.
+
+2013-12-10 Yury Gribov <y.gribov@samsung.com>
+
+ * gcc-dg/tsan/tsan.exp: Added missing call to torture-finish.
+ * g++-dg/tsan/tsan.exp: Likewise.
+
+2013-12-10 Richard Biener <rguenther@suse.de>
+
+ PR middle-end/38474
+ * gcc.dg/ipa/ipa-pta-14.c: Un-XFAIL.
+
+2013-12-10 Jakub Jelinek <jakub@redhat.com>
+
+ * gcc.dg/vect/vect-cond-11.c: New test.
+ * gcc.target/i386/vect-cond-1.c: New test.
+ * gcc.target/i386/avx2-gather-5.c: New test.
+ * gcc.target/i386/avx2-gather-6.c: New test.
+ * gcc.dg/vect/vect-mask-loadstore-1.c: New test.
+ * gcc.dg/vect/vect-mask-load-1.c: New test.
+
+2013-12-09 Marek Polacek <polacek@redhat.com>
+
+ PR sanitizer/59437
+ * g++.dg/ubsan/pr59437.C: New test.
+
+2013-12-10 Max Ostapenko <m.ostapenko@partner.samsung.com>
+
+ * c-c++-common/tsan/thread_leak2.c: `dg-skip-if' removed.
+ * gcc-dg/tsan/tsan.exp: Run only with '-O0' and '-O2' options.
+ * g++-dg/tsan/tsan.exp: Likewise.
+
+2013-12-10 Eric Botcazou <ebotcazou@adacore.com>
+
+ * gcc.dg/vect/pr58508.c: XFAIL for vect_no_align.
+ * gcc.dg/vect/vect-reduc-pattern-3.c: Require vect_int_mult.
+
+2013-12-10 Bin Cheng <bin.cheng@arm.com>
+
+ PR tree-optimization/41488
+ * gcc.dg/tree-ssa/scev-7.c: New test.
+ * gcc.dg/pr41488.c: New test.
+
+2013-12-09 Joseph Myers <joseph@codesourcery.com>
+
+ PR preprocessor/55715
+ * gcc.dg/cpp/expr-overflow-1.c: New test.
+
+2013-12-10 Tobias Burnus <burnus@net-b.de>
+
+ PR fortran/59428
+ PR fortran/58099
+ PR fortran/58676
+ PR fortran/41724
+ * gfortran.dg/proc_ptr_result_4.f90: Fix proc-ptr interface.
+
+2013-12-09 Paolo Carlini <paolo.carlini@oracle.com>
+
+ PR c++/59435
+ * g++.dg/cpp0x/variadic-sizeof3.C: New.
+
+2013-12-09 David Malcolm <dmalcolm@redhat.com>
+
+ * g++.dg/plugin/selfassign.c (execute_warn_self_assign): Eliminate
+ use of FOR_EACH_BB in favor of FOR_EACH_BB_FN, to make use of cfun
+ explicit.
+ * gcc.dg/plugin/selfassign.c (execute_warn_self_assign): Likewise.
+
+2013-12-09 Richard Earnshaw <rearnsha@arm.com>
+
+ * gcc.target/arm/ldrd-strd-offset.c: New.
+
+2013-12-09 Martin Jambor <mjambor@suse.cz>
+
+ * gcc.c-torture/compile/pr39834.c: Remove optimization level option.
+ * gcc.c-torture/compile/pr48929.c: Likewise.
+ * gcc.c-torture/compile/pr55569.c: Likewise.
+ * gcc.c-torture/compile/sra-1.c: Likewise.
+ * gcc.c-torture/compile/pr45085.c: Moved to...
+ * gcc.dg/tree-ssa/pr45085.c: ...here, added compile dg-do.
+
+2013-12-09 Marek Polacek <polacek@redhat.com>
+
+ PR sanitizer/59415
+ * g++.dg/ubsan/pr59415.C: New test.
+
+2013-12-09 Paolo Carlini <paolo.carlini@oracle.com>
+
+ PR c++/52707
+ * g++.dg/cpp0x/deleted2.C: New.
+
+2013-12-09 Kyrylo Tkachov <kyrylo.tkachov@arm.com>
+
+ * gcc.dg/tree-ssa/loop-31.c: Update scan pattern.
+
+2013-12-09 Richard Sandiford <rdsandiford@googlemail.com>
+
+ * lib/asan-dg.exp (asan-gtest): Remove expected output from the
+ pass/fail line and add it to the log instead.
+
+2013-12-08 Oleg Endo <olegendo@gcc.gnu.org>
+
+ PR target/52898
+ PR target/51697
+ * gcc.target/sh/pr51697.c: New.
+
+2013-12-08 Uros Bizjak <ubizjak@gmail.com>
+
+ * gcc.dg/macro-fusion-1.c: Cleanup sched2 rtl dump.
+ * gcc.dg/macro-fusion-2.c: Ditto.
+ * gcc.dg/vect/vect-simd-clone-10a.c: Cleanup vect tree dump.
+ * gcc.dg/vect/vect-simd-clone-12a.c: Ditto.
+
+2013-12-08 Tobias Burnus <burnus@net-b.de>
+
+ PR fortran/58099
+ PR fortran/58676
+ PR fortran/41724
+ * gfortran.dg/elemental_subroutine_8.f90: New.
+ * gfortran.dg/proc_decl_9.f90: Add ELEMENTAL to make valid.
+ * gfortran.dg/proc_ptr_11.f90: Ditto.
+ * gfortran.dg/proc_ptr_result_8.f90: Ditto.
+ * gfortran.dg/proc_ptr_32.f90: Update dg-error.
+ * gfortran.dg/proc_ptr_33.f90: Ditto.
+ * gfortran.dg/proc_ptr_result_1.f90: Add abstract interface
+ which is not elemental.
+ * gfortran.dg/proc_ptr_result_7.f90: Ditto.
+
+2013-12-07 Janus Weil <janus@gcc.gnu.org>
+
+ PR fortran/59414
+ * gfortran.dg/class_result_2.f90: New.
+
+2013-12-06 Jakub Jelinek <jakub@redhat.com>
+
+ PR tree-optimization/59388
+ * gcc.c-torture/execute/pr59388.c: New test.
+
+2013-12-06 Dominique d'Humieres <dominiq@lps.ens.fr>
+
+ PR testsuite/59043
+ * g++.dg/pubtypes.C: Adjust the regular expression.
+ * gcc.dg/pubtypes-1.c: Likewise.
+ * gcc.dg/pubtypes-2.c: Likewise.
+ * gcc.dg/pubtypes-3.c: Likewise.
+ * gcc.dg/pubtypes-4.c: Likewise.
+
+2013-12-06 Tejas Belagod <tejas.belagod@arm.com>
+
+ * gcc.dg/vect/vect-nop-move.c: Fix dg options.
+
+2013-12-06 Uros Bizjak <ubizjak@gmail.com>
+
+ PR target/59405
+ * gcc.target/i386/pr59405.c: New test.
+
+2013-12-06 Ian Bolton <ian.bolton@arm.com>
+ Mark Mitchell <mark@codesourcery.com>
+
+ PR target/59091
+ * gcc.target/arm/builtin-trap.c: New test.
+ * gcc.target/arm/thumb-builtin-trap.c: Likewise.
+
+2013-12-06 Eric Botcazou <ebotcazou@adacore.com>
+
+ * gcc.target/sparc/pdistn.c: New test.
+ * gcc.target/sparc/pdistn-2.c: Likewise.
+
+2013-12-06 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/59058
+ * gcc.dg/torture/pr59058.c: New testcase.
+
+2013-12-05 Paolo Carlini <paolo.carlini@oracle.com>
+
+ * g++.dg/warn/pr15774-1.C: Adjust expected message.
+
+2013-12-05 Vladimir Makarov <vmakarov@redhat.com>
+
+ PR rtl-optimization/59317
+ * testsuite/gcc.target/mips/pr59317.c: New.
+
+2013-12-05 Marek Polacek <polacek@redhat.com>
+
+ PR sanitizer/59333
+ PR sanitizer/59397
+ * c-c++-common/ubsan/pr59333.c: New test.
+ * c-c++-common/ubsan/pr59397.c: New test.
+
+2013-12-05 Tejas Belagod <tejas.belagod@arm.com>
+
+ * gcc.dg/vect/vect-nop-move.c: New test.
+
+2013-12-05 Max Ostapenko <m.ostapenko@partner.samsung.com>
+
+ * c-c++-common/tsan/atomic_stack.c: New test.
+ * c-c++-common/tsan/fd_pipe_race.c: New test.
+ * c-c++-common/tsan/free_race.c: New test.
+ * c-c++-common/tsan/mutexset1.c: New test.
+ * c-c++-common/tsan/race_on_barrier.c: New test.
+ * c-c++-common/tsan/sleep_sync.c: New test.
+ * c-c++-common/tsan/thread_leak.c: New test.
+ * c-c++-common/tsan/thread_leak1.c: New test.
+ * c-c++-common/tsan/thread_leak2.c: New test.
+ * c-c++-common/tsan/tiny_race.c: New test.
+ * c-c++-common/tsan/tls_race.c: New test.
+ * c-c++-common/tsan/write_in_reader_lock.c: New test.
+ * lib/tsan-dg.exp: New file.
+ * gcc.dg/tsan/tsan.exp: New file.
+ * g++.dg/tsan/tsan.exp: New file.
+ * g++.dg/dg.exp: Prune tsan subdirectory.
+
+2013-12-05 Kirill Yukhin <kirill.yukhin@intel.com>
+
+ * gcc.target/i386/readeflags-1.c: New.
+ * gcc.target/i386/writeeflags-1.c: Ditto.
+
+2013-12-05 Yury Gribov <y.gribov@samsung.com>
+
+ PR sanitizer/59369
+ * c-c++-common/asan/pr59063-1.c: Disable on non-Linux platforms.
+ * c-c++-common/asan/pr59063-2.c: Likewise.
+
+2013-12-05 Paolo Carlini <paolo.carlini@oracle.com>
+
+ * g++.dg/cpp0x/constexpr-46336.C: Adjust expected messages.
+ * g++.dg/cpp0x/defaulted2.C: Likewise.
+ * g++.dg/cpp1y/auto-fn8.C: Likewise.
+ * g++.dg/gomp/udr-3.C: Likewise.
+ * g++.dg/lookup/extern-c-redecl5.C: Likewise.
+ * g++.dg/lookup/linkage1.C: Likewise.
+ * g++.dg/overload/new1.C: Likewise.
+ * g++.dg/parse/friend5.C: Likewise.
+ * g++.dg/parse/namespace-alias-1.C: Likewise.
+ * g++.dg/parse/namespace10.C: Likewise.
+ * g++.dg/parse/redef2.C: Likewise.
+ * g++.dg/template/friend44.C: Likewise.
+ * g++.old-deja/g++.brendan/crash42.C: Likewise.
+ * g++.old-deja/g++.brendan/crash52.C: Likewise.
+ * g++.old-deja/g++.brendan/crash55.C: Likewise.
+ * g++.old-deja/g++.jason/overload21.C: Likewise.
+ * g++.old-deja/g++.jason/overload5.C: Likewise.
+ * g++.old-deja/g++.jason/redecl1.C: Likewise.
+ * g++.old-deja/g++.law/arm8.C: Likewise.
+ * g++.old-deja/g++.other/main1.C: Likewise.
+
+2013-12-05 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/56787
+ * gcc.dg/vect/pr56787.c: Adjust to not require vector float division.
+
+2013-12-05 Kostya Serebryany <kcc@google.com>
+
+ * c-c++-common/asan/null-deref-1.c: Update the test
+ to match the fresh asan run-time.
+
+2013-12-05 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/59374
+ * gcc.dg/torture/pr59374-1.c: New testcase.
+ * gcc.dg/torture/pr59374-2.c: Likewise.
+
+2013-12-05 Kirill Yukhin <kirill.yukhin@intel.com>
+
+ * gcc.target/ia64/pr52731.c: New.
+
+2013-12-04 Jeff Law <law@redhat.com>
+
+ * gcc.dg/pr38984.c: Use -fno-isolate-erroneous-paths-dereference.
+ * gcc.dg/tree-ssa/isolate-2.c: Explicitly turn on
+ -fisolate-erroneous-paths-attribute.
+ * gcc.dg/tree-ssa/isolate-4.c: Likewise.
+
+2013-12-04 Joseph Myers <joseph@codesourcery.com>
+
+ PR c/52023
+ * gcc.dg/c11-align-6.c: New test.
+
+2013-12-04 Marek Polacek <polacek@redhat.com>
+
+ * c-c++-common/ubsan/overflow-mul-2.c: New test.
+ * c-c++-common/ubsan/overflow-add-1.c: New test.
+ * c-c++-common/ubsan/overflow-add-2.c: New test.
+ * c-c++-common/ubsan/overflow-mul-1.c: New test.
+ * c-c++-common/ubsan/overflow-sub-1.c: New test.
+ * c-c++-common/ubsan/overflow-sub-2.c: New test.
+ * c-c++-common/ubsan/overflow-negate-1.c: New test.
+
+2013-12-04 Marek Polacek <polacek@redhat.com>
+
+ PR c/54113
+ * gcc.dg/pr54113.c: New test.
+
2013-12-04 Jakub Jelinek <jakub@redhat.com>
+ PR c++/59268
+ * g++.dg/cpp0x/constexpr-template6.C: New test.
+
+2013-12-04 Eric Botcazou <ebotcazou@adacore.com>
+
+ * gnat.dg/pack19.adb: New test.
+
+2013-12-04 Jakub Jelinek <jakub@redhat.com>
+
+ PR rtl-optimization/58726
+ * gcc.c-torture/execute/pr58726.c: New test.
+
+ PR target/59163
+ * g++.dg/torture/pr59163.C: New test.
+
PR tree-optimization/59355
* g++.dg/ipa/pr59355.C: New test.
@@ -29,10 +463,9 @@
2013-12-03 Bill Schmidt <wschmidt@linux.vnet.ibm.com>
- * gcc.dg/vect/costmodel/ppc/costmodel-slp-34.c: Skip for little
- endian.
+ * gcc.dg/vect/costmodel/ppc/costmodel-slp-34.c: Skip for little endian.
-2013-12-03 H.J. Lu <hongjiu.lu@intel.com>
+2013-12-03 H.J. Lu <hongjiu.lu@intel.com>
PR target/59363
* gcc.target/i386/pr59363.c: New file.
@@ -77,7 +510,7 @@
2013-12-02 Sriraman Tallam <tmsriram@google.com>
PR target/58944
- * testsuite/gcc.target/i386/pr58944.c: New test.
+ * testsuite/gcc.target/i386/pr58944.c: New test.
2013-12-02 Joseph Myers <joseph@codesourcery.com>
@@ -2153,7 +2586,7 @@
* gcc.dg/tree-prof/tree-prof.exp: Fix comment.
-2013-10-15 Sriraman Tallam <tmsriram@google.com>
+2013-10-15 Sriraman Tallam <tmsriram@google.com>
PR target/57756
* gcc.target/i386/pr57756.c: New test.
diff --git a/gcc/testsuite/c-c++-common/asan/null-deref-1.c b/gcc/testsuite/c-c++-common/asan/null-deref-1.c
index 14ec514aa9a..6aea9d295f3 100644
--- a/gcc/testsuite/c-c++-common/asan/null-deref-1.c
+++ b/gcc/testsuite/c-c++-common/asan/null-deref-1.c
@@ -18,6 +18,5 @@ int main()
/* { dg-output "ERROR: AddressSanitizer:? SEGV on unknown address\[^\n\r]*" } */
/* { dg-output "0x\[0-9a-f\]+ \[^\n\r]*pc 0x\[0-9a-f\]+\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*AddressSanitizer can not provide additional info.*(\n|\r\n|\r)" } */
/* { dg-output " #0 0x\[0-9a-f\]+ (in \[^\n\r]*NullDeref\[^\n\r]* (\[^\n\r]*null-deref-1.c:10|\[^\n\r]*:0)|\[(\])\[^\n\r]*(\n|\r\n|\r)" } */
/* { dg-output " #1 0x\[0-9a-f\]+ (in _*main (\[^\n\r]*null-deref-1.c:15|\[^\n\r]*:0)|\[(\])\[^\n\r]*(\n|\r\n|\r)" } */
diff --git a/gcc/testsuite/c-c++-common/asan/pr59063-1.c b/gcc/testsuite/c-c++-common/asan/pr59063-1.c
index a4e01f76f3a..a22db6a0d82 100644
--- a/gcc/testsuite/c-c++-common/asan/pr59063-1.c
+++ b/gcc/testsuite/c-c++-common/asan/pr59063-1.c
@@ -1,4 +1,4 @@
-/* { dg-do run } */
+/* { dg-do run { target { *-*-linux* } } } */
#include <time.h>
static int weak_gettime (clockid_t clk_id, struct timespec *tp)
diff --git a/gcc/testsuite/c-c++-common/asan/pr59063-2.c b/gcc/testsuite/c-c++-common/asan/pr59063-2.c
index 64354ea7831..759b7f24d09 100644
--- a/gcc/testsuite/c-c++-common/asan/pr59063-2.c
+++ b/gcc/testsuite/c-c++-common/asan/pr59063-2.c
@@ -1,4 +1,4 @@
-/* { dg-do run } */
+/* { dg-do run { target { *-*-linux* } } } */
/* { dg-options "-static-libasan" } */
#include <time.h>
diff --git a/gcc/testsuite/c-c++-common/gomp/pr59467.c b/gcc/testsuite/c-c++-common/gomp/pr59467.c
new file mode 100644
index 00000000000..475182a6236
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/gomp/pr59467.c
@@ -0,0 +1,68 @@
+/* PR libgomp/59467 */
+
+int v;
+
+void
+foo (void)
+{
+ int x = 0, y = 0;
+ #pragma omp parallel
+ {
+ int z;
+ #pragma omp single copyprivate (x) /* { dg-error "is not threadprivate or private in outer context" } */
+ {
+ #pragma omp atomic write
+ x = 6;
+ }
+ #pragma omp atomic read
+ z = x;
+ #pragma omp atomic
+ y += z;
+ }
+ #pragma omp parallel
+ {
+ int z;
+ #pragma omp single copyprivate (v) /* { dg-error "is not threadprivate or private in outer context" } */
+ {
+ #pragma omp atomic write
+ v = 6;
+ }
+ #pragma omp atomic read
+ z = v;
+ #pragma omp atomic
+ y += z;
+ }
+ #pragma omp parallel private (x)
+ {
+ int z;
+ #pragma omp single copyprivate (x)
+ {
+ #pragma omp atomic write
+ x = 6;
+ }
+ #pragma omp atomic read
+ z = x;
+ #pragma omp atomic
+ y += z;
+ }
+ x = 0;
+ #pragma omp parallel reduction (+:x)
+ {
+ #pragma omp single copyprivate (x)
+ {
+ #pragma omp atomic write
+ x = 6;
+ }
+ #pragma omp atomic
+ y += x;
+ }
+ #pragma omp single copyprivate (x)
+ {
+ x = 7;
+ }
+ #pragma omp single copyprivate (v) /* { dg-error "is not threadprivate or private in outer context" } */
+ {
+ #pragma omp atomic write
+ v = 6;
+ }
+}
diff --git a/gcc/testsuite/c-c++-common/tsan/atomic_stack.c b/gcc/testsuite/c-c++-common/tsan/atomic_stack.c
new file mode 100644
index 00000000000..eac71b8793d
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/tsan/atomic_stack.c
@@ -0,0 +1,32 @@
+/* { dg-do run } */
+/* { dg-shouldfail "tsan" } */
+
+#include <pthread.h>
+#include <unistd.h>
+
+int Global;
+
+void *Thread1(void *x) {
+ sleep(1);
+ __atomic_fetch_add(&Global, 1, __ATOMIC_RELAXED);
+ return NULL;
+}
+
+void *Thread2(void *x) {
+ Global++;
+ return NULL;
+}
+
+int main() {
+ pthread_t t[2];
+ pthread_create(&t[0], NULL, Thread1, NULL);
+ pthread_create(&t[1], NULL, Thread2, NULL);
+ pthread_join(t[0], NULL);
+ pthread_join(t[1], NULL);
+ return 0;
+}
+
+/* { dg-output "WARNING: ThreadSanitizer: data race.*(\n|\r\n|\r)" } */
+/* { dg-output " Atomic write of size 4.*" } */
+/* { dg-output " #0 __tsan_atomic32_fetch_add.*" } */
+/* { dg-output " #1 Thread1.*" } */
diff --git a/gcc/testsuite/c-c++-common/tsan/fd_pipe_race.c b/gcc/testsuite/c-c++-common/tsan/fd_pipe_race.c
new file mode 100644
index 00000000000..fc76cbf5ffd
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/tsan/fd_pipe_race.c
@@ -0,0 +1,37 @@
+/* { dg-do run } */
+/* { dg-shouldfail "tsan" } */
+
+#include <pthread.h>
+#include <stdio.h>
+#include <unistd.h>
+
+int fds[2];
+
+void *Thread1(void *x) {
+ write(fds[1], "a", 1);
+ return NULL;
+}
+
+void *Thread2(void *x) {
+ sleep(1);
+ close(fds[0]);
+ close(fds[1]);
+ return NULL;
+}
+
+int main() {
+ pipe(fds);
+ pthread_t t[2];
+ pthread_create(&t[0], NULL, Thread1, NULL);
+ pthread_create(&t[1], NULL, Thread2, NULL);
+ pthread_join(t[0], NULL);
+ pthread_join(t[1], NULL);
+}
+
+/* { dg-output "WARNING: ThreadSanitizer: data race.*\n" } */
+/* { dg-output " Write of size 8.*\n" } */
+/* { dg-output " #0 close.*\n" } */
+/* { dg-output " #1 Thread2.*\n" } */
+/* { dg-output " Previous read of size 8.*\n" } */
+/* { dg-output " #0 write.*\n" } */
+/* { dg-output " #1 Thread1.*\n" } */
diff --git a/gcc/testsuite/c-c++-common/tsan/free_race.c b/gcc/testsuite/c-c++-common/tsan/free_race.c
new file mode 100644
index 00000000000..362c92bfbeb
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/tsan/free_race.c
@@ -0,0 +1,28 @@
+/* { dg-do run } */
+/* { dg-shouldfail "tsan" } */
+
+#include <stdlib.h>
+
+void __attribute__((noinline)) foo(int *mem) {
+ free(mem);
+}
+
+void __attribute__((noinline)) bar(int *mem) {
+ mem[0] = 42;
+}
+
+int main() {
+ int *mem =(int*)malloc (100);
+ foo(mem);
+ bar(mem);
+ return 0;
+}
+
+/* { dg-output "WARNING: ThreadSanitizer: heap-use-after-free.*(\n|\r\n|\r)" } */
+/* { dg-output " Write of size 4 at.* by main thread:(\n|\r\n|\r)" } */
+/* { dg-output " #0 bar.*(\n|\r\n|\r)" } */
+/* { dg-output " #1 main.*(\n|\r\n|\r)" } */
+/* { dg-output " Previous write of size 8 at.* by main thread:(\n|\r\n|\r)" } */
+/* { dg-output " #0 free.*(\n|\r\n|\r)" } */
+/* { dg-output " #\(1|2\) foo.*(\n|\r\n|\r)" } */
+/* { dg-output " #\(2|3\) main.*(\n|\r\n|\r)" } */
diff --git a/gcc/testsuite/c-c++-common/tsan/free_race2.c b/gcc/testsuite/c-c++-common/tsan/free_race2.c
new file mode 100644
index 00000000000..3c15d2d20f5
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/tsan/free_race2.c
@@ -0,0 +1,29 @@
+/* { dg-do run } */
+/* { dg-shouldfail "tsan" } */
+
+#include <stdlib.h>
+
+void __attribute__((noinline)) foo(int *mem) {
+ free(mem);
+}
+
+void __attribute__((noinline)) bar(int *mem) {
+ mem[0] = 42;
+}
+
+int main() {
+ int *mem = (int*)malloc(100);
+ foo(mem);
+ bar(mem);
+ return 0;
+}
+
+/* { dg-output "WARNING: ThreadSanitizer: heap-use-after-free.*(\n|\r\n|\r)" } */
+/* { dg-output " Write of size 4.* by main thread:(\n|\r\n|\r)" } */
+/* { dg-output " #0 bar.*" } */
+/* { dg-output " #1 main .*" } */
+/* { dg-output " Previous write of size 8 at .* by main thread:(\n|\r\n|\r)" } */
+/* { dg-output " #0 free .*" } */
+/* { dg-output " #\(1|2\) foo.*(\n|\r\n|\r)" } */
+/* { dg-output " #\(2|3\) main .*" } */
+
diff --git a/gcc/testsuite/c-c++-common/tsan/mutexset1.c b/gcc/testsuite/c-c++-common/tsan/mutexset1.c
new file mode 100644
index 00000000000..783f262d5ed
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/tsan/mutexset1.c
@@ -0,0 +1,41 @@
+/* { dg-do run } */
+/* { dg-shouldfail "tsan" } */
+
+#include <pthread.h>
+#include <stdio.h>
+#include <unistd.h>
+
+int Global;
+pthread_mutex_t mtx;
+
+void *Thread1(void *x) {
+ sleep(1);
+ pthread_mutex_lock(&mtx);
+ Global++;
+ pthread_mutex_unlock(&mtx);
+ return NULL;
+}
+
+void *Thread2(void *x) {
+ Global--;
+ return NULL;/* { dg-output ".*" } */
+
+}
+
+int main() {
+ pthread_mutex_init(&mtx, 0);
+ pthread_t t[2];
+ pthread_create(&t[0], NULL, Thread1, NULL);
+ pthread_create(&t[1], NULL, Thread2, NULL);
+ pthread_join(t[0], NULL);
+ pthread_join(t[1], NULL);
+ pthread_mutex_destroy(&mtx);
+ return 0;
+}
+
+/* { dg-output "WARNING: ThreadSanitizer: data race.*(\n|\r\n|\r)" } */
+/* { dg-output " Read of size 4 at 0x\[0-9a-f\]+ by thread T1 \\(mutexes: write M\[0-9\]\\):.*" } */
+/* { dg-output " Previous write of size 4 at 0x\[0-9a-f\]+ by thread T2:.*" } */
+/* { dg-output " Mutex M\[0-9\] created at:.*" } */
+/* { dg-output " #0 pthread_mutex_init.*" } */
+/* { dg-output " #1 main (.*mutexset1.c|\\?{2}):\[0-9]+.*" } */
diff --git a/gcc/testsuite/c-c++-common/tsan/race_on_barrier.c b/gcc/testsuite/c-c++-common/tsan/race_on_barrier.c
new file mode 100644
index 00000000000..407c7129272
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/tsan/race_on_barrier.c
@@ -0,0 +1,33 @@
+/* { dg-do run } */
+/* { dg-shouldfail "tsan" } */
+
+#include <pthread.h>
+#include <stdio.h>
+#include <stddef.h>
+#include <unistd.h>
+
+pthread_barrier_t B;
+int Global;
+
+void *Thread1(void *x) {
+ pthread_barrier_init(&B, 0, 2);
+ pthread_barrier_wait(&B);
+ return NULL;
+}
+
+void *Thread2(void *x) {
+ sleep(1);
+ pthread_barrier_wait(&B);
+ return NULL;
+}
+
+int main() {
+ pthread_t t;
+ pthread_create(&t, NULL, Thread1, NULL);
+ Thread2(0);
+ pthread_join(t, NULL);
+ pthread_barrier_destroy(&B);
+ return 0;
+}
+
+/* { dg-output "WARNING: ThreadSanitizer: data race.*(\n|\r\n|\r)" } */
diff --git a/gcc/testsuite/c-c++-common/tsan/race_on_barrier2.c b/gcc/testsuite/c-c++-common/tsan/race_on_barrier2.c
new file mode 100644
index 00000000000..9576c672e1b
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/tsan/race_on_barrier2.c
@@ -0,0 +1,33 @@
+/* { dg-do run } */
+/* { dg-shouldfail "tsan" } */
+
+#include <pthread.h>
+#include <stdio.h>
+#include <stddef.h>
+#include <unistd.h>
+
+pthread_barrier_t B;
+int Global;
+
+void *Thread1(void *x) {
+ if (pthread_barrier_wait(&B) == PTHREAD_BARRIER_SERIAL_THREAD)
+ pthread_barrier_destroy(&B);
+ return NULL;
+}
+
+void *Thread2(void *x) {
+ if (pthread_barrier_wait(&B) == PTHREAD_BARRIER_SERIAL_THREAD)
+ pthread_barrier_destroy(&B);
+ return NULL;
+}
+
+int main() {
+ pthread_barrier_init(&B, 0, 2);
+ pthread_t t;
+ pthread_create(&t, NULL, Thread1, NULL);
+ Thread2(0);
+ pthread_join(t, NULL);
+ return 0;
+}
+
+/* { dg-output "WARNING: ThreadSanitizer: data race.*(\n|\r\n|\r)" } */
diff --git a/gcc/testsuite/c-c++-common/tsan/race_on_mutex.c b/gcc/testsuite/c-c++-common/tsan/race_on_mutex.c
new file mode 100644
index 00000000000..f112d097de4
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/tsan/race_on_mutex.c
@@ -0,0 +1,44 @@
+/* { dg-do run } */
+/* { dg-shouldfail "tsan" } */
+
+#include <pthread.h>
+#include <stdio.h>
+#include <stddef.h>
+#include <unistd.h>
+
+pthread_mutex_t Mtx;
+int Global;
+
+void *Thread1(void *x) {
+ pthread_mutex_init(&Mtx, 0);
+ pthread_mutex_lock(&Mtx);
+ Global = 42;
+ pthread_mutex_unlock(&Mtx);
+ return NULL;
+}
+
+void *Thread2(void *x) {
+ sleep(1);
+ pthread_mutex_lock(&Mtx);
+ Global = 43;
+ pthread_mutex_unlock(&Mtx);
+ return NULL;
+}
+
+int main() {
+ pthread_t t[2];
+ pthread_create(&t[0], NULL, Thread1, NULL);
+ pthread_create(&t[1], NULL, Thread2, NULL);
+ pthread_join(t[0], NULL);
+ pthread_join(t[1], NULL);
+ pthread_mutex_destroy(&Mtx);
+ return 0;
+}
+
+/* { dg-output "WARNING: ThreadSanitizer: data race.*(\n|\r\n|\r)" } */
+/* { dg-output " Atomic read of size 1 at .* by thread T2:(\n|\r\n|\r)" } */
+/* { dg-output " #0 pthread_mutex_lock.*" } */
+/* { dg-output " #1 Thread2.* .*(race_on_mutex.c:22|\\?{2}:0) (.*)" } */
+/* { dg-output " Previous write of size 1 at .* by thread T1:(\n|\r\n|\r)" } */
+/* { dg-output " #0 pthread_mutex_init .* (.)*" } */
+/* { dg-output " #1 Thread1.* .*(race_on_mutex.c:13|\\?{2}:0) .*" } */
diff --git a/gcc/testsuite/c-c++-common/tsan/race_on_mutex2.c b/gcc/testsuite/c-c++-common/tsan/race_on_mutex2.c
new file mode 100644
index 00000000000..d8a69801ed0
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/tsan/race_on_mutex2.c
@@ -0,0 +1,26 @@
+/* { dg-do run } */
+/* { dg-shouldfail "tsan" } */
+
+#include <pthread.h>
+#include <stdio.h>
+#include <stddef.h>
+#include <unistd.h>
+
+void *Thread(void *x) {
+ pthread_mutex_lock((pthread_mutex_t*)x);
+ pthread_mutex_unlock((pthread_mutex_t*)x);
+ return 0;
+}
+
+int main() {
+ pthread_mutex_t Mtx;
+ pthread_mutex_init(&Mtx, 0);
+ pthread_t t;
+ pthread_create(&t, 0, Thread, &Mtx);
+ sleep(1);
+ pthread_mutex_destroy(&Mtx);
+ pthread_join(t, 0);
+ return 0;
+}
+
+/* { dg-output "WARNING: ThreadSanitizer: data race.*(\n|\r\n|\r)" } */
diff --git a/gcc/testsuite/c-c++-common/tsan/simple_race.c b/gcc/testsuite/c-c++-common/tsan/simple_race.c
new file mode 100644
index 00000000000..24b88e8e5ee
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/tsan/simple_race.c
@@ -0,0 +1,28 @@
+/* { dg-do run } */
+/* { dg-shouldfail "tsan" } */
+
+#include <pthread.h>
+#include <stdio.h>
+
+int Global;
+
+void *Thread1(void *x) {
+ Global = 42;
+ return NULL;
+}
+
+void *Thread2(void *x) {
+ Global = 43;
+ return NULL;
+}
+
+int main() {
+ pthread_t t[2];
+ pthread_create(&t[0], NULL, Thread1, NULL);
+ pthread_create(&t[1], NULL, Thread2, NULL);
+ pthread_join(t[0], NULL);
+ pthread_join(t[1], NULL);
+ return 0;
+}
+
+/* { dg-output "WARNING: ThreadSanitizer: data race.*(\n|\r\n|\r)" } */
diff --git a/gcc/testsuite/c-c++-common/tsan/simple_stack.c b/gcc/testsuite/c-c++-common/tsan/simple_stack.c
new file mode 100644
index 00000000000..e92d010a59b
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/tsan/simple_stack.c
@@ -0,0 +1,66 @@
+/* { dg-do run } */
+/* { dg-shouldfail "tsan" } */
+
+#include <pthread.h>
+#include <stdio.h>
+#include <unistd.h>
+
+int Global;
+
+void __attribute__((noinline)) foo1() {
+ Global = 42;
+}
+
+void __attribute__((noinline)) bar1() {
+ volatile int tmp = 42; (void)tmp;
+ foo1();
+}
+
+void __attribute__((noinline)) foo2() {
+ volatile int v = Global; (void)v;
+}
+
+void __attribute__((noinline)) bar2() {
+ volatile int tmp = 42; (void)tmp;
+ foo2();
+}
+
+void *Thread1(void *x) {
+ sleep(1);
+ bar1();
+ return NULL;
+}
+
+void *Thread2(void *x) {
+ bar2();
+ return NULL;
+}
+
+void StartThread(pthread_t *t, void *(*f)(void*)) {
+ pthread_create(t, NULL, f, NULL);
+}
+
+int main() {
+ pthread_t t[2];
+ StartThread(&t[0], Thread1);
+ StartThread(&t[1], Thread2);
+ pthread_join(t[0], NULL);
+ pthread_join(t[1], NULL);
+ return 0;
+}
+
+/* { dg-output "WARNING: ThreadSanitizer: data race.*" } */
+/* { dg-output " Write of size 4 at .* by thread T1:(\n|\r\n|\r)" } */
+/* { dg-output " #0 foo1.* .*(simple_stack.c:11|\\?{2}:0) (.*)" } */
+/* { dg-output " #1 bar1.* .*(simple_stack.c:16|\\?{2}:0) (.*)" } */
+/* { dg-output " #2 Thread1.* .*(simple_stack.c:30|\\?{2}:0) (.*)" } */
+/* { dg-output " Previous read of size 4 at .* by thread T2:(\n|\r\n|\r)" } */
+/* { dg-output " #0 foo2.* .*(simple_stack.c:20|\\?{2}:0) (.*)" } */
+/* { dg-output " #1 bar2.* .*(simple_stack.c:25|\\?{2}:0) (.*)" } */
+/* { dg-output " #2 Thread2.* .*(simple_stack.c:35|\\?{2}:0) (.*)" } */
+/* { dg-output " Thread T1 \\(tid=.*, running\\) created by main thread at:(\n|\r\n|\r)" } */
+/* { dg-output " #0 pthread_create .* (.*)" } */
+/* { dg-output " #1 StartThread.* .*(simple_stack.c:40|\\?{2}:0) (.*)" } */
+/* { dg-output " Thread T2 (.*) created by main thread at:(\n|\r\n|\r)" } */
+/* { dg-output " #0 pthread_create .* (.*)" } */
+/* { dg-output " #1 StartThread.* .*(simple_stack.c:40|\\?{2}:0) (.*)" } */
diff --git a/gcc/testsuite/c-c++-common/tsan/sleep_sync.c b/gcc/testsuite/c-c++-common/tsan/sleep_sync.c
new file mode 100644
index 00000000000..8203d54247e
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/tsan/sleep_sync.c
@@ -0,0 +1,31 @@
+/* { dg-do run } */
+/* { dg-shouldfail "tsan" } */
+
+#include <pthread.h>
+#include <unistd.h>
+
+int X = 0;
+
+void MySleep() {
+ sleep(1);
+}
+
+void *Thread(void *p) {
+ MySleep(); // Assume the main thread has done the write.
+ X = 42;
+ return 0;
+}
+
+int main() {
+ pthread_t t;
+ pthread_create(&t, 0, Thread, 0);
+ X = 43;
+ pthread_join(t, 0);
+ return 0;
+}
+
+/* { dg-output "WARNING: ThreadSanitizer: data race.*(\n|\r\n|\r).*" } */
+/* { dg-output "  As if synchronized via sleep:(\n|\r\n|\r).*" } */
+/* { dg-output "    #0 sleep.*" } */
+/* { dg-output "    #1 MySleep.*" } */
+/* { dg-output "    #2 Thread.*" } */
diff --git a/gcc/testsuite/c-c++-common/tsan/thread_leak.c b/gcc/testsuite/c-c++-common/tsan/thread_leak.c
new file mode 100644
index 00000000000..416ef776006
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/tsan/thread_leak.c
@@ -0,0 +1,18 @@
+/* { dg-do run } */
+
+#include <pthread.h>
+#include <stdio.h>
+
+void *Thread(void *x) {
+ return 0;
+}
+
+int main() {
+ pthread_t t;
+ pthread_create(&t, 0, Thread, 0);
+ pthread_join(t, 0);
+ printf("PASS\n");
+ return 0;
+}
+
+/* { dg-prune-output "WARNING: ThreadSanitizer: thread leak.*" } */
diff --git a/gcc/testsuite/c-c++-common/tsan/thread_leak1.c b/gcc/testsuite/c-c++-common/tsan/thread_leak1.c
new file mode 100644
index 00000000000..18bcf2aa68f
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/tsan/thread_leak1.c
@@ -0,0 +1,19 @@
+/* { dg-do run } */
+/* { dg-shouldfail "tsan" } */
+
+#include <pthread.h>
+#include <unistd.h>
+
+void *Thread(void *x) {
+ return 0;
+}
+
+int main() {
+ pthread_t t;
+ pthread_create(&t, 0, Thread, 0);
+ sleep(1);
+ return 0;
+}
+
+/* { dg-output "WARNING: ThreadSanitizer: thread leak.*(\n|\r\n|\r)" } */
+/* { dg-output "SUMMARY: ThreadSanitizer: thread leak.*main.*(\n|\r\n|\r)" } */
diff --git a/gcc/testsuite/c-c++-common/tsan/thread_leak2.c b/gcc/testsuite/c-c++-common/tsan/thread_leak2.c
new file mode 100644
index 00000000000..d6f4e220025
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/tsan/thread_leak2.c
@@ -0,0 +1,22 @@
+/* { dg-do run } */
+/* { dg-shouldfail "tsan" } */
+
+#include <pthread.h>
+#include <unistd.h>
+
+void *Thread(void *x) {
+ return 0;
+}
+
+int main() {
+ int i;
+ for (i = 0; i < 5; i++) {
+ pthread_t t;
+ pthread_create(&t, 0, Thread, 0);
+ }
+ sleep(1);
+ return 0;
+}
+
+/* { dg-output "WARNING: ThreadSanitizer: thread leak.*(\n|\r\n|\r)" } */
+/* { dg-output " And 4 more similar thread leaks.*" } */
diff --git a/gcc/testsuite/c-c++-common/tsan/tiny_race.c b/gcc/testsuite/c-c++-common/tsan/tiny_race.c
new file mode 100644
index 00000000000..03561832d05
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/tsan/tiny_race.c
@@ -0,0 +1,23 @@
+/* { dg-do run } */
+/* { dg-shouldfail "tsan" } */
+
+#include <pthread.h>
+#include <unistd.h>
+
+int Global;
+
+void *Thread1(void *x) {
+ sleep(1);
+ Global = 42;
+ return x;
+}
+
+int main() {
+ pthread_t t;
+ pthread_create(&t, 0, Thread1, 0);
+ Global = 43;
+ pthread_join(t, 0);
+ return Global;
+}
+
+/* { dg-output "WARNING: ThreadSanitizer: data race.*(\n|\r\n|\r)" } */
diff --git a/gcc/testsuite/c-c++-common/tsan/tls_race.c b/gcc/testsuite/c-c++-common/tsan/tls_race.c
new file mode 100644
index 00000000000..041e9af9884
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/tsan/tls_race.c
@@ -0,0 +1,21 @@
+/* { dg-do run } */
+/* { dg-shouldfail "tsan" } */
+
+#include <pthread.h>
+#include <stddef.h>
+
+void *Thread(void *a) {
+ *(int*)a = 43;
+ return 0;
+}
+
+int main() {
+ static __thread int Var = 42;
+ pthread_t t;
+ pthread_create(&t, 0, Thread, &Var);
+ Var = 43;
+ pthread_join(t, 0);
+}
+
+/* { dg-output "WARNING: ThreadSanitizer: data race.*(\n|\r\n|\r).*" } */
+/* { dg-output " Location is TLS of main thread.(\n|\r\n|\r).*" } */
diff --git a/gcc/testsuite/c-c++-common/tsan/write_in_reader_lock.c b/gcc/testsuite/c-c++-common/tsan/write_in_reader_lock.c
new file mode 100644
index 00000000000..c6a0beee23f
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/tsan/write_in_reader_lock.c
@@ -0,0 +1,37 @@
+/* { dg-do run } */
+/* { dg-shouldfail "tsan" } */
+
+#include <pthread.h>
+#include <unistd.h>
+
+pthread_rwlock_t rwlock;
+int GLOB;
+
+void *Thread1(void *p) {
+ (void)p;
+ pthread_rwlock_rdlock(&rwlock);
+ // Write under reader lock.
+ sleep(1);
+ GLOB++;
+ pthread_rwlock_unlock(&rwlock);
+ return 0;
+}
+
+int main(int argc, char *argv[]) {
+ pthread_rwlock_init(&rwlock, NULL);
+ pthread_rwlock_rdlock(&rwlock);
+ pthread_t t;
+ pthread_create(&t, 0, Thread1, 0);
+ volatile int x = GLOB;
+ (void)x;
+ pthread_rwlock_unlock(&rwlock);
+ pthread_join(t, 0);
+ pthread_rwlock_destroy(&rwlock);
+ return 0;
+}
+
+/* { dg-output "WARNING: ThreadSanitizer: data race.*(\n|\r\n|\r)" } */
+/* { dg-output " Write of size 4 at 0x\[0-9a-f\]+ by thread T1.*:(\n|\r\n|\r).*" } */
+/* { dg-output " #0 Thread1.*\(write_in_reader_lock.c|\\?{2}\):\[0-9\]+ .*" } */
+/* { dg-output " Previous read of size 4 at.* by main thread.*:(\n|\r\n|\r).*" } */
+/* { dg-output " #0 main.*\(write_in_reader_lock.c|\\?{2}\):\[0-9\]+.*" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/overflow-add-1.c b/gcc/testsuite/c-c++-common/ubsan/overflow-add-1.c
new file mode 100644
index 00000000000..436082d21d9
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/overflow-add-1.c
@@ -0,0 +1,61 @@
+/* { dg-do run } */
+/* { dg-options "-fsanitize=signed-integer-overflow -Wno-unused-variable" } */
+/* { dg-skip-if "" { *-*-* } { "-flto" } { "" } } */
+
+#define SCHAR_MAX __SCHAR_MAX__
+#define SHRT_MAX __SHRT_MAX__
+#define INT_MAX __INT_MAX__
+#define INT_MIN (-__INT_MAX__ - 1)
+
+void __attribute__((noinline,noclone))
+check (int i, int j)
+{
+ if (i != j)
+ __builtin_abort ();
+}
+
+int
+main (void)
+{
+#if __INT_MAX__ == 2147483647
+ /* Here, nothing should fail. */
+ volatile int j = INT_MAX;
+ volatile int i = -1;
+ volatile int k = j + i;
+ check (k, 2147483646);
+ k = i + j;
+ check (k, 2147483646);
+ j--;
+ check (j, 2147483646);
+
+ i = 1;
+ j = INT_MIN;
+ k = i + j;
+ check (k, -2147483647);
+ k = j + i;
+ check (k, -2147483647);
+ j++;
+ check (j, -2147483647);
+#endif
+
+ /* Test integer promotion. */
+#if __SCHAR_MAX__ == 127
+ volatile signed char a = SCHAR_MAX;
+ volatile signed char b = 1;
+ volatile signed char c = a + b;
+ check (c, -128);
+ a++;
+ check (a, -128);
+#endif
+
+#if __SHRT_MAX__ == 32767
+ volatile short d = SHRT_MAX;
+ volatile short e = 1;
+ volatile short f = d + e;
+ check (f, -32768);
+ d++;
+ check (d, -32768);
+#endif
+
+ return 0;
+}
diff --git a/gcc/testsuite/c-c++-common/ubsan/overflow-add-2.c b/gcc/testsuite/c-c++-common/ubsan/overflow-add-2.c
new file mode 100644
index 00000000000..de2cd2d0f88
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/overflow-add-2.c
@@ -0,0 +1,61 @@
+/* { dg-do run } */
+/* { dg-options "-fsanitize=signed-integer-overflow -Wno-unused-variable" } */
+/* { dg-skip-if "" { *-*-* } { "-flto" } { "" } } */
+
+#define INT_MAX __INT_MAX__
+#define INT_MIN (-__INT_MAX__ - 1)
+#define LONG_MAX __LONG_MAX__
+#define LONG_MIN (-__LONG_MAX__ - 1L)
+#define LLONG_MAX __LONG_LONG_MAX__
+#define LLONG_MIN (-__LONG_LONG_MAX__ - 1L)
+
+int
+main (void)
+{
+ volatile int j = INT_MAX;
+ volatile int i = 1;
+ volatile int k = j + i;
+ k = i + j;
+ j++;
+ j = INT_MAX - 100;
+ j += (1 << 10);
+
+ j = INT_MIN;
+ i = -1;
+ k = i + j;
+ k = j + i;
+ j = INT_MIN + 100;
+ j += -(1 << 10);
+
+ volatile long int m = LONG_MAX;
+ volatile long int n = 1;
+ volatile long int o = m + n;
+ o = n + m;
+ m++;
+ m = LONG_MAX - 100;
+ m += (1 << 10);
+
+ m = LONG_MIN;
+ n = -1;
+ o = m + n;
+ o = n + m;
+ m = LONG_MIN + 100;
+ m += -(1 << 10);
+
+ return 0;
+}
+
+/* { dg-output "signed integer overflow: 2147483647 \\+ 1 cannot be represented in type 'int'(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: 1 \\+ 2147483647 cannot be represented in type 'int'(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: 2147483647 \\+ 1 cannot be represented in type 'int'(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: 2147483547 \\+ 1024 cannot be represented in type 'int'(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: -1 \\+ -2147483648 cannot be represented in type 'int'(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: -2147483648 \\+ -1 cannot be represented in type 'int'(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: -2147483548 \\+ -1024 cannot be represented in type 'int'(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: \[^\n\r]* \\+ 1 cannot be represented in type 'long int'(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: 1 \\+ \[^\n\r]* cannot be represented in type 'long int'(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: \[^\n\r]* \\+ 1 cannot be represented in type 'long int'(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: \[^\n\r]* \\+ 1024 cannot be represented in type 'long int'(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: -\[^\n\r]* \\+ -1 cannot be represented in type 'long int'(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: -1 \\+ -\[^\n\r]* cannot be represented in type 'long int'(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: -\[^\n\r]* \\+ -1024 cannot be represented in type 'long int'(\n|\r\n|\r)" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/overflow-mul-1.c b/gcc/testsuite/c-c++-common/ubsan/overflow-mul-1.c
new file mode 100644
index 00000000000..0f2ea59df49
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/overflow-mul-1.c
@@ -0,0 +1,47 @@
+/* { dg-do run } */
+/* { dg-options "-fsanitize=signed-integer-overflow -Wno-unused-variable" } */
+/* { dg-skip-if "" { *-*-* } { "-flto" } { "" } } */
+
+#define SCHAR_MAX __SCHAR_MAX__
+#define SHRT_MAX __SHRT_MAX__
+#define INT_MAX __INT_MAX__
+#define INT_MIN (-__INT_MAX__ - 1)
+
+void __attribute__((noinline,noclone))
+check (int i, int j)
+{
+ if (i != j)
+ __builtin_abort ();
+}
+
+int
+main (void)
+{
+ /* Test integer promotion. */
+#if __SCHAR_MAX__ == 127
+ volatile signed char a = -2;
+ volatile signed char b = SCHAR_MAX;
+ volatile signed char c = a * b;
+ check (c, 2);
+#endif
+
+#if __SHRT_MAX__ == 32767
+ volatile short d = SHRT_MAX;
+ volatile short e = 2;
+ volatile short f = d * e;
+ check (f, -2);
+#endif
+
+#if __INT_MAX__ == 2147483647
+ volatile int m = INT_MAX;
+ volatile int n = 1;
+ volatile int o = m * n;
+ check (o, INT_MAX);
+
+ m = INT_MIN;
+ o = m * n;
+ check (o, INT_MIN);
+#endif
+
+ return 0;
+}
diff --git a/gcc/testsuite/c-c++-common/ubsan/overflow-mul-2.c b/gcc/testsuite/c-c++-common/ubsan/overflow-mul-2.c
new file mode 100644
index 00000000000..adcbfe1a761
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/overflow-mul-2.c
@@ -0,0 +1,27 @@
+/* { dg-do run } */
+/* { dg-options "-fsanitize=signed-integer-overflow -Wno-unused-variable" } */
+/* { dg-skip-if "" { *-*-* } { "-flto" } { "" } } */
+
+#define INT_MAX __INT_MAX__
+#define LONG_MAX __LONG_MAX__
+
+int
+main (void)
+{
+ volatile int j = INT_MAX;
+ volatile int i = 2;
+ volatile int k = j * i;
+ k = i * j;
+
+ volatile long int m = LONG_MAX;
+ volatile long int n = 2;
+ volatile long int o = m * n;
+ o = n * m;
+
+ return 0;
+}
+
+/* { dg-output "signed integer overflow: 2147483647 \\* 2 cannot be represented in type 'int'(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: 2 \\* 2147483647 cannot be represented in type 'int'(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: \[^\n\r]* \\* 2 cannot be represented in type 'long int'(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: 2 \\* \[^\n\r]* cannot be represented in type 'long int'(\n|\r\n|\r)" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/overflow-negate-1.c b/gcc/testsuite/c-c++-common/ubsan/overflow-negate-1.c
new file mode 100644
index 00000000000..9baada41007
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/overflow-negate-1.c
@@ -0,0 +1,14 @@
+/* { dg-do run { target int128 } } */
+/* { dg-options "-fsanitize=signed-integer-overflow -Wno-unused-variable" } */
+/* { dg-skip-if "" { *-*-* } { "-flto" } { "" } } */
+
+#define INT_MIN (-__INT_MAX__ - 1)
+
+int
+main (void)
+{
+ int j = INT_MIN;
+ return -j;
+}
+
+/* { dg-output "negation of -2147483648 cannot be represented in type 'int'; cast to an unsigned type to negate this value to itself(\n|\r\n|\r)" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/overflow-sub-1.c b/gcc/testsuite/c-c++-common/ubsan/overflow-sub-1.c
new file mode 100644
index 00000000000..3b955279ba7
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/overflow-sub-1.c
@@ -0,0 +1,63 @@
+/* { dg-do run } */
+/* { dg-options "-fsanitize=signed-integer-overflow -Wno-unused-variable" } */
+/* { dg-skip-if "" { *-*-* } { "-flto" } { "" } } */
+
+#define SCHAR_MAX __SCHAR_MAX__
+#define SCHAR_MIN (-__SCHAR_MAX__ - 1)
+#define SHRT_MAX __SHRT_MAX__
+#define SHRT_MIN (-__SHRT_MAX__ - 1)
+#define INT_MAX __INT_MAX__
+#define INT_MIN (-__INT_MAX__ - 1)
+
+void __attribute__((noinline,noclone))
+check (int i, int j)
+{
+ if (i != j)
+ __builtin_abort ();
+}
+
+int
+main (void)
+{
+#if __INT_MAX__ == 2147483647
+ /* Here, nothing should fail. */
+ volatile int i = -1;
+ volatile int j = INT_MIN;
+ volatile int k = j - i;
+ check (k, -2147483647);
+ k = i - j;
+ check (k, 2147483647);
+ j++;
+ check (j, -2147483647);
+
+ i = 1;
+ j = INT_MAX;
+ k = i - j;
+ check (k, -2147483646);
+ k = j - i;
+ check (k, 2147483646);
+ j--;
+ check (k, 2147483646);
+#endif
+
+ /* Test integer promotion. */
+#if __SCHAR_MAX__ == 127
+ volatile signed char a = SCHAR_MIN;
+ volatile signed char b = 1;
+ volatile signed char c = a - b;
+ check (c, 127);
+ a--;
+ check (a, 127);
+#endif
+
+#if __SHRT_MAX__ == 32767
+ volatile short d = SHRT_MIN;
+ volatile short e = 1;
+ volatile short f = d - e;
+ check (f, 32767);
+ d--;
+ check (d, 32767);
+#endif
+
+ return 0;
+}
diff --git a/gcc/testsuite/c-c++-common/ubsan/overflow-sub-2.c b/gcc/testsuite/c-c++-common/ubsan/overflow-sub-2.c
new file mode 100644
index 00000000000..e06e3f6e891
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/overflow-sub-2.c
@@ -0,0 +1,55 @@
+/* { dg-do run } */
+/* { dg-options "-fsanitize=signed-integer-overflow -Wno-unused-variable" } */
+/* { dg-skip-if "" { *-*-* } { "-flto" } { "" } } */
+
+#define INT_MAX __INT_MAX__
+#define INT_MIN (-__INT_MAX__ - 1)
+#define LONG_MAX __LONG_MAX__
+#define LONG_MIN (-__LONG_MAX__ - 1L)
+#define LLONG_MAX __LONG_LONG_MAX__
+#define LLONG_MIN (-__LONG_LONG_MAX__ - 1L)
+
+int
+main (void)
+{
+ volatile int j = INT_MIN;
+ volatile int i = 1;
+ volatile int k = j - i;
+ j--;
+ j = INT_MIN + 100;
+ j -= (1 << 10);
+
+ j = INT_MIN;
+ i = -1;
+ k = j - -i;
+
+ i = INT_MIN + 1000;
+ i -= (1 << 20);
+
+ volatile long int l = LONG_MIN;
+ volatile long int m = 1;
+ volatile long int n = l - m;
+ l--;
+ l = LONG_MIN + 100;
+ l -= (1 << 10);
+
+ l = LONG_MIN;
+ m = -1;
+ n = l - -m;
+
+ m = LONG_MIN + 1000;
+ m -= (1 << 20);
+
+ return 0;
+}
+
+/* { dg-output "signed integer overflow: -2147483648 - 1 cannot be represented in type 'int'(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: -2147483648 \\+ -1 cannot be represented in type 'int'(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: -2147483548 \\+ -1024 cannot be represented in type 'int'(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: -2147483648 \\+ -1 cannot be represented in type 'int'(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: -2147482648 \\+ -1048576 cannot be represented in type 'int'(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: -\[^\n\r]* - 1 cannot be represented in type 'long int'(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: -\[^\n\r]* \\+ -1 cannot be represented in type 'long int'(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: -\[^\n\r]* \\+ -1024 cannot be represented in type 'long int'(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: -\[^\n\r]* \\+ -1 cannot be represented in type 'long int'(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: -\[^\n\r]* \\+ -1048576 cannot be represented in type 'long int'(\n|\r\n|\r)" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/pr59333.c b/gcc/testsuite/c-c++-common/ubsan/pr59333.c
new file mode 100644
index 00000000000..af539204960
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/pr59333.c
@@ -0,0 +1,19 @@
+/* { dg-do run } */
+/* { dg-options "-fsanitize=undefined" } */
+/* { dg-skip-if "" { *-*-* } { "-flto" } { "" } } */
+
+long long int __attribute__ ((noinline, noclone))
+foo (long long int i, long long int j)
+{
+ asm ("");
+ return i + j;
+}
+
+int
+main (void)
+{
+ foo (2LL, __LONG_LONG_MAX__);
+ return 0;
+}
+
+/* { dg-output "signed integer overflow: 2 \\+ 9223372036854775807 cannot be represented in type 'long long int'(\n|\r\n|\r)" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/pr59397.c b/gcc/testsuite/c-c++-common/ubsan/pr59397.c
new file mode 100644
index 00000000000..0de02583519
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/pr59397.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+/* { dg-options "-fsanitize=signed-integer-overflow" } */
+
+typedef enum E { A = -1 } e;
+int
+foo (void)
+{
+ e e = A;
+ return e + 1;
+}
diff --git a/gcc/testsuite/g++.dg/cilk-plus/CK/catch_exc.cc b/gcc/testsuite/g++.dg/cilk-plus/CK/catch_exc.cc
new file mode 100644
index 00000000000..0633d19030c
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cilk-plus/CK/catch_exc.cc
@@ -0,0 +1,67 @@
+/* { dg-options "-fcilkplus" } */
+/* { dg-do run { target i?86-*-* x86_64-*-* arm*-*-* } } */
+/* { dg-options "-fcilkplus -lcilkrts" { target { i?86-*-* x86_64-*-* arm*-*-* } } } */
+
+#include <assert.h>
+#include <unistd.h>
+#if HAVE_IO
+#include <cstdio>
+#include <cilk/cilk_api.h>
+#endif
+#include <cstdlib>
+
+
+void func(int volatile* steal_me)
+{
+ while (! (*steal_me))
+ {
+ usleep(2000);
+ }
+#if HAVE_IO
+ printf("Foo executing on %d\n", __cilkrts_get_worker_number());
+#endif
+ throw 5;
+}
+
+void my_test()
+{
+ volatile int steal_me = 0;
+
+ try
+ {
+ _Cilk_spawn func(&steal_me);
+#if HAVE_IO
+ printf("Continuation executing on %d\n",
+ __cilkrts_get_worker_number());
+#endif
+ steal_me = 1;
+ _Cilk_sync;
+ goto bad;
+ }
+
+ catch (int x)
+ {
+#if HAVE_IO
+ printf("We caught x = %d\n", x);
+#endif
+ assert(x == 5);
+ }
+ if (0)
+ {
+ bad:
+#if HAVE_IO
+ printf("We should not be here!\n");
+#endif
+ __builtin_abort ();
+ }
+}
+
+
+int main()
+{
+ my_test();
+#if HAVE_IO
+ printf("PASSED\n");
+#endif
+ return 0;
+}
diff --git a/gcc/testsuite/g++.dg/cilk-plus/CK/const_spawn.cc b/gcc/testsuite/g++.dg/cilk-plus/CK/const_spawn.cc
new file mode 100644
index 00000000000..1ea473f1d57
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cilk-plus/CK/const_spawn.cc
@@ -0,0 +1,78 @@
+/* { dg-options "-fcilkplus" } */
+/* { dg-do run { target i?86-*-* x86_64-*-* arm*-*-* } } */
+/* { dg-options "-fcilkplus -lcilkrts" { target { i?86-*-* x86_64-*-* arm*-*-* } } } */
+
+class Rectangle
+{
+ int area_val, h, w;
+ public:
+ Rectangle (int, int);
+ Rectangle (int, int, int);
+ ~Rectangle ();
+ int area ();
+};
+Rectangle::~Rectangle ()
+{
+ h = 0;
+ w = 0;
+ area_val = 0;
+}
+Rectangle::Rectangle (int height, int width)
+{
+ h = height;
+ w = width;
+ area_val = 0;
+}
+
+Rectangle::Rectangle (int height, int width, int area_orig)
+{
+ h = height;
+ w = width;
+ area_val = area_orig;
+}
+
+int Rectangle::area()
+{
+ return (area_val += (h*w));
+}
+
+/* Spawning constructor. */
+int main1 (void)
+{
+ Rectangle r = _Cilk_spawn Rectangle (4, 3);
+ return r.area();
+}
+
+/* Spawning constructor 2. */
+int main2 (void)
+{
+ Rectangle r (_Cilk_spawn Rectangle (4, 3));
+ return r.area();
+}
+
+/* Spawning copy constructor. */
+int main3 (void)
+{
+ Rectangle r = _Cilk_spawn Rectangle (4, 3, 2);
+ return r.area ();
+}
+
+/* Spawning copy constructor 2. */
+int main4 (void)
+{
+ Rectangle r ( _Cilk_spawn Rectangle (4, 3, 2));
+ return r.area();
+}
+
+int main (void)
+{
+ if (main1 () != 12)
+ __builtin_abort ();
+ if (main2 () != 12)
+ __builtin_abort ();
+ if (main3 () != 14)
+ __builtin_abort ();
+ if (main4() != 14)
+ __builtin_abort ();
+ return 0;
+}
diff --git a/gcc/testsuite/g++.dg/cilk-plus/CK/fib-opr-overload.cc b/gcc/testsuite/g++.dg/cilk-plus/CK/fib-opr-overload.cc
new file mode 100644
index 00000000000..6af4a367b7d
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cilk-plus/CK/fib-opr-overload.cc
@@ -0,0 +1,94 @@
+/* { dg-options "-fcilkplus" } */
+/* { dg-do run { target i?86-*-* x86_64-*-* arm*-*-* } } */
+/* { dg-options "-fcilkplus -lcilkrts" { target { i?86-*-* x86_64-*-* arm*-*-* } } } */
+
+#if HAVE_IO
+#include <iostream>
+#endif
+
+class Some_Struct
+{
+ int calculated_value;
+ short some_unused_value;
+public:
+ Some_Struct () {
+ this->calculated_value = 0;
+ }
+ Some_Struct (int value) {
+ this->calculated_value = value;
+ }
+ Some_Struct operator=(Some_Struct f) {
+ this->calculated_value = f.calculated_value;
+ return *this;
+ }
+ bool operator!=(Some_Struct f) {
+ return (this->calculated_value != f.calculated_value);
+ }
+ Some_Struct operator+(Some_Struct &f) {
+ Some_Struct z;
+ z.calculated_value = this->calculated_value + f.calculated_value;
+ return z;
+ }
+ Some_Struct operator-(int x) {
+ Some_Struct z;
+ z.calculated_value = this->calculated_value - x;
+ return z;
+ }
+ bool operator<(int x) {
+ return (this->calculated_value < x);
+ }
+ int get_calculated_value () {
+ return this->calculated_value;
+ }
+};
+
+
+template <class T>
+T fibonacci_serial (T f)
+{
+ if (f < 2)
+ return f;
+ T a = fibonacci_serial (f-1);
+ T b = fibonacci_serial (f-2);
+ return (a+b);
+}
+
+template <class T>
+T fibonacci (T f)
+{
+ if (f < 2)
+ return f;
+ T a = _Cilk_spawn fibonacci (f-1);
+ T b = fibonacci (f-2);
+ _Cilk_sync;
+ return (a+b);
+}
+
+int main (void)
+{
+ Some_Struct f (40), f_serial(40);
+ f = fibonacci (f);
+ f_serial = fibonacci_serial (f_serial);
+
+ if (f != f_serial)
+ __builtin_abort ();
+
+ int t = 40, t_serial = 40;
+ t = fibonacci (t);
+ t_serial = fibonacci_serial (t_serial);
+ if (t != t_serial)
+ __builtin_abort ();
+
+ short s = 20, s_serial = 20;
+ s = fibonacci (s);
+ s_serial = fibonacci_serial (s_serial);
+ if (s != s_serial)
+ __builtin_abort ();
+
+#if HAVE_IO
+ std::cout << "Fib_Parallel (40) = " << f.get_calculated_value() << std::endl;
+ std::cout << "Fib_Serial (40) = " << f_serial.get_calculated_value()
+ << std::endl;
+#endif
+ return 0;
+}
diff --git a/gcc/testsuite/g++.dg/cilk-plus/CK/fib-tplt.cc b/gcc/testsuite/g++.dg/cilk-plus/CK/fib-tplt.cc
new file mode 100644
index 00000000000..dbc2da881a9
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cilk-plus/CK/fib-tplt.cc
@@ -0,0 +1,53 @@
+/* { dg-options "-fcilkplus" } */
+/* { dg-do run { target i?86-*-* x86_64-*-* arm*-*-* } } */
+/* { dg-options "-fcilkplus -lcilkrts" { target { i?86-*-* x86_64-*-* arm*-*-* } } } */
+
+struct fib_struct
+{
+ int x;
+ int *y;
+ int z[3];
+ struct fib_struct *ptr_next;
+ struct fib_struct operator+(struct fib_struct &other) {
+ struct fib_struct z ;
+ z.x = (*this).x + (other.x);
+ return z;
+ }
+ struct fib_struct operator-(int other) {
+ struct fib_struct z ;
+ z.x = this->x - other;
+ return z;
+ }
+ bool operator<(int number) {
+ return (this->x < number);
+ }
+
+};
+
+template <typename T>
+T fib (T z) {
+ if (z < 2) return z;
+ T a = _Cilk_spawn fib<T>(z - 1);
+ T b = fib<T>(z - 2);
+ T c = a + b;
+ return (a+b);
+}
+
+
+int sfib(int x)
+{
+ if (x < 2) return x;
+ int a = sfib(x-1);
+ int b = sfib(x-2);
+ return (a+b);
+}
+
+int main () {
+ int z = 30;
+ int parallel_fib = fib<int>(z);
+ int serial_fib = sfib(z);
+ if (serial_fib != parallel_fib)
+ __builtin_abort ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/g++.dg/cilk-plus/CK/lambda_spawns.cc b/gcc/testsuite/g++.dg/cilk-plus/CK/lambda_spawns.cc
new file mode 100644
index 00000000000..7448d1a8a30
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cilk-plus/CK/lambda_spawns.cc
@@ -0,0 +1,236 @@
+/* { dg-options "-fcilkplus" } */
+/* { dg-do run { target i?86-*-* x86_64-*-* arm*-*-* } } */
+/* { dg-options "-std=c++11 -fcilkplus -lcilkrts" { target { i?86-*-* x86_64-*-* arm*-*-* } } } */
+#define FIRST_NUMBER 5
+#define SECOND_NUMBER 3
+#define HAVE_IO 0
+#if HAVE_IO
+#include <stdio.h>
+#endif
+
+#include <stdlib.h>
+
+int global_var;
+
+void foo1(int *array, int size)
+{
+#if HAVE_IO
+ for (int ii = 0; ii < size; ii++)
+ printf("%2d\t", array[ii]);
+ printf("\n");
+ fflush (stdout);
+#else
+ if (size != 2)
+ __builtin_abort ();
+ if (array[0] != FIRST_NUMBER)
+ __builtin_abort ();
+ if (array[1] != SECOND_NUMBER)
+ __builtin_abort ();
+#endif
+ global_var++;
+}
+void foo1_c(const int *array, int size)
+{
+#if HAVE_IO
+ for (int ii = 0; ii < size; ii++)
+ printf("%2d\t", array[ii]);
+ printf("\n");
+ fflush (stdout);
+#else
+ if (size != 2)
+ __builtin_abort ();
+ if (array[0] != FIRST_NUMBER)
+ __builtin_abort ();
+ if (array[1] != SECOND_NUMBER)
+ __builtin_abort ();
+#endif
+ global_var++;
+}
+
+
+int main2 (int argc) {
+ int A[2] = {FIRST_NUMBER, SECOND_NUMBER};
+ int B[2] = {FIRST_NUMBER, SECOND_NUMBER};
+ int main_size = argc+1; /* We know argc is 1, and so 1+1 = 2. */
+ int q = 0;
+
+ global_var = 0;
+ auto func0 = [=](){ foo1_c(A, 2); };
+ _Cilk_spawn func0();
+ foo1 (A, 2);
+ _Cilk_sync;
+ if (global_var != 2)
+ return (++q);
+
+ global_var = 0;
+ auto func1 = [=](int *Aa){ foo1(Aa, 2); };
+ _Cilk_spawn func1 (A);
+ foo1 (A, 2);
+ _Cilk_sync;
+ if (global_var != 2)
+ return (++q);
+
+ global_var = 0;
+ auto func2 = [=](int *Aa, int size){ foo1(Aa, size); };
+ _Cilk_spawn func2 (A, 2);
+ foo1 (A, 2);
+ _Cilk_sync;
+ if (global_var != 2)
+ return (++q);
+
+ global_var = 0;
+ auto func3 = [=](int *Aa, int size){ int new_size = (size % 2 + 2);
+ foo1(Aa, size); };
+ _Cilk_spawn func3 (A, 2);
+ foo1 (A, 2);
+ _Cilk_sync;
+ if (global_var != 2)
+ return (++q);
+
+ global_var = 0;
+ auto func4 = [](int *Aa){ foo1(Aa, 2); };
+ _Cilk_spawn func4 (A);
+ foo1 (A, 2);
+ _Cilk_sync;
+ if (global_var != 2)
+ return (++q);
+
+ global_var = 0;
+ auto func5 = [](int *Aa, int size){ foo1(Aa, size); };
+ _Cilk_spawn func5 (A, 2);
+ foo1 (A, 2);
+ _Cilk_sync;
+ if (global_var != 2)
+ return (++q);
+
+ global_var = 0;
+ auto func6 = [&](int *Aa){ foo1(Aa, 2); };
+ _Cilk_spawn func6 (A);
+ foo1 (A, 2);
+ _Cilk_sync;
+ if (global_var != 2)
+ return (++q);
+
+ global_var = 0;
+ auto func7 = [&](int *Aa, int size){ foo1(Aa, size); };
+ _Cilk_spawn func7 (A, 2);
+ foo1 (A, 2);
+ _Cilk_sync;
+ if (global_var != 2)
+ return (++q);
+
+ global_var = 0;
+ auto func8 = [&](){ foo1(A, 2); };
+ _Cilk_spawn func8 ();
+ foo1 (A, 2);
+ _Cilk_sync;
+ if (global_var != 2)
+ return (++q);
+
+ global_var = 0;
+ /* We ignore the first param here and pass in A from the outer fn. */
+ auto func9 = [&](int *Aa, int size){ foo1(A, size); };
+ _Cilk_spawn func9 (A, 2);
+ foo1 (A, 2);
+ _Cilk_sync;
+ if (global_var != 2)
+ return (++q);
+
+ global_var = 0;
+ auto func10 = [=](){ foo1_c(A, main_size); };
+ _Cilk_spawn func10 ();
+ foo1 (A, 2);
+ _Cilk_sync;
+ if (global_var != 2)
+ return (++q);
+
+ global_var = 0;
+ auto func11 = [&](){ foo1(A, main_size); };
+ _Cilk_spawn func11 ();
+ foo1 (A, 2);
+ _Cilk_sync;
+ if (global_var != 2)
+ return (++q);
+
+ global_var = 0;
+ /* We ignore the first & second param here and pass in A from the
+ outer fn. */
+ auto func12 = [&](int *Aa, int size){ foo1(A, main_size); };
+ _Cilk_spawn func12 (A, 2);
+ foo1 (A, 2);
+ _Cilk_sync;
+ if (global_var != 2)
+ return (++q);
+
+ global_var = 0;
+ _Cilk_spawn [&](int *Aa){ foo1(Aa, 2); }(A);
+ foo1 (A, 2);
+ _Cilk_sync;
+ if (global_var != 2)
+ return (++q);
+
+ global_var = 0;
+ _Cilk_spawn [&](int *Aa, int size){ foo1(Aa, size); }(A, 2);
+ foo1 (A, 2);
+ _Cilk_sync;
+ if (global_var != 2)
+ return (++q);
+
+ global_var = 0;
+ _Cilk_spawn [=](int *Aa){ foo1(Aa, 2); }(A);
+ foo1 (A, 2);
+ _Cilk_sync;
+ if (global_var != 2)
+ return (++q);
+
+ global_var = 0;
+ _Cilk_spawn [=](int *Aa, int size){ foo1(Aa, size); }(A, 2);
+ foo1 (A, 2);
+ _Cilk_sync;
+ if (global_var != 2)
+ return (++q);
+
+ global_var = 0;
+ /* We ignore the first param here. */
+ _Cilk_spawn [=](int *Aa, int size){ foo1_c(A, size); }(A, 2);
+ foo1 (A, 2);
+ _Cilk_sync;
+ if (global_var != 2)
+ return (++q);
+
+ global_var = 0;
+ /* We ignore the first and second param here. */
+ _Cilk_spawn [=](int *Aa, int size){ foo1_c(A, size); }(B, 2);
+ foo1 (A, 2);
+ _Cilk_sync;
+ if (global_var != 2)
+ return (++q);
+
+ global_var = 0;
+ _Cilk_spawn [&](){ foo1(A, 2); }();
+ [&](){ foo1(A, 2); }();
+ _Cilk_sync;
+ if (global_var != 2)
+ return (++q);
+
+ global_var = 0;
+ _Cilk_spawn [=](){ foo1_c (A, main_size); }();
+ foo1 (A, 2);
+ _Cilk_sync;
+ if (global_var != 2)
+ return (++q);
+
+ global_var = 0;
+ _Cilk_spawn [&](){ foo1(A, main_size); }();
+ [&](){ foo1(A, 2); }();
+ _Cilk_sync;
+ if (global_var != 2)
+ return (++q);
+
+ return q;
+}
+
+int main (void)
+{
+ return main2 (1);
+}
diff --git a/gcc/testsuite/g++.dg/cilk-plus/CK/lambda_spawns_tplt.cc b/gcc/testsuite/g++.dg/cilk-plus/CK/lambda_spawns_tplt.cc
new file mode 100644
index 00000000000..2667f5a9992
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cilk-plus/CK/lambda_spawns_tplt.cc
@@ -0,0 +1,173 @@
+/* { dg-options "-fcilkplus" } */
+/* { dg-do run { target i?86-*-* x86_64-*-* arm*-*-* } } */
+/* { dg-options "-std=c++11 -fcilkplus -lcilkrts" { target { i?86-*-* x86_64-*-* arm*-*-* } } } */
+
+#define FIRST_NUMBER 5
+#define SECOND_NUMBER 3
+#define HAVE_IO 0
+#if HAVE_IO
+#include <stdio.h>
+#endif
+
+#include <stdlib.h>
+
+template <class T>
+void foo1(T *array, int size)
+{
+#if HAVE_IO
+ for (int ii = 0; ii < size; ii++)
+ printf("%2d\t", (int)array[ii]);
+ printf("\n");
+ fflush (stdout);
+#else
+ if (size != 2)
+ __builtin_abort ();
+ if (array[0] != FIRST_NUMBER)
+ __builtin_abort ();
+ if (array[1] != SECOND_NUMBER)
+ __builtin_abort ();
+#endif
+}
+template <class T>
+void foo1_c(const T *array, int size)
+{
+#if HAVE_IO
+ for (int ii = 0; ii < size; ii++)
+ printf("%2d\t", (int)array[ii]);
+ printf("\n");
+ fflush (stdout);
+#else
+ if (size != 2)
+ __builtin_abort ();
+ if (array[0] != FIRST_NUMBER)
+ __builtin_abort ();
+ if (array[1] != SECOND_NUMBER)
+ __builtin_abort ();
+#endif
+}
+template <class T>
+int main2 (int argc, char **argv) {
+ T A[2] = {FIRST_NUMBER, SECOND_NUMBER};
+ int main_size = argc+1; /* We know argc is 1, and so 1+1 = 2. */
+ auto func0 = [=](){ foo1_c(A, 2); };
+ _Cilk_spawn func0();
+ foo1 (A, 2);
+ _Cilk_sync;
+
+ auto func1 = [=](T *Aa){ foo1(Aa, 2); };
+ _Cilk_spawn func1 (A);
+ foo1 (A, 2);
+ _Cilk_sync;
+
+ auto func2 = [=](T *Aa, int size){ foo1(Aa, size); };
+ _Cilk_spawn func2 (A, 2);
+ foo1 (A, 2);
+ _Cilk_sync;
+
+ auto func3 = [=](T *Aa, int size){ int new_size = (size % 2 + 2);
+ foo1(Aa, size); };
+ _Cilk_spawn func3 (A, 2);
+ foo1 (A, 2);
+ _Cilk_sync;
+
+ auto func4 = [](T *Aa){ foo1(Aa, 2); };
+ _Cilk_spawn func4 (A);
+ foo1 (A, 2);
+ _Cilk_sync;
+
+ auto func5 = [](T *Aa, int size){ foo1(Aa, size); };
+ _Cilk_spawn func5 (A, 2);
+ foo1 (A, 2);
+ _Cilk_sync;
+
+ auto func6 = [&](T *Aa){ foo1(Aa, 2); };
+ _Cilk_spawn func6 (A);
+ foo1 (A, 2);
+ _Cilk_sync;
+
+ auto func7 = [&](T *Aa, int size){ foo1(Aa, size); };
+ _Cilk_spawn func7 (A, 2);
+ foo1 (A, 2);
+ _Cilk_sync;
+
+ auto func8 = [&](){ foo1(A, 2); };
+ _Cilk_spawn func8 ();
+ foo1 (A, 2);
+ _Cilk_sync;
+
+ /* We ignore the first param here and pass in A from the outer fn. */
+ auto func9 = [&](T *Aa, int size){ foo1(A, size); };
+ _Cilk_spawn func9 (A, 2);
+ foo1 (A, 2);
+ _Cilk_sync;
+
+ auto func10 = [=](){ foo1_c(A, main_size); };
+ _Cilk_spawn func10 ();
+ foo1 (A, 2);
+ _Cilk_sync;
+
+ auto func11 = [&](){ foo1(A, main_size); };
+ _Cilk_spawn func11 ();
+ foo1 (A, 2);
+ _Cilk_sync;
+
+ /* We ignore the first & second param here and pass in A from the
+ outer fn. */
+ auto func12 = [&](T *Aa, int size){ foo1(A, main_size); };
+ _Cilk_spawn func12 (A, 2);
+ foo1 (A, 2);
+ _Cilk_sync;
+
+ _Cilk_spawn [&](T *Aa){ foo1(Aa, 2); }(A);
+ foo1 (A, 2);
+ _Cilk_sync;
+
+ _Cilk_spawn [&](T *Aa, int size){ foo1(Aa, size); }(A, 2);
+ foo1 (A, 2);
+ _Cilk_sync;
+
+ _Cilk_spawn [=](T *Aa){ foo1(Aa, 2); }(A);
+ foo1 (A, 2);
+ _Cilk_sync;
+
+ _Cilk_spawn [=](T *Aa, int size){ foo1(Aa, size); }(A, 2);
+ foo1 (A, 2);
+ _Cilk_sync;
+
+ /* We ignore the first param here. */
+ _Cilk_spawn [=](T *Aa, int size){ foo1_c(A, size); }(A, 2);
+ foo1 (A, 2);
+ _Cilk_sync;
+
+ /* We ignore the first and second param here. */
+ _Cilk_spawn [=](T *Aa, int size){ foo1_c(A, main_size); }(A, 2);
+ foo1 (A, 2);
+ _Cilk_sync;
+
+ _Cilk_spawn [&](){ foo1(A, 2); }();
+ [&](){ foo1(A, 2); }();
+ _Cilk_sync;
+
+ _Cilk_spawn [=](){ foo1_c(A, main_size); }();
+ foo1 (A, 2);
+ _Cilk_sync;
+
+ _Cilk_spawn [&](){ foo1(A, main_size); }();
+ [&](){ foo1(A, 2); }();
+ _Cilk_sync;
+
+ return 0;
+}
+
+int main (void)
+{
+ int argc = 1;
+ char **argv = NULL;
+ int x = 1, y = 1, z = 1, q = 1, p = 1;
+ x = main2<char>(argc,argv);
+ y = main2<short>(argc,argv);
+ z = main2<int>(argc,argv);
+ p = main2<long>(argc,argv);
+ q = main2<long long>(argc,argv);
+ return (x+y+z+p+q);
+}
diff --git a/gcc/testsuite/g++.dg/cilk-plus/cilk-plus.exp b/gcc/testsuite/g++.dg/cilk-plus/cilk-plus.exp
index 707d17ec5e4..36c81112585 100644
--- a/gcc/testsuite/g++.dg/cilk-plus/cilk-plus.exp
+++ b/gcc/testsuite/g++.dg/cilk-plus/cilk-plus.exp
@@ -22,6 +22,14 @@ if { ![check_effective_target_cilkplus] } {
return;
}
+verbose "$tool $libdir" 1
+set library_var "[get_multilibs]"
+# Pointing the ld_library_path to the Cilk Runtime library binaries.
+set ld_library_path "${library_var}/libcilkrts/.libs"
+
+set ALWAYS_CFLAGS ""
+lappend ALWAYS_CFLAGS "-L${library_var}/libcilkrts/.libs"
+
dg-init
# Run the tests that are shared with C.
g++-dg-runtest [lsort [glob -nocomplain $srcdir/c-c++-common/cilk-plus/PS/*.c]] ""
@@ -63,4 +71,23 @@ dg-runtest [lsort [glob -nocomplain $srcdir/g++.dg/cilk-plus/AN/*.cc]] " -g -O3
dg-runtest [lsort [glob -nocomplain $srcdir/g++.dg/cilk-plus/AN/*.cc]] " -O3 -ftree-vectorize -fcilkplus -g" " "
dg-finish
+dg-init
+dg-runtest [lsort [glob -nocomplain $srcdir/g++.dg/cilk-plus/CK/*.cc]] " -fcilkplus $ALWAYS_CFLAGS" " "
+dg-runtest [lsort [glob -nocomplain $srcdir/g++.dg/cilk-plus/CK/*.cc]] " -O1 -fcilkplus $ALWAYS_CFLAGS" " "
+dg-runtest [lsort [glob -nocomplain $srcdir/g++.dg/cilk-plus/CK/*.cc]] " -O2 -fcilkplus $ALWAYS_CFLAGS" " "
+dg-runtest [lsort [glob -nocomplain $srcdir/g++.dg/cilk-plus/CK/*.cc]] " -O3 -fcilkplus $ALWAYS_CFLAGS" " "
+dg-runtest [lsort [glob -nocomplain $srcdir/g++.dg/cilk-plus/CK/*.cc]] " -g -fcilkplus $ALWAYS_CFLAGS" " "
+dg-runtest [lsort [glob -nocomplain $srcdir/g++.dg/cilk-plus/CK/*.cc]] " -g -O2 -fcilkplus $ALWAYS_CFLAGS" " "
+dg-runtest [lsort [glob -nocomplain $srcdir/g++.dg/cilk-plus/CK/*.cc]] " -g -O3 -fcilkplus $ALWAYS_CFLAGS" " "
+dg-finish
+
+dg-init
+dg-runtest [lsort [glob -nocomplain $srcdir/c-c++-common/cilk-plus/CK/*.c]] " -fcilkplus $ALWAYS_CFLAGS" " "
+dg-runtest [lsort [glob -nocomplain $srcdir/c-c++-common/cilk-plus/CK/*.c]] " -O1 -fcilkplus $ALWAYS_CFLAGS" " "
+dg-runtest [lsort [glob -nocomplain $srcdir/c-c++-common/cilk-plus/CK/*.c]] " -O2 -fcilkplus $ALWAYS_CFLAGS" " "
+dg-runtest [lsort [glob -nocomplain $srcdir/c-c++-common/cilk-plus/CK/*.c]] " -O3 -fcilkplus $ALWAYS_CFLAGS" " "
+dg-runtest [lsort [glob -nocomplain $srcdir/c-c++-common/cilk-plus/CK/*.c]] " -g -fcilkplus $ALWAYS_CFLAGS" " "
+dg-runtest [lsort [glob -nocomplain $srcdir/c-c++-common/cilk-plus/CK/*.c]] " -g -O2 -fcilkplus $ALWAYS_CFLAGS" " "
+dg-runtest [lsort [glob -nocomplain $srcdir/c-c++-common/cilk-plus/CK/*.c]] " -g -O3 -fcilkplus $ALWAYS_CFLAGS" " "
+dg-finish
unset TEST_EXTRA_LIBS
diff --git a/gcc/testsuite/g++.dg/cpp0x/access02.C b/gcc/testsuite/g++.dg/cpp0x/access02.C
new file mode 100644
index 00000000000..74960a66a61
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp0x/access02.C
@@ -0,0 +1,39 @@
+// PR c++/58954
+// { dg-require-effective-target c++11 }
+
+template<class T>
+T&& declval();
+
+template<class T>
+struct foo_argument
+{
+ template<class Ret, class C, class Arg>
+ static Arg test(Ret (C::*)(Arg));
+
+ typedef decltype(test(&T::template foo<>)) type;
+};
+
+template<class T, class>
+struct dependent { typedef T type; };
+
+template<class T>
+struct base
+{
+ template<class Ignore = void>
+ auto foo(int i) -> decltype(declval<
+ typename dependent<T&, Ignore>::type
+ >().foo_impl(i));
+};
+
+struct derived : base<derived>
+{
+ friend struct base<derived>;
+private:
+ int foo_impl(int i);
+};
+
+int main()
+{
+ foo_argument<derived>::type var = 0;
+ return var;
+}
diff --git a/gcc/testsuite/g++.dg/cpp0x/constexpr-46336.C b/gcc/testsuite/g++.dg/cpp0x/constexpr-46336.C
index 4268a6ba43a..c021e980352 100644
--- a/gcc/testsuite/g++.dg/cpp0x/constexpr-46336.C
+++ b/gcc/testsuite/g++.dg/cpp0x/constexpr-46336.C
@@ -4,7 +4,7 @@
extern "C" {
enum A { };
inline constexpr A
- f(A a, A b) // { dg-error "previous declaration" }
+ f(A a, A b) // { dg-message "previous declaration" }
{ return A(static_cast<int>(a) & static_cast<int>(b)); }
enum B { };
inline constexpr B
diff --git a/gcc/testsuite/g++.dg/cpp0x/constexpr-template6.C b/gcc/testsuite/g++.dg/cpp0x/constexpr-template6.C
new file mode 100644
index 00000000000..eac6004aeb3
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp0x/constexpr-template6.C
@@ -0,0 +1,20 @@
+// PR c++/59268
+// { dg-do compile }
+// { dg-options "-std=c++11" }
+
+template <typename>
+struct A
+{
+ constexpr A (int) {}
+ virtual void foo ()
+ {
+ constexpr A<void> a (0);
+ }
+};
+
+void
+bar ()
+{
+ A<int> a (3);
+ a.foo ();
+}
diff --git a/gcc/testsuite/g++.dg/cpp0x/defaulted2.C b/gcc/testsuite/g++.dg/cpp0x/defaulted2.C
index 3cef60050ac..5f05424dc34 100644
--- a/gcc/testsuite/g++.dg/cpp0x/defaulted2.C
+++ b/gcc/testsuite/g++.dg/cpp0x/defaulted2.C
@@ -1,7 +1,7 @@
// Negative test for defaulted/deleted fns.
// { dg-options "-std=c++11" }
-void f(); // { dg-error "previous" }
+void f(); // { dg-message "previous" }
void f() = delete; // { dg-error "deleted" }
struct A
diff --git a/gcc/testsuite/g++.dg/cpp0x/deleted2.C b/gcc/testsuite/g++.dg/cpp0x/deleted2.C
new file mode 100644
index 00000000000..8590c49f3c5
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp0x/deleted2.C
@@ -0,0 +1,9 @@
+// PR c++/52707
+// { dg-do compile { target c++11 } }
+
+struct A {
+ int m;
+ A() = delete;
+};
+
+A a = {1};
diff --git a/gcc/testsuite/g++.dg/cpp0x/variadic-sizeof3.C b/gcc/testsuite/g++.dg/cpp0x/variadic-sizeof3.C
new file mode 100644
index 00000000000..7296500a3ad
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp0x/variadic-sizeof3.C
@@ -0,0 +1,15 @@
+// PR c++/59435
+// { dg-require-effective-target c++11 }
+
+template <typename... E>
+struct T
+{
+ T(unsigned int i = sizeof...(E)){} // does not compile
+
+ static constexpr unsigned int U = sizeof...(E);
+ T(unsigned int j, unsigned int i = U){} // compile
+};
+
+template <typename... T>
+void test(int i = sizeof...(T)) // compile
+{}
diff --git a/gcc/testsuite/g++.dg/cpp1y/auto-fn8.C b/gcc/testsuite/g++.dg/cpp1y/auto-fn8.C
index 072f6145a28..15ccfc566cc 100644
--- a/gcc/testsuite/g++.dg/cpp1y/auto-fn8.C
+++ b/gcc/testsuite/g++.dg/cpp1y/auto-fn8.C
@@ -1,6 +1,6 @@
// { dg-options "-std=c++1y -pedantic-errors" }
-auto f() { return 42; } // { dg-error "old declaration .auto" }
+auto f() { return 42; } // { dg-message "old declaration .auto" }
auto f(); // OK
int f(); // { dg-error "new declaration" }
diff --git a/gcc/testsuite/g++.dg/dg.exp b/gcc/testsuite/g++.dg/dg.exp
index d107dfe7894..c90a7e6d1a5 100644
--- a/gcc/testsuite/g++.dg/dg.exp
+++ b/gcc/testsuite/g++.dg/dg.exp
@@ -54,6 +54,7 @@ set tests [prune $tests $srcdir/$subdir/guality/*]
set tests [prune $tests $srcdir/$subdir/simulate-thread/*]
set tests [prune $tests $srcdir/$subdir/asan/*]
set tests [prune $tests $srcdir/$subdir/ubsan/*]
+set tests [prune $tests $srcdir/$subdir/tsan/*]
# Main loop.
g++-dg-runtest $tests $DEFAULT_CXXFLAGS
diff --git a/gcc/testsuite/g++.dg/gomp/udr-3.C b/gcc/testsuite/g++.dg/gomp/udr-3.C
index a560fc1b537..9fc6f40820a 100644
--- a/gcc/testsuite/g++.dg/gomp/udr-3.C
+++ b/gcc/testsuite/g++.dg/gomp/udr-3.C
@@ -63,12 +63,12 @@ int y = f4 <S> ();
namespace N1
{
- #pragma omp declare reduction (+: ::S: omp_out.s *= omp_in.s) // { dg-error "previous" }
+ #pragma omp declare reduction (+: ::S: omp_out.s *= omp_in.s) // { dg-message "previous" }
#pragma omp declare reduction (+: S: omp_out.s += omp_in.s) // { dg-error "redeclaration of" }
void
f5 ()
{
- #pragma omp declare reduction (f5: S: omp_out.s *= omp_in.s) // { dg-error "previous" }
+ #pragma omp declare reduction (f5: S: omp_out.s *= omp_in.s) // { dg-message "previous" }
#pragma omp declare reduction (f5: ::S: omp_out.s += omp_in.s) // { dg-error "redeclaration of" }
}
}
@@ -84,10 +84,10 @@ namespace N2
namespace N3
{
- #pragma omp declare reduction (+: ::S: omp_out.s *= omp_in.s) // { dg-error "previous" }
+ #pragma omp declare reduction (+: ::S: omp_out.s *= omp_in.s) // { dg-message "previous" }
#pragma omp declare reduction (+: T: omp_out.t += omp_in.t)
#pragma omp declare reduction (+: S: omp_out.s += omp_in.s) // { dg-error "redeclaration of" }
- #pragma omp declare reduction (n3: long: omp_out += omp_in) // { dg-error "previous" }
+ #pragma omp declare reduction (n3: long: omp_out += omp_in) // { dg-message "previous" }
#pragma omp declare reduction (n3: long int: omp_out += omp_in) // { dg-error "redeclaration of" }
#pragma omp declare reduction (n3: short unsigned: omp_out += omp_in)
#pragma omp declare reduction (n3: short int: omp_out += omp_in)
@@ -95,9 +95,9 @@ namespace N3
f6 ()
{
#pragma omp declare reduction (f6: T: omp_out.t += omp_in.t)
- #pragma omp declare reduction (f6: S: omp_out.s *= omp_in.s) // { dg-error "previous" }
+ #pragma omp declare reduction (f6: S: omp_out.s *= omp_in.s) // { dg-message "previous" }
#pragma omp declare reduction (f6: ::S: omp_out.s += omp_in.s) // { dg-error "redeclaration of" }
- #pragma omp declare reduction (f6: long: omp_out += omp_in) // { dg-error "previous" }
+ #pragma omp declare reduction (f6: long: omp_out += omp_in) // { dg-message "previous" }
#pragma omp declare reduction (f6: long int: omp_out += omp_in) // { dg-error "redeclaration of" }
#pragma omp declare reduction (f6: short unsigned: omp_out += omp_in)
#pragma omp declare reduction (f6: short int: omp_out += omp_in)
@@ -124,7 +124,7 @@ namespace N5
int
f7 ()
{
- #pragma omp declare reduction (f7: T: omp_out.s *= omp_in.s) // { dg-error "previous" }
+ #pragma omp declare reduction (f7: T: omp_out.s *= omp_in.s) // { dg-message "previous" }
#pragma omp declare reduction (f7: T: omp_out.s += omp_in.s) // { dg-error "redeclaration of" }
return 0;
}
@@ -145,9 +145,9 @@ namespace N6
f8 ()
{
#pragma omp declare reduction (f8: T: omp_out.t += omp_in.t)
- #pragma omp declare reduction (f8: U: omp_out.s *= omp_in.s) // { dg-error "previous" }
+ #pragma omp declare reduction (f8: U: omp_out.s *= omp_in.s) // { dg-message "previous" }
#pragma omp declare reduction (f8: ::S: omp_out.s += omp_in.s) // { dg-error "redeclaration of" }
- #pragma omp declare reduction (f8: long: omp_out += omp_in) // { dg-error "previous" }
+ #pragma omp declare reduction (f8: long: omp_out += omp_in) // { dg-message "previous" }
#pragma omp declare reduction (f8: long int: omp_out += omp_in) // { dg-error "redeclaration of" }
#pragma omp declare reduction (f8: short unsigned: omp_out += omp_in)
#pragma omp declare reduction (f8: short int: omp_out += omp_in)
diff --git a/gcc/testsuite/g++.dg/lookup/extern-c-redecl5.C b/gcc/testsuite/g++.dg/lookup/extern-c-redecl5.C
index 51a342d433c..4d934c1045c 100644
--- a/gcc/testsuite/g++.dg/lookup/extern-c-redecl5.C
+++ b/gcc/testsuite/g++.dg/lookup/extern-c-redecl5.C
@@ -6,11 +6,11 @@
class frok
{
int this_errno;
- friend int fork (void); // { dg-error "previous declaration .*?C\\+\\+. linkage" }
+ friend int fork (void); // { dg-message "previous declaration .*?C\\+\\+. linkage" }
};
extern "C" int
-fork (void) // { dg-error "conflicts with new declaration .*?C. linkage" }}
+fork (void) // { dg-error "conflicting declaration .*?C. linkage" }
{
frok grouped;
return grouped.this_errno;
diff --git a/gcc/testsuite/g++.dg/lookup/linkage1.C b/gcc/testsuite/g++.dg/lookup/linkage1.C
index 6f6bdfdea8a..aa6983c54c8 100644
--- a/gcc/testsuite/g++.dg/lookup/linkage1.C
+++ b/gcc/testsuite/g++.dg/lookup/linkage1.C
@@ -1,4 +1,4 @@
// DR 563
-extern int i; // { dg-error "linkage" }
+extern int i; // { dg-message "linkage" }
extern "C" int i; // { dg-error "linkage" }
diff --git a/gcc/testsuite/g++.dg/opt/pr59470.C b/gcc/testsuite/g++.dg/opt/pr59470.C
new file mode 100644
index 00000000000..4698ab717d2
--- /dev/null
+++ b/gcc/testsuite/g++.dg/opt/pr59470.C
@@ -0,0 +1,188 @@
+// PR middle-end/59470
+// { dg-do run }
+// { dg-options "-O2 -fstack-protector" }
+// { dg-additional-options "-fPIC" { target fpic } }
+// { dg-require-effective-target fstack_protector }
+
+struct A
+{
+ int a1;
+ A () throw () : a1 (0) {}
+};
+
+struct B
+{
+ unsigned int b1 () throw ();
+};
+
+__attribute__((noinline, noclone)) unsigned int
+B::b1 () throw ()
+{
+ asm volatile ("" : : : "memory");
+ return 0;
+}
+
+struct C
+{
+ const A **c1;
+ void c2 (const A *, unsigned int);
+};
+
+__attribute__((noinline, noclone)) void
+C::c2 (const A *, unsigned int)
+{
+ asm volatile ("" : : : "memory");
+}
+
+struct D
+{
+ C *d1;
+};
+
+struct E
+{
+ int e1;
+ int e2;
+ D e3;
+};
+
+struct F
+{
+ virtual int f1 (const char * s, int n);
+};
+
+struct G
+{
+ F *g1;
+ bool g2;
+ G & g3 (const char * ws, int len)
+ {
+ if (__builtin_expect (!g2, true)
+ && __builtin_expect (this->g1->f1 (ws, len) != len, false))
+ g2 = true;
+ return *this;
+ }
+};
+
+struct H : public A
+{
+ const char *h1;
+ unsigned int h2;
+ bool h3;
+ const char *h4;
+ char h5;
+ char h6;
+ char h7[31];
+ bool h8;
+ H () : h1 (0), h2 (0), h4 (0), h5 (0), h6 (0), h8 (false) {}
+ void h9 (const D &) __attribute__((noinline, noclone));
+};
+
+void
+H::h9 (const D &)
+{
+ h3 = true;
+ __builtin_memset (h7, 0, sizeof (h7));
+ asm volatile ("" : : : "memory");
+};
+
+B b;
+
+inline const H *
+foo (const D &x)
+{
+ const unsigned int i = b.b1 ();
+ const A **j = x.d1->c1;
+ if (!j[i])
+ {
+ H *k = 0;
+ try
+ {
+ k = new H;
+ k->h9 (x);
+ }
+ catch (...)
+ {
+ }
+ x.d1->c2 (k, i);
+ }
+ return static_cast <const H *>(j[i]);
+}
+
+__attribute__((noinline, noclone)) int
+bar (char *x, unsigned long v, const char *y, int z, bool w)
+{
+ asm volatile ("" : : "r" (x), "r" (v), "r" (y) : "memory");
+ asm volatile ("" : : "r" (z), "r" (w) : "memory");
+ return 8;
+}
+
+__attribute__((noinline, noclone)) void
+baz (void *z, const char *g, unsigned int h, char s, E &e, char *n, char *c, int &l)
+{
+ asm volatile ("" : : "r" (z), "r" (g), "r" (h) : "memory");
+ asm volatile ("" : : "r" (s), "r" (&e), "r" (n) : "memory");
+ asm volatile ("" : : "r" (c), "r" (&l) : "memory");
+ if (n == c)
+ __builtin_abort ();
+ int i = 0;
+ asm ("" : "+r" (i));
+ if (i == 0)
+ __builtin_exit (0);
+}
+
+__attribute__((noinline, noclone)) G
+test (void *z, G s, E &x, char, long v)
+{
+ const D &d = x.e3;
+ const H *h = foo (d);
+ const char *q = h->h7;
+ const int f = x.e2;
+ const int i = 5 * sizeof (long);
+ char *c = static_cast <char *>(__builtin_alloca (i));
+ const int b = f & 74;
+ const bool e = (b != 64 && b != 8);
+ const unsigned long u = ((v > 0 || !e) ? (unsigned long) v : -(unsigned long) v);
+ int l = bar (c + i, u, q, f, e);
+ c += i - l;
+ if (h->h3)
+ {
+ char *c2 = static_cast <char *>(__builtin_alloca ((l + 1) * 2));
+ baz (z, h->h1, h->h2, h->h6, x, c2 + 2, c, l);
+ c = c2 + 2;
+ }
+ if (__builtin_expect (e, true))
+ {
+ }
+ else if ((f & 4096) && v)
+ {
+ {
+ const bool m = f & 176;
+ *--c = q[m];
+ *--c = q[1];
+ }
+ }
+ const int w = x.e1;
+ if (w > l)
+ {
+ char * c3 = static_cast <char *>(__builtin_alloca (w));
+ c = c3;
+ }
+ return s.g3 (c, l);
+}
+
+int
+main ()
+{
+ H h;
+ const A *j[1];
+ C c;
+ G g;
+ E e;
+ h.h9 (e.e3);
+ j[0] = &h;
+ c.c1 = j;
+ e.e3.d1 = &c;
+ test (0, g, e, 0, 0);
+ __builtin_abort ();
+}
diff --git a/gcc/testsuite/g++.dg/overload/new1.C b/gcc/testsuite/g++.dg/overload/new1.C
index f1b7328366f..bf2190b8e05 100644
--- a/gcc/testsuite/g++.dg/overload/new1.C
+++ b/gcc/testsuite/g++.dg/overload/new1.C
@@ -7,7 +7,7 @@ struct X{
};
-void f(X *x = new X); // { dg-error "" }
+void f(X *x = new X); // { dg-message "" }
void f(X *x = new X(4)); // { dg-error "" }
diff --git a/gcc/testsuite/g++.dg/parse/friend5.C b/gcc/testsuite/g++.dg/parse/friend5.C
index bf1e6bfa6dd..43f06129b20 100644
--- a/gcc/testsuite/g++.dg/parse/friend5.C
+++ b/gcc/testsuite/g++.dg/parse/friend5.C
@@ -2,6 +2,6 @@
extern "C" struct A
{
- friend void foo(int) {} // { dg-error "declaration" }
+ friend void foo(int) {} // { dg-message "declaration" }
friend void foo() {} // { dg-error "foo" "err" }
};
diff --git a/gcc/testsuite/g++.dg/parse/namespace-alias-1.C b/gcc/testsuite/g++.dg/parse/namespace-alias-1.C
index 627a95bc655..4b443359a61 100644
--- a/gcc/testsuite/g++.dg/parse/namespace-alias-1.C
+++ b/gcc/testsuite/g++.dg/parse/namespace-alias-1.C
@@ -2,6 +2,6 @@
namespace N
{
- namespace M = N; // { dg-error "previous declaration" }
+ namespace M = N; // { dg-message "previous declaration" }
namespace M {} // { dg-error "declaration of namespace" }
}
diff --git a/gcc/testsuite/g++.dg/parse/namespace10.C b/gcc/testsuite/g++.dg/parse/namespace10.C
index 9f93d1b6897..5e9541adb1d 100644
--- a/gcc/testsuite/g++.dg/parse/namespace10.C
+++ b/gcc/testsuite/g++.dg/parse/namespace10.C
@@ -1,6 +1,6 @@
// PR c++/16529
-namespace m {} // { dg-error "" }
+namespace m {} // { dg-message "" }
namespace n {
namespace m {}
diff --git a/gcc/testsuite/g++.dg/parse/redef2.C b/gcc/testsuite/g++.dg/parse/redef2.C
index 3ab3667d82d..85c50644557 100644
--- a/gcc/testsuite/g++.dg/parse/redef2.C
+++ b/gcc/testsuite/g++.dg/parse/redef2.C
@@ -1,6 +1,6 @@
// { dg-do compile }
-char * d [10]; // { dg-message "8: 'd' has a previous declaration as" }
+char * d [10]; // { dg-message "8: previous declaration as" }
char e [15][10];
int (*f)();
diff --git a/gcc/testsuite/g++.dg/plugin/selfassign.c b/gcc/testsuite/g++.dg/plugin/selfassign.c
index be5a204c901..041f25dce34 100644
--- a/gcc/testsuite/g++.dg/plugin/selfassign.c
+++ b/gcc/testsuite/g++.dg/plugin/selfassign.c
@@ -261,7 +261,7 @@ execute_warn_self_assign (void)
gimple_stmt_iterator gsi;
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
warn_self_assign (gsi_stmt (gsi));
diff --git a/gcc/testsuite/g++.dg/pr59445.C b/gcc/testsuite/g++.dg/pr59445.C
new file mode 100644
index 00000000000..99d6df25195
--- /dev/null
+++ b/gcc/testsuite/g++.dg/pr59445.C
@@ -0,0 +1,81 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+template <typename _Iterator> struct A;
+template <typename _Tp> struct A<_Tp *> {
+ typedef _Tp value_type;
+ typedef int difference_type;
+};
+template <typename _Compare> struct B {};
+template <typename _Compare> struct C {
+ _Compare _M_comp;
+ template <typename _Value, typename _Iterator>
+ int operator()(_Value &p1, _Iterator p2) {
+ return _M_comp(p1, *p2);
+ }
+};
+template <typename _Compare> C<_Compare> __val_comp_iter(B<_Compare>);
+template <typename _RandomAccessIterator, typename _Compare>
+void __unguarded_linear_insert(_RandomAccessIterator p1, _Compare p2) {
+ typename A<_RandomAccessIterator>::value_type a;
+ _RandomAccessIterator b = p1;
+ --b;
+ while (p2(a, b)) {
+ *p1 = 0;
+ p1 = b;
+ --b;
+ }
+}
+template <typename _RandomAccessIterator, typename _Compare>
+void __insertion_sort(_RandomAccessIterator, _Compare p2) {
+ for (_RandomAccessIterator c;; ++c)
+ __unguarded_linear_insert(c, __val_comp_iter(p2));
+}
+template <typename _RandomAccessIterator, typename _Distance, typename _Compare>
+void __chunk_insertion_sort(_RandomAccessIterator, _Distance, _Compare p3) {
+ _RandomAccessIterator d;
+ __insertion_sort(d, p3);
+}
+template <typename _RandomAccessIterator, typename _Pointer, typename _Compare>
+void __merge_sort_with_buffer(_RandomAccessIterator p1, _Pointer, _Compare p3) {
+ __chunk_insertion_sort(p1, 0, p3);
+}
+template <typename _RandomAccessIterator, typename _Pointer, typename _Distance,
+ typename _Compare>
+void __stable_sort_adaptive(_RandomAccessIterator, _Pointer, _Distance,
+ _Compare p4) {
+ _RandomAccessIterator e;
+ __merge_sort_with_buffer(e, 0, p4);
+}
+template <typename _RandomAccessIterator, typename _Compare>
+void __stable_sort(_RandomAccessIterator p1, _Compare p2) {
+ __stable_sort_adaptive(
+ p1, 0, typename A<_RandomAccessIterator>::difference_type(), p2);
+}
+template <typename _RandomAccessIterator, typename _Compare>
+void stable_sort(_RandomAccessIterator, _RandomAccessIterator p2, _Compare) {
+ B<_Compare> f;
+ __stable_sort(p2, f);
+}
+class D {
+public:
+ void m_fn1();
+};
+class F {
+ struct G {
+ D MFI;
+ int operator()(int p1, int p2) {
+ if (p1)
+ return 0;
+ if (p2)
+ return 1;
+ MFI.m_fn1();
+ }
+ };
+ void m_fn1(int &p1) const;
+};
+void F::m_fn1(int &p1) const {
+ int *g, *h;
+ stable_sort(h, g, G());
+}
+
diff --git a/gcc/testsuite/g++.dg/pubtypes.C b/gcc/testsuite/g++.dg/pubtypes.C
index 74bff595a83..8f89c4b6ba3 100644
--- a/gcc/testsuite/g++.dg/pubtypes.C
+++ b/gcc/testsuite/g++.dg/pubtypes.C
@@ -2,7 +2,7 @@
/* { dg-do compile { target *-*-darwin* } } */
/* { dg-options "-O0 -gdwarf-2 -dA -fno-eliminate-unused-debug-types" } */
/* { dg-final { scan-assembler "__debug_pubtypes" } } */
-/* { dg-final { scan-assembler "long+\[ \t\]+\(0x\)?\[0-9a-f]+\[ \t\n\]+\[#;@]+\[ \t\]+Length of Public Type Names Info" } } */
+/* { dg-final { scan-assembler "long+\[ \t\]+\(0x\)?\[0-9a-f]+\[ \t\n\]+\[#;@]+\[ \t\]+Pub Info Length" } } */
/* { dg-final { scan-assembler "\"empty\\\\0\"+\[ \t\]+\[#;@]+\[ \t\]+external name" } } */
/* { dg-final { scan-assembler "\"A\\\\0\"+\[ \t\]+\[#;@]+\[ \t\]+external name" } } */
/* { dg-final { scan-assembler "\"B\\\\0\"+\[ \t\]+\[#;@]+\[ \t\]+external name" } } */
diff --git a/gcc/testsuite/g++.dg/template/friend44.C b/gcc/testsuite/g++.dg/template/friend44.C
index 814fec1d5f7..6ff4db3a3b6 100644
--- a/gcc/testsuite/g++.dg/template/friend44.C
+++ b/gcc/testsuite/g++.dg/template/friend44.C
@@ -3,7 +3,7 @@
template<int> struct A
{
- friend int foo(); // { dg-error "14:new declaration" }
+ friend int foo(); // { dg-error "14:ambiguating new declaration" }
};
-void foo() { A<0> a; } // { dg-error "6:ambiguates old declaration" }
+void foo() { A<0> a; } // { dg-message "6:old declaration" }
diff --git a/gcc/testsuite/g++.dg/template/partial14.C b/gcc/testsuite/g++.dg/template/partial14.C
new file mode 100644
index 00000000000..3870164f0ec
--- /dev/null
+++ b/gcc/testsuite/g++.dg/template/partial14.C
@@ -0,0 +1,16 @@
+// PR c++/59044
+
+template <class T>
+class C {
+private:
+ template <T a, T b>
+ struct Implementation {};
+public:
+ typedef typename Implementation<0, 0>::Typedef Type;
+};
+
+template <class T>
+template <T b>
+struct C<T>::Implementation<0, b> { typedef void Typedef; };
+
+template class C<unsigned>;
diff --git a/gcc/testsuite/g++.dg/torture/pr59163.C b/gcc/testsuite/g++.dg/torture/pr59163.C
new file mode 100644
index 00000000000..2f9a9997078
--- /dev/null
+++ b/gcc/testsuite/g++.dg/torture/pr59163.C
@@ -0,0 +1,30 @@
+// PR target/59163
+// { dg-do run }
+
+struct A { float a[4]; };
+struct B { int b; A a; };
+
+__attribute__((noinline, noclone)) void
+bar (A &a)
+{
+ if (a.a[0] != 36.0f || a.a[1] != 42.0f || a.a[2] != 48.0f || a.a[3] != 54.0f)
+ __builtin_abort ();
+}
+
+__attribute__((noinline, noclone)) void
+foo (A &a)
+{
+ int i;
+ A c = a;
+ for (i = 0; i < 4; i++)
+ c.a[i] *= 6.0f;
+ a = c;
+ bar (a);
+}
+
+int
+main ()
+{
+ B b = { 5, { 6, 7, 8, 9 } };
+ foo (b.a);
+}
diff --git a/gcc/testsuite/g++.dg/tsan/aligned_vs_unaligned_race.C b/gcc/testsuite/g++.dg/tsan/aligned_vs_unaligned_race.C
new file mode 100644
index 00000000000..ccac527e1d4
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tsan/aligned_vs_unaligned_race.C
@@ -0,0 +1,31 @@
+/* { dg-do run { target { x86_64-*-linux* } } } */
+
+#include <pthread.h>
+#include <stdio.h>
+#include <stdint.h>
+
+uint64_t Global[2];
+
+void *Thread1(void *x) {
+ Global[1]++;
+ return NULL;
+}
+
+void *Thread2(void *x) {
+ char *p1 = reinterpret_cast<char *>(&Global[0]);
+ uint64_t *p4 = reinterpret_cast<uint64_t *>(p1 + 1);
+ (*p4)++;
+ return NULL;
+}
+
+int main() {
+ pthread_t t[2];
+ pthread_create(&t[0], NULL, Thread1, NULL);
+ pthread_create(&t[1], NULL, Thread2, NULL);
+ pthread_join(t[0], NULL);
+ pthread_join(t[1], NULL);
+ printf("Pass\n");
+ /* { dg-prune-output "ThreadSanitizer: data race.*(\n|\r\n|\r)" } */
+ /* { dg-output "Pass.*" } */
+ return 0;
+}
diff --git a/gcc/testsuite/g++.dg/tsan/atomic_free.C b/gcc/testsuite/g++.dg/tsan/atomic_free.C
new file mode 100644
index 00000000000..afaad777de6
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tsan/atomic_free.C
@@ -0,0 +1,21 @@
+/* { dg-do run } */
+/* { dg-shouldfail "tsan" } */
+
+#include <pthread.h>
+#include <unistd.h>
+
+void *Thread(void *a) {
+ __atomic_fetch_add((int*)a, 1, __ATOMIC_SEQ_CST);
+ return 0;
+}
+
+int main() {
+ int *a = new int(0);
+ pthread_t t;
+ pthread_create(&t, 0, Thread, a);
+ sleep(1);
+ delete a;
+ pthread_join(t, 0);
+}
+
+/* { dg-output "WARNING: ThreadSanitizer: data race.*(\n|\r\n|\r)" } */
diff --git a/gcc/testsuite/g++.dg/tsan/atomic_free2.C b/gcc/testsuite/g++.dg/tsan/atomic_free2.C
new file mode 100644
index 00000000000..7ccaa1a95d9
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tsan/atomic_free2.C
@@ -0,0 +1,21 @@
+/* { dg-do run } */
+/* { dg-shouldfail "tsan" } */
+
+#include <pthread.h>
+#include <unistd.h>
+
+void *Thread(void *a) {
+ sleep(1);
+ __atomic_fetch_add((int*)a, 1, __ATOMIC_SEQ_CST);
+ return 0;
+}
+
+int main() {
+ int *a = new int(0);
+ pthread_t t;
+ pthread_create(&t, 0, Thread, a);
+ delete a;
+ pthread_join(t, 0);
+}
+
+/* { dg-output "WARNING: ThreadSanitizer: heap-use-after-free.*(\n|\r\n|\r)" } */
diff --git a/gcc/testsuite/g++.dg/tsan/benign_race.C b/gcc/testsuite/g++.dg/tsan/benign_race.C
new file mode 100644
index 00000000000..d67b31b2805
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tsan/benign_race.C
@@ -0,0 +1,40 @@
+/* { dg-do run } */
+
+#include <pthread.h>
+#include <stdio.h>
+#include <unistd.h>
+
+int Global;
+int WTFGlobal;
+
+extern "C" {
+void AnnotateBenignRaceSized(const char *f, int l,
+ void *mem, unsigned int size, const char *desc);
+void WTFAnnotateBenignRaceSized(const char *f, int l,
+ void *mem, unsigned int size,
+ const char *desc);
+}
+
+
+void *Thread(void *x) {
+ Global = 42;
+ WTFGlobal = 142;
+ return 0;
+}
+
+int main() {
+ AnnotateBenignRaceSized(__FILE__, __LINE__,
+ &Global, sizeof(Global), "Race on Global");
+ WTFAnnotateBenignRaceSized(__FILE__, __LINE__,
+ &WTFGlobal, sizeof(WTFGlobal),
+ "Race on WTFGlobal");
+ pthread_t t;
+ pthread_create(&t, 0, Thread, 0);
+ sleep(1);
+ Global = 43;
+ WTFGlobal = 143;
+ pthread_join(t, 0);
+ printf("OK\n");
+}
+
+/* { dg-prune-output "WARNING: ThreadSanitizer: data race.*(\n|\r\n|\r)" } */
diff --git a/gcc/testsuite/g++.dg/tsan/cond_race.C b/gcc/testsuite/g++.dg/tsan/cond_race.C
new file mode 100644
index 00000000000..d28912f2023
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tsan/cond_race.C
@@ -0,0 +1,37 @@
+/* { dg-do run } */
+/* { dg-shouldfail "tsan" } */
+/* { dg-output "ThreadSanitizer: data race.*" } */
+/* { dg-output "pthread_cond_signal.*" } */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <pthread.h>
+
+struct Ctx {
+ pthread_mutex_t m;
+ pthread_cond_t c;
+ bool done;
+};
+
+void *thr(void *p) {
+ Ctx *c = (Ctx*)p;
+ pthread_mutex_lock(&c->m);
+ c->done = true;
+ pthread_mutex_unlock(&c->m);
+ pthread_cond_signal(&c->c);
+ return 0;
+}
+
+int main() {
+ Ctx *c = new Ctx();
+ pthread_mutex_init(&c->m, 0);
+ pthread_cond_init(&c->c, 0);
+ pthread_t th;
+ pthread_create(&th, 0, thr, c);
+ pthread_mutex_lock(&c->m);
+ while (!c->done)
+ pthread_cond_wait(&c->c, &c->m);
+ pthread_mutex_unlock(&c->m);
+ delete c;
+ pthread_join(th, 0);
+}
diff --git a/gcc/testsuite/g++.dg/tsan/default_options.C b/gcc/testsuite/g++.dg/tsan/default_options.C
new file mode 100644
index 00000000000..b688abff4a9
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tsan/default_options.C
@@ -0,0 +1,34 @@
+/* { dg-do run } */
+/* { dg-shouldfail "tsan" } */
+
+#include <pthread.h>
+#include <stdio.h>
+
+extern "C" const char *__tsan_default_options() {
+ return "report_bugs=0";
+}
+
+int Global;
+
+void *Thread1(void *x) {
+ Global = 42;
+ return NULL;
+}
+
+void *Thread2(void *x) {
+ Global = 43;
+ return NULL;
+}
+
+int main() {
+ pthread_t t[2];
+ pthread_create(&t[0], NULL, Thread1, NULL);
+ pthread_create(&t[1], NULL, Thread2, NULL);
+ pthread_join(t[0], NULL);
+ pthread_join(t[1], NULL);
+ fprintf(stderr, "DONE\n");
+ return 0;
+}
+
+/* { dg-prune-output "WARNING: ThreadSanitizer: data race.*(\n|\r\n|\r)" } */
+/* { dg-output "DONE" } */
diff --git a/gcc/testsuite/g++.dg/tsan/fd_close_norace.C b/gcc/testsuite/g++.dg/tsan/fd_close_norace.C
new file mode 100644
index 00000000000..a31428a3456
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tsan/fd_close_norace.C
@@ -0,0 +1,32 @@
+/* { dg-do run } */
+
+#include <pthread.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+void *Thread1(void *x) {
+ int f = open("/dev/random", O_RDONLY);
+ close(f);
+ return NULL;
+}
+
+void *Thread2(void *x) {
+ sleep(1);
+ int f = open("/dev/random", O_RDONLY);
+ close(f);
+ return NULL;
+}
+
+int main() {
+ pthread_t t[2];
+ pthread_create(&t[0], NULL, Thread1, NULL);
+ pthread_create(&t[1], NULL, Thread2, NULL);
+ pthread_join(t[0], NULL);
+ pthread_join(t[1], NULL);
+ printf("OK\n");
+}
+
+/* { dg-prune-output "WARNING: ThreadSanitizer: data race.*(\n|\r\n|\r)" } */
diff --git a/gcc/testsuite/g++.dg/tsan/fd_close_norace2.C b/gcc/testsuite/g++.dg/tsan/fd_close_norace2.C
new file mode 100644
index 00000000000..f2d394c2896
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tsan/fd_close_norace2.C
@@ -0,0 +1,31 @@
+/* { dg-do run } */
+
+#include <pthread.h>
+#include <stdio.h>
+#include <unistd.h>
+
+int pipes[2];
+
+void *Thread(void *x) {
+  // wait for shutdown signal
+ while (read(pipes[0], &x, 1) != 1) {
+ }
+ close(pipes[0]);
+ close(pipes[1]);
+ return 0;
+}
+
+int main() {
+ if (pipe(pipes))
+ return 1;
+ pthread_t t;
+ pthread_create(&t, 0, Thread, 0);
+ // send shutdown signal
+ while (write(pipes[1], &t, 1) != 1) {
+ }
+ pthread_join(t, 0);
+ printf("OK\n");
+}
+
+/* { dg-prune-output "WARNING: ThreadSanitizer: data race.*(\n|\r\n|\r)" } */
+/* { dg-output "OK" } */
diff --git a/gcc/testsuite/g++.dg/tsan/tsan.exp b/gcc/testsuite/g++.dg/tsan/tsan.exp
new file mode 100644
index 00000000000..9bcf6cc8a1d
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tsan/tsan.exp
@@ -0,0 +1,47 @@
+# Copyright (C) 2013 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# GCC testsuite that uses the `dg.exp' driver.
+
+# Load support procs.
+load_lib g++-dg.exp
+load_lib tsan-dg.exp
+load_lib torture-options.exp
+
+if ![check_effective_target_fthread_sanitizer] {
+ return
+}
+
+# Initialize `dg'.
+dg-init
+torture-init
+set-torture-options [list \
+ { -O0 } \
+ { -O2 } ]
+
+if [tsan_init] {
+
+# Main loop.
+gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.C $srcdir/c-c++-common/tsan/*.c]] ""
+
+}
+
+# All done.
+tsan_finish
+torture-finish
+dg-finish
diff --git a/gcc/testsuite/g++.dg/ubsan/pr59415.C b/gcc/testsuite/g++.dg/ubsan/pr59415.C
new file mode 100644
index 00000000000..4c373f7c927
--- /dev/null
+++ b/gcc/testsuite/g++.dg/ubsan/pr59415.C
@@ -0,0 +1,8 @@
+/* { dg-do compile } */
+/* { dg-options "-fsanitize=null -Wall -fvtable-verify=std" } */
+
+void
+foo (void)
+{
+ throw 0;
+}
diff --git a/gcc/testsuite/g++.dg/ubsan/pr59437.C b/gcc/testsuite/g++.dg/ubsan/pr59437.C
new file mode 100644
index 00000000000..0e77ccde46e
--- /dev/null
+++ b/gcc/testsuite/g++.dg/ubsan/pr59437.C
@@ -0,0 +1,24 @@
+// { dg-do compile }
+// { dg-options "-fsanitize=null -fvtable-verify=std" }
+// { dg-skip-if "" { *-*-* } { "-flto" } { "" } }
+
+template < typename T > struct A
+{
+ T foo ();
+};
+template < typename T > struct C: virtual public A < T >
+{
+ C & operator<< (C & (C &));
+};
+template < typename T >
+C < T > &endl (C < int > &c)
+{
+ c.foo ();
+ return c;
+}
+C < int > cout;
+void
+fn ()
+{
+ cout << endl;
+}
diff --git a/gcc/testsuite/g++.dg/warn/pr15774-1.C b/gcc/testsuite/g++.dg/warn/pr15774-1.C
index 116ec835d90..6148ffecb5b 100644
--- a/gcc/testsuite/g++.dg/warn/pr15774-1.C
+++ b/gcc/testsuite/g++.dg/warn/pr15774-1.C
@@ -1,6 +1,6 @@
// { dg-do compile { target { { i?86-*-* x86_64-*-* } && ia32 } } }
// Test that an new declartion with different attributes then old one fail.
-extern void foo (int); // { dg-error "ambiguates old declaration" }
+extern void foo (int); // { dg-message "old declaration" }
void
bar (void)
diff --git a/gcc/testsuite/g++.old-deja/g++.brendan/crash42.C b/gcc/testsuite/g++.old-deja/g++.brendan/crash42.C
index 8777ef82cf0..4b34a520f66 100644
--- a/gcc/testsuite/g++.old-deja/g++.brendan/crash42.C
+++ b/gcc/testsuite/g++.old-deja/g++.brendan/crash42.C
@@ -1,6 +1,6 @@
// { dg-do assemble }
// GROUPS passed old-abort
-int fn();// { dg-error "" } ambiguates.*
+int fn();// { dg-message "" } ambiguates.*
int x;
int& fn() {// { dg-error "" } new decl.*
return x;}
diff --git a/gcc/testsuite/g++.old-deja/g++.brendan/crash52.C b/gcc/testsuite/g++.old-deja/g++.brendan/crash52.C
index 6db818aa102..1318ea6e9fe 100644
--- a/gcc/testsuite/g++.old-deja/g++.brendan/crash52.C
+++ b/gcc/testsuite/g++.old-deja/g++.brendan/crash52.C
@@ -5,9 +5,9 @@
class A {
public:
- friend A f(A &a);// { dg-error "ambiguates" }
+ friend A f(A &a);// { dg-message "old declaration" }
};
-A &f(A &a) {// { dg-error "new decl" }
+A &f(A &a) {// { dg-error "new declaration" }
std::cout << "Blah\n";
} // { dg-warning "no return statement" }
diff --git a/gcc/testsuite/g++.old-deja/g++.brendan/crash55.C b/gcc/testsuite/g++.old-deja/g++.brendan/crash55.C
index 8295962411a..3faa538253b 100644
--- a/gcc/testsuite/g++.old-deja/g++.brendan/crash55.C
+++ b/gcc/testsuite/g++.old-deja/g++.brendan/crash55.C
@@ -1,6 +1,6 @@
// { dg-do compile }
// GROUPS passed old-abort
- extern int f(int); // { dg-error "ambiguates" }
+ extern int f(int); // { dg-message "old declaration" }
int& f(int x) // { dg-error "new declaration" }
{
diff --git a/gcc/testsuite/g++.old-deja/g++.jason/overload21.C b/gcc/testsuite/g++.old-deja/g++.jason/overload21.C
index 229be93da7c..72397930367 100644
--- a/gcc/testsuite/g++.old-deja/g++.jason/overload21.C
+++ b/gcc/testsuite/g++.old-deja/g++.jason/overload21.C
@@ -1,7 +1,7 @@
// { dg-do assemble }
struct X {
void f (int = 4, char = 'r'); // { dg-error "previous specification" }
- void g (int = 4, char = 'r'); // { dg-error "previous specification" }
+ void g (int = 4, char = 'r'); // { dg-message "previous specification" }
};
void
diff --git a/gcc/testsuite/g++.old-deja/g++.jason/overload5.C b/gcc/testsuite/g++.old-deja/g++.jason/overload5.C
index 9280c9d5e0d..c1d53f3cee0 100644
--- a/gcc/testsuite/g++.old-deja/g++.jason/overload5.C
+++ b/gcc/testsuite/g++.old-deja/g++.jason/overload5.C
@@ -1,5 +1,5 @@
// { dg-do assemble }
// Testcase for simple overloading resolution.
-int foo (); // { dg-error "" }
+int foo (); // { dg-message "" }
void foo (); // { dg-error "" } disallowed overload
diff --git a/gcc/testsuite/g++.old-deja/g++.jason/redecl1.C b/gcc/testsuite/g++.old-deja/g++.jason/redecl1.C
index 6fda9d27ac9..172a4410490 100644
--- a/gcc/testsuite/g++.old-deja/g++.jason/redecl1.C
+++ b/gcc/testsuite/g++.old-deja/g++.jason/redecl1.C
@@ -3,7 +3,7 @@ class A
{
public:
A (const A& ccref);
- friend A const re (const A& v1); // { dg-error "ambiguates" }
+ friend A const re (const A& v1); // { dg-message "old declaration" }
};
A // const
diff --git a/gcc/testsuite/g++.old-deja/g++.law/arm8.C b/gcc/testsuite/g++.old-deja/g++.law/arm8.C
index 8b82af9514f..dfc3b30d3ac 100644
--- a/gcc/testsuite/g++.old-deja/g++.law/arm8.C
+++ b/gcc/testsuite/g++.old-deja/g++.law/arm8.C
@@ -7,7 +7,7 @@
// Date: Tue, 16 Mar 93 12:05:24 +0100
struct K {
- void f( int *p = 0); // { dg-error "" } previous specification
+ void f( int *p = 0); // { dg-message "" } previous specification
};
extern int * q;
diff --git a/gcc/testsuite/g++.old-deja/g++.other/main1.C b/gcc/testsuite/g++.old-deja/g++.other/main1.C
index c5cfe8b9eb0..de689f60fc1 100644
--- a/gcc/testsuite/g++.old-deja/g++.other/main1.C
+++ b/gcc/testsuite/g++.old-deja/g++.other/main1.C
@@ -1,12 +1,12 @@
// { dg-do compile }
-int main() // { dg-error "previous declaration" }
+int main() // { dg-message "previous declaration" }
{
return 0;
}
-int main(int, const char**) // { dg-error "conflicts" }
+int main(int, const char**) // { dg-error "conflicting" }
{
return 0;
}
diff --git a/gcc/testsuite/gcc.c-torture/compile/pr39834.c b/gcc/testsuite/gcc.c-torture/compile/pr39834.c
index ecc3977a1f0..c0ad8fd3b94 100644
--- a/gcc/testsuite/gcc.c-torture/compile/pr39834.c
+++ b/gcc/testsuite/gcc.c-torture/compile/pr39834.c
@@ -1,4 +1,4 @@
-/* { dg-options "-O1 -Winline" } */
+/* { dg-options "-Winline" } */
void quit_mined ();
void bottom_line ();
typedef enum { False, True } FLAG;
diff --git a/gcc/testsuite/gcc.c-torture/compile/pr48929.c b/gcc/testsuite/gcc.c-torture/compile/pr48929.c
index f085dc645ae..b28ad9d000c 100644
--- a/gcc/testsuite/gcc.c-torture/compile/pr48929.c
+++ b/gcc/testsuite/gcc.c-torture/compile/pr48929.c
@@ -1,4 +1,4 @@
-/*{ dg-options "-O -findirect-inlining" }*/
+/*{ dg-options "-findirect-inlining" }*/
void bar ();
static void
diff --git a/gcc/testsuite/gcc.c-torture/compile/pr55569.c b/gcc/testsuite/gcc.c-torture/compile/pr55569.c
index 2a53c803957..cffbcfc7521 100644
--- a/gcc/testsuite/gcc.c-torture/compile/pr55569.c
+++ b/gcc/testsuite/gcc.c-torture/compile/pr55569.c
@@ -1,4 +1,4 @@
-/* { dg-options "-O1 -ftree-vectorize" } */
+/* { dg-options "-ftree-vectorize" } */
int *bar (void);
void
diff --git a/gcc/testsuite/gcc.c-torture/compile/pr59134.c b/gcc/testsuite/gcc.c-torture/compile/pr59134.c
new file mode 100644
index 00000000000..5268805ec81
--- /dev/null
+++ b/gcc/testsuite/gcc.c-torture/compile/pr59134.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+
+extern void* malloc(__SIZE_TYPE__) __attribute__((malloc));
+
+typedef struct {
+ char pad;
+ int arr[0];
+} __attribute__((packed)) str;
+
+str *
+foo (void)
+{
+ str *s = malloc (sizeof (str) + sizeof (int));
+ s->arr[0] = 0x12345678;
+ return s;
+}
diff --git a/gcc/testsuite/gcc.c-torture/compile/pr59386.c b/gcc/testsuite/gcc.c-torture/compile/pr59386.c
new file mode 100644
index 00000000000..b014f707ce7
--- /dev/null
+++ b/gcc/testsuite/gcc.c-torture/compile/pr59386.c
@@ -0,0 +1,24 @@
+/* PR tree-optimization/59386 */
+
+struct S { int s; };
+struct T { int t; struct S u; } c;
+int b;
+
+struct S
+foo ()
+{
+ struct T d;
+ if (b)
+ while (c.t)
+ ;
+ else
+ return d.u;
+}
+
+struct S
+bar ()
+{
+ struct T a;
+ a.u = foo ();
+ return a.u;
+}
diff --git a/gcc/testsuite/gcc.c-torture/compile/pr59417.c b/gcc/testsuite/gcc.c-torture/compile/pr59417.c
new file mode 100644
index 00000000000..227c5d84105
--- /dev/null
+++ b/gcc/testsuite/gcc.c-torture/compile/pr59417.c
@@ -0,0 +1,39 @@
+/* PR tree-optimization/59417 */
+
+int a, b, d;
+short c;
+
+void
+f (void)
+{
+ if (b)
+ {
+ int *e;
+
+ if (d)
+ {
+ for (; b; a++)
+ lbl1:
+ d = 0;
+
+ for (; d <= 1; d++)
+ {
+ int **q = &e;
+ for (**q = 0; **q <= 0; **q++)
+ d = 0;
+ }
+ }
+ }
+
+ else
+ {
+ int t;
+ for (c = 0; c < 77; c++)
+ for (c = 0; c < 46; c++);
+ for (; t <= 0; t++)
+ lbl2:
+ ;
+ goto lbl1;
+ }
+ goto lbl2;
+}
diff --git a/gcc/testsuite/gcc.c-torture/compile/sra-1.c b/gcc/testsuite/gcc.c-torture/compile/sra-1.c
index 06dcf1002be..59213039fc2 100644
--- a/gcc/testsuite/gcc.c-torture/compile/sra-1.c
+++ b/gcc/testsuite/gcc.c-torture/compile/sra-1.c
@@ -1,5 +1,3 @@
-/* { dg-do compile } */
-/* { dg-options "-O1" } */
/* Let gimple verifier check what SRA does to unions and single-field
strucutres . */
diff --git a/gcc/testsuite/gcc.c-torture/execute/pr58726.c b/gcc/testsuite/gcc.c-torture/execute/pr58726.c
new file mode 100644
index 00000000000..9fa8b6953f1
--- /dev/null
+++ b/gcc/testsuite/gcc.c-torture/execute/pr58726.c
@@ -0,0 +1,26 @@
+/* PR rtl-optimization/58726 */
+
+int a, c;
+union { int f1; int f2 : 1; } b;
+
+short
+foo (short p)
+{
+ return p < 0 ? p : a;
+}
+
+int
+main ()
+{
+ if (sizeof (short) * __CHAR_BIT__ != 16
+ || sizeof (int) * __CHAR_BIT__ != 32)
+ return 0;
+ b.f1 = 56374;
+ unsigned short d;
+ int e = b.f2;
+ d = e == 0 ? b.f1 : 0;
+ c = foo (d);
+ if (c != (short) 56374)
+ __builtin_abort ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.c-torture/execute/pr59388.c b/gcc/testsuite/gcc.c-torture/execute/pr59388.c
new file mode 100644
index 00000000000..de3648a003e
--- /dev/null
+++ b/gcc/testsuite/gcc.c-torture/execute/pr59388.c
@@ -0,0 +1,11 @@
+/* PR tree-optimization/59388 */
+
+int a;
+struct S { unsigned int f:1; } b;
+
+int
+main ()
+{
+ a = (0 < b.f) | b.f;
+ return a;
+}
diff --git a/gcc/testsuite/gcc.dg/c11-align-6.c b/gcc/testsuite/gcc.dg/c11-align-6.c
new file mode 100644
index 00000000000..7ea994da400
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/c11-align-6.c
@@ -0,0 +1,40 @@
+/* Test C11 _Alignof returning minimum alignment for a type. PR
+ 52023. */
+/* { dg-do run } */
+/* { dg-options "-std=c11" } */
+
+extern void abort (void);
+extern void exit (int);
+
+#define CHECK_ALIGN(TYPE) \
+ do \
+ { \
+ struct { char c; TYPE v; } x; \
+ if (_Alignof (TYPE) > __alignof__ (x.v)) \
+ abort (); \
+ } \
+ while (0)
+
+int
+main (void)
+{
+ CHECK_ALIGN (_Bool);
+ CHECK_ALIGN (char);
+ CHECK_ALIGN (signed char);
+ CHECK_ALIGN (unsigned char);
+ CHECK_ALIGN (signed short);
+ CHECK_ALIGN (unsigned short);
+ CHECK_ALIGN (signed int);
+ CHECK_ALIGN (unsigned int);
+ CHECK_ALIGN (signed long);
+ CHECK_ALIGN (unsigned long);
+ CHECK_ALIGN (signed long long);
+ CHECK_ALIGN (unsigned long long);
+ CHECK_ALIGN (float);
+ CHECK_ALIGN (double);
+ CHECK_ALIGN (long double);
+ CHECK_ALIGN (_Complex float);
+ CHECK_ALIGN (_Complex double);
+ CHECK_ALIGN (_Complex long double);
+ exit (0);
+}
diff --git a/gcc/testsuite/gcc.dg/cpp/expr-overflow-1.c b/gcc/testsuite/gcc.dg/cpp/expr-overflow-1.c
new file mode 100644
index 00000000000..8a67aaa6d46
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/cpp/expr-overflow-1.c
@@ -0,0 +1,44 @@
+/* Test overflow in preprocessor arithmetic. PR 55715. */
+/* { dg-do preprocess } */
+/* { dg-options "-std=c99" } */
+
+#include <stdint.h>
+
+#if -1 - INTMAX_MIN
+#endif
+
+#if 0 - INTMAX_MIN /* { dg-warning "overflow" } */
+#endif
+
+#if 1 * INTMAX_MIN
+#endif
+
+#if -1 * INTMAX_MIN /* { dg-warning "overflow" } */
+#endif
+
+#if 0 * INTMAX_MIN
+#endif
+
+#if -INTMAX_MIN /* { dg-warning "overflow" } */
+#endif
+
+#if +INTMAX_MIN
+#endif
+
+#if INTMAX_MIN / 1
+#endif
+
+#if INTMAX_MIN / -1 /* { dg-warning "overflow" } */
+#endif
+
+#if UINTMAX_MAX * UINTMAX_MAX
+#endif
+
+#if UINTMAX_MAX / -1
+#endif
+
+#if UINTMAX_MAX + INTMAX_MAX
+#endif
+
+#if UINTMAX_MAX - INTMAX_MIN
+#endif
diff --git a/gcc/testsuite/gcc.dg/ipa/ipa-pta-14.c b/gcc/testsuite/gcc.dg/ipa/ipa-pta-14.c
index ed59cbfda00..b62b08ff03a 100644
--- a/gcc/testsuite/gcc.dg/ipa/ipa-pta-14.c
+++ b/gcc/testsuite/gcc.dg/ipa/ipa-pta-14.c
@@ -21,9 +21,8 @@ int main()
void *p;
a.p = (void *)&c;
p = foo(&a, &a);
- /* { dg-final { scan-ipa-dump "foo.result = { NULL a\[^ \]* c\[^ \]* }" "pta" { xfail *-*-* } } } */
- /* { dg-final { scan-ipa-dump "foo.result = { NULL a\[^ \]* a\[^ \]* c\[^ \]* }" "pta" { target { ! keeps_null_pointer_checks } } } } */
- /* { dg-final { scan-ipa-dump "foo.result = { NONLOCAL a\[^ \]* a\[^ \]* c\[^ \]* }" "pta" { target { keeps_null_pointer_checks } } } } */
+ /* { dg-final { scan-ipa-dump "foo.result = { NULL a\[^ \]* c\[^ \]* }" "pta" { target { ! keeps_null_pointer_checks } } } } */
+ /* { dg-final { scan-ipa-dump "foo.result = { NONLOCAL a\[^ \]* c\[^ \]* }" "pta" { target { keeps_null_pointer_checks } } } } */
((struct X *)p)->p = (void *)0;
if (a.p != (void *)0)
abort ();
diff --git a/gcc/testsuite/gcc.dg/macro-fusion-1.c b/gcc/testsuite/gcc.dg/macro-fusion-1.c
index 4ac98668bdc..b2d11423acf 100644
--- a/gcc/testsuite/gcc.dg/macro-fusion-1.c
+++ b/gcc/testsuite/gcc.dg/macro-fusion-1.c
@@ -1,6 +1,5 @@
/* { dg-do compile { target i?86-*-* x86_64-*-* } } */
/* { dg-options "-O2 -mtune=corei7 -fdump-rtl-sched2" } */
-/* { dg-final { scan-rtl-dump-not "compare.*insn.*jump_insn.*jump_insn" "sched2" } } */
int a[100];
@@ -11,3 +10,6 @@ double bar (double sum)
sum += (0.5 + (a[i%100] - 128));
return sum;
}
+
+/* { dg-final { scan-rtl-dump-not "compare.*insn.*jump_insn.*jump_insn" "sched2" } } */
+/* { dg-final { cleanup-rtl-dump "sched2" } } */
diff --git a/gcc/testsuite/gcc.dg/macro-fusion-2.c b/gcc/testsuite/gcc.dg/macro-fusion-2.c
index 638350d9926..ad7489a73ad 100644
--- a/gcc/testsuite/gcc.dg/macro-fusion-2.c
+++ b/gcc/testsuite/gcc.dg/macro-fusion-2.c
@@ -1,6 +1,5 @@
/* { dg-do compile { target i?86-*-* x86_64-*-* } } */
/* { dg-options "-O2 -mtune=corei7-avx -fdump-rtl-sched2" } */
-/* { dg-final { scan-rtl-dump-not "compare.*insn.*jump_insn.*jump_insn" "sched2" } } */
int a[100];
@@ -14,3 +13,6 @@ double bar (double sum)
}
return sum;
}
+
+/* { dg-final { scan-rtl-dump-not "compare.*insn.*jump_insn.*jump_insn" "sched2" } } */
+/* { dg-final { cleanup-rtl-dump "sched2" } } */
diff --git a/gcc/testsuite/gcc.dg/plugin/selfassign.c b/gcc/testsuite/gcc.dg/plugin/selfassign.c
index be5a204c901..041f25dce34 100644
--- a/gcc/testsuite/gcc.dg/plugin/selfassign.c
+++ b/gcc/testsuite/gcc.dg/plugin/selfassign.c
@@ -261,7 +261,7 @@ execute_warn_self_assign (void)
gimple_stmt_iterator gsi;
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
warn_self_assign (gsi_stmt (gsi));
diff --git a/gcc/testsuite/gcc.dg/pr23623.c b/gcc/testsuite/gcc.dg/pr23623.c
new file mode 100644
index 00000000000..c844f945e1a
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr23623.c
@@ -0,0 +1,48 @@
+/* { dg-do compile } */
+/* { dg-options "-fstrict-volatile-bitfields -fdump-rtl-final" } */
+
+/* With -fstrict-volatile-bitfields, the volatile accesses to bf2.b
+ and bf3.b must do unsigned int reads/writes. The non-volatile
+ accesses to bf1.b are not so constrained. */
+
+extern struct
+{
+ unsigned int b : 1;
+ unsigned int : 31;
+} bf1;
+
+extern volatile struct
+{
+ unsigned int b : 1;
+ unsigned int : 31;
+} bf2;
+
+extern struct
+{
+ volatile unsigned int b : 1;
+ volatile unsigned int : 31;
+} bf3;
+
+void writeb(void)
+{
+ bf1.b = 1;
+ bf2.b = 1; /* volatile read + volatile write */
+ bf3.b = 1; /* volatile read + volatile write */
+}
+
+extern unsigned int x1, x2, x3;
+
+void readb(void)
+{
+ x1 = bf1.b;
+ x2 = bf2.b; /* volatile write */
+ x3 = bf3.b; /* volatile write */
+}
+
+/* There should be 6 volatile MEMs total, but scan-rtl-dump-times counts
+ the number of match variables and not the number of matches. Since
+ the parenthesized subexpression in the regexp introduces an extra match
+ variable, we need to give a count of 12 instead of 6 here. */
+/* { dg-final { scan-rtl-dump-times "mem/v(/.)*:SI" 12 "final" } } */
+/* { dg-final { cleanup-rtl-dump "final" } } */
+
diff --git a/gcc/testsuite/gcc.dg/pr38984.c b/gcc/testsuite/gcc.dg/pr38984.c
index 0c031805ea8..3ccb0e492fc 100644
--- a/gcc/testsuite/gcc.dg/pr38984.c
+++ b/gcc/testsuite/gcc.dg/pr38984.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -fno-delete-null-pointer-checks -fdump-tree-optimized -fno-isolate-erroneous-paths" }
+/* { dg-options "-O2 -fno-delete-null-pointer-checks -fdump-tree-optimized -fno-isolate-erroneous-paths-dereference" }
* */
int f(int *p)
diff --git a/gcc/testsuite/gcc.dg/pr41488.c b/gcc/testsuite/gcc.dg/pr41488.c
new file mode 100644
index 00000000000..c4bc42832b3
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr41488.c
@@ -0,0 +1,18 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-sccp-scev" } */
+
+struct struct_t
+{
+ int* data;
+};
+
+void foo (struct struct_t* sp, int start, int end)
+{
+ int i;
+
+ for (i = 0; i+start < end; i++)
+ sp->data[i+start] = 0;
+}
+
+/* { dg-final { scan-tree-dump-times "Simplify PEELED_CHREC into POLYNOMIAL_CHREC" 1 "sccp" } } */
+/* { dg-final { cleanup-tree-dump "sccp" } } */
diff --git a/gcc/testsuite/gcc.dg/pr48784-1.c b/gcc/testsuite/gcc.dg/pr48784-1.c
new file mode 100644
index 00000000000..bbcad9b18ed
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr48784-1.c
@@ -0,0 +1,18 @@
+/* { dg-do run } */
+/* { dg-options "-fstrict-volatile-bitfields" } */
+
+extern void abort (void);
+
+#pragma pack(1)
+volatile struct S0 {
+ signed a : 7;
+ unsigned b : 28; /* b can't be fetched with an aligned 32-bit access, */
+ /* but it certainly can be fetched with an unaligned access */
+} g = {0,0xfffffff};
+
+int main() {
+ unsigned b = g.b;
+ if (b != 0xfffffff)
+ abort ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/pr48784-2.c b/gcc/testsuite/gcc.dg/pr48784-2.c
new file mode 100644
index 00000000000..6d532631294
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr48784-2.c
@@ -0,0 +1,18 @@
+/* { dg-do run } */
+/* { dg-options "-fno-strict-volatile-bitfields" } */
+
+extern void abort (void);
+
+#pragma pack(1)
+volatile struct S0 {
+ signed a : 7;
+ unsigned b : 28; /* b can't be fetched with an aligned 32-bit access, */
+ /* but it certainly can be fetched with an unaligned access */
+} g = {0,0xfffffff};
+
+int main() {
+ unsigned b = g.b;
+ if (b != 0xfffffff)
+ abort ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/pr54113.c b/gcc/testsuite/gcc.dg/pr54113.c
new file mode 100644
index 00000000000..4c68099b364
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr54113.c
@@ -0,0 +1,5 @@
+/* { dg-do compile } */
+/* { dg-options "-Wmissing-prototypes" } */
+
+inline int foo (void) { return 42; } /* { dg-bogus "no previous prototype" } */
+extern int foo(void);
diff --git a/gcc/testsuite/gcc.dg/pr56341-1.c b/gcc/testsuite/gcc.dg/pr56341-1.c
new file mode 100644
index 00000000000..91cf80ba286
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr56341-1.c
@@ -0,0 +1,40 @@
+/* { dg-do run } */
+/* { dg-options "-fstrict-volatile-bitfields" } */
+
+extern void abort (void);
+
+struct test0
+{
+ unsigned char b1[2];
+} __attribute__((packed, aligned(2)));
+
+struct test1
+{
+ volatile unsigned long a1;
+ unsigned char b1[4];
+} __attribute__((packed, aligned(2)));
+
+struct test2
+{
+ struct test0 t0;
+ struct test1 t1;
+ struct test0 t2;
+} __attribute__((packed, aligned(2)));
+
+struct test2 xx;
+struct test2 *x1 = &xx;
+
+#define MAGIC 0x12345678
+
+void test0 (struct test2* x1)
+{
+ x1->t1.a1 = MAGIC;
+}
+
+int main()
+{
+ test0 (x1);
+ if (xx.t1.a1 != MAGIC)
+ abort ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/pr56341-2.c b/gcc/testsuite/gcc.dg/pr56341-2.c
new file mode 100644
index 00000000000..e6f6569f089
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr56341-2.c
@@ -0,0 +1,40 @@
+/* { dg-do run } */
+/* { dg-options "-fno-strict-volatile-bitfields" } */
+
+extern void abort (void);
+
+struct test0
+{
+ unsigned char b1[2];
+} __attribute__((packed, aligned(2)));
+
+struct test1
+{
+ volatile unsigned long a1;
+ unsigned char b1[4];
+} __attribute__((packed, aligned(2)));
+
+struct test2
+{
+ struct test0 t0;
+ struct test1 t1;
+ struct test0 t2;
+} __attribute__((packed, aligned(2)));
+
+struct test2 xx;
+struct test2 *x1 = &xx;
+
+#define MAGIC 0x12345678
+
+void test0 (struct test2* x1)
+{
+ x1->t1.a1 = MAGIC;
+}
+
+int main()
+{
+ test0 (x1);
+ if (xx.t1.a1 != MAGIC)
+ abort ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/pr56997-1.c b/gcc/testsuite/gcc.dg/pr56997-1.c
new file mode 100644
index 00000000000..42458a106c6
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr56997-1.c
@@ -0,0 +1,44 @@
+/* Test volatile access to unaligned field. */
+/* { dg-do run } */
+/* { dg-options "-fstrict-volatile-bitfields" } */
+
+extern void abort (void);
+
+#define test_type unsigned short
+#define MAGIC (unsigned short)0x102u
+
+typedef struct s{
+ unsigned char Prefix;
+ test_type Type;
+}__attribute((__packed__)) ss;
+
+volatile ss v;
+ss g;
+
+void __attribute__((noinline))
+foo (test_type u)
+{
+ v.Type = u;
+}
+
+test_type __attribute__((noinline))
+bar (void)
+{
+ return v.Type;
+}
+
+int main()
+{
+ test_type temp;
+ foo(MAGIC);
+ __builtin_memcpy(&g, (void *)&v, sizeof(g));
+ if (g.Type != MAGIC)
+ abort ();
+
+ g.Type = MAGIC;
+ __builtin_memcpy((void *)&v, &g, sizeof(v));
+ temp = bar();
+ if (temp != MAGIC)
+ abort ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/pr56997-2.c b/gcc/testsuite/gcc.dg/pr56997-2.c
new file mode 100644
index 00000000000..08e631180f1
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr56997-2.c
@@ -0,0 +1,44 @@
+/* Test volatile access to unaligned field. */
+/* { dg-do run } */
+/* { dg-options "-fstrict-volatile-bitfields" } */
+
+extern void abort (void);
+
+#define test_type unsigned int
+#define MAGIC 0x1020304u
+
+typedef struct s{
+ unsigned char Prefix;
+ test_type Type;
+}__attribute((__packed__)) ss;
+
+volatile ss v;
+ss g;
+
+void __attribute__((noinline))
+foo (test_type u)
+{
+ v.Type = u;
+}
+
+test_type __attribute__((noinline))
+bar (void)
+{
+ return v.Type;
+}
+
+int main()
+{
+ test_type temp;
+ foo(MAGIC);
+ __builtin_memcpy(&g, (void *)&v, sizeof(g));
+ if (g.Type != MAGIC)
+ abort ();
+
+ g.Type = MAGIC;
+ __builtin_memcpy((void *)&v, &g, sizeof(v));
+ temp = bar();
+ if (temp != MAGIC)
+ abort ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/pr56997-3.c b/gcc/testsuite/gcc.dg/pr56997-3.c
new file mode 100644
index 00000000000..3754b108ac6
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr56997-3.c
@@ -0,0 +1,44 @@
+/* Test volatile access to unaligned field. */
+/* { dg-do run } */
+/* { dg-options "-fstrict-volatile-bitfields" } */
+
+extern void abort (void);
+
+#define test_type unsigned long long
+#define MAGIC 0x102030405060708ull
+
+typedef struct s{
+ unsigned char Prefix;
+ test_type Type;
+}__attribute((__packed__)) ss;
+
+volatile ss v;
+ss g;
+
+void __attribute__((noinline))
+foo (test_type u)
+{
+ v.Type = u;
+}
+
+test_type __attribute__((noinline))
+bar (void)
+{
+ return v.Type;
+}
+
+int main()
+{
+ test_type temp;
+ foo(MAGIC);
+ __builtin_memcpy(&g, (void *)&v, sizeof(g));
+ if (g.Type != MAGIC)
+ abort ();
+
+ g.Type = MAGIC;
+ __builtin_memcpy((void *)&v, &g, sizeof(v));
+ temp = bar();
+ if (temp != MAGIC)
+ abort ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/pubtypes-1.c b/gcc/testsuite/gcc.dg/pubtypes-1.c
index 5c6767e8faa..3f09646c806 100644
--- a/gcc/testsuite/gcc.dg/pubtypes-1.c
+++ b/gcc/testsuite/gcc.dg/pubtypes-1.c
@@ -2,7 +2,7 @@
/* { dg-options "-O0 -gdwarf-2 -dA -fno-eliminate-unused-debug-types" } */
/* { dg-skip-if "Unmatchable assembly" { mmix-*-* } { "*" } { "" } } */
/* { dg-final { scan-assembler "__debug_pubtypes" } } */
-/* { dg-final { scan-assembler "long+\[ \t\]+0x\[0-9a-f]+\[ \t\]+\[#;]+\[ \t\]+Length of Public Type Names Info" } } */
+/* { dg-final { scan-assembler "long+\[ \t\]+0x\[0-9a-f]+\[ \t\]+\[#;]+\[ \t\]+Pub Info Length" } } */
/* { dg-final { scan-assembler "used_struct\\\\0\"+\[ \t\]+\[#;]+\[ \t\]+external name" } } */
/* { dg-final { scan-assembler "unused_struct\\\\0\"+\[ \t\]+\[#;]+\[ \t\]+external name" } } */
diff --git a/gcc/testsuite/gcc.dg/pubtypes-2.c b/gcc/testsuite/gcc.dg/pubtypes-2.c
index 22f61d380ab..b4ba8712c5e 100644
--- a/gcc/testsuite/gcc.dg/pubtypes-2.c
+++ b/gcc/testsuite/gcc.dg/pubtypes-2.c
@@ -2,7 +2,7 @@
/* { dg-options "-O0 -gdwarf-2 -dA" } */
/* { dg-skip-if "Unmatchable assembly" { mmix-*-* } { "*" } { "" } } */
/* { dg-final { scan-assembler "__debug_pubtypes" } } */
-/* { dg-final { scan-assembler "long+\[ \t\]+0x13b+\[ \t\]+\[#;]+\[ \t\]+Length of Public Type Names Info" } } */
+/* { dg-final { scan-assembler "long+\[ \t\]+0x13b+\[ \t\]+\[#;]+\[ \t\]+Pub Info Length" } } */
/* { dg-final { scan-assembler "used_struct\\\\0\"+\[ \t\]+\[#;]+\[ \t\]+external name" } } */
/* { dg-final { scan-assembler-not "unused_struct\\\\0\"+\[ \t\]+\[#;]+\[ \t\]+external name" } } */
diff --git a/gcc/testsuite/gcc.dg/pubtypes-3.c b/gcc/testsuite/gcc.dg/pubtypes-3.c
index d3fa57619a8..f4b0468ab19 100644
--- a/gcc/testsuite/gcc.dg/pubtypes-3.c
+++ b/gcc/testsuite/gcc.dg/pubtypes-3.c
@@ -2,7 +2,7 @@
/* { dg-options "-O0 -gdwarf-2 -dA" } */
/* { dg-skip-if "Unmatchable assembly" { mmix-*-* } { "*" } { "" } } */
/* { dg-final { scan-assembler "__debug_pubtypes" } } */
-/* { dg-final { scan-assembler "long+\[ \t\]+0x13b+\[ \t\]+\[#;]+\[ \t\]+Length of Public Type Names Info" } } */
+/* { dg-final { scan-assembler "long+\[ \t\]+0x13b+\[ \t\]+\[#;]+\[ \t\]+Pub Info Length" } } */
/* { dg-final { scan-assembler "used_struct\\\\0\"+\[ \t\]+\[#;]+\[ \t\]+external name" } } */
/* { dg-final { scan-assembler-not "unused_struct\\\\0\"+\[ \t\]+\[#;]+\[ \t\]+external name" } } */
/* { dg-final { scan-assembler-not "\"list_name_type\\\\0\"+\[ \t\]+\[#;]+\[ \t\]+external name" } } */
diff --git a/gcc/testsuite/gcc.dg/pubtypes-4.c b/gcc/testsuite/gcc.dg/pubtypes-4.c
index f974c4b2b31..76d7c4a9805 100644
--- a/gcc/testsuite/gcc.dg/pubtypes-4.c
+++ b/gcc/testsuite/gcc.dg/pubtypes-4.c
@@ -2,7 +2,7 @@
/* { dg-options "-O0 -gdwarf-2 -dA" } */
/* { dg-skip-if "Unmatchable assembly" { mmix-*-* } { "*" } { "" } } */
/* { dg-final { scan-assembler "__debug_pubtypes" } } */
-/* { dg-final { scan-assembler "long+\[ \t\]+0x172+\[ \t\]+\[#;]+\[ \t\]+Length of Public Type Names Info" } } */
+/* { dg-final { scan-assembler "long+\[ \t\]+0x172+\[ \t\]+\[#;]+\[ \t\]+Pub Info Length" } } */
/* { dg-final { scan-assembler "used_struct\\\\0\"+\[ \t\]+\[#;]+\[ \t\]+external name" } } */
/* { dg-final { scan-assembler-not "unused_struct\\\\0\"+\[ \t\]+\[#;]+\[ \t\]+external name" } } */
/* { dg-final { scan-assembler "\"list_name_type\\\\0\"+\[ \t\]+\[#;]+\[ \t\]+external name" } } */
diff --git a/gcc/testsuite/gcc.dg/torture/pr59058.c b/gcc/testsuite/gcc.dg/torture/pr59058.c
new file mode 100644
index 00000000000..b3a5a3960c3
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/pr59058.c
@@ -0,0 +1,19 @@
+/* { dg-do run } */
+
+extern void abort (void);
+
+short b = 0;
+
+int
+main ()
+{
+ int c = 0;
+l1:
+ b++;
+ c |= b;
+ if (b)
+ goto l1;
+ if (c != -1)
+ abort ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/torture/pr59374-1.c b/gcc/testsuite/gcc.dg/torture/pr59374-1.c
new file mode 100644
index 00000000000..6230ae9ca3d
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/pr59374-1.c
@@ -0,0 +1,24 @@
+/* { dg-do run } */
+/* { dg-additional-options "-ftree-slp-vectorize" } */
+
+extern void abort (void);
+
+static struct X { void *a; void *b; } a, b;
+
+void __attribute__((noinline))
+foo (void)
+{
+ void *tem = a.b;
+ a.b = (void *)0;
+ b.b = tem;
+ b.a = a.a;
+}
+
+int main()
+{
+ a.b = &a;
+ foo ();
+ if (b.b != &a)
+ abort ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/torture/pr59374-2.c b/gcc/testsuite/gcc.dg/torture/pr59374-2.c
new file mode 100644
index 00000000000..d791b987ef6
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/pr59374-2.c
@@ -0,0 +1,26 @@
+/* { dg-do run } */
+/* { dg-additional-options "-ftree-slp-vectorize" } */
+
+extern void abort (void);
+
+static struct X { void *a; void *b; } a, b;
+static struct X *p;
+
+void __attribute__((noinline))
+foo (void)
+{
+ void *tem = a.b;
+ p->b = (void *)0;
+ b.b = tem;
+ b.a = a.a;
+}
+
+int main()
+{
+ p = &a;
+ a.b = &a;
+ foo ();
+ if (b.b != &a)
+ abort ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/isolate-2.c b/gcc/testsuite/gcc.dg/tree-ssa/isolate-2.c
index 290b44c5bd4..bfcaa2b01da 100644
--- a/gcc/testsuite/gcc.dg/tree-ssa/isolate-2.c
+++ b/gcc/testsuite/gcc.dg/tree-ssa/isolate-2.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -fdump-tree-isolate-paths -fdump-tree-phicprop1" } */
+/* { dg-options "-O2 -fisolate-erroneous-paths-attribute -fdump-tree-isolate-paths -fdump-tree-phicprop1" } */
int z;
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/isolate-4.c b/gcc/testsuite/gcc.dg/tree-ssa/isolate-4.c
index 6937d25580a..c9c074df62b 100644
--- a/gcc/testsuite/gcc.dg/tree-ssa/isolate-4.c
+++ b/gcc/testsuite/gcc.dg/tree-ssa/isolate-4.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -fdump-tree-isolate-paths -fdump-tree-phicprop1" } */
+/* { dg-options "-O2 -fisolate-erroneous-paths-attribute -fdump-tree-isolate-paths -fdump-tree-phicprop1" } */
extern void foo(void *) __attribute__ ((__nonnull__ (1)));
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/loop-31.c b/gcc/testsuite/gcc.dg/tree-ssa/loop-31.c
index 4f226374bdb..fa18f5e0421 100644
--- a/gcc/testsuite/gcc.dg/tree-ssa/loop-31.c
+++ b/gcc/testsuite/gcc.dg/tree-ssa/loop-31.c
@@ -15,7 +15,7 @@ short foo (int len, int v)
/* When we do not have addressing mode including multiplication,
the memory access should be strength-reduced. */
-/* { dg-final { scan-tree-dump-times " \\+ 2" 1 "optimized" { target arm*-*-* } } } */
-/* { dg-final { scan-tree-dump-times " \\+ 2" 1 "optimized" { target { ia64-*-* && ilp32 } } } } */
-/* { dg-final { scan-tree-dump-times " \\+ 2" 2 "optimized" { target { ia64-*-* && lp64 } } } } */
+/* { dg-final { scan-tree-dump-times " \\+ 2;" 1 "optimized" { target arm*-*-* } } } */
+/* { dg-final { scan-tree-dump-times " \\+ 2;" 1 "optimized" { target { ia64-*-* && ilp32 } } } } */
+/* { dg-final { scan-tree-dump-times " \\+ 2;" 2 "optimized" { target { ia64-*-* && lp64 } } } } */
/* { dg-final { cleanup-tree-dump "optimized" } } */
diff --git a/gcc/testsuite/gcc.c-torture/compile/pr45085.c b/gcc/testsuite/gcc.dg/tree-ssa/pr45085.c
index 5c1ec6a5c96..d95ac8620ca 100644
--- a/gcc/testsuite/gcc.c-torture/compile/pr45085.c
+++ b/gcc/testsuite/gcc.dg/tree-ssa/pr45085.c
@@ -1,3 +1,4 @@
+/* { dg-do compile } */
/* { dg-options "-O2 -Wuninitialized" } */
struct S { char *s1; long s2; };
struct T { int t1; long t2; long t3; };
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr45685.c b/gcc/testsuite/gcc.dg/tree-ssa/pr45685.c
new file mode 100644
index 00000000000..06289430fa3
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/pr45685.c
@@ -0,0 +1,41 @@
+/* { dg-do compile } */
+/* { dg-options "-O3 -fdump-tree-phiopt1-details" } */
+
+typedef unsigned long int uint64_t;
+typedef long int int64_t;
+int summation_helper_1(int64_t* products, uint64_t count)
+{
+ int s = 0;
+ uint64_t i;
+ for(i=0; i<count; i++)
+ {
+ int64_t val = (products[i]>0) ? 1 : -1;
+ products[i] *= val;
+ if(products[i] != i)
+ val = -val;
+ products[i] = val;
+ s += val;
+ }
+ return s;
+}
+
+
+int summation_helper_2(int64_t* products, uint64_t count)
+{
+ int s = 0;
+ uint64_t i;
+ for(i=0; i<count; i++)
+ {
+ int val = (products[i]>0) ? 1 : -1;
+ products[i] *= val;
+ if(products[i] != i)
+ val = -val;
+ products[i] = val;
+ s += val;
+ }
+ return s;
+}
+
+/* { dg-final { scan-tree-dump-times "converted to straightline code" 2 "phiopt1" } } */
+/* { dg-final { cleanup-tree-dump "phiopt1" } } */
+
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/scev-7.c b/gcc/testsuite/gcc.dg/tree-ssa/scev-7.c
new file mode 100644
index 00000000000..d6ceb208258
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/scev-7.c
@@ -0,0 +1,18 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-sccp-scev" } */
+
+struct struct_t
+{
+ int* data;
+};
+
+void foo (struct struct_t* sp, int start, int end)
+{
+ int i;
+
+ for (i = 1000; i+start > end; i--)
+ sp->data[i+start] = 0;
+}
+
+/* { dg-final { scan-tree-dump-times "Simplify PEELED_CHREC into POLYNOMIAL_CHREC" 1 "sccp" } } */
+/* { dg-final { cleanup-tree-dump "sccp" } } */
diff --git a/gcc/testsuite/gcc.dg/tsan/tsan.exp b/gcc/testsuite/gcc.dg/tsan/tsan.exp
new file mode 100644
index 00000000000..2bf535a870c
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tsan/tsan.exp
@@ -0,0 +1,47 @@
+# Copyright (C) 2013 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# GCC testsuite that uses the `dg.exp' driver.
+
+# Load support procs.
+load_lib gcc-dg.exp
+load_lib tsan-dg.exp
+load_lib torture-options.exp
+
+if ![check_effective_target_fthread_sanitizer] {
+ return
+}
+
+# Initialize `dg'.
+dg-init
+torture-init
+set-torture-options [list \
+ { -O0 } \
+ { -O2 } ]
+
+if [tsan_init] {
+
+# Main loop.
+gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.c $srcdir/c-c++-common/tsan/*.c]] ""
+
+}
+
+# All done.
+tsan_finish
+torture-finish
+dg-finish
diff --git a/gcc/testsuite/gcc.dg/vect/pr56787.c b/gcc/testsuite/gcc.dg/vect/pr56787.c
index 313d477492f..3f8a07ac291 100644
--- a/gcc/testsuite/gcc.dg/vect/pr56787.c
+++ b/gcc/testsuite/gcc.dg/vect/pr56787.c
@@ -5,7 +5,7 @@ inline void
bar (const float s[5], float z[3][5])
{
float a = s[0], b = s[1], c = s[2], d = s[3], e = s[4];
- float f = 1.0f / a;
+ float f = a;
float u = f * b, v = f * c, w = f * d;
float p = 0.4f * (e - 0.5f * (b * u + c * v + d * w));
z[0][3] = b * w;
diff --git a/gcc/testsuite/gcc.dg/vect/pr58508.c b/gcc/testsuite/gcc.dg/vect/pr58508.c
index c4921bb768f..c1ca3347c14 100644
--- a/gcc/testsuite/gcc.dg/vect/pr58508.c
+++ b/gcc/testsuite/gcc.dg/vect/pr58508.c
@@ -66,5 +66,5 @@ void test5 (int* a, int* b)
}
}
-/* { dg-final { scan-tree-dump-times "hoist" 8 "vect" } } */
+/* { dg-final { scan-tree-dump-times "hoist" 8 "vect" { xfail vect_no_align } } } */
/* { dg-final { cleanup-tree-dump "vect" } } */
diff --git a/gcc/testsuite/gcc.dg/vect/vect-nop-move.c b/gcc/testsuite/gcc.dg/vect/vect-nop-move.c
new file mode 100644
index 00000000000..92614c6ad72
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vect/vect-nop-move.c
@@ -0,0 +1,65 @@
+/* { dg-do run } */
+/* { dg-require-effective-target vect_float } */
+/* { dg-additional-options "-fdump-rtl-combine-details" } */
+
+extern void abort (void);
+
+#define NOINLINE __attribute__((noinline))
+
+typedef float float32x4_t __attribute__ ((__vector_size__ (16)));
+typedef float float32x2_t __attribute__ ((__vector_size__ (8)));
+
+NOINLINE float
+foo32x4_be (float32x4_t x)
+{
+ return x[3];
+}
+
+NOINLINE float
+foo32x4_le (float32x4_t x)
+{
+ return x[0];
+}
+
+NOINLINE float
+bar (float a)
+{
+ return a;
+}
+
+NOINLINE float
+foo32x2_be (float32x2_t x)
+{
+ return bar (x[1]);
+}
+
+NOINLINE float
+foo32x2_le (float32x2_t x)
+{
+ return bar (x[0]);
+}
+
+int
+main()
+{
+ float32x4_t a = { 0.0f, 1.0f, 2.0f, 3.0f };
+ float32x2_t b = { 0.0f, 1.0f };
+
+ if (foo32x4_be (a) != 3.0f)
+ abort ();
+
+ if (foo32x4_le (a) != 0.0f)
+ abort ();
+
+ if (foo32x2_be (b) != 1.0f)
+ abort ();
+
+ if (foo32x2_le (b) != 0.0f)
+ abort ();
+
+ return 0;
+}
+
+/* { dg-final { scan-rtl-dump "deleting noop move" "combine" { target aarch64*-*-* } } } */
+/* { dg-final { cleanup-rtl-dump "combine" } } */
+/* { dg-final { cleanup-tree-dump "vect" } } */
diff --git a/gcc/testsuite/gcc.dg/vect/vect-reduc-pattern-3.c b/gcc/testsuite/gcc.dg/vect/vect-reduc-pattern-3.c
index 06a94168cd0..39f3b6b3b58 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-reduc-pattern-3.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-reduc-pattern-3.c
@@ -1,4 +1,4 @@
-/* { dg-require-effective-target vect_int } */
+/* { dg-require-effective-target vect_int_mult } */
#include <stdarg.h>
#include "tree-vect.h"
diff --git a/gcc/testsuite/gcc.dg/vect/vect-simd-clone-10a.c b/gcc/testsuite/gcc.dg/vect/vect-simd-clone-10a.c
index 1314039643f..c49473df112 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-simd-clone-10a.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-simd-clone-10a.c
@@ -15,3 +15,5 @@ bar (int a, int b, long int c)
{
return a + b + c;
}
+
+/* { dg-final { cleanup-tree-dump "vect" } } */
diff --git a/gcc/testsuite/gcc.dg/vect/vect-simd-clone-12a.c b/gcc/testsuite/gcc.dg/vect/vect-simd-clone-12a.c
index fcd04614e97..c76508f9ac4 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-simd-clone-12a.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-simd-clone-12a.c
@@ -15,3 +15,5 @@ bar (int a, int b, long int c)
{
return a + b + c;
}
+
+/* { dg-final { cleanup-tree-dump "vect" } } */
diff --git a/gcc/testsuite/gcc.target/arm/builtin-trap.c b/gcc/testsuite/gcc.target/arm/builtin-trap.c
new file mode 100644
index 00000000000..4ff8d253e75
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/builtin-trap.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm32 } */
+
+void
+trap ()
+{
+ __builtin_trap ();
+}
+
+/* { dg-final { scan-assembler "0xe7f000f0" { target { arm_nothumb } } } } */
diff --git a/gcc/testsuite/gcc.target/arm/ldrd-strd-offset.c b/gcc/testsuite/gcc.target/arm/ldrd-strd-offset.c
new file mode 100644
index 00000000000..a128a0a0e4b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/ldrd-strd-offset.c
@@ -0,0 +1,17 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+typedef struct
+{
+ int x;
+ int i, j;
+} off_struct;
+
+int foo (char *str, int *a, int b, int c)
+{
+ off_struct *p = (off_struct *)(str + 3);
+ b = p->i;
+ c = p->j;
+ *a = b + c;
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/arm/thumb-builtin-trap.c b/gcc/testsuite/gcc.target/arm/thumb-builtin-trap.c
new file mode 100644
index 00000000000..22e90e7d2cf
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/thumb-builtin-trap.c
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-options "-mthumb" } */
+/* { dg-require-effective-target arm_thumb1_ok } */
+
+void
+trap ()
+{
+ __builtin_trap ();
+}
+
+/* { dg-final { scan-assembler "0xdeff" } } */
diff --git a/gcc/testsuite/gcc.target/i386/avx-vmovapd-256-1.c b/gcc/testsuite/gcc.target/i386/avx-vmovapd-256-1.c
index d9121228307..cc524c8a641 100644
--- a/gcc/testsuite/gcc.target/i386/avx-vmovapd-256-1.c
+++ b/gcc/testsuite/gcc.target/i386/avx-vmovapd-256-1.c
@@ -15,7 +15,7 @@ void static
avx_test (void)
{
union256d u;
- double e [4] __attribute__ ((aligned (8))) = {41124.234,2344.2354,8653.65635,856.43576};
+ double e [4] __attribute__ ((aligned (32))) = {41124.234,2344.2354,8653.65635,856.43576};
u.x = test (e);
diff --git a/gcc/testsuite/gcc.target/i386/avx-vmovapd-256-2.c b/gcc/testsuite/gcc.target/i386/avx-vmovapd-256-2.c
index 96a664ac11e..9224484cac1 100644
--- a/gcc/testsuite/gcc.target/i386/avx-vmovapd-256-2.c
+++ b/gcc/testsuite/gcc.target/i386/avx-vmovapd-256-2.c
@@ -15,7 +15,7 @@ void static
avx_test (void)
{
union256d u;
- double e [4] __attribute__ ((aligned (8))) = {0.0};
+ double e [4] __attribute__ ((aligned (32))) = {0.0};
u.x = _mm256_set_pd (39578.467285, 7856.342941, 85632.783567, 47563.234215);
diff --git a/gcc/testsuite/gcc.target/i386/pr59390.c b/gcc/testsuite/gcc.target/i386/pr59390.c
new file mode 100644
index 00000000000..49ce02d1a21
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr59390.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-std=c99 -O3" } */
+
+#include "math.h"
+void fun() __attribute__((target("fma")));
+
+void
+other_fun(double *restrict out, double * restrict a, double * restrict b, double * restrict c, int n)
+{
+ int i;
+ for (i = 0; i < n; i++) {
+ out[i] = fma(a[i], b[i], c[i]);
+ }
+}
+
+/* { dg-final { scan-assembler-not "vfmadd" } } */
diff --git a/gcc/testsuite/gcc.target/i386/pr59390_1.c b/gcc/testsuite/gcc.target/i386/pr59390_1.c
new file mode 100644
index 00000000000..2bd32a4ebdb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr59390_1.c
@@ -0,0 +1,17 @@
+/* { dg-do compile } */
+/* { dg-options "-std=c99 -O3" } */
+
+#include "math.h"
+void fun() __attribute__((target("fma")));
+
+__attribute__((target("fma")))
+void
+other_fun(double *restrict out, double * restrict a, double * restrict b, double * restrict c, int n)
+{
+ int i;
+ for (i = 0; i < n; i++) {
+ out[i] = fma(a[i], b[i], c[i]);
+ }
+}
+
+/* { dg-final { scan-assembler "vfmadd" } } */
diff --git a/gcc/testsuite/gcc.target/i386/pr59390_2.c b/gcc/testsuite/gcc.target/i386/pr59390_2.c
new file mode 100644
index 00000000000..55a181a8ad4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr59390_2.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-std=c99 -O3 -mfma" } */
+
+#include "math.h"
+void fun() __attribute__((target("fma")));
+
+void
+other_fun(double *restrict out, double * restrict a, double * restrict b, double * restrict c, int n)
+{
+ int i;
+ for (i = 0; i < n; i++) {
+ out[i] = fma(a[i], b[i], c[i]);
+ }
+}
+
+/* { dg-final { scan-assembler "vfmadd" } } */
diff --git a/gcc/testsuite/gcc.target/i386/pr59405.c b/gcc/testsuite/gcc.target/i386/pr59405.c
new file mode 100644
index 00000000000..1136e2e4501
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr59405.c
@@ -0,0 +1,24 @@
+/* { dg-do run } */
+/* { dg-options "-mmmx -mfpmath=387" } */
+
+#include "mmx-check.h"
+
+#include <mmintrin.h>
+
+typedef float float32x2_t __attribute__ ((vector_size (8)));
+
+float
+foo32x2_be (float32x2_t x)
+{
+ _mm_empty ();
+ return x[1];
+}
+
+static void
+mmx_test (void)
+{
+ float32x2_t b = { 0.0f, 1.0f };
+
+ if (foo32x2_be (b) != 1.0f)
+ abort ();
+}
diff --git a/gcc/testsuite/gcc.target/i386/readeflags-1.c b/gcc/testsuite/gcc.target/i386/readeflags-1.c
new file mode 100644
index 00000000000..6b2fa7e8d14
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/readeflags-1.c
@@ -0,0 +1,40 @@
+/* { dg-do run } */
+/* { dg-options "-O0" } */
+
+#include <x86intrin.h>
+
+#ifdef __x86_64__
+#define EFLAGS_TYPE unsigned long long int
+#else
+#define EFLAGS_TYPE unsigned int
+#endif
+
+static EFLAGS_TYPE
+readeflags_test (unsigned int a, unsigned int b)
+{
+ unsigned x = (a == b);
+ return __readeflags ();
+}
+
+int
+main ()
+{
+ EFLAGS_TYPE flags;
+
+ flags = readeflags_test (100, 100);
+
+ if ((flags & 1) != 0) /* Read CF */
+ abort ();
+
+ flags = readeflags_test (100, 101);
+
+ if ((flags & 1) == 0) /* Read CF */
+ abort ();
+
+#ifdef DEBUG
+ printf ("PASSED\n");
+#endif
+
+ return 0;
+}
+
diff --git a/gcc/testsuite/gcc.target/i386/sse2-movapd-1.c b/gcc/testsuite/gcc.target/i386/sse2-movapd-1.c
index b8b9dba0c20..55d9f594f55 100644
--- a/gcc/testsuite/gcc.target/i386/sse2-movapd-1.c
+++ b/gcc/testsuite/gcc.target/i386/sse2-movapd-1.c
@@ -25,7 +25,7 @@ static void
TEST (void)
{
union128d u;
- double e[2] __attribute__ ((aligned (8))) = {2134.3343,1234.635654};
+ double e[2] __attribute__ ((aligned (16))) = {2134.3343,1234.635654};
u.x = test (e);
diff --git a/gcc/testsuite/gcc.target/i386/sse2-movapd-2.c b/gcc/testsuite/gcc.target/i386/sse2-movapd-2.c
index 8298551baf5..87da332779a 100644
--- a/gcc/testsuite/gcc.target/i386/sse2-movapd-2.c
+++ b/gcc/testsuite/gcc.target/i386/sse2-movapd-2.c
@@ -25,7 +25,7 @@ static void
TEST (void)
{
union128d u;
- double e[2] __attribute__ ((aligned (8))) = {0.0};
+ double e[2] __attribute__ ((aligned (16))) = {0.0};
u.x = _mm_set_pd (2134.3343,1234.635654);
diff --git a/gcc/testsuite/gcc.target/i386/writeeflags-1.c b/gcc/testsuite/gcc.target/i386/writeeflags-1.c
new file mode 100644
index 00000000000..446840cb33c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/writeeflags-1.c
@@ -0,0 +1,30 @@
+/* { dg-do run } */
+/* { dg-options "-O0" } */
+
+#include <x86intrin.h>
+
+#ifdef __x86_64__
+#define EFLAGS_TYPE unsigned long long int
+#else
+#define EFLAGS_TYPE unsigned int
+#endif
+
+int
+main ()
+{
+ EFLAGS_TYPE flags = 0xD7; /* 111010111b */
+
+ __writeeflags (flags);
+
+ flags = __readeflags ();
+
+ if ((flags & 0xFF) != 0xD7)
+ abort ();
+
+#ifdef DEBUG
+ printf ("PASSED\n");
+#endif
+
+ return 0;
+}
+
diff --git a/gcc/testsuite/gcc.target/ia64/pr52731.c b/gcc/testsuite/gcc.target/ia64/pr52731.c
new file mode 100644
index 00000000000..50ef1d78d75
--- /dev/null
+++ b/gcc/testsuite/gcc.target/ia64/pr52731.c
@@ -0,0 +1,19 @@
+/* { dg-do compile { target ia64-*-* } } */
+/* { dg-options "-O2" } */
+
+char* area;
+long int area_size;
+char* base;
+
+void fun(unsigned long int addr)
+{
+ unsigned long int size32 = (addr + 4096 - 1) & ~(4096 - 1);
+ unsigned long int size = size32 * sizeof(unsigned int);
+
+ if (size > 0) {
+ size = (size + 1) & ~(1);
+ }
+
+ area_size = size;
+ area = base + size;
+}
diff --git a/gcc/testsuite/gcc.target/mips/pr59317.c b/gcc/testsuite/gcc.target/mips/pr59317.c
new file mode 100644
index 00000000000..dd23f7c8649
--- /dev/null
+++ b/gcc/testsuite/gcc.target/mips/pr59317.c
@@ -0,0 +1,83 @@
+/* { dg-do compile } */
+/* { dg-options "-mips16" } */
+extern void abort();
+
+int i_0, i_1, i_2, i_3, i_4, i_5, i_6, i_7, i_8, i_9;
+int j_0, j_1, j_2, j_3, j_4, j_5, j_6, j_7, j_8, j_9;
+
+int main()
+{
+ register int *x1 = &i_1;
+ register int *x2 = &i_2;
+ register int *x3 = &i_3;
+ register int *x4 = &i_4;
+ register int *x5 = &i_5;
+ register int *x6 = &i_6;
+ register int *x7 = &i_7;
+ register int *x8 = &i_8;
+ register int *x9 = &i_9;
+
+ register int *y0 = &j_0;
+ register int *y1 = &j_1;
+ register int *y2 = &i_2;
+ register int *y3 = &j_3;
+ register int *y4 = &j_4;
+ register int *y5 = &j_5;
+ register int *y6 = &j_6;
+ register int *y7 = &j_7;
+ register int *y8 = &j_8;
+ register int *y9 = &j_9;
+
+ asm volatile ("" : "=r" (x2) : "0" (x2));
+ asm volatile ("" : "=r" (x3) : "0" (x3));
+ asm volatile ("" : "=r" (x4) : "0" (x4));
+ asm volatile ("" : "=r" (x5) : "0" (x5));
+ asm volatile ("" : "=r" (x6) : "0" (x6));
+ asm volatile ("" : "=r" (x7) : "0" (x7));
+ asm volatile ("" : "=r" (x8) : "0" (x8));
+ asm volatile ("" : "=r" (x9) : "0" (x9));
+
+ asm volatile ("" : "=r" (y0) : "0" (y0));
+ asm volatile ("" : "=r" (y1) : "0" (y1));
+ asm volatile ("" : "=r" (y2) : "0" (y2));
+ asm volatile ("" : "=r" (y3) : "0" (y3));
+ asm volatile ("" : "=r" (y4) : "0" (y4));
+ asm volatile ("" : "=r" (y5) : "0" (y5));
+ asm volatile ("" : "=r" (y6) : "0" (y6));
+ asm volatile ("" : "=r" (y7) : "0" (y7));
+ asm volatile ("" : "=r" (y8) : "0" (y8));
+ asm volatile ("" : "=r" (y9) : "0" (y9));
+
+ asm volatile ("" : "=r" (x1) : "0" (x1));
+ asm volatile ("" : "=r" (x2) : "0" (x2));
+ asm volatile ("" : "=r" (x3) : "0" (x3));
+ asm volatile ("" : "=r" (x4) : "0" (x4));
+ asm volatile ("" : "=r" (x5) : "0" (x5));
+ asm volatile ("" : "=r" (x6) : "0" (x6));
+ asm volatile ("" : "=r" (x7) : "0" (x7));
+ asm volatile ("" : "=r" (x8) : "0" (x8));
+ asm volatile ("" : "=r" (x9) : "0" (x9));
+
+ asm volatile ("" : "=r" (y0) : "0" (y0));
+ asm volatile ("" : "=r" (y1) : "0" (y1));
+ asm volatile ("" : "=r" (y2) : "0" (y2));
+ asm volatile ("" : "=r" (y3) : "0" (y3));
+ asm volatile ("" : "=r" (y4) : "0" (y4));
+ asm volatile ("" : "=r" (y5) : "0" (y5));
+ asm volatile ("" : "=r" (y6) : "0" (y6));
+ asm volatile ("" : "=r" (y7) : "0" (y7));
+ asm volatile ("" : "=r" (y8) : "0" (y8));
+ asm volatile ("" : "=r" (y9) : "0" (y9));
+
+ if (y0 != &j_0) abort ();
+ if (y1 != &j_1) abort ();
+ if (y2 != &j_2) abort ();
+ if (y3 != &j_3) abort ();
+ if (y4 != &j_4) abort ();
+ if (y5 != &j_5) abort ();
+ if (y6 != &j_6) abort ();
+ if (y7 != &j_7) abort ();
+ if (y8 != &j_8) abort ();
+ if (y9 != &j_9) abort ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/sh/pr51697.c b/gcc/testsuite/gcc.target/sh/pr51697.c
new file mode 100644
index 00000000000..d63e329bff9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/sh/pr51697.c
@@ -0,0 +1,21 @@
+/* Check that DImode comparisons are optimized as expected when compiling
+ with -Os. */
+/* { dg-do compile } */
+/* { dg-options "-Os" } */
+/* { dg-skip-if "" { "sh*-*-*" } { "-m5*"} { "" } } */
+/* { dg-final { scan-assembler-times "tst" 2 } } */
+/* { dg-final { scan-assembler-not "cmp" } } */
+
+int
+test_00 (long long* x)
+{
+ /* 1x tst, no cmp/* insns. */
+ return *x & 0xFFFFFFFF ? -20 : -40;
+}
+
+int
+test_01 (unsigned long long x)
+{
+ /* 1x tst, no cmp/* insns. */
+ return x >= 0x100000000LL ? -20 : -40;
+}
diff --git a/gcc/testsuite/gcc.target/sparc/pdistn-2.c b/gcc/testsuite/gcc.target/sparc/pdistn-2.c
new file mode 100644
index 00000000000..008496f9ae7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/sparc/pdistn-2.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-mcpu=ultrasparc -mvis3 -O1 -fdump-tree-optimized" } */
+
+typedef unsigned char vec8 __attribute__((vector_size(8)));
+
+#define _(A) (unsigned char)A
+
+long foo () {
+ vec8 a = { _(1), _(2), _(3), _(4), _(5), _(6), _(7), _(255) };
+ vec8 b = { _(2), _(4), _(8), _(16), _(32), _(64), _(128), _(8) };
+ return __builtin_vis_pdistn (a, b);
+}
+
+/* { dg-final { scan-assembler-not "pdistn\t%" } } */
+/* { dg-final { scan-tree-dump "return 473" "optimized" } } */
+/* { dg-final { cleanup-tree-dump "optimized" } } */
diff --git a/gcc/testsuite/gcc.target/sparc/pdistn.c b/gcc/testsuite/gcc.target/sparc/pdistn.c
new file mode 100644
index 00000000000..2f534f70b7d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/sparc/pdistn.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+/* { dg-options "-mcpu=ultrasparc -mvis3" } */
+
+typedef unsigned char vec8 __attribute__((vector_size(8)));
+
+long foo (vec8 a, vec8 b) {
+ return __builtin_vis_pdistn (a, b);
+}
+
+/* { dg-final { scan-assembler-times "pdistn\t%" 1 } } */
diff --git a/gcc/testsuite/gfortran.dg/allocate_with_source_4.f90 b/gcc/testsuite/gfortran.dg/allocate_with_source_4.f90
new file mode 100644
index 00000000000..dcd42a7981a
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/allocate_with_source_4.f90
@@ -0,0 +1,12 @@
+! { dg-do compile }
+!
+! PR 58916: [F03] Allocation of scalar with array source not rejected
+!
+! Contributed by Vladimir Fuka <vladimir.fuka@gmail.com>
+
+ class(*), allocatable :: a1
+ real, allocatable :: a2
+ real b(1)
+ allocate(a1, source=b) ! { dg-error "must be scalar or have the same rank" }
+ allocate(a2, source=b) ! { dg-error "must be scalar or have the same rank" }
+end
diff --git a/gcc/testsuite/gfortran.dg/c_by_val_5.f90 b/gcc/testsuite/gfortran.dg/c_by_val_5.f90
index 069d8171175..3a8bc3bf750 100644
--- a/gcc/testsuite/gfortran.dg/c_by_val_5.f90
+++ b/gcc/testsuite/gfortran.dg/c_by_val_5.f90
@@ -23,7 +23,7 @@ module x
! "external" only.
interface
subroutine bmp_write(nx)
- integer :: nx
+ integer, value :: nx
end subroutine bmp_write
end interface
contains
diff --git a/gcc/testsuite/gfortran.dg/class_result_2.f90 b/gcc/testsuite/gfortran.dg/class_result_2.f90
new file mode 100644
index 00000000000..be37a1991da
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/class_result_2.f90
@@ -0,0 +1,21 @@
+! { dg-do compile }
+!
+! PR 59414: [OOP] Class array pointers: compile error on valid code (Different ranks in pointer assignment)
+!
+! Contributed by Antony Lewis <antony@cosmologist.info>
+
+ implicit none
+
+ Type TObjectList
+ end Type
+
+ Class(TObjectList), pointer :: Arr(:)
+ Arr => ArrayItem()
+
+ contains
+
+ function ArrayItem() result(P)
+ Class(TObjectList), pointer :: P(:)
+ end function
+
+end
diff --git a/gcc/testsuite/gfortran.dg/dummy_procedure_10.f90 b/gcc/testsuite/gfortran.dg/dummy_procedure_10.f90
new file mode 100644
index 00000000000..2720b8f2eb6
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/dummy_procedure_10.f90
@@ -0,0 +1,56 @@
+! { dg-do compile }
+!
+! PR 35831: [F95] Shape mismatch check missing for dummy procedure argument
+!
+! Contributed by Janus Weil <janus@gcc.gnu.org>
+
+program test_attributes
+
+ call tester1 (a1) ! { dg-error "ASYNCHRONOUS mismatch in argument" }
+ call tester2 (a2) ! { dg-error "CONTIGUOUS mismatch in argument" }
+ call tester3 (a1) ! { dg-error "VALUE mismatch in argument" }
+ call tester4 (a1) ! { dg-error "VOLATILE mismatch in argument" }
+
+contains
+
+ subroutine a1(aa)
+ real :: aa
+ end subroutine
+
+ subroutine a2(bb)
+ real :: bb(:)
+ end subroutine
+
+ subroutine tester1 (f1)
+ interface
+ subroutine f1 (a)
+ real, asynchronous :: a
+ end subroutine
+ end interface
+ end subroutine
+
+ subroutine tester2 (f2)
+ interface
+ subroutine f2 (b)
+ real, contiguous :: b(:)
+ end subroutine
+ end interface
+ end subroutine
+
+ subroutine tester3 (f3)
+ interface
+ subroutine f3 (c)
+ real, value :: c
+ end subroutine
+ end interface
+ end subroutine
+
+ subroutine tester4 (f4)
+ interface
+ subroutine f4 (d)
+ real, volatile :: d
+ end subroutine
+ end interface
+ end subroutine
+
+end
diff --git a/gcc/testsuite/gfortran.dg/elemental_subroutine_8.f90 b/gcc/testsuite/gfortran.dg/elemental_subroutine_8.f90
new file mode 100644
index 00000000000..c557d3a9d95
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/elemental_subroutine_8.f90
@@ -0,0 +1,50 @@
+! { dg-do compile }
+!
+! PR fortran/58099
+!
+! See also interpretation request F03-0130 in 09-217 and 10-006T5r1.
+!
+! - ELEMENTAL is only permitted for external names with PROCEDURE/INTERFACE
+! but not for dummy arguments or proc-pointers
+! - Using PROCEDURE with an elemental intrinsic as interface name a is valid,
+! but doesn't make the proc-pointer/dummy argument elemental
+!
+
+ interface
+ elemental real function x(y)
+ real, intent(in) :: y
+ end function x
+ end interface
+ intrinsic :: sin
+ procedure(x) :: xx1 ! OK
+ procedure(x), pointer :: xx2 ! { dg-error "Procedure pointer 'xx2' at .1. shall not be elemental" }
+ procedure(real), pointer :: pp
+ procedure(sin) :: bar ! OK
+ procedure(sin), pointer :: foo ! { dg-error "Procedure pointer 'foo' at .1. shall not be elemental" }
+ pp => sin !OK
+contains
+ subroutine sub1(z) ! { dg-error "Dummy procedure 'z' at .1. shall not be elemental" }
+ procedure(x) :: z
+ end subroutine sub1
+ subroutine sub2(z) ! { dg-error "Procedure pointer 'z' at .1. shall not be elemental" }
+ procedure(x), pointer :: z
+ end subroutine sub2
+ subroutine sub3(z)
+ interface
+ elemental real function z(y) ! { dg-error "Dummy procedure 'z' at .1. shall not be elemental" }
+ real, intent(in) :: y
+ end function z
+ end interface
+ end subroutine sub3
+ subroutine sub4(z)
+ interface
+ elemental real function z(y) ! { dg-error "Procedure pointer 'z' at .1. shall not be elemental" }
+ real, intent(in) :: y
+ end function z
+ end interface
+ pointer :: z
+ end subroutine sub4
+ subroutine sub5(z) ! { dg-error "Dummy procedure 'z' at .1. shall not be elemental" }
+ procedure(sin) :: z
+ end subroutine sub5
+end
diff --git a/gcc/testsuite/gfortran.dg/gomp/pr59467.f90 b/gcc/testsuite/gfortran.dg/gomp/pr59467.f90
new file mode 100644
index 00000000000..e69c9eb49a0
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/gomp/pr59467.f90
@@ -0,0 +1,24 @@
+! PR libgomp/59467
+! { dg-do compile }
+! { dg-options "-fopenmp" }
+ FUNCTION t()
+ INTEGER :: a, b, t
+ a = 0
+ b = 0
+ !$OMP PARALLEL REDUCTION(+:b)
+ !$OMP SINGLE ! { dg-error "is not threadprivate or private in outer context" }
+ !$OMP ATOMIC WRITE
+ a = 6
+ !$OMP END SINGLE COPYPRIVATE (a)
+ b = a
+ !$OMP END PARALLEL
+ t = b
+ b = 0
+ !$OMP PARALLEL REDUCTION(+:b)
+ !$OMP SINGLE
+ !$OMP ATOMIC WRITE
+ b = 6
+ !$OMP END SINGLE COPYPRIVATE (b)
+ !$OMP END PARALLEL
+ t = t + b
+ END FUNCTION
diff --git a/gcc/testsuite/gfortran.dg/namelist_83.f90 b/gcc/testsuite/gfortran.dg/namelist_83.f90
new file mode 100644
index 00000000000..f87d4cdf61b
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/namelist_83.f90
@@ -0,0 +1,22 @@
+! { dg-do link }
+! { dg-options "-g" }
+! { dg-additional-sources namelist_83_2.f90 }
+!
+! Note: compilation would be sufficient, but "compile" cannot be combined
+! with dg-additional-sources.
+!
+! PR fortran/59440
+!
+! Contributed by Harald Anlauf
+!
+! Was ICEing during DWARF generation.
+!
+! This is the first file - dg-additional-sources contains the second one
+!
+
+module mo_t_datum
+ implicit none
+ integer :: qbit_conv = 0
+end module mo_t_datum
+
+! { dg-final { cleanup-modules "gfcbug126" } }
diff --git a/gcc/testsuite/gfortran.dg/namelist_83_2.f90 b/gcc/testsuite/gfortran.dg/namelist_83_2.f90
new file mode 100644
index 00000000000..0a0ca6ed34d
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/namelist_83_2.f90
@@ -0,0 +1,22 @@
+! { dg-do compile { target { ! *-*-* } } }
+!
+! To be compiled with "-g" via namelist_83.f90
+!
+! PR fortran/59440
+!
+! Contributed by Harald Anlauf
+!
+! Was ICEing during DWARF generation.
+!
+! This is the second file, the module is in namelist_83.f90
+!
+
+!
+MODULE gfcbug126
+ use mo_t_datum, only: qbit_conv
+ implicit none
+ namelist /OBSERVATIONS/ qbit_conv
+end module gfcbug126
+
+! As we have to link, add an empty main program:
+end
diff --git a/gcc/testsuite/gfortran.dg/proc_decl_9.f90 b/gcc/testsuite/gfortran.dg/proc_decl_9.f90
index 58ae321899e..455c27ce986 100644
--- a/gcc/testsuite/gfortran.dg/proc_decl_9.f90
+++ b/gcc/testsuite/gfortran.dg/proc_decl_9.f90
@@ -1,7 +1,7 @@
! { dg-do run }
! PR33162 INTRINSIC functions as ACTUAL argument
! Test case adapted from PR by Jerry DeLisle <jvdelisle@gcc.gnu.org>
-real function t(x)
+elemental real function t(x)
real, intent(in) ::x
t = x
end function
@@ -9,6 +9,6 @@ end function
program p
implicit none
intrinsic sin
- procedure(sin):: t
+ procedure(sin) :: t
if (t(1.0) /= 1.0) call abort
end program
diff --git a/gcc/testsuite/gfortran.dg/proc_ptr_11.f90 b/gcc/testsuite/gfortran.dg/proc_ptr_11.f90
index bee73f45213..61921e78ad0 100644
--- a/gcc/testsuite/gfortran.dg/proc_ptr_11.f90
+++ b/gcc/testsuite/gfortran.dg/proc_ptr_11.f90
@@ -7,16 +7,23 @@
program bsp
implicit none
-
+ intrinsic :: isign, iabs
abstract interface
subroutine up()
end subroutine up
+ ! As intrinsics but not elemental
+ pure integer function isign_interf(a, b)
+ integer, intent(in) :: a, b
+ end function isign_interf
+ pure integer function iabs_interf(x)
+ integer, intent(in) :: x
+ end function iabs_interf
end interface
procedure( up ) , pointer :: pptr
- procedure(isign), pointer :: q
+ procedure(isign_interf), pointer :: q
- procedure(iabs),pointer :: p1
+ procedure(iabs_interf),pointer :: p1
procedure(f), pointer :: p2
pointer :: p3
@@ -48,13 +55,13 @@ program bsp
contains
- function add( a, b )
+ pure function add( a, b )
integer :: add
integer, intent( in ) :: a, b
add = a + b
end function add
- integer function f(x)
+ pure integer function f(x)
integer,intent(in) :: x
f = 317 + x
end function
diff --git a/gcc/testsuite/gfortran.dg/proc_ptr_32.f90 b/gcc/testsuite/gfortran.dg/proc_ptr_32.f90
index 9cae65be0d8..9b1ed582bd1 100644
--- a/gcc/testsuite/gfortran.dg/proc_ptr_32.f90
+++ b/gcc/testsuite/gfortran.dg/proc_ptr_32.f90
@@ -5,8 +5,8 @@
! Contributed by James Van Buskirk
implicit none
- procedure(my_dcos), pointer :: f
- f => my_dcos ! { dg-error "invalid in procedure pointer assignment" }
+ procedure(my_dcos), pointer :: f ! { dg-error "Procedure pointer 'f' at .1. shall not be elemental" }
+ f => my_dcos ! { dg-error "Nonintrinsic elemental procedure 'my_dcos' is invalid in procedure pointer assignment" }
contains
real elemental function my_dcos(x)
real, intent(in) :: x
diff --git a/gcc/testsuite/gfortran.dg/proc_ptr_33.f90 b/gcc/testsuite/gfortran.dg/proc_ptr_33.f90
index 973162bf5e0..30014610a01 100644
--- a/gcc/testsuite/gfortran.dg/proc_ptr_33.f90
+++ b/gcc/testsuite/gfortran.dg/proc_ptr_33.f90
@@ -22,7 +22,7 @@ end module
program start
use funcs
implicit none
- procedure(fun), pointer :: f
+ procedure(fun), pointer :: f ! { dg-error "Procedure pointer 'f' at .1. shall not be elemental" }
real x(3)
x = [1,2,3]
f => my_dcos ! { dg-error "Mismatch in PURE attribute" }
diff --git a/gcc/testsuite/gfortran.dg/proc_ptr_result_1.f90 b/gcc/testsuite/gfortran.dg/proc_ptr_result_1.f90
index a7ea21821d7..4a8020e35b8 100644
--- a/gcc/testsuite/gfortran.dg/proc_ptr_result_1.f90
+++ b/gcc/testsuite/gfortran.dg/proc_ptr_result_1.f90
@@ -171,7 +171,13 @@ contains
end function
function l()
- procedure(iabs),pointer :: l
+ ! we cannot use iabs directly as it is elemental
+ abstract interface
+ pure function interf_iabs(x)
+ integer, intent(in) :: x
+ end function interf_iabs
+ end interface
+ procedure(interf_iabs),pointer :: l
integer :: i
l => iabs
if (l(-11)/=11) call abort()
diff --git a/gcc/testsuite/gfortran.dg/proc_ptr_result_4.f90 b/gcc/testsuite/gfortran.dg/proc_ptr_result_4.f90
index 97e67e558ef..5dd67bfe2bb 100644
--- a/gcc/testsuite/gfortran.dg/proc_ptr_result_4.f90
+++ b/gcc/testsuite/gfortran.dg/proc_ptr_result_4.f90
@@ -8,7 +8,13 @@ contains
function f()
intrinsic :: sin
- procedure(sin), pointer :: f
+ abstract interface
+ pure real function sin_interf(x)
+ real, intent(in) :: x
+ end function sin_interf
+ end interface
+ ! We cannot use "sin" directly as it is ELEMENTAL
+ procedure(sin_interf), pointer :: f
f => sin
end function f
diff --git a/gcc/testsuite/gfortran.dg/proc_ptr_result_7.f90 b/gcc/testsuite/gfortran.dg/proc_ptr_result_7.f90
index 1d810c6b5fa..b77e40b7b69 100644
--- a/gcc/testsuite/gfortran.dg/proc_ptr_result_7.f90
+++ b/gcc/testsuite/gfortran.dg/proc_ptr_result_7.f90
@@ -9,7 +9,14 @@ type :: t
end type
type(t) :: x
-procedure(iabs), pointer :: pp
+
+! We cannot use "iabs" directly as it is elemental.
+abstract interface
+ pure integer function interf_iabs(x)
+ integer, intent(in) :: x
+ end function interf_iabs
+end interface
+procedure(interf_iabs), pointer :: pp
x%p => a
@@ -20,7 +27,7 @@ if (pp(-3) /= 3) call abort
contains
function a() result (b)
- procedure(iabs), pointer :: b
+ procedure(interf_iabs), pointer :: b
b => iabs
end function
diff --git a/gcc/testsuite/gfortran.dg/proc_ptr_result_8.f90 b/gcc/testsuite/gfortran.dg/proc_ptr_result_8.f90
index 17812bc4422..be23f5196cd 100644
--- a/gcc/testsuite/gfortran.dg/proc_ptr_result_8.f90
+++ b/gcc/testsuite/gfortran.dg/proc_ptr_result_8.f90
@@ -26,7 +26,14 @@ type :: t
end type
type(t) :: x
-procedure(iabs), pointer :: pp
+! We cannot use iabs directly as it is elemental
+abstract interface
+ integer pure function interf_iabs(x)
+ integer, intent(in) :: x
+ end function interf_iabs
+end interface
+
+procedure(interf_iabs), pointer :: pp
procedure(foo), pointer :: pp1
x%p => a ! ok
@@ -47,7 +54,7 @@ contains
function a (c) result (b)
integer, intent(in) :: c
- procedure(iabs), pointer :: b
+ procedure(interf_iabs), pointer :: b
if (c .eq. 1) then
b => iabs
else
@@ -55,7 +62,7 @@ contains
end if
end function
- integer function foo (arg)
+ pure integer function foo (arg)
integer, intent (in) :: arg
foo = -iabs(arg)
end function
diff --git a/gcc/testsuite/gnat.dg/misaligned_volatile.adb b/gcc/testsuite/gnat.dg/misaligned_volatile.adb
new file mode 100644
index 00000000000..c76975b3ecd
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/misaligned_volatile.adb
@@ -0,0 +1,28 @@
+-- { dg-do run }
+-- { dg-options "-gnatp -fstrict-volatile-bitfields" }
+
+procedure Misaligned_Volatile is
+
+ type Byte is mod 2**8;
+
+ type Block is record
+ B : Boolean;
+ V : Byte;
+ end record;
+ pragma Volatile (Block);
+ pragma Pack (Block);
+ for Block'Alignment use 1;
+
+ type Pair is array (1 .. 2) of Block;
+
+ P : Pair;
+begin
+ for K in P'Range loop
+ P(K).V := 237;
+ end loop;
+ for K in P'Range loop
+ if P(K).V /= 237 then
+ raise Program_error;
+ end if;
+ end loop;
+end;
diff --git a/gcc/testsuite/gnat.dg/pack19.adb b/gcc/testsuite/gnat.dg/pack19.adb
new file mode 100644
index 00000000000..601039ac0ff
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/pack19.adb
@@ -0,0 +1,56 @@
+-- { dg-do run }
+
+procedure Pack19 is
+
+ subtype Always_False is Boolean range False .. False;
+
+ type Rec1 is record
+ B1 : Boolean;
+ B2 : Boolean;
+ B3 : Boolean;
+ B4 : Boolean;
+ B5 : Boolean;
+ B6 : Boolean;
+ B7 : Always_False;
+ B8 : Boolean;
+ end record;
+ pragma Pack (Rec1);
+
+ subtype Always_True is Boolean range True .. True;
+
+ type Rec2 is record
+ B1 : Boolean;
+ B2 : Boolean;
+ B3 : Boolean;
+ B4 : Boolean;
+ B5 : Boolean;
+ B6 : Boolean;
+ B7 : Always_True;
+ B8 : Boolean;
+ end record;
+ pragma Pack (Rec2);
+
+ R1 : Rec1 := (True, True, True, True, True, True, False, False);
+ R2 : Rec2 := (False, False, False, False, False, False, True, True);
+
+begin
+ R1.B8 := True;
+ if R1.B7 /= False then
+ raise Program_Error;
+ end if;
+
+ R1.B7 := False;
+ if R1.B7 /= False then
+ raise Program_Error;
+ end if;
+
+ R2.B8 := False;
+ if R2.B7 /= True then
+ raise Program_Error;
+ end if;
+
+ R2.B7 := True;
+ if R2.B7 /= True then
+ raise Program_Error;
+ end if;
+end;
diff --git a/gcc/testsuite/go.test/test/bench/shootout/timing.log b/gcc/testsuite/go.test/test/bench/shootout/timing.log
index ee1f889b4a5..4e7d17a11b9 100644
--- a/gcc/testsuite/go.test/test/bench/shootout/timing.log
+++ b/gcc/testsuite/go.test/test/bench/shootout/timing.log
@@ -1161,3 +1161,94 @@ chameneos 6000000
gccgo -O2 chameneosredux.go 11.28u 6.68s 18.00r
gc chameneosredux 6.94u 0.00s 6.96r
+# May 23, 2013
+# Go 1.1, which includes precise GC, new scheduler, faster maps.
+# 20%-ish speedups across many benchmarks.
+# gccgo showing significant improvement (even though it's not yet up to Go 1.1)
+#
+# Standouts:
+# fannkuch, regex-dna, k-nucleotide, threadring, chameneos
+
+fasta -n 25000000
+ gcc -m64 -O2 fasta.c 1.54u 0.01s 1.55r
+ gccgo -O2 fasta.go 1.42u 0.00s 1.43r
+ gc fasta 1.50u 0.01s 1.52r # -16%
+ gc_B fasta 1.46u 0.00s 1.46r # -17%
+
+reverse-complement < output-of-fasta-25000000
+ gcc -m64 -O2 reverse-complement.c 0.87u 0.37s 4.36r
+ gccgo -O2 reverse-complement.go 0.77u 0.15s 0.93r # -15%
+ gc reverse-complement 0.99u 0.12s 1.12r # -15%
+ gc_B reverse-complement 0.85u 0.17s 1.02r # -21%
+
+nbody -n 50000000
+ gcc -m64 -O2 nbody.c -lm 13.50u 0.00s 13.53r
+ gccgo -O2 nbody.go 13.98u 0.01s 14.02r
+ gc nbody 16.63u 0.01s 16.67r
+ gc_B nbody 15.74u 0.00s 15.76r
+
+binary-tree 15 # too slow to use 20
+ gcc -m64 -O2 binary-tree.c -lm 0.61u 0.00s 0.61r
+ gccgo -O2 binary-tree.go 1.11u 0.01s 1.12r # -13%
+ gccgo -O2 binary-tree-freelist.go 0.22u 0.01s 0.23r
+ gc binary-tree 1.83u 0.02s 1.83r # -7%
+ gc binary-tree-freelist 0.32u 0.00s 0.32r
+
+fannkuch 12
+ gcc -m64 -O2 fannkuch.c 45.56u 0.00s 45.67r
+ gccgo -O2 fannkuch.go 57.71u 0.00s 57.85r # -4%
+ gccgo -O2 fannkuch-parallel.go 146.31u 0.00s 37.50r #-37%
+ gc fannkuch 70.06u 0.03s 70.17r # -3%
+ gc fannkuch-parallel 131.88u 0.06s 33.59r # -23%
+ gc_B fannkuch 45.55u 0.02s 45.63r # -15%
+
+regex-dna 100000
+ gcc -m64 -O2 regex-dna.c -lpcre 0.44u 0.01s 0.45r
+ gccgo -O2 regex-dna.go 5.59u 0.00s 5.61r # -14%
+ gccgo -O2 regex-dna-parallel.go 10.85u 0.30s 3.34r # -24%
+ gc regex-dna 2.23u 0.01s 2.25r # -43%
+ gc regex-dna-parallel 2.35u 0.00s 0.93r # -40%
+ gc_B regex-dna 2.24u 0.01s 2.25r # -43%
+
+spectral-norm 5500
+ gcc -m64 -O2 spectral-norm.c -lm 14.84u 0.00s 14.88r
+ gccgo -O2 spectral-norm.go 15.33u 0.00s 15.37r
+ gc spectral-norm 16.75u 0.02s 16.79r # -15%
+ gc_B spectral-norm 16.77u 0.01s 16.79r # -15%
+
+k-nucleotide 1000000
+ gcc -O2 k-nucleotide.c -I/usr/include/glib-2.0 -I/usr/lib/x86_64-linux-gnu/glib-2.0/include -lglib-2.0 4.50u 0.00s 4.52r
+ gccgo -O2 k-nucleotide.go 3.72u 0.04s 3.77r # -21%
+ gccgo -O2 k-nucleotide-parallel.go 3.88u 0.03s 1.42r # -35%
+ gc k-nucleotide 6.32u 0.01s 6.33r # -31%
+ gc k-nucleotide-parallel 6.47u 0.05s 2.13r # -33%
+ gc_B k-nucleotide 6.45u 0.01s 6.47r # - 28%
+
+mandelbrot 16000
+ gcc -m64 -O2 mandelbrot.c 36.03u 0.00s 36.11r
+ gccgo -O2 mandelbrot.go 37.61u 0.00s 37.74r # -14%
+ gc mandelbrot 38.19u 0.05s 38.29r
+ gc_B mandelbrot 38.19u 0.03s 38.26r
+
+meteor 2098
+ gcc -m64 -O2 meteor-contest.c 0.08u 0.00s 0.08r
+ gccgo -O2 meteor-contest.go 0.09u 0.01s 0.10r
+ gc meteor-contest 0.12u 0.00s 0.12r # -15% although perhaps just noise
+ gc_B meteor-contest 0.11u 0.00s 0.12r # -8% although perhaps just noise
+
+pidigits 10000
+ gcc -m64 -O2 pidigits.c -lgmp 2.27u 0.00s 2.28r
+ gccgo -O2 pidigits.go 8.95u 0.02s 8.99r
+ gc pidigits 2.88u 0.14s 2.91r
+ gc_B pidigits 2.92u 0.10s 2.91r
+
+threadring 50000000
+ gcc -m64 -O2 threadring.c -lpthread 14.75u 167.88s 212.23r
+ gccgo -O2 threadring.go 36.72u 12.08s 48.91r # -29%
+ gc threadring 10.93u 0.01s 10.95r # -16%
+
+chameneos 6000000
+ gcc -m64 -O2 chameneosredux.c -lpthread 8.89u 56.62s 9.75r
+ gccgo -O2 chameneosredux.go 9.48u 2.48s 11.99r # -33%
+ gc chameneosredux 5.80u 0.00s 5.81r # -16%
+
diff --git a/gcc/testsuite/go.test/test/blank.go b/gcc/testsuite/go.test/test/blank.go
index 7f7d9f6f7fb..0539debb1f2 100644
--- a/gcc/testsuite/go.test/test/blank.go
+++ b/gcc/testsuite/go.test/test/blank.go
@@ -27,6 +27,10 @@ func (T) _() {
func (T) _() {
}
+type U struct {
+ _ struct{ a, b, c int }
+}
+
const (
c0 = iota
_
@@ -107,8 +111,7 @@ func main() {
panic(sum)
}
- // exp/ssa/interp doesn't yet skip blank fields in struct
- // equivalence. It also cannot support unsafe.Pointer.
+ // go.tools/ssa/interp cannot support unsafe.Pointer.
if os.Getenv("GOSSAINTERP") == "" {
type T1 struct{ x, y, z int }
t1 := *(*T)(unsafe.Pointer(&T1{1, 2, 3}))
@@ -116,6 +119,13 @@ func main() {
if t1 != t2 {
panic("T{} != T{}")
}
+
+ var u1, u2 interface{}
+ u1 = *(*U)(unsafe.Pointer(&T1{1, 2, 3}))
+ u2 = *(*U)(unsafe.Pointer(&T1{4, 5, 6}))
+ if u1 != u2 {
+ panic("U{} != U{}")
+ }
}
h(a, b)
diff --git a/gcc/testsuite/go.test/test/blank1.go b/gcc/testsuite/go.test/test/blank1.go
index f7e98b44168..54a72976b77 100644
--- a/gcc/testsuite/go.test/test/blank1.go
+++ b/gcc/testsuite/go.test/test/blank1.go
@@ -13,9 +13,16 @@ var t struct {
_ int
}
+type T struct {
+ _ []int
+}
+
func main() {
_() // ERROR "cannot use _ as value"
x := _+1 // ERROR "cannot use _ as value"
_ = x
_ = t._ // ERROR "cannot refer to blank field|invalid use of"
+
+ var v1, v2 T
+ _ = v1 == v2 // ERROR "cannot be compared|non-comparable"
}
diff --git a/gcc/testsuite/go.test/test/chan/doubleselect.go b/gcc/testsuite/go.test/test/chan/doubleselect.go
index ac559302d9b..6be3faf55a9 100644
--- a/gcc/testsuite/go.test/test/chan/doubleselect.go
+++ b/gcc/testsuite/go.test/test/chan/doubleselect.go
@@ -36,7 +36,7 @@ func sender(n int, c1, c2, c3, c4 chan<- int) {
}
// mux receives the values from sender and forwards them onto another channel.
-// It would be simplier to just have sender's four cases all be the same
+// It would be simpler to just have sender's four cases all be the same
// channel, but this doesn't actually trigger the bug.
func mux(out chan<- int, in <-chan int, done chan<- bool) {
for v := range in {
diff --git a/gcc/testsuite/go.test/test/chan/select2.go b/gcc/testsuite/go.test/test/chan/select2.go
index 4a081391261..ccf9dab81bc 100644
--- a/gcc/testsuite/go.test/test/chan/select2.go
+++ b/gcc/testsuite/go.test/test/chan/select2.go
@@ -47,7 +47,8 @@ func main() {
runtime.GC()
runtime.ReadMemStats(memstats)
- if memstats.Alloc-alloc > 1.1e5 {
+ // Be careful to avoid wraparound.
+ if memstats.Alloc > alloc && memstats.Alloc-alloc > 1.1e5 {
println("BUG: too much memory for 100,000 selects:", memstats.Alloc-alloc)
}
}
diff --git a/gcc/testsuite/go.test/test/cmp.go b/gcc/testsuite/go.test/test/cmp.go
index 5be64561d59..73de502f39f 100644
--- a/gcc/testsuite/go.test/test/cmp.go
+++ b/gcc/testsuite/go.test/test/cmp.go
@@ -43,8 +43,8 @@ func main() {
var d string = "hel" // try to get different pointer
d = d + "lo"
- // exp/ssa/interp can't handle unsafe.Pointer.
- if os.Getenv("GOSSAINTERP") != "" {
+ // go.tools/ssa/interp can't handle unsafe.Pointer.
+ if os.Getenv("GOSSAINTERP") == "" {
if stringptr(c) == stringptr(d) {
panic("compiler too smart -- got same string")
}
@@ -296,7 +296,7 @@ func main() {
{
var x = struct {
x int
- _ []int
+ _ string
y float64
_ float64
z int
diff --git a/gcc/testsuite/go.test/test/cmp6.go b/gcc/testsuite/go.test/test/cmp6.go
index 7d99aae18b3..839c274bcca 100644
--- a/gcc/testsuite/go.test/test/cmp6.go
+++ b/gcc/testsuite/go.test/test/cmp6.go
@@ -53,7 +53,7 @@ func main() {
// Comparison of structs should have a good message
use(t3 == t3) // ERROR "struct|expected"
- use(t4 == t4) // ok; the []int is a blank field
+ use(t4 == t4) // ERROR "cannot be compared|non-comparable"
// Slices, functions, and maps too.
var x []int
diff --git a/gcc/testsuite/go.test/test/deferfin.go b/gcc/testsuite/go.test/test/deferfin.go
new file mode 100644
index 00000000000..fa5a93354dc
--- /dev/null
+++ b/gcc/testsuite/go.test/test/deferfin.go
@@ -0,0 +1,63 @@
+// run
+
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test that defers do not prevent garbage collection.
+
+package main
+
+import (
+ "runtime"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+var sink func()
+
+func main() {
+ // Does not work on 32-bits due to partially conservative GC.
+ // Try to enable when we have fully precise GC.
+ if runtime.GOARCH != "amd64" {
+ return
+ }
+ // Likewise for gccgo.
+ if runtime.Compiler == "gccgo" {
+ return
+ }
+ N := 10
+ count := int32(N)
+ var wg sync.WaitGroup
+ wg.Add(N)
+ for i := 0; i < N; i++ {
+ go func() {
+ defer wg.Done()
+ v := new(int)
+ f := func() {
+ if *v != 0 {
+ panic("oops")
+ }
+ }
+ if *v != 0 {
+ // let the compiler think f escapes
+ sink = f
+ }
+ runtime.SetFinalizer(v, func(p *int) {
+ atomic.AddInt32(&count, -1)
+ })
+ defer f()
+ }()
+ }
+ wg.Wait()
+ for i := 0; i < 3; i++ {
+ time.Sleep(10 * time.Millisecond)
+ runtime.GC()
+ }
+ if count != 0 {
+ println(count, "out of", N, "finalizer are not called")
+ panic("not all finalizers are called")
+ }
+}
+
diff --git a/gcc/testsuite/go.test/test/divmod.go b/gcc/testsuite/go.test/test/divmod.go
new file mode 100644
index 00000000000..49fed0222c6
--- /dev/null
+++ b/gcc/testsuite/go.test/test/divmod.go
@@ -0,0 +1,460 @@
+// run
+
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test division of variables. Generate many test cases,
+// compute correct answer using shift and subtract,
+// and then compare against results from divison and
+// modulus operators.
+//
+// Primarily useful for testing software div/mod.
+
+package main
+
+const long = false
+
+func main() {
+ if long {
+ // About 3e9 test cases (calls to checkdiv3).
+ // Too long for everyday testing.
+ gen2(3, 64, 2, 64, checkdiv1)
+ println(ntest)
+ } else {
+ // About 4e6 test cases (calls to checkdiv3).
+ // Runs for 8 seconds on ARM chromebook, much faster elsewhere.
+ gen2(2, 64, 1, 64, checkdiv1)
+ }
+}
+
+// generate all uint64 values x where x has at most n bits set in the low w
+// and call f(x) for each.
+func gen1(n, w int, f func(uint64)) {
+ gen(0, 0, n, w-1, f)
+}
+
+func gen(val uint64, nbits, maxbits, pos int, f func(uint64)) {
+ if pos < 0 {
+ f(val)
+ return
+ }
+ gen(val, nbits, maxbits, pos-1, f)
+ if nbits < maxbits {
+ gen(val|1<<uint(pos), nbits+1, maxbits, pos-1, f)
+ }
+}
+
+// generate all uint64 values x, y where x has at most n1 bits set in the low w1
+// and y has at most n2 bits set in the low w2 and call f(x, y) for each.
+func gen2(n1, w1, n2, w2 int, f func(uint64, uint64)) {
+ gen1(n1, w1, func(x uint64) {
+ gen1(n2, w2, func(y uint64) {
+ f(x, y)
+ })
+ })
+}
+
+// x and y are uint64s with at most 2 bits set.
+// Check those values and values above and below,
+// along with bitwise inversions of the same (done in checkdiv2).
+func checkdiv1(x, y uint64) {
+ checkdiv2(x, y)
+ // If the low bit is set in x or y, adding or subtracting 1
+ // produces a number that checkdiv1 is going to be called
+ // with anyway, so don't duplicate effort.
+ if x&1 == 0 {
+ checkdiv2(x+1, y)
+ checkdiv2(x-1, y)
+ }
+ if y&1 == 0 {
+ checkdiv2(x, y-1)
+ checkdiv2(x, y+1)
+ if x&1 == 0 {
+ checkdiv2(x+1, y-1)
+ checkdiv2(x-1, y-1)
+ checkdiv2(x-1, y+1)
+ checkdiv2(x+1, y+1)
+ }
+ }
+}
+
+func checkdiv2(x, y uint64) {
+ checkdiv3(x, y)
+ checkdiv3(^x, y)
+ checkdiv3(x, ^y)
+ checkdiv3(^x, ^y)
+}
+
+var ntest int64 = 0
+
+func checkdiv3(x, y uint64) {
+ ntest++
+ if ntest&(ntest-1) == 0 && long {
+ println(ntest, "...")
+ }
+ checkuint64(x, y)
+ if (uint64(uint32(x)) == x || uint64(uint32(^x)) == ^x) && (uint64(uint32(y)) == y || uint64(uint32(^y)) == ^y) {
+ checkuint32(uint32(x), uint32(y))
+ }
+ if (uint64(uint16(x)) == x || uint64(uint16(^x)) == ^x) && (uint64(uint16(y)) == y || uint64(uint16(^y)) == ^y) {
+ checkuint16(uint16(x), uint16(y))
+ }
+ if (uint64(uint8(x)) == x || uint64(uint8(^x)) == ^x) && (uint64(uint8(y)) == y || uint64(uint8(^y)) == ^y) {
+ checkuint8(uint8(x), uint8(y))
+ }
+
+
+ sx := int64(x)
+ sy := int64(y)
+ checkint64(sx, sy)
+ if (int64(int32(sx)) == sx || int64(int32(^sx)) == ^sx) && (int64(int32(sy)) == sy || int64(int32(^sy)) == ^sy) {
+ checkint32(int32(sx), int32(sy))
+ }
+ if (int64(int16(sx)) == sx || int64(int16(^sx)) == ^sx) && (int64(int16(sy)) == sy || int64(int16(^sy)) == ^sy) {
+ checkint16(int16(sx), int16(sy))
+ }
+ if (int64(int8(sx)) == sx || int64(int8(^sx)) == ^sx) && (int64(int8(sy)) == sy || int64(int8(^sy)) == ^sy) {
+ checkint8(int8(sx), int8(sy))
+ }
+}
+
+// Check result of x/y, x%y for various types.
+
+func checkuint(x, y uint) {
+ if y == 0 {
+ divzerouint(x, y)
+ modzerouint(x, y)
+ return
+ }
+ q, r := udiv(uint64(x), uint64(y))
+ q1 := x/y
+ r1 := x%y
+ if q1 != uint(q) {
+ print("uint(", x, "/", y, ") = ", q1, ", want ", q, "\n")
+ }
+ if r1 != uint(r) {
+ print("uint(", x, "%", y, ") = ", r1, ", want ", r, "\n")
+ }
+}
+
+func checkuint64(x, y uint64) {
+ if y == 0 {
+ divzerouint64(x, y)
+ modzerouint64(x, y)
+ return
+ }
+ q, r := udiv(x, y)
+ q1 := x/y
+ r1 := x%y
+ if q1 != q {
+ print("uint64(", x, "/", y, ") = ", q1, ", want ", q, "\n")
+ }
+ if r1 != r {
+ print("uint64(", x, "%", y, ") = ", r1, ", want ", r, "\n")
+ }
+}
+
+func checkuint32(x, y uint32) {
+ if y == 0 {
+ divzerouint32(x, y)
+ modzerouint32(x, y)
+ return
+ }
+ q, r := udiv(uint64(x), uint64(y))
+ q1 := x/y
+ r1 := x%y
+ if q1 != uint32(q) {
+ print("uint32(", x, "/", y, ") = ", q1, ", want ", q, "\n")
+ }
+ if r1 != uint32(r) {
+ print("uint32(", x, "%", y, ") = ", r1, ", want ", r, "\n")
+ }
+}
+
+func checkuint16(x, y uint16) {
+ if y == 0 {
+ divzerouint16(x, y)
+ modzerouint16(x, y)
+ return
+ }
+ q, r := udiv(uint64(x), uint64(y))
+ q1 := x/y
+ r1 := x%y
+ if q1 != uint16(q) {
+ print("uint16(", x, "/", y, ") = ", q1, ", want ", q, "\n")
+ }
+ if r1 != uint16(r) {
+ print("uint16(", x, "%", y, ") = ", r1, ", want ", r, "\n")
+ }
+}
+
+func checkuint8(x, y uint8) {
+ if y == 0 {
+ divzerouint8(x, y)
+ modzerouint8(x, y)
+ return
+ }
+ q, r := udiv(uint64(x), uint64(y))
+ q1 := x/y
+ r1 := x%y
+ if q1 != uint8(q) {
+ print("uint8(", x, "/", y, ") = ", q1, ", want ", q, "\n")
+ }
+ if r1 != uint8(r) {
+ print("uint8(", x, "%", y, ") = ", r1, ", want ", r, "\n")
+ }
+}
+
+func checkint(x, y int) {
+ if y == 0 {
+ divzeroint(x, y)
+ modzeroint(x, y)
+ return
+ }
+ q, r := idiv(int64(x), int64(y))
+ q1 := x/y
+ r1 := x%y
+ if q1 != int(q) {
+ print("int(", x, "/", y, ") = ", q1, ", want ", q, "\n")
+ }
+ if r1 != int(r) {
+ print("int(", x, "%", y, ") = ", r1, ", want ", r, "\n")
+ }
+}
+
+func checkint64(x, y int64) {
+ if y == 0 {
+ divzeroint64(x, y)
+ modzeroint64(x, y)
+ return
+ }
+ q, r := idiv(x, y)
+ q1 := x/y
+ r1 := x%y
+ if q1 != q {
+ print("int64(", x, "/", y, ") = ", q1, ", want ", q, "\n")
+ }
+ if r1 != r {
+ print("int64(", x, "%", y, ") = ", r1, ", want ", r, "\n")
+ }
+}
+
+func checkint32(x, y int32) {
+ if y == 0 {
+ divzeroint32(x, y)
+ modzeroint32(x, y)
+ return
+ }
+ q, r := idiv(int64(x), int64(y))
+ q1 := x/y
+ r1 := x%y
+ if q1 != int32(q) {
+ print("int32(", x, "/", y, ") = ", q1, ", want ", q, "\n")
+ }
+ if r1 != int32(r) {
+ print("int32(", x, "%", y, ") = ", r1, ", want ", r, "\n")
+ }
+}
+
+func checkint16(x, y int16) {
+ if y == 0 {
+ divzeroint16(x, y)
+ modzeroint16(x, y)
+ return
+ }
+ q, r := idiv(int64(x), int64(y))
+ q1 := x/y
+ r1 := x%y
+ if q1 != int16(q) {
+ print("int16(", x, "/", y, ") = ", q1, ", want ", q, "\n")
+ }
+ if r1 != int16(r) {
+ print("int16(", x, "%", y, ") = ", r1, ", want ", r, "\n")
+ }
+}
+
+func checkint8(x, y int8) {
+ if y == 0 {
+ divzeroint8(x, y)
+ modzeroint8(x, y)
+ return
+ }
+ q, r := idiv(int64(x), int64(y))
+ q1 := x/y
+ r1 := x%y
+ if q1 != int8(q) {
+ print("int8(", x, "/", y, ") = ", q1, ", want ", q, "\n")
+ }
+ if r1 != int8(r) {
+ print("int8(", x, "%", y, ") = ", r1, ", want ", r, "\n")
+ }
+}
+
+func divzerouint(x, y uint) uint {
+ defer checkudivzero("uint", uint64(x))
+ return x / y
+}
+
+func divzerouint64(x, y uint64) uint64 {
+ defer checkudivzero("uint64", uint64(x))
+ return x / y
+}
+
+func divzerouint32(x, y uint32) uint32 {
+ defer checkudivzero("uint32", uint64(x))
+ return x / y
+}
+
+func divzerouint16(x, y uint16) uint16 {
+ defer checkudivzero("uint16", uint64(x))
+ return x / y
+}
+
+func divzerouint8(x, y uint8) uint8 {
+ defer checkudivzero("uint8", uint64(x))
+ return x / y
+}
+
+func checkudivzero(typ string, x uint64) {
+ if recover() == nil {
+ print(typ, "(", x, " / 0) did not panic")
+ }
+}
+
+func divzeroint(x, y int) int {
+ defer checkdivzero("int", int64(x))
+ return x / y
+}
+
+func divzeroint64(x, y int64) int64 {
+ defer checkdivzero("int64", int64(x))
+ return x / y
+}
+
+func divzeroint32(x, y int32) int32 {
+ defer checkdivzero("int32", int64(x))
+ return x / y
+}
+
+func divzeroint16(x, y int16) int16 {
+ defer checkdivzero("int16", int64(x))
+ return x / y
+}
+
+func divzeroint8(x, y int8) int8 {
+ defer checkdivzero("int8", int64(x))
+ return x / y
+}
+
+func checkdivzero(typ string, x int64) {
+ if recover() == nil {
+ print(typ, "(", x, " / 0) did not panic")
+ }
+}
+
+func modzerouint(x, y uint) uint {
+ defer checkumodzero("uint", uint64(x))
+ return x % y
+}
+
+func modzerouint64(x, y uint64) uint64 {
+ defer checkumodzero("uint64", uint64(x))
+ return x % y
+}
+
+func modzerouint32(x, y uint32) uint32 {
+ defer checkumodzero("uint32", uint64(x))
+ return x % y
+}
+
+func modzerouint16(x, y uint16) uint16 {
+ defer checkumodzero("uint16", uint64(x))
+ return x % y
+}
+
+func modzerouint8(x, y uint8) uint8 {
+ defer checkumodzero("uint8", uint64(x))
+ return x % y
+}
+
+func checkumodzero(typ string, x uint64) {
+ if recover() == nil {
+ print(typ, "(", x, " % 0) did not panic")
+ }
+}
+
+func modzeroint(x, y int) int {
+ defer checkmodzero("int", int64(x))
+ return x % y
+}
+
+func modzeroint64(x, y int64) int64 {
+ defer checkmodzero("int64", int64(x))
+ return x % y
+}
+
+func modzeroint32(x, y int32) int32 {
+ defer checkmodzero("int32", int64(x))
+ return x % y
+}
+
+func modzeroint16(x, y int16) int16 {
+ defer checkmodzero("int16", int64(x))
+ return x % y
+}
+
+func modzeroint8(x, y int8) int8 {
+ defer checkmodzero("int8", int64(x))
+ return x % y
+}
+
+func checkmodzero(typ string, x int64) {
+ if recover() == nil {
+ print(typ, "(", x, " % 0) did not panic")
+ }
+}
+
+// unsigned divide and mod using shift and subtract.
+func udiv(x, y uint64) (q, r uint64) {
+ sh := 0
+ for y+y > y && y+y <= x {
+ sh++
+ y <<= 1
+ }
+ for ; sh >= 0; sh-- {
+ q <<= 1
+ if x >= y {
+ x -= y
+ q |= 1
+ }
+ y >>= 1
+ }
+ return q, x
+}
+
+// signed divide and mod: do unsigned and adjust signs.
+func idiv(x, y int64) (q, r int64) {
+ // special case for minint / -1 = minint
+ if x-1 > x && y == -1 {
+ return x, 0
+ }
+ ux := uint64(x)
+ uy := uint64(y)
+ if x < 0 {
+ ux = -ux
+ }
+ if y < 0 {
+ uy = -uy
+ }
+ uq, ur := udiv(ux, uy)
+ q = int64(uq)
+ r = int64(ur)
+ if x < 0 {
+ r = -r
+ }
+ if (x < 0) != (y < 0) {
+ q = -q
+ }
+ return q, r
+}
diff --git a/gcc/testsuite/go.test/test/errchk b/gcc/testsuite/go.test/test/errchk
index b8b312a9237..de0c4fd2f87 100755
--- a/gcc/testsuite/go.test/test/errchk
+++ b/gcc/testsuite/go.test/test/errchk
@@ -1,4 +1,4 @@
-#!/usr/bin/perl
+#!/usr/bin/env perl
# Copyright 2009 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
diff --git a/gcc/testsuite/go.test/test/escape2.go b/gcc/testsuite/go.test/test/escape2.go
index 511b74a1cca..be89c2d8408 100644
--- a/gcc/testsuite/go.test/test/escape2.go
+++ b/gcc/testsuite/go.test/test/escape2.go
@@ -1136,6 +1136,7 @@ func foo126() {
px = &i // ERROR "&i escapes"
}()
}
+ _ = px
}
var px *int
@@ -1325,3 +1326,34 @@ func foo142() {
t := new(Tm) // ERROR "escapes to heap"
gf = t.M // ERROR "t.M escapes to heap"
}
+
+// issue 3888.
+func foo143() {
+ for i := 0; i < 1000; i++ {
+ func() { // ERROR "func literal does not escape"
+ for i := 0; i < 1; i++ {
+ var t Tm
+ t.M() // ERROR "t does not escape"
+ }
+ }()
+ }
+}
+
+// issue 5773
+// Check that annotations take effect regardless of whether they
+// are before or after the use in the source code.
+
+//go:noescape
+
+func foo144a(*int)
+
+func foo144() {
+ var x int
+ foo144a(&x) // ERROR "&x does not escape"
+ var y int
+ foo144b(&y) // ERROR "&y does not escape"
+}
+
+//go:noescape
+
+func foo144b(*int)
diff --git a/gcc/testsuite/go.test/test/escape5.go b/gcc/testsuite/go.test/test/escape5.go
index 6b327fe9e31..c9646872d51 100644
--- a/gcc/testsuite/go.test/test/escape5.go
+++ b/gcc/testsuite/go.test/test/escape5.go
@@ -142,3 +142,10 @@ func f9() {
var j T1 // ERROR "moved to heap: j"
f8(&j) // ERROR "&j escapes to heap"
}
+
+func f10() {
+ // These don't escape but are too big for the stack
+ var x [1<<30]byte // ERROR "moved to heap: x"
+ var y = make([]byte, 1<<30) // ERROR "does not escape"
+ _ = x[0] + y[0]
+}
diff --git a/gcc/testsuite/go.test/test/fixedbugs/bug191.dir/a.go b/gcc/testsuite/go.test/test/fixedbugs/bug191.dir/a.go
index b87ad6f4fee..139a8a3a230 100644
--- a/gcc/testsuite/go.test/test/fixedbugs/bug191.dir/a.go
+++ b/gcc/testsuite/go.test/test/fixedbugs/bug191.dir/a.go
@@ -4,8 +4,10 @@
package a
+var A int
+
func init() {
- println("a");
+ A = 1
}
type T int;
diff --git a/gcc/testsuite/go.test/test/fixedbugs/bug191.dir/b.go b/gcc/testsuite/go.test/test/fixedbugs/bug191.dir/b.go
index 3e780ac0dd8..36770f6fc99 100644
--- a/gcc/testsuite/go.test/test/fixedbugs/bug191.dir/b.go
+++ b/gcc/testsuite/go.test/test/fixedbugs/bug191.dir/b.go
@@ -4,8 +4,10 @@
package b
+var B int
+
func init() {
- println("b");
+ B = 2
}
type V int;
diff --git a/gcc/testsuite/go.test/test/fixedbugs/bug191.dir/main.go b/gcc/testsuite/go.test/test/fixedbugs/bug191.dir/main.go
index 995134ccfcb..2d24dd12d52 100644
--- a/gcc/testsuite/go.test/test/fixedbugs/bug191.dir/main.go
+++ b/gcc/testsuite/go.test/test/fixedbugs/bug191.dir/main.go
@@ -11,4 +11,7 @@ var _ T
var _ V
func main() {
+ if A != 1 || B != 2 {
+ panic("wrong vars")
+ }
}
diff --git a/gcc/testsuite/go.test/test/fixedbugs/bug295.go b/gcc/testsuite/go.test/test/fixedbugs/bug295.go
index e2e5206ca14..63a12a3a741 100644
--- a/gcc/testsuite/go.test/test/fixedbugs/bug295.go
+++ b/gcc/testsuite/go.test/test/fixedbugs/bug295.go
@@ -6,7 +6,9 @@
package main
-import . "testing" // defines top-level T
+import . "testing" // defines file-level T
+
+type _ B // make use of package "testing" (but don't refer to T)
type S struct {
T int
diff --git a/gcc/testsuite/go.test/test/fixedbugs/bug385_64.go b/gcc/testsuite/go.test/test/fixedbugs/bug385_64.go
index b593cd53cf1..6789c0abf0f 100644
--- a/gcc/testsuite/go.test/test/fixedbugs/bug385_64.go
+++ b/gcc/testsuite/go.test/test/fixedbugs/bug385_64.go
@@ -10,13 +10,214 @@
package main
-func main() { // GC_ERROR "stack frame too large"
- var arr [1000200030]int32
- arr_bkup := arr
- _ = arr_bkup
-}
+var z [10<<20]byte
-func F() { // GC_ERROR "stack frame too large"
- var arr [1 << 30]int32
- _ = arr[42]
+func main() { // GC_ERROR "stack frame too large"
+ // seq 1 206 | sed 's/.*/ var x& [10<<20]byte; z = x&/'
+ var x1 [10<<20]byte; z = x1
+ var x2 [10<<20]byte; z = x2
+ var x3 [10<<20]byte; z = x3
+ var x4 [10<<20]byte; z = x4
+ var x5 [10<<20]byte; z = x5
+ var x6 [10<<20]byte; z = x6
+ var x7 [10<<20]byte; z = x7
+ var x8 [10<<20]byte; z = x8
+ var x9 [10<<20]byte; z = x9
+ var x10 [10<<20]byte; z = x10
+ var x11 [10<<20]byte; z = x11
+ var x12 [10<<20]byte; z = x12
+ var x13 [10<<20]byte; z = x13
+ var x14 [10<<20]byte; z = x14
+ var x15 [10<<20]byte; z = x15
+ var x16 [10<<20]byte; z = x16
+ var x17 [10<<20]byte; z = x17
+ var x18 [10<<20]byte; z = x18
+ var x19 [10<<20]byte; z = x19
+ var x20 [10<<20]byte; z = x20
+ var x21 [10<<20]byte; z = x21
+ var x22 [10<<20]byte; z = x22
+ var x23 [10<<20]byte; z = x23
+ var x24 [10<<20]byte; z = x24
+ var x25 [10<<20]byte; z = x25
+ var x26 [10<<20]byte; z = x26
+ var x27 [10<<20]byte; z = x27
+ var x28 [10<<20]byte; z = x28
+ var x29 [10<<20]byte; z = x29
+ var x30 [10<<20]byte; z = x30
+ var x31 [10<<20]byte; z = x31
+ var x32 [10<<20]byte; z = x32
+ var x33 [10<<20]byte; z = x33
+ var x34 [10<<20]byte; z = x34
+ var x35 [10<<20]byte; z = x35
+ var x36 [10<<20]byte; z = x36
+ var x37 [10<<20]byte; z = x37
+ var x38 [10<<20]byte; z = x38
+ var x39 [10<<20]byte; z = x39
+ var x40 [10<<20]byte; z = x40
+ var x41 [10<<20]byte; z = x41
+ var x42 [10<<20]byte; z = x42
+ var x43 [10<<20]byte; z = x43
+ var x44 [10<<20]byte; z = x44
+ var x45 [10<<20]byte; z = x45
+ var x46 [10<<20]byte; z = x46
+ var x47 [10<<20]byte; z = x47
+ var x48 [10<<20]byte; z = x48
+ var x49 [10<<20]byte; z = x49
+ var x50 [10<<20]byte; z = x50
+ var x51 [10<<20]byte; z = x51
+ var x52 [10<<20]byte; z = x52
+ var x53 [10<<20]byte; z = x53
+ var x54 [10<<20]byte; z = x54
+ var x55 [10<<20]byte; z = x55
+ var x56 [10<<20]byte; z = x56
+ var x57 [10<<20]byte; z = x57
+ var x58 [10<<20]byte; z = x58
+ var x59 [10<<20]byte; z = x59
+ var x60 [10<<20]byte; z = x60
+ var x61 [10<<20]byte; z = x61
+ var x62 [10<<20]byte; z = x62
+ var x63 [10<<20]byte; z = x63
+ var x64 [10<<20]byte; z = x64
+ var x65 [10<<20]byte; z = x65
+ var x66 [10<<20]byte; z = x66
+ var x67 [10<<20]byte; z = x67
+ var x68 [10<<20]byte; z = x68
+ var x69 [10<<20]byte; z = x69
+ var x70 [10<<20]byte; z = x70
+ var x71 [10<<20]byte; z = x71
+ var x72 [10<<20]byte; z = x72
+ var x73 [10<<20]byte; z = x73
+ var x74 [10<<20]byte; z = x74
+ var x75 [10<<20]byte; z = x75
+ var x76 [10<<20]byte; z = x76
+ var x77 [10<<20]byte; z = x77
+ var x78 [10<<20]byte; z = x78
+ var x79 [10<<20]byte; z = x79
+ var x80 [10<<20]byte; z = x80
+ var x81 [10<<20]byte; z = x81
+ var x82 [10<<20]byte; z = x82
+ var x83 [10<<20]byte; z = x83
+ var x84 [10<<20]byte; z = x84
+ var x85 [10<<20]byte; z = x85
+ var x86 [10<<20]byte; z = x86
+ var x87 [10<<20]byte; z = x87
+ var x88 [10<<20]byte; z = x88
+ var x89 [10<<20]byte; z = x89
+ var x90 [10<<20]byte; z = x90
+ var x91 [10<<20]byte; z = x91
+ var x92 [10<<20]byte; z = x92
+ var x93 [10<<20]byte; z = x93
+ var x94 [10<<20]byte; z = x94
+ var x95 [10<<20]byte; z = x95
+ var x96 [10<<20]byte; z = x96
+ var x97 [10<<20]byte; z = x97
+ var x98 [10<<20]byte; z = x98
+ var x99 [10<<20]byte; z = x99
+ var x100 [10<<20]byte; z = x100
+ var x101 [10<<20]byte; z = x101
+ var x102 [10<<20]byte; z = x102
+ var x103 [10<<20]byte; z = x103
+ var x104 [10<<20]byte; z = x104
+ var x105 [10<<20]byte; z = x105
+ var x106 [10<<20]byte; z = x106
+ var x107 [10<<20]byte; z = x107
+ var x108 [10<<20]byte; z = x108
+ var x109 [10<<20]byte; z = x109
+ var x110 [10<<20]byte; z = x110
+ var x111 [10<<20]byte; z = x111
+ var x112 [10<<20]byte; z = x112
+ var x113 [10<<20]byte; z = x113
+ var x114 [10<<20]byte; z = x114
+ var x115 [10<<20]byte; z = x115
+ var x116 [10<<20]byte; z = x116
+ var x117 [10<<20]byte; z = x117
+ var x118 [10<<20]byte; z = x118
+ var x119 [10<<20]byte; z = x119
+ var x120 [10<<20]byte; z = x120
+ var x121 [10<<20]byte; z = x121
+ var x122 [10<<20]byte; z = x122
+ var x123 [10<<20]byte; z = x123
+ var x124 [10<<20]byte; z = x124
+ var x125 [10<<20]byte; z = x125
+ var x126 [10<<20]byte; z = x126
+ var x127 [10<<20]byte; z = x127
+ var x128 [10<<20]byte; z = x128
+ var x129 [10<<20]byte; z = x129
+ var x130 [10<<20]byte; z = x130
+ var x131 [10<<20]byte; z = x131
+ var x132 [10<<20]byte; z = x132
+ var x133 [10<<20]byte; z = x133
+ var x134 [10<<20]byte; z = x134
+ var x135 [10<<20]byte; z = x135
+ var x136 [10<<20]byte; z = x136
+ var x137 [10<<20]byte; z = x137
+ var x138 [10<<20]byte; z = x138
+ var x139 [10<<20]byte; z = x139
+ var x140 [10<<20]byte; z = x140
+ var x141 [10<<20]byte; z = x141
+ var x142 [10<<20]byte; z = x142
+ var x143 [10<<20]byte; z = x143
+ var x144 [10<<20]byte; z = x144
+ var x145 [10<<20]byte; z = x145
+ var x146 [10<<20]byte; z = x146
+ var x147 [10<<20]byte; z = x147
+ var x148 [10<<20]byte; z = x148
+ var x149 [10<<20]byte; z = x149
+ var x150 [10<<20]byte; z = x150
+ var x151 [10<<20]byte; z = x151
+ var x152 [10<<20]byte; z = x152
+ var x153 [10<<20]byte; z = x153
+ var x154 [10<<20]byte; z = x154
+ var x155 [10<<20]byte; z = x155
+ var x156 [10<<20]byte; z = x156
+ var x157 [10<<20]byte; z = x157
+ var x158 [10<<20]byte; z = x158
+ var x159 [10<<20]byte; z = x159
+ var x160 [10<<20]byte; z = x160
+ var x161 [10<<20]byte; z = x161
+ var x162 [10<<20]byte; z = x162
+ var x163 [10<<20]byte; z = x163
+ var x164 [10<<20]byte; z = x164
+ var x165 [10<<20]byte; z = x165
+ var x166 [10<<20]byte; z = x166
+ var x167 [10<<20]byte; z = x167
+ var x168 [10<<20]byte; z = x168
+ var x169 [10<<20]byte; z = x169
+ var x170 [10<<20]byte; z = x170
+ var x171 [10<<20]byte; z = x171
+ var x172 [10<<20]byte; z = x172
+ var x173 [10<<20]byte; z = x173
+ var x174 [10<<20]byte; z = x174
+ var x175 [10<<20]byte; z = x175
+ var x176 [10<<20]byte; z = x176
+ var x177 [10<<20]byte; z = x177
+ var x178 [10<<20]byte; z = x178
+ var x179 [10<<20]byte; z = x179
+ var x180 [10<<20]byte; z = x180
+ var x181 [10<<20]byte; z = x181
+ var x182 [10<<20]byte; z = x182
+ var x183 [10<<20]byte; z = x183
+ var x184 [10<<20]byte; z = x184
+ var x185 [10<<20]byte; z = x185
+ var x186 [10<<20]byte; z = x186
+ var x187 [10<<20]byte; z = x187
+ var x188 [10<<20]byte; z = x188
+ var x189 [10<<20]byte; z = x189
+ var x190 [10<<20]byte; z = x190
+ var x191 [10<<20]byte; z = x191
+ var x192 [10<<20]byte; z = x192
+ var x193 [10<<20]byte; z = x193
+ var x194 [10<<20]byte; z = x194
+ var x195 [10<<20]byte; z = x195
+ var x196 [10<<20]byte; z = x196
+ var x197 [10<<20]byte; z = x197
+ var x198 [10<<20]byte; z = x198
+ var x199 [10<<20]byte; z = x199
+ var x200 [10<<20]byte; z = x200
+ var x201 [10<<20]byte; z = x201
+ var x202 [10<<20]byte; z = x202
+ var x203 [10<<20]byte; z = x203
+ var x204 [10<<20]byte; z = x204
+ var x205 [10<<20]byte; z = x205
+ var x206 [10<<20]byte; z = x206
}
diff --git a/gcc/testsuite/go.test/test/fixedbugs/bug435.go b/gcc/testsuite/go.test/test/fixedbugs/bug435.go
index 9c30b143bcf..45323d8eed6 100644
--- a/gcc/testsuite/go.test/test/fixedbugs/bug435.go
+++ b/gcc/testsuite/go.test/test/fixedbugs/bug435.go
@@ -12,4 +12,4 @@
package main
func foo() {
- bar(1, // ERROR "unexpected|missing|undefined"
+ bar(1, // ERROR "unexpected|missing|undefined" \ No newline at end of file
diff --git a/gcc/testsuite/go.test/test/fixedbugs/bug460.dir/a.go b/gcc/testsuite/go.test/test/fixedbugs/bug460.dir/a.go
index 02a287b3177..29049d9aae5 100644
--- a/gcc/testsuite/go.test/test/fixedbugs/bug460.dir/a.go
+++ b/gcc/testsuite/go.test/test/fixedbugs/bug460.dir/a.go
@@ -6,4 +6,8 @@ package a
type Foo struct {
int
+ int8
+ error
+ rune
+ byte
}
diff --git a/gcc/testsuite/go.test/test/fixedbugs/bug460.dir/b.go b/gcc/testsuite/go.test/test/fixedbugs/bug460.dir/b.go
index 1868afe073e..5c0a0c47e3c 100644
--- a/gcc/testsuite/go.test/test/fixedbugs/bug460.dir/b.go
+++ b/gcc/testsuite/go.test/test/fixedbugs/bug460.dir/b.go
@@ -9,6 +9,9 @@ import "./a"
var x a.Foo
func main() {
- x.int = 20 // ERROR "unexported field"
+ x.int = 20 // ERROR "unexported field"
+ x.int8 = 20 // ERROR "unexported field"
+ x.error = nil // ERROR "unexported field"
+ x.rune = 'a' // ERROR "unexported field"
+ x.byte = 20 // ERROR "unexported field"
}
-
diff --git a/gcc/testsuite/go.test/test/fixedbugs/bug475.go b/gcc/testsuite/go.test/test/fixedbugs/bug475.go
new file mode 100644
index 00000000000..1bd6fa35ce7
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/bug475.go
@@ -0,0 +1,22 @@
+// compile
+
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Variable in enclosing function with same name as field in struct
+// composite literal confused gccgo.
+
+package p
+
+type s1 struct {
+ f *s1
+}
+
+func F() {
+ var f *s1
+ _ = func() {
+ _ = s1{f: nil}
+ }
+ _ = f
+}
diff --git a/gcc/testsuite/go.test/test/fixedbugs/bug476.go b/gcc/testsuite/go.test/test/fixedbugs/bug476.go
new file mode 100644
index 00000000000..4ea21740484
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/bug476.go
@@ -0,0 +1,23 @@
+// compile
+
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Logical operation on named boolean type returns the same type,
+// supporting an implicit convertion to an interface type. This used
+// to crash gccgo.
+
+package p
+
+type B bool
+
+func (b B) M() {}
+
+type I interface {
+ M()
+}
+
+func F(a, b B) I {
+ return a && b
+}
diff --git a/gcc/testsuite/go.test/test/fixedbugs/bug477.go b/gcc/testsuite/go.test/test/fixedbugs/bug477.go
new file mode 100644
index 00000000000..86289afa6db
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/bug477.go
@@ -0,0 +1,34 @@
+// compile
+
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test multiple identical unnamed structs with methods. This caused
+// a compilation error with gccgo.
+
+package p
+
+type S1 struct{}
+
+func (s S1) M() {}
+
+type S2 struct {
+ F1 struct {
+ S1
+ }
+ F2 struct {
+ S1
+ }
+}
+
+type I interface {
+ M()
+}
+
+func F() {
+ var s2 S2
+ var i1 I = s2.F1
+ var i2 I = s2.F2
+ _, _ = i1, i2
+}
diff --git a/gcc/testsuite/go.test/test/fixedbugs/bug478.dir/a.go b/gcc/testsuite/go.test/test/fixedbugs/bug478.dir/a.go
new file mode 100644
index 00000000000..a40e454f9b1
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/bug478.dir/a.go
@@ -0,0 +1,9 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p1
+
+type S1 struct{}
+
+func (s S1) f() {}
diff --git a/gcc/testsuite/go.test/test/fixedbugs/bug478.dir/b.go b/gcc/testsuite/go.test/test/fixedbugs/bug478.dir/b.go
new file mode 100644
index 00000000000..c0fdf1127b4
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/bug478.dir/b.go
@@ -0,0 +1,13 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p2
+
+import "./a"
+
+type S2 struct {
+ p1.S1
+}
+
+func (s S2) f() {}
diff --git a/gcc/testsuite/go.test/test/fixedbugs/bug478.go b/gcc/testsuite/go.test/test/fixedbugs/bug478.go
new file mode 100644
index 00000000000..5e339e801d5
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/bug478.go
@@ -0,0 +1,10 @@
+// compiledir
+
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Using the same unexported name for a method as a method on an
+// imported embedded type caused a gccgo compilation failure.
+
+package ignored
diff --git a/gcc/testsuite/go.test/test/fixedbugs/bug479.dir/a.go b/gcc/testsuite/go.test/test/fixedbugs/bug479.dir/a.go
new file mode 100644
index 00000000000..5ff3bef1d16
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/bug479.dir/a.go
@@ -0,0 +1,15 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import "unsafe"
+
+type S2 struct {}
+
+const C = unsafe.Sizeof(S2{})
+
+type S1 struct {
+ S2
+}
diff --git a/gcc/testsuite/go.test/test/fixedbugs/bug479.dir/b.go b/gcc/testsuite/go.test/test/fixedbugs/bug479.dir/b.go
new file mode 100644
index 00000000000..a1b27b33264
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/bug479.dir/b.go
@@ -0,0 +1,16 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "./a"
+
+type S3 struct {
+ p.S1
+}
+
+func main() {
+ var i interface{} = S3{}
+ _ = i
+}
diff --git a/gcc/testsuite/go.test/test/fixedbugs/bug479.go b/gcc/testsuite/go.test/test/fixedbugs/bug479.go
new file mode 100644
index 00000000000..f8a0f93c736
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/bug479.go
@@ -0,0 +1,10 @@
+// rundir
+
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Gccgo was not consistent in deciding how to compare a struct type
+// for equality, leading to an undefined symbol at link time.
+
+package ignored
diff --git a/gcc/testsuite/go.test/test/fixedbugs/bug480.dir/a.go b/gcc/testsuite/go.test/test/fixedbugs/bug480.dir/a.go
new file mode 100644
index 00000000000..6dff51586b7
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/bug480.dir/a.go
@@ -0,0 +1,17 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+type S interface{
+ F() T
+}
+
+type T struct {
+ S
+}
+
+type U struct {
+ error
+}
diff --git a/gcc/testsuite/go.test/test/fixedbugs/bug480.dir/b.go b/gcc/testsuite/go.test/test/fixedbugs/bug480.dir/b.go
new file mode 100644
index 00000000000..620736540ae
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/bug480.dir/b.go
@@ -0,0 +1,13 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package b
+
+import "./a"
+
+var t a.T
+
+func F() error {
+ return a.U{}
+}
diff --git a/gcc/testsuite/go.test/test/fixedbugs/bug480.go b/gcc/testsuite/go.test/test/fixedbugs/bug480.go
new file mode 100644
index 00000000000..5b44af43083
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/bug480.go
@@ -0,0 +1,9 @@
+// compiledir
+
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Gccgo mishandled an import of a forward declared type.
+
+package ignored
diff --git a/gcc/testsuite/go.test/test/fixedbugs/bug481.go b/gcc/testsuite/go.test/test/fixedbugs/bug481.go
new file mode 100644
index 00000000000..d0922a5a4ff
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/bug481.go
@@ -0,0 +1,18 @@
+// compile
+
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Returning an index into a conversion from string to slice caused a
+// compilation error when using gccgo.
+
+package p
+
+func F1(s string) byte {
+ return []byte(s)[0]
+}
+
+func F2(s string) rune {
+ return []rune(s)[0]
+}
diff --git a/gcc/testsuite/go.test/test/fixedbugs/bug482.go b/gcc/testsuite/go.test/test/fixedbugs/bug482.go
new file mode 100644
index 00000000000..10c48287d3a
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/bug482.go
@@ -0,0 +1,20 @@
+// compile
+
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Using the same name for a field in a composite literal and for a
+// global variable that depends on the variable being initialized
+// caused gccgo to erroneously report "variable initializer refers to
+// itself".
+
+package p
+
+type S struct {
+ F int
+}
+
+var V = S{F: 1}
+
+var F = V.F
diff --git a/gcc/testsuite/go.test/test/fixedbugs/issue4085a.go b/gcc/testsuite/go.test/test/fixedbugs/issue4085a.go
index 1d8e57cb7c7..089637d86b8 100644
--- a/gcc/testsuite/go.test/test/fixedbugs/issue4085a.go
+++ b/gcc/testsuite/go.test/test/fixedbugs/issue4085a.go
@@ -9,10 +9,10 @@ package main
type T []int
func main() {
- _ = make(T, -1) // ERROR "negative"
- _ = make(T, 0.5) // ERROR "constant 0.5 truncated to integer|non-integer"
- _ = make(T, 1.0) // ok
- _ = make(T, 1<<63) // ERROR "len argument too large"
- _ = make(T, 0, -1) // ERROR "negative cap"
+ _ = make(T, -1) // ERROR "negative"
+ _ = make(T, 0.5) // ERROR "constant 0.5 truncated to integer|non-integer len argument"
+ _ = make(T, 1.0) // ok
+ _ = make(T, 1<<63) // ERROR "len argument too large"
+ _ = make(T, 0, -1) // ERROR "negative cap"
_ = make(T, 10, 0) // ERROR "len larger than cap"
}
diff --git a/gcc/testsuite/go.test/test/fixedbugs/issue4251.go b/gcc/testsuite/go.test/test/fixedbugs/issue4251.go
index a14e0896a47..3668d4c89a8 100644
--- a/gcc/testsuite/go.test/test/fixedbugs/issue4251.go
+++ b/gcc/testsuite/go.test/test/fixedbugs/issue4251.go
@@ -9,13 +9,13 @@
package p
func F1(s []byte) []byte {
- return s[2:1] // ERROR "inverted"
+ return s[2:1] // ERROR "invalid slice index|inverted slice range"
}
func F2(a [10]byte) []byte {
- return a[2:1] // ERROR "inverted"
+ return a[2:1] // ERROR "invalid slice index|inverted slice range"
}
func F3(s string) string {
- return s[2:1] // ERROR "inverted"
+ return s[2:1] // ERROR "invalid slice index|inverted slice range"
}
diff --git a/gcc/testsuite/go.test/test/fixedbugs/issue4517d.go b/gcc/testsuite/go.test/test/fixedbugs/issue4517d.go
new file mode 100644
index 00000000000..3d727d433ed
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/issue4517d.go
@@ -0,0 +1,9 @@
+// errorcheck
+
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import init "fmt" // ERROR "cannot import package as init"
diff --git a/gcc/testsuite/go.test/test/fixedbugs/issue4776.go b/gcc/testsuite/go.test/test/fixedbugs/issue4776.go
new file mode 100644
index 00000000000..13781af1f36
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/issue4776.go
@@ -0,0 +1,10 @@
+// errorcheck
+
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Issue 4776: missing package declaration error should be fatal.
+
+type MyInt int32 // ERROR "package statement must be first|package clause"
+
diff --git a/gcc/testsuite/go.test/test/fixedbugs/issue4847.go b/gcc/testsuite/go.test/test/fixedbugs/issue4847.go
new file mode 100644
index 00000000000..91a6568f271
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/issue4847.go
@@ -0,0 +1,24 @@
+// errorcheck
+
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Issue 4847: initialization loop is not detected.
+
+package p
+
+type (
+ E int
+ S int
+)
+
+type matcher func(s *S) E
+
+func matchList(s *S) E { return matcher(matchAnyFn)(s) }
+
+var foo = matcher(matchList)
+
+var matchAny = matcher(matchList) // ERROR "initialization loop|depends upon itself"
+
+func matchAnyFn(s *S) (err E) { return matchAny(s) }
diff --git a/gcc/testsuite/go.test/test/fixedbugs/issue5172.go b/gcc/testsuite/go.test/test/fixedbugs/issue5172.go
new file mode 100644
index 00000000000..a6acbd3db78
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/issue5172.go
@@ -0,0 +1,19 @@
+// errorcheck
+
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// issue 5172: spurious warn about type conversion on broken type inside go and defer
+
+package main
+
+type foo struct {
+ x bar // ERROR "undefined"
+}
+
+func main() {
+ var f foo
+ go f.bar() // GCCGO_ERROR "undefined"
+ defer f.bar() // GCCGO_ERROR "undefined"
+}
diff --git a/gcc/testsuite/go.test/test/fixedbugs/issue5358.go b/gcc/testsuite/go.test/test/fixedbugs/issue5358.go
new file mode 100644
index 00000000000..c2b1da9e0e1
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/issue5358.go
@@ -0,0 +1,17 @@
+// errorcheck
+
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// issue 5358: incorrect error message when using f(g()) form on ... args.
+
+package main
+
+func f(x int, y ...int) {}
+
+func g() (int, []int)
+
+func main() {
+ f(g()) // ERROR "as type int in|incompatible type"
+}
diff --git a/gcc/testsuite/go.test/test/fixedbugs/issue5493.go b/gcc/testsuite/go.test/test/fixedbugs/issue5493.go
index bbc62ffac0a..2ee0398af2c 100644
--- a/gcc/testsuite/go.test/test/fixedbugs/issue5493.go
+++ b/gcc/testsuite/go.test/test/fixedbugs/issue5493.go
@@ -52,7 +52,7 @@ func main() {
runtime.GC()
}
if count != 0 {
- println(count, "out of", N, "finalizer are called")
+ println(count, "out of", N, "finalizer are not called")
panic("not all finalizers are called")
}
}
diff --git a/gcc/testsuite/go.test/test/fixedbugs/issue5581.go b/gcc/testsuite/go.test/test/fixedbugs/issue5581.go
new file mode 100644
index 00000000000..36a4ad671d2
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/issue5581.go
@@ -0,0 +1,34 @@
+// errorcheck
+
+// Used to emit a spurious "invalid recursive type" error.
+// See golang.org/issue/5581.
+
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "fmt"
+
+func NewBar() *Bar { return nil }
+
+func (x *Foo) Method() (int, error) {
+ for y := range x.m {
+ _ = y.A
+ }
+ return 0, nil
+}
+
+type Foo struct {
+ m map[*Bar]int
+}
+
+type Bar struct {
+ A *Foo
+ B chan Blah // ERROR "undefined.*Blah"
+}
+
+func main() {
+ fmt.Println("Hello, playground")
+}
diff --git a/gcc/testsuite/go.test/test/fixedbugs/issue5609.go b/gcc/testsuite/go.test/test/fixedbugs/issue5609.go
new file mode 100644
index 00000000000..ea770b48654
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/issue5609.go
@@ -0,0 +1,13 @@
+// errorcheck
+
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// issue 5609: overflow when calculating array size
+
+package pkg
+
+const Large uint64 = 18446744073709551615
+
+var foo [Large]uint64 // ERROR "array bound is too large|array bound overflows"
diff --git a/gcc/testsuite/go.test/test/fixedbugs/issue5698.go b/gcc/testsuite/go.test/test/fixedbugs/issue5698.go
new file mode 100644
index 00000000000..035bbd35d25
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/issue5698.go
@@ -0,0 +1,18 @@
+// errorcheck
+
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Issue 5698: can define a key type with slices.
+
+package main
+
+type Key struct {
+ a int16 // the compiler was confused by the padding.
+ b []int
+}
+
+type Val struct{}
+
+type Map map[Key]Val // ERROR "invalid map key type"
diff --git a/gcc/testsuite/go.test/test/fixedbugs/issue5704.go b/gcc/testsuite/go.test/test/fixedbugs/issue5704.go
new file mode 100644
index 00000000000..1dfa072143e
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/issue5704.go
@@ -0,0 +1,46 @@
+// run
+
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Issue 5704: Conversions of empty strings to byte
+// or rune slices return empty but non-nil slices.
+
+package main
+
+type (
+ mystring string
+ mybytes []byte
+ myrunes []rune
+)
+
+func checkBytes(s []byte, arg string) {
+ if len(s) != 0 {
+ panic("len(" + arg + ") != 0")
+ }
+ if s == nil {
+ panic(arg + " == nil")
+ }
+}
+
+func checkRunes(s []rune, arg string) {
+ if len(s) != 0 {
+ panic("len(" + arg + ") != 0")
+ }
+ if s == nil {
+ panic(arg + " == nil")
+ }
+}
+
+func main() {
+ checkBytes([]byte(""), `[]byte("")`)
+ checkBytes([]byte(mystring("")), `[]byte(mystring(""))`)
+ checkBytes(mybytes(""), `mybytes("")`)
+ checkBytes(mybytes(mystring("")), `mybytes(mystring(""))`)
+
+ checkRunes([]rune(""), `[]rune("")`)
+ checkRunes([]rune(mystring("")), `[]rune(mystring(""))`)
+ checkRunes(myrunes(""), `myrunes("")`)
+ checkRunes(myrunes(mystring("")), `myrunes(mystring(""))`)
+}
diff --git a/gcc/testsuite/go.test/test/fixedbugs/issue5856.go b/gcc/testsuite/go.test/test/fixedbugs/issue5856.go
new file mode 100644
index 00000000000..35cadf8c9e7
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/issue5856.go
@@ -0,0 +1,38 @@
+// run
+
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+ "os"
+ "runtime"
+ "strings"
+)
+
+func main() {
+ f()
+ panic("deferred function not run")
+}
+
+var x = 1
+
+func f() {
+ if x == 0 {
+ return
+ }
+ defer g()
+ panic("panic")
+}
+
+func g() {
+ _, file, line, _ := runtime.Caller(2)
+ if !strings.HasSuffix(file, "issue5856.go") || line != 28 {
+ fmt.Printf("BUG: defer called from %s:%d, want issue5856.go:28\n", file, line)
+ os.Exit(1)
+ }
+ os.Exit(0)
+}
diff --git a/gcc/testsuite/go.test/test/fixedbugs/issue5910.dir/a.go b/gcc/testsuite/go.test/test/fixedbugs/issue5910.dir/a.go
new file mode 100644
index 00000000000..b236c15c7d3
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/issue5910.dir/a.go
@@ -0,0 +1,22 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+type Package struct {
+ name string
+}
+
+type Future struct {
+ result chan struct {
+ *Package
+ error
+ }
+}
+
+func (t *Future) Result() (*Package, error) {
+ result := <-t.result
+ t.result <- result
+ return result.Package, result.error
+}
diff --git a/gcc/testsuite/go.test/test/fixedbugs/issue5910.dir/main.go b/gcc/testsuite/go.test/test/fixedbugs/issue5910.dir/main.go
new file mode 100644
index 00000000000..c5d42ea0986
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/issue5910.dir/main.go
@@ -0,0 +1,12 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "a"
+
+func main() {
+ f := new(a.Future)
+ f.Result()
+}
diff --git a/gcc/testsuite/go.test/test/fixedbugs/issue5910.go b/gcc/testsuite/go.test/test/fixedbugs/issue5910.go
new file mode 100644
index 00000000000..54e74bac8dd
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/issue5910.go
@@ -0,0 +1,10 @@
+// compiledir
+
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Issue 5910: parsing of unnamed struct types
+// in inlined bodies was broken.
+
+package ignored
diff --git a/gcc/testsuite/go.test/test/fixedbugs/issue5957.dir/a.go b/gcc/testsuite/go.test/test/fixedbugs/issue5957.dir/a.go
new file mode 100644
index 00000000000..7411d5fcd54
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/issue5957.dir/a.go
@@ -0,0 +1,3 @@
+package surprise
+
+var X int
diff --git a/gcc/testsuite/go.test/test/fixedbugs/issue5957.dir/b.go b/gcc/testsuite/go.test/test/fixedbugs/issue5957.dir/b.go
new file mode 100644
index 00000000000..9bc561b9ce5
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/issue5957.dir/b.go
@@ -0,0 +1,2 @@
+package surprise2
+
diff --git a/gcc/testsuite/go.test/test/fixedbugs/issue5957.dir/c.go b/gcc/testsuite/go.test/test/fixedbugs/issue5957.dir/c.go
new file mode 100644
index 00000000000..a1781d4d406
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/issue5957.dir/c.go
@@ -0,0 +1,12 @@
+package p
+
+import (
+ "./a" // ERROR "imported and not used: \x22a\x22 as surprise|imported and not used: surprise"
+ "./b" // GC_ERROR "imported and not used: \x22b\x22 as surprise2|imported and not used: surprise2"
+ b "./b" // ERROR "imported and not used: \x22b\x22$|imported and not used: surprise2"
+ foo "math" // ERROR "imported and not used: \x22math\x22 as foo|imported and not used: math"
+ "fmt" // actually used
+ "strings" // ERROR "imported and not used: \x22strings\x22|imported and not used: strings"
+)
+
+var _ = fmt.Printf
diff --git a/gcc/testsuite/go.test/test/fixedbugs/issue5957.go b/gcc/testsuite/go.test/test/fixedbugs/issue5957.go
new file mode 100644
index 00000000000..891d8e6d2ee
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/issue5957.go
@@ -0,0 +1,7 @@
+// errorcheckdir
+
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
diff --git a/gcc/testsuite/go.test/test/fixedbugs/issue5963.go b/gcc/testsuite/go.test/test/fixedbugs/issue5963.go
new file mode 100644
index 00000000000..190e8f45647
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/issue5963.go
@@ -0,0 +1,50 @@
+// run
+
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Used to die in runtime due to init goroutine exiting while
+// locked to main thread.
+
+package main
+
+import (
+ "os"
+ "runtime"
+)
+
+func init() {
+ c := make(chan int, 1)
+ defer func() {
+ c <- 0
+ }()
+ go func() {
+ os.Exit(<-c)
+ }()
+ runtime.Goexit()
+}
+
+func main() {
+}
+
+/* Before fix:
+
+invalid m->locked = 2
+fatal error: internal lockOSThread error
+
+goroutine 2 [runnable]:
+runtime.MHeap_Scavenger()
+ /Users/rsc/g/go/src/pkg/runtime/mheap.c:438
+runtime.goexit()
+ /Users/rsc/g/go/src/pkg/runtime/proc.c:1313
+created by runtime.main
+ /Users/rsc/g/go/src/pkg/runtime/proc.c:165
+
+goroutine 3 [runnable]:
+main.func·002()
+ /Users/rsc/g/go/test/fixedbugs/issue5963.go:22
+created by main.init·1
+ /Users/rsc/g/go/test/fixedbugs/issue5963.go:24 +0xb9
+exit status 2
+*/
diff --git a/gcc/testsuite/go.test/test/fixedbugs/issue6004.go b/gcc/testsuite/go.test/test/fixedbugs/issue6004.go
new file mode 100644
index 00000000000..45aaffd2c90
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/issue6004.go
@@ -0,0 +1,15 @@
+// errorcheck
+
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func main() {
+ _ = nil // ERROR "use of untyped nil"
+ _, _ = nil, 1 // ERROR "use of untyped nil"
+ _, _ = 1, nil // ERROR "use of untyped nil"
+ _ = append(nil, 1, 2, 3) // ERROR "untyped nil"
+}
+
diff --git a/gcc/testsuite/go.test/test/fixedbugs/issue6036.go b/gcc/testsuite/go.test/test/fixedbugs/issue6036.go
new file mode 100644
index 00000000000..5f787c56900
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/issue6036.go
@@ -0,0 +1,44 @@
+// +build amd64
+// compile
+
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Issue 6036: 6g's backend generates OINDREG with
+// offsets larger than 32-bit.
+
+package main
+
+type T struct {
+ Large [1 << 31]byte
+ A int
+ B int
+}
+
+func F(t *T) {
+ t.B = t.A
+}
+
+type T2 [1<<31 + 2]byte
+
+func F2(t *T2) {
+ t[1<<31+1] = 42
+}
+
+type T3 [1<<15 + 1][1<<15 + 1]int
+
+func F3(t *T3) {
+ t[1<<15][1<<15] = 42
+}
+
+type S struct {
+ A int32
+ B int32
+}
+
+type T4 [1<<29 + 1]S
+
+func F4(t *T4) {
+ t[1<<29].B = 42
+}
diff --git a/gcc/testsuite/go.test/test/fixedbugs/issue6055.go b/gcc/testsuite/go.test/test/fixedbugs/issue6055.go
new file mode 100644
index 00000000000..698f62ac956
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/issue6055.go
@@ -0,0 +1,35 @@
+// run
+
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "runtime"
+
+type Closer interface {
+ Close()
+}
+
+func nilInterfaceDeferCall() {
+ defer func() {
+ // make sure a traceback happens with jmpdefer on the stack
+ runtime.GC()
+ }()
+ var x Closer
+ defer x.Close()
+}
+
+func shouldPanic(f func()) {
+ defer func() {
+ if recover() == nil {
+ panic("did not panic")
+ }
+ }()
+ f()
+}
+
+func main() {
+ shouldPanic(nilInterfaceDeferCall)
+}
diff --git a/gcc/testsuite/go.test/test/fixedbugs/issue6131.go b/gcc/testsuite/go.test/test/fixedbugs/issue6131.go
new file mode 100644
index 00000000000..817e4a877cd
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/issue6131.go
@@ -0,0 +1,20 @@
+// compile
+
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Issue 6131: missing typecheck after reducing
+// n%1 == 0 to a constant value.
+
+package main
+
+func isGood(n int) bool {
+ return n%1 == 0
+}
+
+func main() {
+ if !isGood(256) {
+ panic("!isGood")
+ }
+}
diff --git a/gcc/testsuite/go.test/test/fixedbugs/issue6140.go b/gcc/testsuite/go.test/test/fixedbugs/issue6140.go
new file mode 100644
index 00000000000..d494933b2e2
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/issue6140.go
@@ -0,0 +1,31 @@
+// compile
+
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Issue 6140: compiler incorrectly rejects method values
+// whose receiver has an unnamed interface type.
+
+package p
+
+type T *interface {
+ m() int
+}
+
+var x T
+
+var _ = (*x).m
+
+var y interface {
+ m() int
+}
+
+var _ = y.m
+
+type I interface {
+ String() string
+}
+
+var z *struct{ I }
+var _ = z.String
diff --git a/gcc/testsuite/go.test/test/fixedbugs/issue6247.go b/gcc/testsuite/go.test/test/fixedbugs/issue6247.go
new file mode 100644
index 00000000000..eea8f9c878f
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/issue6247.go
@@ -0,0 +1,17 @@
+// compile
+
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Issue 6247: 5g used to be confused by the numbering
+// of floating-point registers.
+
+package main
+
+var p map[string]interface{}
+var v interface{}
+
+func F() {
+ p["hello"] = v.(complex128) * v.(complex128)
+}
diff --git a/gcc/testsuite/go.test/test/fixedbugs/issue6269.go b/gcc/testsuite/go.test/test/fixedbugs/issue6269.go
new file mode 100644
index 00000000000..af5feb72866
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/issue6269.go
@@ -0,0 +1,39 @@
+// run
+
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// issue 6269: name collision on method names for function local types.
+
+package main
+
+type foo struct{}
+
+func (foo) Error() string {
+ return "ok"
+}
+
+type bar struct{}
+
+func (bar) Error() string {
+ return "fail"
+}
+
+func unused() {
+ type collision struct {
+ bar
+ }
+ _ = collision{}
+}
+
+func main() {
+ type collision struct {
+ foo
+ }
+ s := error(collision{})
+ if str := s.Error(); str != "ok" {
+ println("s.Error() ==", str)
+ panic(`s.Error() != "ok"`)
+ }
+}
diff --git a/gcc/testsuite/go.test/test/fixedbugs/issue6298.go b/gcc/testsuite/go.test/test/fixedbugs/issue6298.go
new file mode 100644
index 00000000000..6303dbe5b09
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/issue6298.go
@@ -0,0 +1,15 @@
+// compile
+
+// golang.org/issue/6298.
+// Used to cause "internal error: typename ideal bool"
+
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func main() {
+ var x interface{} = "abc"[0] == 'a'
+ _ = x
+}
diff --git a/gcc/testsuite/go.test/test/fixedbugs/issue6399.go b/gcc/testsuite/go.test/test/fixedbugs/issue6399.go
new file mode 100644
index 00000000000..b3d1c855b2c
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/issue6399.go
@@ -0,0 +1,27 @@
+// compile
+
+package main
+
+type Foo interface {
+ Print()
+}
+
+type Bar struct{}
+
+func (b Bar) Print() {}
+
+func main() {
+ b := make([]Bar, 20)
+ f := make([]Foo, 20)
+ for i := range f {
+ f[i] = b[i]
+ }
+ T(f)
+ _ = make([]struct{}, 1)
+}
+
+func T(f []Foo) {
+ for i := range f {
+ f[i].Print()
+ }
+}
diff --git a/gcc/testsuite/go.test/test/fixedbugs/issue6513.dir/a.go b/gcc/testsuite/go.test/test/fixedbugs/issue6513.dir/a.go
new file mode 100644
index 00000000000..da90ca377b4
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/issue6513.dir/a.go
@@ -0,0 +1,7 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+type T struct{ int }
diff --git a/gcc/testsuite/go.test/test/fixedbugs/issue6513.dir/b.go b/gcc/testsuite/go.test/test/fixedbugs/issue6513.dir/b.go
new file mode 100644
index 00000000000..3b35b2d324b
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/issue6513.dir/b.go
@@ -0,0 +1,9 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package b
+
+import "./a"
+
+type U struct{ a.T }
diff --git a/gcc/testsuite/go.test/test/fixedbugs/issue6513.dir/main.go b/gcc/testsuite/go.test/test/fixedbugs/issue6513.dir/main.go
new file mode 100644
index 00000000000..f09b7274821
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/issue6513.dir/main.go
@@ -0,0 +1,16 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "./a"
+ "./b"
+)
+
+func main() {
+ var t a.T
+ var u b.U
+ _, _ = t, u
+}
diff --git a/gcc/testsuite/go.test/test/fixedbugs/issue6513.go b/gcc/testsuite/go.test/test/fixedbugs/issue6513.go
new file mode 100644
index 00000000000..b32e0c5614d
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/issue6513.go
@@ -0,0 +1,10 @@
+// compiledir
+
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Issue 6513: embedded builtins may get incorrect qualified
+// field name during import.
+
+package ignored
diff --git a/gcc/testsuite/go.test/test/fixedbugs/issue6789.dir/a.go b/gcc/testsuite/go.test/test/fixedbugs/issue6789.dir/a.go
new file mode 100644
index 00000000000..9c90e0740cd
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/issue6789.dir/a.go
@@ -0,0 +1,14 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+type unexported struct {
+ a int
+ b bool
+}
+
+type Struct struct {
+ unexported
+}
diff --git a/gcc/testsuite/go.test/test/fixedbugs/issue6789.dir/b.go b/gcc/testsuite/go.test/test/fixedbugs/issue6789.dir/b.go
new file mode 100644
index 00000000000..b6a6fc317f5
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/issue6789.dir/b.go
@@ -0,0 +1,12 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "./a"
+
+type s a.Struct
+
+func main() {
+}
diff --git a/gcc/testsuite/go.test/test/fixedbugs/issue6789.go b/gcc/testsuite/go.test/test/fixedbugs/issue6789.go
new file mode 100644
index 00000000000..e3a2c3320ef
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/issue6789.go
@@ -0,0 +1,10 @@
+// rundir
+
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Issue 6789: gccgo failed to find the hash function for an
+// unexported struct embedded in an exported struct.
+
+package ignored
diff --git a/gcc/testsuite/go.test/test/fixedbugs/issue6899.go b/gcc/testsuite/go.test/test/fixedbugs/issue6899.go
new file mode 100644
index 00000000000..a693bf28508
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/issue6899.go
@@ -0,0 +1,13 @@
+// cmpout
+
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "math"
+
+func main() {
+ println(math.Copysign(0, -1))
+}
diff --git a/gcc/testsuite/go.test/test/fixedbugs/issue6899.out b/gcc/testsuite/go.test/test/fixedbugs/issue6899.out
new file mode 100644
index 00000000000..e2375f07766
--- /dev/null
+++ b/gcc/testsuite/go.test/test/fixedbugs/issue6899.out
@@ -0,0 +1 @@
+-0.000000e+000
diff --git a/gcc/testsuite/go.test/test/import1.go b/gcc/testsuite/go.test/test/import1.go
index 56b29d58c06..2433b5f2ad2 100644
--- a/gcc/testsuite/go.test/test/import1.go
+++ b/gcc/testsuite/go.test/test/import1.go
@@ -14,5 +14,6 @@ import bufio "os" // ERROR "redeclared|redefinition|incompatible" "imported and
import (
"fmt" // GCCGO_ERROR "previous|not used"
- fmt "math" // ERROR "redeclared|redefinition|incompatible" "imported and not used"
+ fmt "math" // ERROR "redeclared|redefinition|incompatible" "imported and not used: \x22math\x22 as fmt"
+ . "math" // GC_ERROR "imported and not used: \x22math\x22$"
)
diff --git a/gcc/testsuite/go.test/test/interface/explicit.go b/gcc/testsuite/go.test/test/interface/explicit.go
index eb81156e081..36fa1a4224f 100644
--- a/gcc/testsuite/go.test/test/interface/explicit.go
+++ b/gcc/testsuite/go.test/test/interface/explicit.go
@@ -80,3 +80,22 @@ var m2 M = jj // ERROR "incompatible|wrong type for M method"
var m3 = M(ii) // ERROR "invalid|missing"
var m4 = M(jj) // ERROR "invalid|wrong type for M method"
+
+
+type B1 interface {
+ _()
+}
+
+type B2 interface {
+ M()
+ _()
+}
+
+type T2 struct{}
+
+func (t *T2) M() {}
+func (t *T2) _() {}
+
+// Check that nothing satisfies an interface with blank methods.
+var b1 B1 = &T2{} // ERROR "incompatible|missing _ method"
+var b2 B2 = &T2{} // ERROR "incompatible|missing _ method"
diff --git a/gcc/testsuite/go.test/test/interface/fail.go b/gcc/testsuite/go.test/test/interface/fail.go
index 72b854dc00c..81eb6cb3c15 100644
--- a/gcc/testsuite/go.test/test/interface/fail.go
+++ b/gcc/testsuite/go.test/test/interface/fail.go
@@ -14,18 +14,33 @@ type I interface {
func main() {
shouldPanic(p1)
+ shouldPanic(p2)
}
func p1() {
var s *S
var i I
- var e interface {}
+ var e interface{}
e = s
i = e.(I)
_ = i
}
-type S struct {
+type S struct{}
+
+func (s *S) _() {}
+
+type B interface {
+ _()
+}
+
+func p2() {
+ var s *S
+ var b B
+ var e interface{}
+ e = s
+ b = e.(B)
+ _ = b
}
func shouldPanic(f func()) {
diff --git a/gcc/testsuite/go.test/test/mapnan.go b/gcc/testsuite/go.test/test/mapnan.go
index 60b35fbeaf4..f081cab01d4 100644
--- a/gcc/testsuite/go.test/test/mapnan.go
+++ b/gcc/testsuite/go.test/test/mapnan.go
@@ -13,17 +13,13 @@ import (
"fmt"
"math"
"time"
- "syscall"
)
func main() {
// Test that NaNs in maps don't go quadratic.
t := func(n int) time.Duration {
- var u0 syscall.Rusage
- if err := syscall.Getrusage(0, &u0); err != nil {
- panic(err)
- }
+ t1 := time.Now()
m := map[float64]int{}
nan := math.NaN()
for i := 0; i < n; i++ {
@@ -32,11 +28,7 @@ func main() {
if len(m) != n {
panic("wrong size map after nan insertion")
}
- var u1 syscall.Rusage
- if err := syscall.Getrusage(0, &u1); err != nil {
- panic(err)
- }
- return time.Duration(u1.Utime.Nano() - u0.Utime.Nano())
+ return time.Since(t1)
}
// Depending on the machine and OS, this test might be too fast
diff --git a/gcc/testsuite/go.test/test/method2.go b/gcc/testsuite/go.test/test/method2.go
index b63da10dc69..aaa850e7191 100644
--- a/gcc/testsuite/go.test/test/method2.go
+++ b/gcc/testsuite/go.test/test/method2.go
@@ -21,7 +21,7 @@ func (p *P1) val() int { return 1 } // ERROR "receiver.* pointer|invalid pointer
type I interface{}
type I1 interface{}
-func (p I) val() int { return 1 } // ERROR "receiver.*interface|invalid pointer or interface receiver"
+func (p I) val() int { return 1 } // ERROR "receiver.*interface|invalid pointer or interface receiver"
func (p *I1) val() int { return 1 } // ERROR "receiver.*interface|invalid pointer or interface receiver"
type Val interface {
@@ -33,4 +33,5 @@ var _ = (*Val).val // ERROR "method"
var v Val
var pv = &v
-var _ = pv.val() // ERROR "method"
+var _ = pv.val() // ERROR "method"
+var _ = pv.val // ERROR "method"
diff --git a/gcc/testsuite/go.test/test/nilcheck.go b/gcc/testsuite/go.test/test/nilcheck.go
new file mode 100644
index 00000000000..fe05d05c925
--- /dev/null
+++ b/gcc/testsuite/go.test/test/nilcheck.go
@@ -0,0 +1,184 @@
+// errorcheck -0 -N -d=nil
+
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test that nil checks are inserted.
+// Optimization is disabled, so redundant checks are not removed.
+
+package p
+
+type Struct struct {
+ X int
+ Y float64
+}
+
+type BigStruct struct {
+ X int
+ Y float64
+ A [1<<20]int
+ Z string
+}
+
+type Empty struct {
+}
+
+type Empty1 struct {
+ Empty
+}
+
+var (
+ intp *int
+ arrayp *[10]int
+ array0p *[0]int
+ bigarrayp *[1<<26]int
+ structp *Struct
+ bigstructp *BigStruct
+ emptyp *Empty
+ empty1p *Empty1
+)
+
+func f1() {
+ _ = *intp // ERROR "nil check"
+ _ = *arrayp // ERROR "nil check"
+ _ = *array0p // ERROR "nil check"
+ _ = *array0p // ERROR "nil check"
+ _ = *intp // ERROR "nil check"
+ _ = *arrayp // ERROR "nil check"
+ _ = *structp // ERROR "nil check"
+ _ = *emptyp // ERROR "nil check"
+ _ = *arrayp // ERROR "nil check"
+}
+
+func f2() {
+ var (
+ intp *int
+ arrayp *[10]int
+ array0p *[0]int
+ bigarrayp *[1<<20]int
+ structp *Struct
+ bigstructp *BigStruct
+ emptyp *Empty
+ empty1p *Empty1
+ )
+
+ _ = *intp // ERROR "nil check"
+ _ = *arrayp // ERROR "nil check"
+ _ = *array0p // ERROR "nil check"
+ _ = *array0p // ERROR "nil check"
+ _ = *intp // ERROR "nil check"
+ _ = *arrayp // ERROR "nil check"
+ _ = *structp // ERROR "nil check"
+ _ = *emptyp // ERROR "nil check"
+ _ = *arrayp // ERROR "nil check"
+ _ = *bigarrayp // ERROR "nil check"
+ _ = *bigstructp // ERROR "nil check"
+ _ = *empty1p // ERROR "nil check"
+}
+
+func fx10k() *[10000]int
+var b bool
+
+
+func f3(x *[10000]int) {
+ // Using a huge type and huge offsets so the compiler
+ // does not expect the memory hardware to fault.
+ _ = x[9999] // ERROR "nil check"
+
+ for {
+ if x[9999] != 0 { // ERROR "nil check"
+ break
+ }
+ }
+
+ x = fx10k()
+ _ = x[9999] // ERROR "nil check"
+ if b {
+ _ = x[9999] // ERROR "nil check"
+ } else {
+ _ = x[9999] // ERROR "nil check"
+ }
+ _ = x[9999] // ERROR "nil check"
+
+ x = fx10k()
+ if b {
+ _ = x[9999] // ERROR "nil check"
+ } else {
+ _ = x[9999] // ERROR "nil check"
+ }
+ _ = x[9999] // ERROR "nil check"
+
+ fx10k()
+ // This one is a bit redundant, if we figured out that
+ // x wasn't going to change across the function call.
+ // But it's a little complex to do and in practice doesn't
+ // matter enough.
+ _ = x[9999] // ERROR "nil check"
+}
+
+func f3a() {
+ x := fx10k()
+ y := fx10k()
+ z := fx10k()
+ _ = &x[9] // ERROR "nil check"
+ y = z
+ _ = &x[9] // ERROR "nil check"
+ x = y
+ _ = &x[9] // ERROR "nil check"
+}
+
+func f3b() {
+ x := fx10k()
+ y := fx10k()
+ _ = &x[9] // ERROR "nil check"
+ y = x
+ _ = &x[9] // ERROR "nil check"
+ x = y
+ _ = &x[9] // ERROR "nil check"
+}
+
+func fx10() *[10]int
+
+func f4(x *[10]int) {
+ // Most of these have no checks because a real memory reference follows,
+ // and the offset is small enough that if x is nil, the address will still be
+ // in the first unmapped page of memory.
+
+ _ = x[9] // ERROR "nil check"
+
+ for {
+ if x[9] != 0 { // ERROR "nil check"
+ break
+ }
+ }
+
+ x = fx10()
+ _ = x[9] // ERROR "nil check"
+ if b {
+ _ = x[9] // ERROR "nil check"
+ } else {
+ _ = x[9] // ERROR "nil check"
+ }
+ _ = x[9] // ERROR "nil check"
+
+ x = fx10()
+ if b {
+ _ = x[9] // ERROR "nil check"
+ } else {
+ _ = &x[9] // ERROR "nil check"
+ }
+ _ = x[9] // ERROR "nil check"
+
+ fx10()
+ _ = x[9] // ERROR "nil check"
+
+ x = fx10()
+ y := fx10()
+ _ = &x[9] // ERROR "nil check"
+ y = x
+ _ = &x[9] // ERROR "nil check"
+ x = y
+ _ = &x[9] // ERROR "nil check"
+}
+
diff --git a/gcc/testsuite/go.test/test/nilptr.go b/gcc/testsuite/go.test/test/nilptr.go
index 793e9967368..9631d1618b5 100644
--- a/gcc/testsuite/go.test/test/nilptr.go
+++ b/gcc/testsuite/go.test/test/nilptr.go
@@ -40,6 +40,10 @@ func main() {
shouldPanic(p10)
shouldPanic(p11)
shouldPanic(p12)
+ shouldPanic(p13)
+ shouldPanic(p14)
+ shouldPanic(p15)
+ shouldPanic(p16)
}
func shouldPanic(f func()) {
@@ -152,3 +156,27 @@ func p12() {
var p *T = nil
println(*(&((*p).i)))
}
+
+// Tests suggested in golang.org/issue/6080.
+
+func p13() {
+ var x *[10]int
+ y := x[:]
+ _ = y
+}
+
+func p14() {
+ println((*[1]int)(nil)[:])
+}
+
+func p15() {
+ for i := range (*[1]int)(nil)[:] {
+ _ = i
+ }
+}
+
+func p16() {
+ for i, v := range (*[1]int)(nil)[:] {
+ _ = i + v
+ }
+}
diff --git a/gcc/testsuite/go.test/test/nilptr2.go b/gcc/testsuite/go.test/test/nilptr2.go
new file mode 100644
index 00000000000..57a5f8068f0
--- /dev/null
+++ b/gcc/testsuite/go.test/test/nilptr2.go
@@ -0,0 +1,128 @@
+// run
+
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func main() {
+ ok := true
+ for _, tt := range tests {
+ func() {
+ defer func() {
+ if err := recover(); err == nil {
+ println(tt.name, "did not panic")
+ ok = false
+ }
+ }()
+ tt.fn()
+ }()
+ }
+ if !ok {
+ println("BUG")
+ }
+}
+
+var intp *int
+var slicep *[]byte
+var a10p *[10]int
+var a10Mp *[1<<20]int
+var structp *Struct
+var bigstructp *BigStruct
+var i int
+var m *M
+var m1 *M1
+var m2 *M2
+
+func use(interface{}) {
+}
+
+var tests = []struct{
+ name string
+ fn func()
+}{
+ // Edit .+1,/^}/s/^[^ ].+/ {"&", func() { println(&) }},\n {"\&&", func() { println(\&&) }},/g
+ {"*intp", func() { println(*intp) }},
+ {"&*intp", func() { println(&*intp) }},
+ {"*slicep", func() { println(*slicep) }},
+ {"&*slicep", func() { println(&*slicep) }},
+ {"(*slicep)[0]", func() { println((*slicep)[0]) }},
+ {"&(*slicep)[0]", func() { println(&(*slicep)[0]) }},
+ {"(*slicep)[i]", func() { println((*slicep)[i]) }},
+ {"&(*slicep)[i]", func() { println(&(*slicep)[i]) }},
+ {"*a10p", func() { use(*a10p) }},
+ {"&*a10p", func() { println(&*a10p) }},
+ {"a10p[0]", func() { println(a10p[0]) }},
+ {"&a10p[0]", func() { println(&a10p[0]) }},
+ {"a10p[i]", func() { println(a10p[i]) }},
+ {"&a10p[i]", func() { println(&a10p[i]) }},
+ {"*structp", func() { use(*structp) }},
+ {"&*structp", func() { println(&*structp) }},
+ {"structp.i", func() { println(structp.i) }},
+ {"&structp.i", func() { println(&structp.i) }},
+ {"structp.j", func() { println(structp.j) }},
+ {"&structp.j", func() { println(&structp.j) }},
+ {"structp.k", func() { println(structp.k) }},
+ {"&structp.k", func() { println(&structp.k) }},
+ {"structp.x[0]", func() { println(structp.x[0]) }},
+ {"&structp.x[0]", func() { println(&structp.x[0]) }},
+ {"structp.x[i]", func() { println(structp.x[i]) }},
+ {"&structp.x[i]", func() { println(&structp.x[i]) }},
+ {"structp.x[9]", func() { println(structp.x[9]) }},
+ {"&structp.x[9]", func() { println(&structp.x[9]) }},
+ {"structp.l", func() { println(structp.l) }},
+ {"&structp.l", func() { println(&structp.l) }},
+ {"*bigstructp", func() { use(*bigstructp) }},
+ {"&*bigstructp", func() { println(&*bigstructp) }},
+ {"bigstructp.i", func() { println(bigstructp.i) }},
+ {"&bigstructp.i", func() { println(&bigstructp.i) }},
+ {"bigstructp.j", func() { println(bigstructp.j) }},
+ {"&bigstructp.j", func() { println(&bigstructp.j) }},
+ {"bigstructp.k", func() { println(bigstructp.k) }},
+ {"&bigstructp.k", func() { println(&bigstructp.k) }},
+ {"bigstructp.x[0]", func() { println(bigstructp.x[0]) }},
+ {"&bigstructp.x[0]", func() { println(&bigstructp.x[0]) }},
+ {"bigstructp.x[i]", func() { println(bigstructp.x[i]) }},
+ {"&bigstructp.x[i]", func() { println(&bigstructp.x[i]) }},
+ {"bigstructp.x[9]", func() { println(bigstructp.x[9]) }},
+ {"&bigstructp.x[9]", func() { println(&bigstructp.x[9]) }},
+ {"bigstructp.x[100<<20]", func() { println(bigstructp.x[100<<20]) }},
+ {"&bigstructp.x[100<<20]", func() { println(&bigstructp.x[100<<20]) }},
+ {"bigstructp.l", func() { println(bigstructp.l) }},
+ {"&bigstructp.l", func() { println(&bigstructp.l) }},
+ {"m1.F()", func() { println(m1.F()) }},
+ {"m1.M.F()", func() { println(m1.M.F()) }},
+ {"m2.F()", func() { println(m2.F()) }},
+ {"m2.M.F()", func() { println(m2.M.F()) }},
+}
+
+type Struct struct {
+ i int
+ j float64
+ k string
+ x [10]int
+ l []byte
+}
+
+type BigStruct struct {
+ i int
+ j float64
+ k string
+ x [128<<20]byte
+ l []byte
+}
+
+type M struct {
+}
+
+func (m *M) F() int {return 0}
+
+type M1 struct {
+ M
+}
+
+type M2 struct {
+ x int
+ M
+}
diff --git a/gcc/testsuite/go.test/test/nilptr3.go b/gcc/testsuite/go.test/test/nilptr3.go
new file mode 100644
index 00000000000..08597a02d95
--- /dev/null
+++ b/gcc/testsuite/go.test/test/nilptr3.go
@@ -0,0 +1,191 @@
+// errorcheck -0 -d=nil
+
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test that nil checks are removed.
+// Optimization is enabled.
+
+package p
+
+type Struct struct {
+ X int
+ Y float64
+}
+
+type BigStruct struct {
+ X int
+ Y float64
+ A [1<<20]int
+ Z string
+}
+
+type Empty struct {
+}
+
+type Empty1 struct {
+ Empty
+}
+
+var (
+ intp *int
+ arrayp *[10]int
+ array0p *[0]int
+ bigarrayp *[1<<26]int
+ structp *Struct
+ bigstructp *BigStruct
+ emptyp *Empty
+ empty1p *Empty1
+)
+
+func f1() {
+ _ = *intp // ERROR "generated nil check"
+
+ // This one should be removed but the block copy needs
+ // to be turned into its own pseudo-op in order to see
+ // the indirect.
+ _ = *arrayp // ERROR "generated nil check"
+
+ // 0-byte indirect doesn't suffice
+ _ = *array0p // ERROR "generated nil check"
+ _ = *array0p // ERROR "removed repeated nil check" 386
+
+ _ = *intp // ERROR "removed repeated nil check"
+ _ = *arrayp // ERROR "removed repeated nil check"
+ _ = *structp // ERROR "generated nil check"
+ _ = *emptyp // ERROR "generated nil check"
+ _ = *arrayp // ERROR "removed repeated nil check"
+}
+
+func f2() {
+ var (
+ intp *int
+ arrayp *[10]int
+ array0p *[0]int
+ bigarrayp *[1<<20]int
+ structp *Struct
+ bigstructp *BigStruct
+ emptyp *Empty
+ empty1p *Empty1
+ )
+
+ _ = *intp // ERROR "generated nil check"
+ _ = *arrayp // ERROR "generated nil check"
+ _ = *array0p // ERROR "generated nil check"
+ _ = *array0p // ERROR "removed repeated nil check"
+ _ = *intp // ERROR "removed repeated nil check"
+ _ = *arrayp // ERROR "removed repeated nil check"
+ _ = *structp // ERROR "generated nil check"
+ _ = *emptyp // ERROR "generated nil check"
+ _ = *arrayp // ERROR "removed repeated nil check"
+ _ = *bigarrayp // ERROR "generated nil check" ARM removed nil check before indirect!!
+ _ = *bigstructp // ERROR "generated nil check"
+ _ = *empty1p // ERROR "generated nil check"
+}
+
+func fx10k() *[10000]int
+var b bool
+
+
+func f3(x *[10000]int) {
+ // Using a huge type and huge offsets so the compiler
+ // does not expect the memory hardware to fault.
+ _ = x[9999] // ERROR "generated nil check"
+
+ for {
+ if x[9999] != 0 { // ERROR "generated nil check"
+ break
+ }
+ }
+
+ x = fx10k()
+ _ = x[9999] // ERROR "generated nil check"
+ if b {
+ _ = x[9999] // ERROR "removed repeated nil check"
+ } else {
+ _ = x[9999] // ERROR "removed repeated nil check"
+ }
+ _ = x[9999] // ERROR "generated nil check"
+
+ x = fx10k()
+ if b {
+ _ = x[9999] // ERROR "generated nil check"
+ } else {
+ _ = x[9999] // ERROR "generated nil check"
+ }
+ _ = x[9999] // ERROR "generated nil check"
+
+ fx10k()
+ // This one is a bit redundant, if we figured out that
+ // x wasn't going to change across the function call.
+ // But it's a little complex to do and in practice doesn't
+ // matter enough.
+ _ = x[9999] // ERROR "generated nil check"
+}
+
+func f3a() {
+ x := fx10k()
+ y := fx10k()
+ z := fx10k()
+ _ = &x[9] // ERROR "generated nil check"
+ y = z
+ _ = &x[9] // ERROR "removed repeated nil check"
+ x = y
+ _ = &x[9] // ERROR "generated nil check"
+}
+
+func f3b() {
+ x := fx10k()
+ y := fx10k()
+ _ = &x[9] // ERROR "generated nil check"
+ y = x
+ _ = &x[9] // ERROR "removed repeated nil check"
+ x = y
+ _ = &x[9] // ERROR "removed repeated nil check"
+}
+
+func fx10() *[10]int
+
+func f4(x *[10]int) {
+ // Most of these have no checks because a real memory reference follows,
+ // and the offset is small enough that if x is nil, the address will still be
+ // in the first unmapped page of memory.
+
+ _ = x[9] // ERROR "removed nil check before indirect"
+
+ for {
+ if x[9] != 0 { // ERROR "removed nil check before indirect"
+ break
+ }
+ }
+
+ x = fx10()
+ _ = x[9] // ERROR "removed nil check before indirect"
+ if b {
+ _ = x[9] // ERROR "removed nil check before indirect"
+ } else {
+ _ = x[9] // ERROR "removed nil check before indirect"
+ }
+ _ = x[9] // ERROR "removed nil check before indirect"
+
+ x = fx10()
+ if b {
+ _ = x[9] // ERROR "removed nil check before indirect"
+ } else {
+ _ = &x[9] // ERROR "generated nil check"
+ }
+ _ = x[9] // ERROR "removed nil check before indirect"
+
+ fx10()
+ _ = x[9] // ERROR "removed nil check before indirect"
+
+ x = fx10()
+ y := fx10()
+ _ = &x[9] // ERROR "generated nil check"
+ y = x
+ _ = &x[9] // ERROR "removed repeated nil check"
+ x = y
+ _ = &x[9] // ERROR "removed repeated nil check"
+}
+
diff --git a/gcc/testsuite/go.test/test/recover.go b/gcc/testsuite/go.test/test/recover.go
index 7c27d7c4d63..071be6667ac 100644
--- a/gcc/testsuite/go.test/test/recover.go
+++ b/gcc/testsuite/go.test/test/recover.go
@@ -10,31 +10,72 @@ package main
import (
"os"
+ "reflect"
"runtime"
)
func main() {
+ // go.tools/ssa/interp still has:
+ // - some lesser bugs in recover()
+ // - incomplete support for reflection
+ interp := os.Getenv("GOSSAINTERP") != ""
+
test1()
test1WithClosures()
test2()
test3()
- // exp/ssa/interp still has some bugs in recover().
- if os.Getenv("GOSSAINTERP") == "" {
+ if !interp {
test4()
- test5()
}
+ test5()
test6()
test6WithClosures()
test7()
+ test8()
+ test9()
+ if !interp {
+ test9reflect1()
+ test9reflect2()
+ }
+ test10()
+ if !interp {
+ test10reflect1()
+ test10reflect2()
+ }
+ test11()
+ if !interp {
+ test11reflect1()
+ test11reflect2()
+ }
+ test12()
+ if !interp {
+ test12reflect1()
+ test12reflect2()
+ }
+ test13()
+ if !interp {
+ test13reflect1()
+ test13reflect2()
+ }
+ test14()
+ if !interp {
+ test14reflect1()
+ test14reflect2()
+ test15()
+ }
}
func die() {
runtime.Breakpoint() // can't depend on panic
}
-func mustRecover(x interface{}) {
- mustNotRecover() // because it's not a defer call
- v := recover()
+func mustRecoverBody(v1, v2, v3, x interface{}) {
+ v := v1
+ if v != nil {
+ println("spurious recover", v)
+ die()
+ }
+ v = v2
if v == nil {
println("missing recover")
die() // panic is useless here
@@ -45,13 +86,21 @@ func mustRecover(x interface{}) {
}
// the value should be gone now regardless
- v = recover()
+ v = v3
if v != nil {
println("recover didn't recover")
die()
}
}
+func doubleRecover() interface{} {
+ return recover()
+}
+
+func mustRecover(x interface{}) {
+ mustRecoverBody(doubleRecover(), recover(), recover(), x)
+}
+
func mustNotRecover() {
v := recover()
if v != nil {
@@ -277,3 +326,180 @@ func test8() {
die()
}
}
+
+type I interface {
+ M()
+}
+
+// pointer receiver, so no wrapper in i.M()
+type T1 struct{}
+
+func (*T1) M() {
+ mustRecoverBody(doubleRecover(), recover(), recover(), 9)
+}
+
+func test9() {
+ var i I = &T1{}
+ defer i.M()
+ panic(9)
+}
+
+func test9reflect1() {
+ f := reflect.ValueOf(&T1{}).Method(0).Interface().(func())
+ defer f()
+ panic(9)
+}
+
+func test9reflect2() {
+ f := reflect.TypeOf(&T1{}).Method(0).Func.Interface().(func(*T1))
+ defer f(&T1{})
+ panic(9)
+}
+
+// word-sized value receiver, so no wrapper in i.M()
+type T2 uintptr
+
+func (T2) M() {
+ mustRecoverBody(doubleRecover(), recover(), recover(), 10)
+}
+
+func test10() {
+ var i I = T2(0)
+ defer i.M()
+ panic(10)
+}
+
+func test10reflect1() {
+ f := reflect.ValueOf(T2(0)).Method(0).Interface().(func())
+ defer f()
+ panic(10)
+}
+
+func test10reflect2() {
+ f := reflect.TypeOf(T2(0)).Method(0).Func.Interface().(func(T2))
+ defer f(T2(0))
+ panic(10)
+}
+
+// tiny receiver, so basic wrapper in i.M()
+type T3 struct{}
+
+func (T3) M() {
+ mustRecoverBody(doubleRecover(), recover(), recover(), 11)
+}
+
+func test11() {
+ var i I = T3{}
+ defer i.M()
+ panic(11)
+}
+
+func test11reflect1() {
+ f := reflect.ValueOf(T3{}).Method(0).Interface().(func())
+ defer f()
+ panic(11)
+}
+
+func test11reflect2() {
+ f := reflect.TypeOf(T3{}).Method(0).Func.Interface().(func(T3))
+ defer f(T3{})
+ panic(11)
+}
+
+// large receiver, so basic wrapper in i.M()
+type T4 [2]string
+
+func (T4) M() {
+ mustRecoverBody(doubleRecover(), recover(), recover(), 12)
+}
+
+func test12() {
+ var i I = T4{}
+ defer i.M()
+ panic(12)
+}
+
+func test12reflect1() {
+ f := reflect.ValueOf(T4{}).Method(0).Interface().(func())
+ defer f()
+ panic(12)
+}
+
+func test12reflect2() {
+ f := reflect.TypeOf(T4{}).Method(0).Func.Interface().(func(T4))
+ defer f(T4{})
+ panic(12)
+}
+
+// enormous receiver, so wrapper splits stack to call M
+type T5 [8192]byte
+
+func (T5) M() {
+ mustRecoverBody(doubleRecover(), recover(), recover(), 13)
+}
+
+func test13() {
+ var i I = T5{}
+ defer i.M()
+ panic(13)
+}
+
+func test13reflect1() {
+ f := reflect.ValueOf(T5{}).Method(0).Interface().(func())
+ defer f()
+ panic(13)
+}
+
+func test13reflect2() {
+ f := reflect.TypeOf(T5{}).Method(0).Func.Interface().(func(T5))
+ defer f(T5{})
+ panic(13)
+}
+
+// enormous receiver + enormous method frame, so wrapper splits stack to call M,
+// and then M splits stack to allocate its frame.
+// recover must look back two frames to find the panic.
+type T6 [8192]byte
+
+var global byte
+
+func (T6) M() {
+ var x [8192]byte
+ x[0] = 1
+ x[1] = 2
+ for i := range x {
+ global += x[i]
+ }
+ mustRecoverBody(doubleRecover(), recover(), recover(), 14)
+}
+
+func test14() {
+ var i I = T6{}
+ defer i.M()
+ panic(14)
+}
+
+func test14reflect1() {
+ f := reflect.ValueOf(T6{}).Method(0).Interface().(func())
+ defer f()
+ panic(14)
+}
+
+func test14reflect2() {
+ f := reflect.TypeOf(T6{}).Method(0).Func.Interface().(func(T6))
+ defer f(T6{})
+ panic(14)
+}
+
+// function created by reflect.MakeFunc
+
+func reflectFunc(args []reflect.Value) (results []reflect.Value) {
+ mustRecoverBody(doubleRecover(), recover(), recover(), 15)
+ return nil
+}
+
+func test15() {
+ f := reflect.MakeFunc(reflect.TypeOf((func())(nil)), reflectFunc).Interface().(func())
+ defer f()
+ panic(15)
+}
diff --git a/gcc/testsuite/go.test/test/recover3.go b/gcc/testsuite/go.test/test/recover3.go
index ebfa0a30757..e17bfb3f6aa 100644
--- a/gcc/testsuite/go.test/test/recover3.go
+++ b/gcc/testsuite/go.test/test/recover3.go
@@ -64,7 +64,8 @@ func main() {
i = 99999
var sl []int
- check("array-bounds", func() { println(p[i]) }, "index out of range")
+ p1 := new([10]int)
+ check("array-bounds", func() { println(p1[i]) }, "index out of range")
check("slice-bounds", func() { println(sl[i]) }, "index out of range")
var inter interface{}
diff --git a/gcc/testsuite/go.test/test/run.go b/gcc/testsuite/go.test/test/run.go
index 5e167d6b0cc..5c94de6400f 100644
--- a/gcc/testsuite/go.test/test/run.go
+++ b/gcc/testsuite/go.test/test/run.go
@@ -27,6 +27,8 @@ import (
"sort"
"strconv"
"strings"
+ "time"
+ "unicode"
)
var (
@@ -113,28 +115,39 @@ func main() {
failed := false
resCount := map[string]int{}
for _, test := range tests {
- <-test.donec
- _, isSkip := test.err.(skipError)
- errStr := "pass"
+ <-test.donec
+ status := "ok "
+ errStr := ""
+ if _, isSkip := test.err.(skipError); isSkip {
+ status = "skip"
+ test.err = nil
+ if !skipOkay[path.Join(test.dir, test.gofile)] {
+ errStr = "unexpected skip for " + path.Join(test.dir, test.gofile) + ": " + errStr
+ status = "FAIL"
+ }
+ }
if test.err != nil {
+ status = "FAIL"
errStr = test.err.Error()
- if !isSkip {
- failed = true
- }
}
- if isSkip && !skipOkay[path.Join(test.dir, test.gofile)] {
- errStr = "unexpected skip for " + path.Join(test.dir, test.gofile) + ": " + errStr
- isSkip = false
+ if status == "FAIL" {
failed = true
}
- resCount[errStr]++
- if isSkip && !*verbose && !*showSkips {
+ resCount[status]++
+ if status == "skip" && !*verbose && !*showSkips {
+ continue
+ }
+ dt := fmt.Sprintf("%.3fs", test.dt.Seconds())
+ if status == "FAIL" {
+ fmt.Printf("# go run run.go -- %s\n%s\nFAIL\t%s\t%s\n",
+ path.Join(test.dir, test.gofile),
+ errStr, test.goFileName(), dt)
continue
}
- if !*verbose && test.err == nil {
+ if !*verbose {
continue
}
- fmt.Printf("%-20s %-20s: %s\n", test.action, test.goFileName(), errStr)
+ fmt.Printf("%s\t%s\t%s\n", status, test.goFileName(), dt)
}
if *summary {
@@ -206,7 +219,8 @@ func check(err error) {
type test struct {
dir, gofile string
donec chan bool // closed when done
-
+ dt time.Duration
+
src string
action string // "compile", "build", etc.
@@ -299,14 +313,17 @@ func goDirPackages(longdir string) ([][]string, error) {
return pkgs, nil
}
+type context struct {
+ GOOS string
+ GOARCH string
+}
+
// shouldTest looks for build tags in a source file and returns
// whether the file should be used according to the tags.
func shouldTest(src string, goos, goarch string) (ok bool, whyNot string) {
if idx := strings.Index(src, "\npackage"); idx >= 0 {
src = src[:idx]
}
- notgoos := "!" + goos
- notgoarch := "!" + goarch
for _, line := range strings.Split(src, "\n") {
line = strings.TrimSpace(line)
if strings.HasPrefix(line, "//") {
@@ -318,34 +335,68 @@ func shouldTest(src string, goos, goarch string) (ok bool, whyNot string) {
if len(line) == 0 || line[0] != '+' {
continue
}
+ ctxt := &context{
+ GOOS: goos,
+ GOARCH: goarch,
+ }
words := strings.Fields(line)
if words[0] == "+build" {
- for _, word := range words {
- switch word {
- case goos, goarch:
- return true, ""
- case notgoos, notgoarch:
- continue
- default:
- if word[0] == '!' {
- // NOT something-else
- return true, ""
- }
+ ok := false
+ for _, word := range words[1:] {
+ if ctxt.match(word) {
+ ok = true
+ break
}
}
- // no matching tag found.
- return false, line
+ if !ok {
+ // no matching tag found.
+ return false, line
+ }
}
}
- // no build tags.
+ // no build tags
return true, ""
}
+func (ctxt *context) match(name string) bool {
+ if name == "" {
+ return false
+ }
+ if i := strings.Index(name, ","); i >= 0 {
+ // comma-separated list
+ return ctxt.match(name[:i]) && ctxt.match(name[i+1:])
+ }
+ if strings.HasPrefix(name, "!!") { // bad syntax, reject always
+ return false
+ }
+ if strings.HasPrefix(name, "!") { // negation
+ return len(name) > 1 && !ctxt.match(name[1:])
+ }
+
+ // Tags must be letters, digits, underscores or dots.
+ // Unlike in Go identifiers, all digits are fine (e.g., "386").
+ for _, c := range name {
+ if !unicode.IsLetter(c) && !unicode.IsDigit(c) && c != '_' && c != '.' {
+ return false
+ }
+ }
+
+ if name == ctxt.GOOS || name == ctxt.GOARCH {
+ return true
+ }
+
+ return false
+}
+
func init() { checkShouldTest() }
// run runs a test.
func (t *test) run() {
- defer close(t.donec)
+ start := time.Now()
+ defer func() {
+ t.dt = time.Since(start)
+ close(t.donec)
+ }()
srcBytes, err := ioutil.ReadFile(t.goFileName())
if err != nil {
@@ -815,7 +866,7 @@ func defaultRunOutputLimit() int {
return cpu
}
-// checkShouldTest runs canity checks on the shouldTest function.
+// checkShouldTest runs sanity checks on the shouldTest function.
func checkShouldTest() {
assert := func(ok bool, _ string) {
if !ok {
@@ -823,11 +874,28 @@ func checkShouldTest() {
}
}
assertNot := func(ok bool, _ string) { assert(!ok, "") }
+
+ // Simple tests.
assert(shouldTest("// +build linux", "linux", "arm"))
assert(shouldTest("// +build !windows", "linux", "arm"))
assertNot(shouldTest("// +build !windows", "windows", "amd64"))
- assertNot(shouldTest("// +build arm 386", "linux", "amd64"))
+
+ // A file with no build tags will always be tested.
assert(shouldTest("// This is a test.", "os", "arch"))
+
+ // Build tags separated by a space are OR-ed together.
+ assertNot(shouldTest("// +build arm 386", "linux", "amd64"))
+
+	// Build tags separated by a comma are AND-ed together.
+ assertNot(shouldTest("// +build !windows,!plan9", "windows", "amd64"))
+ assertNot(shouldTest("// +build !windows,!plan9", "plan9", "386"))
+
+ // Build tags on multiple lines are AND-ed together.
+ assert(shouldTest("// +build !windows\n// +build amd64", "linux", "amd64"))
+ assertNot(shouldTest("// +build !windows\n// +build amd64", "windows", "amd64"))
+
+ // Test that (!a OR !b) matches anything.
+ assert(shouldTest("// +build !windows !plan9", "windows", "amd64"))
}
// envForDir returns a copy of the environment
diff --git a/gcc/testsuite/go.test/test/shift2.go b/gcc/testsuite/go.test/test/shift2.go
index 88ef3c40f52..80e6bbc190d 100644
--- a/gcc/testsuite/go.test/test/shift2.go
+++ b/gcc/testsuite/go.test/test/shift2.go
@@ -20,6 +20,7 @@ var (
i = 1 << s // 1 has type int
j int32 = 1 << s // 1 has type int32; j == 0
k = uint64(1 << s) // 1 has type uint64; k == 1<<33
+ l = g(1 << s) // 1 has type int
m int = 1.0 << s // legal: 1.0 has type int
w int64 = 1.0 << 33 // legal: 1.0<<33 is a constant shift expression
)
diff --git a/gcc/testsuite/go.test/test/sizeof.go b/gcc/testsuite/go.test/test/sizeof.go
index 9aa95677d47..c3db1e5c3ae 100644
--- a/gcc/testsuite/go.test/test/sizeof.go
+++ b/gcc/testsuite/go.test/test/sizeof.go
@@ -1,4 +1,4 @@
-// compile
+// run
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
@@ -58,35 +58,35 @@ func main() {
type (
S1 struct {
- A int32
+ A int64
S2
}
S2 struct {
- B int32
+ B int64
S3
}
S3 struct {
- C int32
+ C int64
S4
}
S4 struct {
- D int32
+ D int64
S5
}
S5 struct {
- E int32
+ E int64
S6
}
S6 struct {
- F int32
+ F int64
S7
}
S7 struct {
- G int32
+ G int64
S8
}
S8 struct {
- H int32
+ H int64
*S1
}
)
@@ -96,24 +96,24 @@ func testDeep() {
switch {
case unsafe.Offsetof(s1.A) != 0:
panic("unsafe.Offsetof(s1.A) != 0")
- case unsafe.Offsetof(s1.B) != 4:
- panic("unsafe.Offsetof(s1.B) != 4")
- case unsafe.Offsetof(s1.C) != 8:
- panic("unsafe.Offsetof(s1.C) != 8")
- case unsafe.Offsetof(s1.D) != 12:
- panic("unsafe.Offsetof(s1.D) != 12")
- case unsafe.Offsetof(s1.E) != 16:
- panic("unsafe.Offsetof(s1.E) != 16")
- case unsafe.Offsetof(s1.F) != 20:
- panic("unsafe.Offsetof(s1.F) != 20")
- case unsafe.Offsetof(s1.G) != 24:
- panic("unsafe.Offsetof(s1.G) != 24")
- case unsafe.Offsetof(s1.H) != 28:
- panic("unsafe.Offsetof(s1.H) != 28")
- case unsafe.Offsetof(s1.S1) != 32:
- panic("unsafe.Offsetof(s1.S1) != 32")
- case unsafe.Offsetof(s1.S1.S2.S3.S4.S5.S6.S7.S8.S1.S2) != 4:
- panic("unsafe.Offsetof(s1.S1.S2.S3.S4.S5.S6.S7.S8.S1.S2) != 4")
+ case unsafe.Offsetof(s1.B) != 8:
+ panic("unsafe.Offsetof(s1.B) != 8")
+ case unsafe.Offsetof(s1.C) != 16:
+ panic("unsafe.Offsetof(s1.C) != 16")
+ case unsafe.Offsetof(s1.D) != 24:
+ panic("unsafe.Offsetof(s1.D) != 24")
+ case unsafe.Offsetof(s1.E) != 32:
+ panic("unsafe.Offsetof(s1.E) != 32")
+ case unsafe.Offsetof(s1.F) != 40:
+ panic("unsafe.Offsetof(s1.F) != 40")
+ case unsafe.Offsetof(s1.G) != 48:
+ panic("unsafe.Offsetof(s1.G) != 48")
+ case unsafe.Offsetof(s1.H) != 56:
+ panic("unsafe.Offsetof(s1.H) != 56")
+ case unsafe.Offsetof(s1.S1) != 64:
+ panic("unsafe.Offsetof(s1.S1) != 64")
+ case unsafe.Offsetof(s1.S1.S2.S3.S4.S5.S6.S7.S8.S1.S2) != 8:
+ panic("unsafe.Offsetof(s1.S1.S2.S3.S4.S5.S6.S7.S8.S1.S2) != 8")
}
}
diff --git a/gcc/testsuite/go.test/test/slice3.go b/gcc/testsuite/go.test/test/slice3.go
new file mode 100644
index 00000000000..3cf34b57e75
--- /dev/null
+++ b/gcc/testsuite/go.test/test/slice3.go
@@ -0,0 +1,156 @@
+// runoutput
+
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test run-time behavior of 3-index slice expressions.
+
+package main
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "strconv"
+)
+
+var bout *bufio.Writer
+
+func main() {
+ bout = bufio.NewWriter(os.Stdout)
+
+ fmt.Fprintf(bout, "%s", programTop)
+ fmt.Fprintf(bout, "func main() {\n")
+
+ index := []string{
+ "0",
+ "1",
+ "2",
+ "3",
+ "10",
+ "20",
+ "vminus1",
+ "v0",
+ "v1",
+ "v2",
+ "v3",
+ "v10",
+ "v20",
+ }
+
+ parse := func(s string) (n int, isconst bool) {
+ if s == "vminus1" {
+ return -1, false
+ }
+ isconst = true
+ if s[0] == 'v' {
+ isconst = false
+ s = s[1:]
+ }
+ n, _ = strconv.Atoi(s)
+ return n, isconst
+ }
+
+ const Cap = 10 // cap of slice, array
+
+ for _, base := range []string{"array", "slice"} {
+ for _, i := range index {
+ iv, iconst := parse(i)
+ for _, j := range index {
+ jv, jconst := parse(j)
+ for _, k := range index {
+ kv, kconst := parse(k)
+ // Avoid errors that would make the program not compile.
+ // Those are tested by slice3err.go.
+ switch {
+ case iconst && jconst && iv > jv,
+ jconst && kconst && jv > kv,
+ iconst && kconst && iv > kv,
+ iconst && base == "array" && iv > Cap,
+ jconst && base == "array" && jv > Cap,
+ kconst && base == "array" && kv > Cap:
+ continue
+ }
+
+ expr := base + "[" + i + ":" + j + ":" + k + "]"
+ var xbase, xlen, xcap int
+ if iv > jv || jv > kv || kv > Cap || iv < 0 || jv < 0 || kv < 0 {
+ xbase, xlen, xcap = -1, -1, -1
+ } else {
+ xbase = iv
+ xlen = jv - iv
+ xcap = kv - iv
+ }
+ fmt.Fprintf(bout, "\tcheckSlice(%q, func() []byte { return %s }, %d, %d, %d)\n", expr, expr, xbase, xlen, xcap)
+ }
+ }
+ }
+ }
+
+ fmt.Fprintf(bout, "\tif !ok { os.Exit(1) }\n")
+ fmt.Fprintf(bout, "}\n")
+ bout.Flush()
+}
+
+var programTop = `
+package main
+
+import (
+ "fmt"
+ "os"
+ "unsafe"
+)
+
+var ok = true
+
+var (
+ array = new([10]byte)
+ slice = array[:]
+
+ vminus1 = -1
+ v0 = 0
+ v1 = 1
+ v2 = 2
+ v3 = 3
+ v4 = 4
+ v5 = 5
+ v10 = 10
+ v20 = 20
+)
+
+func notOK() {
+ if ok {
+ println("BUG:")
+ ok = false
+ }
+}
+
+func checkSlice(desc string, f func() []byte, xbase, xlen, xcap int) {
+ defer func() {
+ if err := recover(); err != nil {
+ if xbase >= 0 {
+ notOK()
+ println(desc, " unexpected panic: ", fmt.Sprint(err))
+ }
+ }
+ // "no panic" is checked below
+ }()
+
+ x := f()
+
+ arrayBase := uintptr(unsafe.Pointer(array))
+ raw := *(*[3]uintptr)(unsafe.Pointer(&x))
+ base, len, cap := raw[0] - arrayBase, raw[1], raw[2]
+ if xbase < 0 {
+ notOK()
+ println(desc, "=", base, len, cap, "want panic")
+ return
+ }
+ if base != uintptr(xbase) || len != uintptr(xlen) || cap != uintptr(xcap) {
+ notOK()
+ println(desc, "=", base, len, cap, "want", xbase, xlen, xcap)
+ }
+}
+
+`
diff --git a/gcc/testsuite/go.test/test/slice3err.go b/gcc/testsuite/go.test/test/slice3err.go
new file mode 100644
index 00000000000..83fb39be4c1
--- /dev/null
+++ b/gcc/testsuite/go.test/test/slice3err.go
@@ -0,0 +1,121 @@
+// errorcheck
+
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+var array *[10]int
+var slice []int
+var str string
+var i, j, k int
+
+func f() {
+ // check what missing arguments are allowed
+ _ = array[:]
+ _ = array[i:]
+ _ = array[:j]
+ _ = array[i:j]
+ _ = array[::] // ERROR "middle index required in 3-index slice" "final index required in 3-index slice"
+ _ = array[i::] // ERROR "middle index required in 3-index slice" "final index required in 3-index slice"
+ _ = array[:j:] // ERROR "final index required in 3-index slice"
+ _ = array[i:j:] // ERROR "final index required in 3-index slice"
+ _ = array[::k] // ERROR "middle index required in 3-index slice"
+ _ = array[i::k] // ERROR "middle index required in 3-index slice"
+ _ = array[:j:k]
+ _ = array[i:j:k]
+
+ _ = slice[:]
+ _ = slice[i:]
+ _ = slice[:j]
+ _ = slice[i:j]
+ _ = slice[::] // ERROR "middle index required in 3-index slice" "final index required in 3-index slice"
+ _ = slice[i::] // ERROR "middle index required in 3-index slice" "final index required in 3-index slice"
+ _ = slice[:j:] // ERROR "final index required in 3-index slice"
+ _ = slice[i:j:] // ERROR "final index required in 3-index slice"
+ _ = slice[::k] // ERROR "middle index required in 3-index slice"
+ _ = slice[i::k] // ERROR "middle index required in 3-index slice"
+ _ = slice[:j:k]
+ _ = slice[i:j:k]
+
+ _ = str[:]
+ _ = str[i:]
+ _ = str[:j]
+ _ = str[i:j]
+ _ = str[::] // ERROR "3-index slice of string" "middle index required in 3-index slice" "final index required in 3-index slice"
+ _ = str[i::] // ERROR "3-index slice of string" "middle index required in 3-index slice" "final index required in 3-index slice"
+ _ = str[:j:] // ERROR "3-index slice of string" "final index required in 3-index slice"
+ _ = str[i:j:] // ERROR "3-index slice of string" "final index required in 3-index slice"
+ _ = str[::k] // ERROR "3-index slice of string" "middle index required in 3-index slice"
+ _ = str[i::k] // ERROR "3-index slice of string" "middle index required in 3-index slice"
+ _ = str[:j:k] // ERROR "3-index slice of string"
+ _ = str[i:j:k] // ERROR "3-index slice of string"
+
+ // check invalid indices
+ _ = array[1:2]
+ _ = array[2:1] // ERROR "invalid slice index|inverted slice"
+ _ = array[2:2]
+ _ = array[i:1]
+ _ = array[1:j]
+ _ = array[1:2:3]
+ _ = array[1:3:2] // ERROR "invalid slice index|inverted slice"
+ _ = array[2:1:3] // ERROR "invalid slice index|inverted slice"
+ _ = array[2:3:1] // ERROR "invalid slice index|inverted slice"
+ _ = array[3:1:2] // ERROR "invalid slice index|inverted slice"
+ _ = array[3:2:1] // ERROR "invalid slice index|inverted slice"
+ _ = array[i:1:2]
+ _ = array[i:2:1] // ERROR "invalid slice index|inverted slice"
+ _ = array[1:j:2]
+ _ = array[2:j:1] // ERROR "invalid slice index"
+ _ = array[1:2:k]
+ _ = array[2:1:k] // ERROR "invalid slice index|inverted slice"
+
+ _ = slice[1:2]
+ _ = slice[2:1] // ERROR "invalid slice index|inverted slice"
+ _ = slice[2:2]
+ _ = slice[i:1]
+ _ = slice[1:j]
+ _ = slice[1:2:3]
+ _ = slice[1:3:2] // ERROR "invalid slice index|inverted slice"
+ _ = slice[2:1:3] // ERROR "invalid slice index|inverted slice"
+ _ = slice[2:3:1] // ERROR "invalid slice index|inverted slice"
+ _ = slice[3:1:2] // ERROR "invalid slice index|inverted slice"
+ _ = slice[3:2:1] // ERROR "invalid slice index|inverted slice"
+ _ = slice[i:1:2]
+ _ = slice[i:2:1] // ERROR "invalid slice index|inverted slice"
+ _ = slice[1:j:2]
+ _ = slice[2:j:1] // ERROR "invalid slice index"
+ _ = slice[1:2:k]
+ _ = slice[2:1:k] // ERROR "invalid slice index|inverted slice"
+
+ _ = str[1:2]
+ _ = str[2:1] // ERROR "invalid slice index|inverted slice"
+ _ = str[2:2]
+ _ = str[i:1]
+ _ = str[1:j]
+
+ // check out of bounds indices on array
+ _ = array[11:11] // ERROR "out of bounds"
+ _ = array[11:12] // ERROR "out of bounds"
+ _ = array[11:] // ERROR "out of bounds"
+ _ = array[:11] // ERROR "out of bounds"
+ _ = array[1:11] // ERROR "out of bounds"
+ _ = array[1:11:12] // ERROR "out of bounds"
+ _ = array[1:2:11] // ERROR "out of bounds"
+ _ = array[1:11:3] // ERROR "out of bounds|invalid slice index"
+ _ = array[11:2:3] // ERROR "out of bounds|inverted slice|invalid slice index"
+ _ = array[11:12:13] // ERROR "out of bounds"
+
+ // slice bounds not checked
+ _ = slice[11:11]
+ _ = slice[11:12]
+ _ = slice[11:]
+ _ = slice[:11]
+ _ = slice[1:11]
+ _ = slice[1:11:12]
+ _ = slice[1:2:11]
+ _ = slice[1:11:3] // ERROR "invalid slice index"
+ _ = slice[11:2:3] // ERROR "invalid slice index|inverted slice"
+ _ = slice[11:12:13]
+}
diff --git a/gcc/testsuite/go.test/test/stress/runstress.go b/gcc/testsuite/go.test/test/stress/runstress.go
index b5adf6a4a56..76ab2a8b4fa 100644
--- a/gcc/testsuite/go.test/test/stress/runstress.go
+++ b/gcc/testsuite/go.test/test/stress/runstress.go
@@ -114,11 +114,16 @@ func stressExec() {
}
}
-func ringf(in <-chan int, out chan<- int, donec chan<- bool) {
+func ringf(in <-chan int, out chan<- int, donec chan bool) {
for {
- n := <-in
+ var n int
+ select {
+ case <-donec:
+ return
+ case n = <-in:
+ }
if n == 0 {
- donec <- true
+ close(donec)
return
}
out <- n - 1
diff --git a/gcc/testsuite/go.test/test/string_lit.go b/gcc/testsuite/go.test/test/string_lit.go
index fea6f553d10..4751b82ccf4 100644
--- a/gcc/testsuite/go.test/test/string_lit.go
+++ b/gcc/testsuite/go.test/test/string_lit.go
@@ -125,6 +125,11 @@ func main() {
s = string(-1)
assert(s, "\xef\xbf\xbd", "negative rune")
+ // the large rune tests yet again, with a slice.
+ rs := []rune{0x10ffff, 0x10ffff + 1, 0xD800, 0xDFFF, -1}
+ s = string(rs)
+ assert(s, "\xf4\x8f\xbf\xbf\xef\xbf\xbd\xef\xbf\xbd\xef\xbf\xbd\xef\xbf\xbd", "large rune slice")
+
assert(string(gr1), gx1, "global ->[]rune")
assert(string(gr2), gx2fix, "global invalid ->[]rune")
assert(string(gb1), gx1, "->[]byte")
diff --git a/gcc/testsuite/go.test/test/syntax/chan1.go b/gcc/testsuite/go.test/test/syntax/chan1.go
index 868a1226d9f..4860422ad87 100644
--- a/gcc/testsuite/go.test/test/syntax/chan1.go
+++ b/gcc/testsuite/go.test/test/syntax/chan1.go
@@ -10,8 +10,8 @@ var c chan int
var v int
func main() {
- if c <- v { // ERROR "send statement.*value.*select"
+ if c <- v { // ERROR "used as value"
}
}
-var _ = c <- v // ERROR "send statement.*value.*select"
+var _ = c <- v // ERROR "used as value"
diff --git a/gcc/testsuite/go.test/test/syntax/semi1.go b/gcc/testsuite/go.test/test/syntax/semi1.go
index 8fbfb206ad7..6e0428121ff 100644
--- a/gcc/testsuite/go.test/test/syntax/semi1.go
+++ b/gcc/testsuite/go.test/test/syntax/semi1.go
@@ -7,7 +7,7 @@
package main
func main() {
- if x; y // ERROR "unexpected semicolon or newline before .?{.?|undefined"
+ if x; y // ERROR "missing .*{.* after if clause|undefined"
{
z // GCCGO_ERROR "undefined"
diff --git a/gcc/testsuite/go.test/test/syntax/semi2.go b/gcc/testsuite/go.test/test/syntax/semi2.go
index cfb0ed17b74..23d7bd0ee88 100644
--- a/gcc/testsuite/go.test/test/syntax/semi2.go
+++ b/gcc/testsuite/go.test/test/syntax/semi2.go
@@ -7,7 +7,7 @@
package main
func main() {
- switch x; y // ERROR "unexpected semicolon or newline before .?{.?|undefined"
+ switch x; y // ERROR "missing .*{.* after switch clause|undefined"
{
z
diff --git a/gcc/testsuite/go.test/test/syntax/semi3.go b/gcc/testsuite/go.test/test/syntax/semi3.go
index 645af7354a5..ca070d8a577 100644
--- a/gcc/testsuite/go.test/test/syntax/semi3.go
+++ b/gcc/testsuite/go.test/test/syntax/semi3.go
@@ -7,7 +7,7 @@
package main
func main() {
- for x; y; z // ERROR "unexpected semicolon or newline before .?{.?|undefined"
+ for x; y; z // ERROR "missing .*{.* after for clause|undefined"
{
z // GCCGO_ERROR "undefined"
diff --git a/gcc/testsuite/go.test/test/syntax/semi4.go b/gcc/testsuite/go.test/test/syntax/semi4.go
index e192348aa20..99c2d22561b 100644
--- a/gcc/testsuite/go.test/test/syntax/semi4.go
+++ b/gcc/testsuite/go.test/test/syntax/semi4.go
@@ -8,7 +8,7 @@ package main
func main() {
for x // GCCGO_ERROR "undefined"
- { // ERROR "unexpected semicolon or newline before .?{.?"
+ { // ERROR "missing .*{.* after for clause"
z // GCCGO_ERROR "undefined"
diff --git a/gcc/testsuite/go.test/test/testlib b/gcc/testsuite/go.test/test/testlib
index de138b1d19d..4a17f4feb9c 100644
--- a/gcc/testsuite/go.test/test/testlib
+++ b/gcc/testsuite/go.test/test/testlib
@@ -16,29 +16,50 @@ pkgs() {
done | sort
}
+_match() {
+ case $1 in
+ *,*)
+ #echo >&2 "match comma separated $1"
+ first=$(echo $1 | sed 's/,.*//')
+ rest=$(echo $1 | sed 's/[^,]*,//')
+ if _match $first && _match $rest; then
+ return 0
+ fi
+ return 1
+ ;;
+ '!'*)
+ #echo >&2 "match negation $1"
+ neg=$(echo $1 | sed 's/^!//')
+ if _match $neg; then
+ return 1
+ fi
+ return 0
+ ;;
+ $GOARCH|$GOOS)
+ #echo >&2 "match GOARCH or GOOS $1"
+ return 0
+ ;;
+ esac
+ return 1
+}
+
# +build aborts execution if the supplied tags don't match,
# i.e. none of the tags (x or !x) matches GOARCH or GOOS.
+build() {
if (( $# == 0 )); then
return
fi
+ m=0
for tag; do
- case $tag in
- $GOARCH|$GOOS)
- #echo >&2 "match $tag in $1"
- return # don't exclude.
- ;;
- '!'$GOARCH|'!'$GOOS)
- ;;
- '!'*)
- # not x where x is neither GOOS nor GOARCH.
- #echo >&2 "match $tag in $1"
- return # don't exclude
- ;;
- esac
+ if _match $tag; then
+ m=1
+ fi
done
- # no match.
- exit 0
+ if [ $m = 0 ]; then
+ #echo >&2 no match
+ exit 0
+ fi
+ unset m
}
compile() {
diff --git a/gcc/testsuite/lib/asan-dg.exp b/gcc/testsuite/lib/asan-dg.exp
index 8990677d51e..ca8e8132e6e 100644
--- a/gcc/testsuite/lib/asan-dg.exp
+++ b/gcc/testsuite/lib/asan-dg.exp
@@ -288,9 +288,10 @@ proc asan-gtest { args } {
if { "$status" == "fail" } {
pass "$testname execution test"
if { ![regexp $regexpr ${output}] } {
- fail "$testname output pattern test, should match $regexpr"
+ fail "$testname output pattern test"
+ send_log "Output should match: $regexpr\n"
} else {
- pass "$testname output pattern test, $regexpr"
+ pass "$testname output pattern test"
}
} elseif { "$status" == "pass" } {
fail "$testname execution test"
diff --git a/gcc/testsuite/lib/tsan-dg.exp b/gcc/testsuite/lib/tsan-dg.exp
new file mode 100644
index 00000000000..f39d8b5329f
--- /dev/null
+++ b/gcc/testsuite/lib/tsan-dg.exp
@@ -0,0 +1,114 @@
+# Copyright (C) 2013 Free Software Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# Return 1 if compilation with -fsanitize=thread is error-free for trivial
+# code, 0 otherwise.
+
+proc check_effective_target_fthread_sanitizer {} {
+ return [check_no_compiler_messages faddress_sanitizer object {
+ void foo (void) { }
+ } "-fPIE -pie -fsanitize=thread"]
+}
+
+#
+# tsan_link_flags -- compute library path and flags to find libtsan.
+# (originally from g++.exp)
+#
+
+proc tsan_link_flags { paths } {
+ global srcdir
+ global ld_library_path
+ global shlib_ext
+
+ set gccpath ${paths}
+ set flags ""
+
+ set shlib_ext [get_shlib_extension]
+
+ if { $gccpath != "" } {
+ if { [file exists "${gccpath}/libsanitizer/tsan/.libs/libtsan.a"]
+ || [file exists "${gccpath}/libsanitizer/tsan/.libs/libtsan.${shlib_ext}"] } {
+ append flags " -B${gccpath}/libsanitizer/tsan/ "
+ append flags " -L${gccpath}/libsanitizer/tsan/.libs "
+ append ld_library_path ":${gccpath}/libsanitizer/tsan/.libs"
+ }
+ } else {
+ global tool_root_dir
+
+ set libtsan [lookfor_file ${tool_root_dir} libtsan]
+ if { $libtsan != "" } {
+ append flags "-L${libtsan} "
+ append ld_library_path ":${libtsan}"
+ }
+ }
+
+ set_ld_library_path_env_vars
+
+ return "$flags"
+}
+
+#
+# tsan_init -- called at the start of each subdir of tests
+#
+
+proc tsan_init { args } {
+ global TEST_ALWAYS_FLAGS
+ global ALWAYS_CXXFLAGS
+ global TOOL_OPTIONS
+ global tsan_saved_TEST_ALWAYS_FLAGS
+
+ set link_flags ""
+ if ![is_remote host] {
+ if [info exists TOOL_OPTIONS] {
+ set link_flags "[tsan_link_flags [get_multilibs ${TOOL_OPTIONS}]]"
+ } else {
+ set link_flags "[tsan_link_flags [get_multilibs]]"
+ }
+ }
+
+ if [info exists TEST_ALWAYS_FLAGS] {
+ set tsan_saved_TEST_ALWAYS_FLAGS $TEST_ALWAYS_FLAGS
+ }
+ if [info exists ALWAYS_CXXFLAGS] {
+ set ALWAYS_CXXFLAGS [concat "{ldflags=$link_flags}" $ALWAYS_CXXFLAGS]
+ set ALWAYS_CXXFLAGS [concat "{additional_flags=-fPIE -pie -fsanitize=thread -g}" $ALWAYS_CXXFLAGS]
+ } else {
+ if [info exists TEST_ALWAYS_FLAGS] {
+ set TEST_ALWAYS_FLAGS "$link_flags -fPIE -pie -fsanitize=thread -g $TEST_ALWAYS_FLAGS"
+ } else {
+ set TEST_ALWAYS_FLAGS "$link_flags -fPIE -pie -fsanitize=thread -g"
+ }
+ }
+ if { $link_flags != "" } {
+ return 1
+ }
+ return 0
+}
+
+#
+# tsan_finish -- called at the start of each subdir of tests
+#
+
+proc tsan_finish { args } {
+ global TEST_ALWAYS_FLAGS
+ global tsan_saved_TEST_ALWAYS_FLAGS
+
+ if [info exists tsan_saved_TEST_ALWAYS_FLAGS] {
+ set TEST_ALWAYS_FLAGS $tsan_saved_TEST_ALWAYS_FLAGS
+ } else {
+ unset TEST_ALWAYS_FLAGS
+ }
+}
diff --git a/gcc/toplev.c b/gcc/toplev.c
index 91a08ae1d7c..042da3bcc39 100644
--- a/gcc/toplev.c
+++ b/gcc/toplev.c
@@ -390,7 +390,7 @@ wrapup_global_declaration_2 (tree decl)
if (TREE_CODE (decl) == VAR_DECL && TREE_STATIC (decl))
{
- struct varpool_node *node;
+ varpool_node *node;
bool needed = true;
node = varpool_get_node (decl);
diff --git a/gcc/tracer.c b/gcc/tracer.c
index 996895006f4..a40cbebd434 100644
--- a/gcc/tracer.c
+++ b/gcc/tracer.c
@@ -230,9 +230,9 @@ find_trace (basic_block bb, basic_block *trace)
static bool
tail_duplicate (void)
{
- fibnode_t *blocks = XCNEWVEC (fibnode_t, last_basic_block);
+ fibnode_t *blocks = XCNEWVEC (fibnode_t, last_basic_block_for_fn (cfun));
basic_block *trace = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
- int *counts = XNEWVEC (int, last_basic_block);
+ int *counts = XNEWVEC (int, last_basic_block_for_fn (cfun));
int ninsns = 0, nduplicated = 0;
gcov_type weighted_insns = 0, traced_insns = 0;
fibheap_t heap = fibheap_new ();
@@ -243,7 +243,7 @@ tail_duplicate (void)
/* Create an oversized sbitmap to reduce the chance that we need to
resize it. */
- bb_seen = sbitmap_alloc (last_basic_block * 2);
+ bb_seen = sbitmap_alloc (last_basic_block_for_fn (cfun) * 2);
bitmap_clear (bb_seen);
initialize_original_copy_tables ();
@@ -256,7 +256,7 @@ tail_duplicate (void)
branch_ratio_cutoff =
(REG_BR_PROB_BASE / 100 * PARAM_VALUE (TRACER_MIN_BRANCH_RATIO));
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
int n = count_insns (bb);
if (!ignore_bb_p (bb))
diff --git a/gcc/trans-mem.c b/gcc/trans-mem.c
index 31dee7678d2..c9af68002ac 100644
--- a/gcc/trans-mem.c
+++ b/gcc/trans-mem.c
@@ -55,14 +55,9 @@
#include "gimple-pretty-print.h"
#include "cfgloop.h"
#include "tree-ssa-address.h"
+#include "predict.h"
-#define PROB_VERY_UNLIKELY (REG_BR_PROB_BASE / 2000 - 1)
-#define PROB_VERY_LIKELY (PROB_ALWAYS - PROB_VERY_UNLIKELY)
-#define PROB_UNLIKELY (REG_BR_PROB_BASE / 5 - 1)
-#define PROB_LIKELY (PROB_ALWAYS - PROB_VERY_LIKELY)
-#define PROB_ALWAYS (REG_BR_PROB_BASE)
-
#define A_RUNINSTRUMENTEDCODE 0x0001
#define A_RUNUNINSTRUMENTEDCODE 0x0002
#define A_SAVELIVEVARIABLES 0x0004
@@ -1961,7 +1956,7 @@ tm_region_init (struct tm_region *region)
/* We could store this information in bb->aux, but we may get called
through get_all_tm_blocks() from another pass that may be already
using bb->aux. */
- bb_regions.safe_grow_cleared (last_basic_block);
+ bb_regions.safe_grow_cleared (last_basic_block_for_fn (cfun));
queue.safe_push (bb);
bb_regions[bb->index] = region;
@@ -2633,7 +2628,7 @@ static vec<tm_region_p>
get_bb_regions_instrumented (bool traverse_clones,
bool include_uninstrumented_p)
{
- unsigned n = last_basic_block;
+ unsigned n = last_basic_block_for_fn (cfun);
struct bb2reg_stuff stuff;
vec<tm_region_p> ret;
@@ -2661,7 +2656,7 @@ compute_transaction_bits (void)
certainly don't need it to calculate CDI_DOMINATOR info. */
gate_tm_init ();
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
bb->flags &= ~BB_IN_TRANSACTION;
for (region = all_tm_regions; region; region = region->next)
@@ -2998,7 +2993,7 @@ execute_tm_mark (void)
&& sub & GTMA_MAY_ENTER_IRREVOCABLE)
continue;
}
- expand_block_tm (r, BASIC_BLOCK (i));
+ expand_block_tm (r, BASIC_BLOCK_FOR_FN (cfun, i));
}
}
@@ -3189,7 +3184,7 @@ execute_tm_edges (void)
FOR_EACH_VEC_ELT (bb_regions, i, r)
if (r != NULL)
- expand_block_edges (r, BASIC_BLOCK (i));
+ expand_block_edges (r, BASIC_BLOCK_FOR_FN (cfun, i));
bb_regions.release ();
@@ -3705,7 +3700,7 @@ tm_memopt_compute_antic (struct tm_region *region,
unsigned int i;
bitmap_iterator bi;
EXECUTE_IF_SET_IN_BITMAP (region->exit_blocks, 0, i, bi)
- BB_VISITED_P (BASIC_BLOCK (i)) = true;
+ BB_VISITED_P (BASIC_BLOCK_FOR_FN (cfun, i)) = true;
}
qin = worklist;
@@ -4577,7 +4572,8 @@ ipa_tm_scan_irr_function (struct cgraph_node *node, bool for_clone)
unsigned i;
EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi)
- ipa_tm_decrement_clone_counts (BASIC_BLOCK (i), for_clone);
+ ipa_tm_decrement_clone_counts (BASIC_BLOCK_FOR_FN (cfun, i),
+ for_clone);
if (old_irr)
{
diff --git a/gcc/tree-affine.h b/gcc/tree-affine.h
index 961b9a6e5d2..68f2908eb6e 100644
--- a/gcc/tree-affine.h
+++ b/gcc/tree-affine.h
@@ -35,7 +35,7 @@ struct aff_comb_elt
widest_int coef;
};
-typedef struct affine_tree_combination
+struct aff_tree
{
/* Type of the result of the combination. */
tree type;
@@ -58,7 +58,7 @@ typedef struct affine_tree_combination
than MAX_AFF_ELTS elements. Type of REST will be either sizetype for
TYPE of POINTER_TYPEs or TYPE. */
tree rest;
-} aff_tree;
+};
widest_int wide_int_ext_for_comb (const widest_int &, aff_tree *);
void aff_combination_const (aff_tree *, tree, const widest_int &);
@@ -83,3 +83,16 @@ bool aff_comb_cannot_overlap_p (aff_tree *, const widest_int &,
/* Debugging functions. */
void debug_aff (aff_tree *);
+
+/* Return true if AFF is actually ZERO. */
+static inline bool
+aff_combination_zero_p (aff_tree *aff)
+{
+ if (!aff)
+ return true;
+
+ if (aff->n == 0 && aff->offset.is_zero ())
+ return true;
+
+ return false;
+}
diff --git a/gcc/tree-call-cdce.c b/gcc/tree-call-cdce.c
index df0c4894130..754b019dfcf 100644
--- a/gcc/tree-call-cdce.c
+++ b/gcc/tree-call-cdce.c
@@ -876,7 +876,7 @@ tree_call_cdce (void)
gimple_stmt_iterator i;
bool something_changed = false;
auto_vec<gimple> cond_dead_built_in_calls;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
/* Collect dead call candidates. */
for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
diff --git a/gcc/tree-cfg.c b/gcc/tree-cfg.c
index 177a049aaf6..dd027b9ed29 100644
--- a/gcc/tree-cfg.c
+++ b/gcc/tree-cfg.c
@@ -184,22 +184,20 @@ init_empty_tree_cfg_for_function (struct function *fn)
{
/* Initialize the basic block array. */
init_flow (fn);
- profile_status_for_function (fn) = PROFILE_ABSENT;
+ profile_status_for_fn (fn) = PROFILE_ABSENT;
n_basic_blocks_for_fn (fn) = NUM_FIXED_BLOCKS;
- last_basic_block_for_function (fn) = NUM_FIXED_BLOCKS;
- vec_alloc (basic_block_info_for_function (fn), initial_cfg_capacity);
- vec_safe_grow_cleared (basic_block_info_for_function (fn),
+ last_basic_block_for_fn (fn) = NUM_FIXED_BLOCKS;
+ vec_alloc (basic_block_info_for_fn (fn), initial_cfg_capacity);
+ vec_safe_grow_cleared (basic_block_info_for_fn (fn),
initial_cfg_capacity);
/* Build a mapping of labels to their associated blocks. */
- vec_alloc (label_to_block_map_for_function (fn), initial_cfg_capacity);
- vec_safe_grow_cleared (label_to_block_map_for_function (fn),
+ vec_alloc (label_to_block_map_for_fn (fn), initial_cfg_capacity);
+ vec_safe_grow_cleared (label_to_block_map_for_fn (fn),
initial_cfg_capacity);
- SET_BASIC_BLOCK_FOR_FUNCTION (fn, ENTRY_BLOCK,
- ENTRY_BLOCK_PTR_FOR_FN (fn));
- SET_BASIC_BLOCK_FOR_FUNCTION (fn, EXIT_BLOCK,
- EXIT_BLOCK_PTR_FOR_FN (fn));
+ SET_BASIC_BLOCK_FOR_FN (fn, ENTRY_BLOCK, ENTRY_BLOCK_PTR_FOR_FN (fn));
+ SET_BASIC_BLOCK_FOR_FN (fn, EXIT_BLOCK, EXIT_BLOCK_PTR_FOR_FN (fn));
ENTRY_BLOCK_PTR_FOR_FN (fn)->next_bb
= EXIT_BLOCK_PTR_FOR_FN (fn);
@@ -246,8 +244,10 @@ build_gimple_cfg (gimple_seq seq)
create_empty_bb (ENTRY_BLOCK_PTR_FOR_FN (cfun));
/* Adjust the size of the array. */
- if (basic_block_info->length () < (size_t) n_basic_blocks_for_fn (cfun))
- vec_safe_grow_cleared (basic_block_info, n_basic_blocks_for_fn (cfun));
+ if (basic_block_info_for_fn (cfun)->length ()
+ < (size_t) n_basic_blocks_for_fn (cfun))
+ vec_safe_grow_cleared (basic_block_info_for_fn (cfun),
+ n_basic_blocks_for_fn (cfun));
/* To speed up statement iterator walks, we first purge dead labels. */
cleanup_dead_labels ();
@@ -304,7 +304,7 @@ replace_loop_annotate ()
}
/* Remove IFN_ANNOTATE. Safeguard for the case loop->latch == NULL. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gsi = gsi_last_bb (bb);
stmt = gsi_stmt (gsi);
@@ -458,7 +458,7 @@ factor_computed_gotos (void)
Examine the last statement in each basic block to see if the block
ends with a computed goto. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi = gsi_last_bb (bb);
gimple last;
@@ -599,7 +599,7 @@ create_bb (void *h, void *e, basic_block after)
not have to clear the newly allocated basic block here. */
bb = alloc_block ();
- bb->index = last_basic_block;
+ bb->index = last_basic_block_for_fn (cfun);
bb->flags = BB_NEW;
set_bb_seq (bb, h ? (gimple_seq) h : NULL);
@@ -607,17 +607,20 @@ create_bb (void *h, void *e, basic_block after)
link_block (bb, after);
/* Grow the basic block array if needed. */
- if ((size_t) last_basic_block == basic_block_info->length ())
+ if ((size_t) last_basic_block_for_fn (cfun)
+ == basic_block_info_for_fn (cfun)->length ())
{
- size_t new_size = last_basic_block + (last_basic_block + 3) / 4;
- vec_safe_grow_cleared (basic_block_info, new_size);
+ size_t new_size =
+ (last_basic_block_for_fn (cfun)
+ + (last_basic_block_for_fn (cfun) + 3) / 4);
+ vec_safe_grow_cleared (basic_block_info_for_fn (cfun), new_size);
}
/* Add the newly created block to the array. */
- SET_BASIC_BLOCK (last_basic_block, bb);
+ SET_BASIC_BLOCK_FOR_FN (cfun, last_basic_block_for_fn (cfun), bb);
n_basic_blocks_for_fn (cfun)++;
- last_basic_block++;
+ last_basic_block_for_fn (cfun)++;
return bb;
}
@@ -634,7 +637,7 @@ fold_cond_expr_cond (void)
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple stmt = last_stmt (bb);
@@ -676,11 +679,12 @@ make_edges (void)
/* Create an edge from entry to the first block with executable
statements in it. */
- make_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), BASIC_BLOCK (NUM_FIXED_BLOCKS),
+ make_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun),
+ BASIC_BLOCK_FOR_FN (cfun, NUM_FIXED_BLOCKS),
EDGE_FALLTHRU);
/* Traverse the basic block array placing edges. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple last = last_stmt (bb);
bool fallthru;
@@ -834,7 +838,7 @@ assign_discriminators (void)
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
edge e;
edge_iterator ei;
@@ -947,7 +951,7 @@ end_recording_case_labels (void)
edge_to_cases = NULL;
EXECUTE_IF_SET_IN_BITMAP (touched_switch_bbs, 0, i, bi)
{
- basic_block bb = BASIC_BLOCK (i);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
if (bb)
{
gimple stmt = last_stmt (bb);
@@ -1031,7 +1035,8 @@ label_to_block_fn (struct function *ifun, tree dest)
and undefined variable warnings quite right. */
if (seen_error () && uid < 0)
{
- gimple_stmt_iterator gsi = gsi_start_bb (BASIC_BLOCK (NUM_FIXED_BLOCKS));
+ gimple_stmt_iterator gsi =
+ gsi_start_bb (BASIC_BLOCK_FOR_FN (cfun, NUM_FIXED_BLOCKS));
gimple stmt;
stmt = gimple_build_label (dest);
@@ -1052,7 +1057,7 @@ make_abnormal_goto_edges (basic_block bb, bool for_call)
basic_block target_bb;
gimple_stmt_iterator gsi;
- FOR_EACH_BB (target_bb)
+ FOR_EACH_BB_FN (target_bb, cfun)
{
for (gsi = gsi_start_bb (target_bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
@@ -1228,11 +1233,11 @@ void
cleanup_dead_labels (void)
{
basic_block bb;
- label_for_bb = XCNEWVEC (struct label_record, last_basic_block);
+ label_for_bb = XCNEWVEC (struct label_record, last_basic_block_for_fn (cfun));
/* Find a suitable label for each block. We use the first user-defined
label if there is one, or otherwise just the first label we see. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator i;
@@ -1268,7 +1273,7 @@ cleanup_dead_labels (void)
/* Now redirect all jumps/branches to the selected label.
First do so for each block ending in a control statement. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple stmt = last_stmt (bb);
tree label, new_label;
@@ -1360,7 +1365,7 @@ cleanup_dead_labels (void)
/* Finally, purge dead labels. All user-defined labels and labels that
can be the target of non-local gotos and labels which have their
address taken are preserved. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator i;
tree label_for_this_bb = label_for_bb[bb->index].label;
@@ -1484,7 +1489,7 @@ group_case_labels (void)
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple stmt = last_stmt (bb);
if (stmt && gimple_code (stmt) == GIMPLE_SWITCH)
@@ -2086,8 +2091,8 @@ gimple_debug_bb (basic_block bb)
basic_block
gimple_debug_bb_n (int n)
{
- gimple_debug_bb (BASIC_BLOCK (n));
- return BASIC_BLOCK (n);
+ gimple_debug_bb (BASIC_BLOCK_FOR_FN (cfun, n));
+ return BASIC_BLOCK_FOR_FN (cfun, n);
}
@@ -2116,7 +2121,7 @@ gimple_dump_cfg (FILE *file, int flags)
dump_function_header (file, current_function_decl, flags);
fprintf (file, ";; \n%d basic blocks, %d edges, last basic block %d.\n\n",
n_basic_blocks_for_fn (cfun), n_edges_for_fn (cfun),
- last_basic_block);
+ last_basic_block_for_fn (cfun));
brief_dump_cfg (file, flags | TDF_COMMENT);
fprintf (file, "\n");
@@ -2157,7 +2162,7 @@ dump_cfg_stats (FILE *file)
SCALE (size), LABEL (size));
num_edges = 0;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
num_edges += EDGE_COUNT (bb->succs);
size = num_edges * sizeof (struct edge_def);
total += size;
@@ -2379,7 +2384,7 @@ stmt_ends_bb_p (gimple t)
void
delete_tree_cfg_annotations (void)
{
- vec_free (label_to_block_map);
+ vec_free (label_to_block_map_for_fn (cfun));
}
@@ -4281,7 +4286,8 @@ verify_gimple_label (gimple stmt)
uid = LABEL_DECL_UID (decl);
if (cfun->cfg
- && (uid == -1 || (*label_to_block_map)[uid] != gimple_bb (stmt)))
+ && (uid == -1
+ || (*label_to_block_map_for_fn (cfun))[uid] != gimple_bb (stmt)))
{
error ("incorrect entry in label_to_block_map");
err |= true;
@@ -4890,7 +4896,7 @@ gimple_verify_flow_info (void)
err = 1;
}
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
bool found_ctrl_stmt = false;
@@ -7048,7 +7054,7 @@ dump_function_to_file (tree fndecl, FILE *file, int flags)
if (fun && fun->decl == fndecl
&& fun->cfg
- && basic_block_info_for_function (fun))
+ && basic_block_info_for_fn (fun))
{
/* If the CFG has been built, emit a CFG-based dump. */
if (!ignore_topmost_bind)
@@ -7237,7 +7243,7 @@ print_loop (FILE *file, struct loop *loop, int indent, int verbosity)
if (verbosity >= 1)
{
fprintf (file, "%s{\n", s_indent);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (bb->loop_father == loop)
print_loops_bb (file, bb, indent, verbosity);
@@ -7429,7 +7435,7 @@ gimple_flow_call_edges_add (sbitmap blocks)
{
int i;
int blocks_split = 0;
- int last_bb = last_basic_block;
+ int last_bb = last_basic_block_for_fn (cfun);
bool check_last_block = false;
if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
@@ -7480,7 +7486,7 @@ gimple_flow_call_edges_add (sbitmap blocks)
return or not... */
for (i = 0; i < last_bb; i++)
{
- basic_block bb = BASIC_BLOCK (i);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
gimple_stmt_iterator gsi;
gimple stmt, last_stmt;
@@ -7609,7 +7615,7 @@ remove_edge_and_dominated_blocks (edge e)
EXECUTE_IF_SET_IN_BITMAP (df, 0, i, bi)
{
- bb = BASIC_BLOCK (i);
+ bb = BASIC_BLOCK_FOR_FN (cfun, i);
bitmap_set_bit (df_idom,
get_immediate_dominator (CDI_DOMINATORS, bb)->index);
}
@@ -7647,7 +7653,7 @@ remove_edge_and_dominated_blocks (edge e)
the dominance frontier of E. Therefore, Y belongs to DF_IDOM. */
EXECUTE_IF_SET_IN_BITMAP (df_idom, 0, i, bi)
{
- bb = BASIC_BLOCK (i);
+ bb = BASIC_BLOCK_FOR_FN (cfun, i);
for (dbb = first_dom_son (CDI_DOMINATORS, bb);
dbb;
dbb = next_dom_son (CDI_DOMINATORS, dbb))
@@ -7700,7 +7706,7 @@ gimple_purge_all_dead_eh_edges (const_bitmap blocks)
EXECUTE_IF_SET_IN_BITMAP (blocks, 0, i, bi)
{
- basic_block bb = BASIC_BLOCK (i);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
/* Earlier gimple_purge_dead_eh_edges could have removed
this basic block already. */
@@ -7757,7 +7763,7 @@ gimple_purge_all_dead_abnormal_call_edges (const_bitmap blocks)
EXECUTE_IF_SET_IN_BITMAP (blocks, 0, i, bi)
{
- basic_block bb = BASIC_BLOCK (i);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
/* Earlier gimple_purge_dead_abnormal_call_edges could have removed
this basic block already. */
@@ -7874,11 +7880,11 @@ gimple_account_profile_record (basic_block bb, int after_pass,
{
record->size[after_pass]
+= estimate_num_insns (gsi_stmt (i), &eni_size_weights);
- if (profile_status == PROFILE_READ)
+ if (profile_status_for_fn (cfun) == PROFILE_READ)
record->time[after_pass]
+= estimate_num_insns (gsi_stmt (i),
&eni_time_weights) * bb->count;
- else if (profile_status == PROFILE_GUESSED)
+ else if (profile_status_for_fn (cfun) == PROFILE_GUESSED)
record->time[after_pass]
+= estimate_num_insns (gsi_stmt (i),
&eni_time_weights) * bb->frequency;
@@ -7936,7 +7942,7 @@ split_critical_edges (void)
expensive. So we want to enable recording of edge to CASE_LABEL_EXPR
mappings around the calls to split_edge. */
start_recording_case_labels ();
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
{
FOR_EACH_EDGE (e, ei, bb->succs)
{
@@ -8327,7 +8333,7 @@ execute_fixup_cfg (void)
FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs)
e->count = apply_scale (e->count, count_scale);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
bb->count = apply_scale (bb->count, count_scale);
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
diff --git a/gcc/tree-cfgcleanup.c b/gcc/tree-cfgcleanup.c
index ab8a394a150..949b21d749d 100644
--- a/gcc/tree-cfgcleanup.c
+++ b/gcc/tree-cfgcleanup.c
@@ -551,7 +551,7 @@ fixup_noreturn_call (gimple stmt)
SET_USE (use_p, error_mark_node);
}
EXECUTE_IF_SET_IN_BITMAP (blocks, 0, bb_index, bi)
- delete_basic_block (BASIC_BLOCK (bb_index));
+ delete_basic_block (BASIC_BLOCK_FOR_FN (cfun, bb_index));
BITMAP_FREE (blocks);
release_ssa_name (op);
}
@@ -585,8 +585,8 @@ split_bbs_on_noreturn_calls (void)
BB is present in the cfg. */
if (bb == NULL
|| bb->index < NUM_FIXED_BLOCKS
- || bb->index >= last_basic_block
- || BASIC_BLOCK (bb->index) != bb
+ || bb->index >= last_basic_block_for_fn (cfun)
+ || BASIC_BLOCK_FOR_FN (cfun, bb->index) != bb
|| !gimple_call_noreturn_p (stmt))
continue;
@@ -640,12 +640,12 @@ cleanup_tree_cfg_1 (void)
recording of edge to CASE_LABEL_EXPR. */
start_recording_case_labels ();
- /* Start by iterating over all basic blocks. We cannot use FOR_EACH_BB,
+ /* Start by iterating over all basic blocks. We cannot use FOR_EACH_BB_FN,
since the basic blocks may get removed. */
- n = last_basic_block;
+ n = last_basic_block_for_fn (cfun);
for (i = NUM_FIXED_BLOCKS; i < n; i++)
{
- bb = BASIC_BLOCK (i);
+ bb = BASIC_BLOCK_FOR_FN (cfun, i);
if (bb)
retval |= cleanup_tree_cfg_bb (bb);
}
@@ -658,7 +658,7 @@ cleanup_tree_cfg_1 (void)
if (i < NUM_FIXED_BLOCKS)
continue;
- bb = BASIC_BLOCK (i);
+ bb = BASIC_BLOCK_FOR_FN (cfun, i);
if (!bb)
continue;
@@ -918,7 +918,7 @@ merge_phi_nodes (void)
calculate_dominance_info (CDI_DOMINATORS);
/* Find all PHI nodes that we may be able to merge. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
basic_block dest;
diff --git a/gcc/tree-complex.c b/gcc/tree-complex.c
index 80a978e52dd..8c9a3aa7c5a 100644
--- a/gcc/tree-complex.c
+++ b/gcc/tree-complex.c
@@ -207,7 +207,7 @@ init_dont_simulate_again (void)
gimple phi;
bool saw_a_complex_op = false;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
@@ -1636,8 +1636,8 @@ tree_lower_complex (void)
update_parameter_components ();
/* ??? Ideally we'd traverse the blocks in breadth-first order. */
- old_last_basic_block = last_basic_block;
- FOR_EACH_BB (bb)
+ old_last_basic_block = last_basic_block_for_fn (cfun);
+ FOR_EACH_BB_FN (bb, cfun)
{
if (bb->index >= old_last_basic_block)
continue;
diff --git a/gcc/tree-core.h b/gcc/tree-core.h
index 704c70c28ed..f6f17f82040 100644
--- a/gcc/tree-core.h
+++ b/gcc/tree-core.h
@@ -667,10 +667,10 @@ enum annot_expr_kind {
/* When processing aliases at the symbol table level, we need the
declaration of target. For this reason we need to queue aliases and
process them after all declarations has been produced. */
-typedef struct GTY(()) alias_pair {
+struct GTY(()) alias_pair {
tree decl;
tree target;
-} alias_pair;
+};
/* An initialization priority. */
typedef unsigned short priority_type;
@@ -1116,10 +1116,10 @@ struct GTY(()) tree_vec {
element. INDEX can optionally design the position of VALUE: in arrays,
it is the index where VALUE has to be placed; in structures, it is the
FIELD_DECL of the member. */
-typedef struct GTY(()) constructor_elt_d {
+struct GTY(()) constructor_elt {
tree index;
tree value;
-} constructor_elt;
+};
struct GTY(()) tree_constructor {
struct tree_typed typed;
@@ -1168,17 +1168,17 @@ struct GTY(()) tree_exp {
/* Immediate use linking structure. This structure is used for maintaining
a doubly linked list of uses of an SSA_NAME. */
-typedef struct GTY(()) ssa_use_operand_d {
- struct ssa_use_operand_d* GTY((skip(""))) prev;
- struct ssa_use_operand_d* GTY((skip(""))) next;
+struct GTY(()) ssa_use_operand_t {
+ struct ssa_use_operand_t* GTY((skip(""))) prev;
+ struct ssa_use_operand_t* GTY((skip(""))) next;
/* Immediate uses for a given SSA name are maintained as a cyclic
list. To recognize the root of this list, the location field
needs to point to the original SSA name. Since statements and
SSA names are of different data types, we need this union. See
- the explanation in struct immediate_use_iterator_d. */
+ the explanation in struct imm_use_iterator. */
union { gimple stmt; tree ssa_name; } GTY((skip(""))) loc;
tree *GTY((skip(""))) use;
-} ssa_use_operand_t;
+};
struct GTY(()) tree_ssa_name {
struct tree_typed typed;
@@ -1199,13 +1199,13 @@ struct GTY(()) tree_ssa_name {
"!POINTER_TYPE_P (TREE_TYPE ((tree)&%1)) : 2"))) info;
/* Immediate uses list for this SSA_NAME. */
- struct ssa_use_operand_d imm_uses;
+ struct ssa_use_operand_t imm_uses;
};
struct GTY(()) phi_arg_d {
/* imm_use MUST be the first element in struct because we do some
pointer arithmetic with it. See phi_arg_index_from_use. */
- struct ssa_use_operand_d imm_use;
+ struct ssa_use_operand_t imm_use;
tree def;
location_t locus;
};
diff --git a/gcc/tree-data-ref.c b/gcc/tree-data-ref.c
index db394e5449d..5f4585a6c82 100644
--- a/gcc/tree-data-ref.c
+++ b/gcc/tree-data-ref.c
@@ -4320,8 +4320,8 @@ compute_all_dependences (vec<data_reference_p> datarefs,
typedef struct data_ref_loc_d
{
- /* Position of the memory reference. */
- tree *pos;
+ /* The memory reference. */
+ tree ref;
/* True if the memory reference is read. */
bool is_read;
@@ -4336,7 +4336,7 @@ get_references_in_stmt (gimple stmt, vec<data_ref_loc, va_heap> *references)
{
bool clobbers_memory = false;
data_ref_loc ref;
- tree *op0, *op1;
+ tree op0, op1;
enum gimple_code stmt_code = gimple_code (stmt);
/* ASM_EXPR and CALL_EXPR may embed arbitrary side effects.
@@ -4346,16 +4346,26 @@ get_references_in_stmt (gimple stmt, vec<data_ref_loc, va_heap> *references)
&& !(gimple_call_flags (stmt) & ECF_CONST))
{
/* Allow IFN_GOMP_SIMD_LANE in their own loops. */
- if (gimple_call_internal_p (stmt)
- && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
- {
- struct loop *loop = gimple_bb (stmt)->loop_father;
- tree uid = gimple_call_arg (stmt, 0);
- gcc_assert (TREE_CODE (uid) == SSA_NAME);
- if (loop == NULL
- || loop->simduid != SSA_NAME_VAR (uid))
+ if (gimple_call_internal_p (stmt))
+ switch (gimple_call_internal_fn (stmt))
+ {
+ case IFN_GOMP_SIMD_LANE:
+ {
+ struct loop *loop = gimple_bb (stmt)->loop_father;
+ tree uid = gimple_call_arg (stmt, 0);
+ gcc_assert (TREE_CODE (uid) == SSA_NAME);
+ if (loop == NULL
+ || loop->simduid != SSA_NAME_VAR (uid))
+ clobbers_memory = true;
+ break;
+ }
+ case IFN_MASK_LOAD:
+ case IFN_MASK_STORE:
+ break;
+ default:
clobbers_memory = true;
- }
+ break;
+ }
else
clobbers_memory = true;
}
@@ -4369,15 +4379,15 @@ get_references_in_stmt (gimple stmt, vec<data_ref_loc, va_heap> *references)
if (stmt_code == GIMPLE_ASSIGN)
{
tree base;
- op0 = gimple_assign_lhs_ptr (stmt);
- op1 = gimple_assign_rhs1_ptr (stmt);
+ op0 = gimple_assign_lhs (stmt);
+ op1 = gimple_assign_rhs1 (stmt);
- if (DECL_P (*op1)
- || (REFERENCE_CLASS_P (*op1)
- && (base = get_base_address (*op1))
+ if (DECL_P (op1)
+ || (REFERENCE_CLASS_P (op1)
+ && (base = get_base_address (op1))
&& TREE_CODE (base) != SSA_NAME))
{
- ref.pos = op1;
+ ref.ref = op1;
ref.is_read = true;
references->safe_push (ref);
}
@@ -4386,16 +4396,35 @@ get_references_in_stmt (gimple stmt, vec<data_ref_loc, va_heap> *references)
{
unsigned i, n;
- op0 = gimple_call_lhs_ptr (stmt);
+ ref.is_read = false;
+ if (gimple_call_internal_p (stmt))
+ switch (gimple_call_internal_fn (stmt))
+ {
+ case IFN_MASK_LOAD:
+ ref.is_read = true;
+ case IFN_MASK_STORE:
+ ref.ref = fold_build2 (MEM_REF,
+ ref.is_read
+ ? TREE_TYPE (gimple_call_lhs (stmt))
+ : TREE_TYPE (gimple_call_arg (stmt, 3)),
+ gimple_call_arg (stmt, 0),
+ gimple_call_arg (stmt, 1));
+ references->safe_push (ref);
+ return false;
+ default:
+ break;
+ }
+
+ op0 = gimple_call_lhs (stmt);
n = gimple_call_num_args (stmt);
for (i = 0; i < n; i++)
{
- op1 = gimple_call_arg_ptr (stmt, i);
+ op1 = gimple_call_arg (stmt, i);
- if (DECL_P (*op1)
- || (REFERENCE_CLASS_P (*op1) && get_base_address (*op1)))
+ if (DECL_P (op1)
+ || (REFERENCE_CLASS_P (op1) && get_base_address (op1)))
{
- ref.pos = op1;
+ ref.ref = op1;
ref.is_read = true;
references->safe_push (ref);
}
@@ -4404,11 +4433,11 @@ get_references_in_stmt (gimple stmt, vec<data_ref_loc, va_heap> *references)
else
return clobbers_memory;
- if (*op0
- && (DECL_P (*op0)
- || (REFERENCE_CLASS_P (*op0) && get_base_address (*op0))))
+ if (op0
+ && (DECL_P (op0)
+ || (REFERENCE_CLASS_P (op0) && get_base_address (op0))))
{
- ref.pos = op0;
+ ref.ref = op0;
ref.is_read = false;
references->safe_push (ref);
}
@@ -4435,7 +4464,7 @@ find_data_references_in_stmt (struct loop *nest, gimple stmt,
FOR_EACH_VEC_ELT (references, i, ref)
{
dr = create_data_ref (nest, loop_containing_stmt (stmt),
- *ref->pos, stmt, ref->is_read);
+ ref->ref, stmt, ref->is_read);
gcc_assert (dr != NULL);
datarefs->safe_push (dr);
}
@@ -4464,7 +4493,7 @@ graphite_find_data_references_in_stmt (loop_p nest, loop_p loop, gimple stmt,
FOR_EACH_VEC_ELT (references, i, ref)
{
- dr = create_data_ref (nest, loop, *ref->pos, stmt, ref->is_read);
+ dr = create_data_ref (nest, loop, ref->ref, stmt, ref->is_read);
gcc_assert (dr != NULL);
datarefs->safe_push (dr);
}
diff --git a/gcc/tree-data-ref.h b/gcc/tree-data-ref.h
index 8cd7ef801da..76e1b820801 100644
--- a/gcc/tree-data-ref.h
+++ b/gcc/tree-data-ref.h
@@ -239,11 +239,11 @@ enum data_dependence_direction {
typedef vec<tree> affine_fn;
-typedef struct
+struct conflict_function
{
unsigned n;
affine_fn fns[MAX_DIM];
-} conflict_function;
+};
/* What is a subscript? Given two array accesses a subscript is the
tuple composed of the access functions for a given dimension.
diff --git a/gcc/tree-dfa.c b/gcc/tree-dfa.c
index aed65dfdb2f..d57ae35640c 100644
--- a/gcc/tree-dfa.c
+++ b/gcc/tree-dfa.c
@@ -80,7 +80,7 @@ renumber_gimple_stmt_uids (void)
basic_block bb;
set_gimple_stmt_max_uid (cfun, 0);
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
{
gimple_stmt_iterator bsi;
for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
@@ -279,7 +279,7 @@ collect_dfa_stats (struct dfa_stats_d *dfa_stats_p ATTRIBUTE_UNUSED)
memset ((void *)dfa_stats_p, 0, sizeof (struct dfa_stats_d));
/* Walk all the statements in the function counting references. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator si;
@@ -739,7 +739,7 @@ dump_enumerated_decls (FILE *file, int flags)
memset (&wi, '\0', sizeof (wi));
wi.info = (void *) &decl_list;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
diff --git a/gcc/tree-eh.c b/gcc/tree-eh.c
index 85dc79f458d..9097378a989 100644
--- a/gcc/tree-eh.c
+++ b/gcc/tree-eh.c
@@ -2685,7 +2685,7 @@ tree_could_trap_p (tree expr)
LTO partition. */
if (DECL_WEAK (expr) && !DECL_COMDAT (expr))
{
- struct varpool_node *node;
+ varpool_node *node;
if (!DECL_EXTERNAL (expr))
return false;
node = varpool_variable_node (varpool_get_node (expr), NULL);
@@ -3304,7 +3304,7 @@ execute_lower_resx (void)
mnt_map = pointer_map_create ();
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple last = last_stmt (bb);
if (last && is_gimple_resx (last))
@@ -3710,7 +3710,7 @@ execute_lower_eh_dispatch (void)
assign_filter_values ();
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple last = last_stmt (bb);
if (last == NULL)
@@ -3810,7 +3810,7 @@ mark_reachable_handlers (sbitmap *r_reachablep, sbitmap *lp_reachablep)
else
lp_reachable = NULL;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
diff --git a/gcc/tree-emutls.c b/gcc/tree-emutls.c
index 9ba25fc4676..4595b1c0dd9 100644
--- a/gcc/tree-emutls.c
+++ b/gcc/tree-emutls.c
@@ -373,7 +373,7 @@ emutls_index (tree decl)
static tree
emutls_decl (tree decl)
{
- struct varpool_node *var;
+ varpool_node *var;
unsigned int i;
i = emutls_index (decl);
@@ -435,7 +435,7 @@ gen_emutls_addr (tree decl, struct lower_emutls_data *d)
addr = access_vars[index];
if (addr == NULL)
{
- struct varpool_node *cvar;
+ varpool_node *cvar;
tree cdecl;
gimple x;
@@ -638,7 +638,7 @@ lower_emutls_function_body (struct cgraph_node *node)
create a node for it. */
d.builtin_node = cgraph_get_create_node (d.builtin_decl);
- FOR_EACH_BB (d.bb)
+ FOR_EACH_BB_FN (d.bb, cfun)
{
gimple_stmt_iterator gsi;
unsigned int i, nedge;
@@ -707,10 +707,10 @@ lower_emutls_function_body (struct cgraph_node *node)
Callback for varpool_for_variable_and_aliases. */
static bool
-create_emultls_var (struct varpool_node *var, void *data)
+create_emultls_var (varpool_node *var, void *data)
{
tree cdecl;
- struct varpool_node *cvar;
+ varpool_node *cvar;
cdecl = new_emutls_decl (var->decl,
var->alias && var->analyzed
@@ -743,7 +743,7 @@ create_emultls_var (struct varpool_node *var, void *data)
static unsigned int
ipa_lower_emutls (void)
{
- struct varpool_node *var;
+ varpool_node *var;
struct cgraph_node *func;
bool any_aliases = false;
tree ctor_body = NULL;
diff --git a/gcc/tree-if-conv.c b/gcc/tree-if-conv.c
index 7f6a1503085..59404ec14e2 100644
--- a/gcc/tree-if-conv.c
+++ b/gcc/tree-if-conv.c
@@ -110,8 +110,12 @@ along with GCC; see the file COPYING3. If not see
#include "tree-chrec.h"
#include "tree-data-ref.h"
#include "tree-scalar-evolution.h"
+#include "tree-ssa-loop-ivopts.h"
+#include "tree-ssa-address.h"
#include "tree-pass.h"
#include "dbgcnt.h"
+#include "expr.h"
+#include "optabs.h"
/* List of basic blocks in if-conversion-suitable order. */
static basic_block *ifc_bbs;
@@ -194,39 +198,48 @@ init_bb_predicate (basic_block bb)
set_bb_predicate (bb, boolean_true_node);
}
-/* Free the predicate of basic block BB. */
+/* Release the SSA_NAMEs associated with the predicate of basic block BB,
+ but don't actually free it. */
static inline void
-free_bb_predicate (basic_block bb)
+release_bb_predicate (basic_block bb)
{
- gimple_seq stmts;
-
- if (!bb_has_predicate (bb))
- return;
-
- /* Release the SSA_NAMEs created for the gimplification of the
- predicate. */
- stmts = bb_predicate_gimplified_stmts (bb);
+ gimple_seq stmts = bb_predicate_gimplified_stmts (bb);
if (stmts)
{
gimple_stmt_iterator i;
for (i = gsi_start (stmts); !gsi_end_p (i); gsi_next (&i))
free_stmt_operands (cfun, gsi_stmt (i));
+ set_bb_predicate_gimplified_stmts (bb, NULL);
}
+}
+
+/* Free the predicate of basic block BB. */
+static inline void
+free_bb_predicate (basic_block bb)
+{
+ if (!bb_has_predicate (bb))
+ return;
+
+ release_bb_predicate (bb);
free (bb->aux);
bb->aux = NULL;
}
-/* Free the predicate of BB and reinitialize it with the true
- predicate. */
+/* Reinitialize predicate of BB with the true predicate. */
static inline void
reset_bb_predicate (basic_block bb)
{
- free_bb_predicate (bb);
- init_bb_predicate (bb);
+ if (!bb_has_predicate (bb))
+ init_bb_predicate (bb);
+ else
+ {
+ release_bb_predicate (bb);
+ set_bb_predicate (bb, boolean_true_node);
+ }
}
/* Returns a new SSA_NAME of type TYPE that is assigned the value of
@@ -382,10 +395,11 @@ fold_build_cond_expr (tree type, tree cond, tree rhs, tree lhs)
return build3 (COND_EXPR, type, cond, rhs, lhs);
}
-/* Add condition NC to the predicate list of basic block BB. */
+/* Add condition NC to the predicate list of basic block BB. LOOP is
+ the loop to be if-converted. */
static inline void
-add_to_predicate_list (basic_block bb, tree nc)
+add_to_predicate_list (struct loop *loop, basic_block bb, tree nc)
{
tree bc, *tp;
@@ -393,7 +407,14 @@ add_to_predicate_list (basic_block bb, tree nc)
return;
if (!is_predicated (bb))
- bc = nc;
+ {
+ /* If dominance tells us this basic block is always executed, don't
+ record any predicates for it. */
+ if (dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
+ return;
+
+ bc = nc;
+ }
else
{
bc = bb_predicate (bb);
@@ -434,7 +455,7 @@ add_to_dst_predicate_list (struct loop *loop, edge e,
cond = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
prev_cond, cond);
- add_to_predicate_list (e->dest, cond);
+ add_to_predicate_list (loop, e->dest, cond);
}
/* Return true if one of the successor edges of BB exits LOOP. */
@@ -464,7 +485,8 @@ bb_with_exit_edge_p (struct loop *loop, basic_block bb)
- there is a virtual PHI in a BB other than the loop->header. */
static bool
-if_convertible_phi_p (struct loop *loop, basic_block bb, gimple phi)
+if_convertible_phi_p (struct loop *loop, basic_block bb, gimple phi,
+ bool any_mask_load_store)
{
if (dump_file && (dump_flags & TDF_DETAILS))
{
@@ -479,7 +501,7 @@ if_convertible_phi_p (struct loop *loop, basic_block bb, gimple phi)
return false;
}
- if (flag_tree_loop_if_convert_stores)
+ if (flag_tree_loop_if_convert_stores || any_mask_load_store)
return true;
/* When the flag_tree_loop_if_convert_stores is not set, check
@@ -695,6 +717,56 @@ ifcvt_could_trap_p (gimple stmt, vec<data_reference_p> refs)
return gimple_could_trap_p (stmt);
}
+/* Return true if STMT could be converted into a masked load or store
+ (conditional load or store based on a mask computed from bb predicate). */
+
+static bool
+ifcvt_can_use_mask_load_store (gimple stmt)
+{
+ tree lhs, ref;
+ enum machine_mode mode;
+ basic_block bb = gimple_bb (stmt);
+ bool is_load;
+
+ if (!(flag_tree_loop_vectorize || bb->loop_father->force_vect)
+ || bb->loop_father->dont_vectorize
+ || !gimple_assign_single_p (stmt)
+ || gimple_has_volatile_ops (stmt))
+ return false;
+
+ /* Check whether this is a load or store. */
+ lhs = gimple_assign_lhs (stmt);
+ if (gimple_store_p (stmt))
+ {
+ if (!is_gimple_val (gimple_assign_rhs1 (stmt)))
+ return false;
+ is_load = false;
+ ref = lhs;
+ }
+ else if (gimple_assign_load_p (stmt))
+ {
+ is_load = true;
+ ref = gimple_assign_rhs1 (stmt);
+ }
+ else
+ return false;
+
+ if (may_be_nonaddressable_p (ref))
+ return false;
+
+ /* Mask should be integer mode of the same size as the load/store
+ mode. */
+ mode = TYPE_MODE (TREE_TYPE (lhs));
+ if (int_mode_for_mode (mode) == BLKmode
+ || VECTOR_MODE_P (mode))
+ return false;
+
+ if (can_vec_mask_load_store_p (mode, is_load))
+ return true;
+
+ return false;
+}
+
/* Return true when STMT is if-convertible.
GIMPLE_ASSIGN statement is not if-convertible if,
@@ -704,7 +776,8 @@ ifcvt_could_trap_p (gimple stmt, vec<data_reference_p> refs)
static bool
if_convertible_gimple_assign_stmt_p (gimple stmt,
- vec<data_reference_p> refs)
+ vec<data_reference_p> refs,
+ bool *any_mask_load_store)
{
tree lhs = gimple_assign_lhs (stmt);
basic_block bb;
@@ -730,10 +803,21 @@ if_convertible_gimple_assign_stmt_p (gimple stmt,
return false;
}
+ /* tree-into-ssa.c uses GF_PLF_1, so avoid it, because
+ in between if_convertible_loop_p and combine_blocks
+ we can perform loop versioning. */
+ gimple_set_plf (stmt, GF_PLF_2, false);
+
if (flag_tree_loop_if_convert_stores)
{
if (ifcvt_could_trap_p (stmt, refs))
{
+ if (ifcvt_can_use_mask_load_store (stmt))
+ {
+ gimple_set_plf (stmt, GF_PLF_2, true);
+ *any_mask_load_store = true;
+ return true;
+ }
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "tree could trap...\n");
return false;
@@ -743,6 +827,12 @@ if_convertible_gimple_assign_stmt_p (gimple stmt,
if (gimple_assign_rhs_could_trap_p (stmt))
{
+ if (ifcvt_can_use_mask_load_store (stmt))
+ {
+ gimple_set_plf (stmt, GF_PLF_2, true);
+ *any_mask_load_store = true;
+ return true;
+ }
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "tree could trap...\n");
return false;
@@ -754,6 +844,12 @@ if_convertible_gimple_assign_stmt_p (gimple stmt,
&& bb != bb->loop_father->header
&& !bb_with_exit_edge_p (bb->loop_father, bb))
{
+ if (ifcvt_can_use_mask_load_store (stmt))
+ {
+ gimple_set_plf (stmt, GF_PLF_2, true);
+ *any_mask_load_store = true;
+ return true;
+ }
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "LHS is not var\n");
@@ -772,7 +868,8 @@ if_convertible_gimple_assign_stmt_p (gimple stmt,
- it is a GIMPLE_LABEL or a GIMPLE_COND. */
static bool
-if_convertible_stmt_p (gimple stmt, vec<data_reference_p> refs)
+if_convertible_stmt_p (gimple stmt, vec<data_reference_p> refs,
+ bool *any_mask_load_store)
{
switch (gimple_code (stmt))
{
@@ -782,7 +879,8 @@ if_convertible_stmt_p (gimple stmt, vec<data_reference_p> refs)
return true;
case GIMPLE_ASSIGN:
- return if_convertible_gimple_assign_stmt_p (stmt, refs);
+ return if_convertible_gimple_assign_stmt_p (stmt, refs,
+ any_mask_load_store);
case GIMPLE_CALL:
{
@@ -984,7 +1082,7 @@ get_loop_body_in_if_conv_order (const struct loop *loop)
S1 will be predicated with "x", and
S2 will be predicated with "!x". */
-static bool
+static void
predicate_bbs (loop_p loop)
{
unsigned int i;
@@ -996,7 +1094,7 @@ predicate_bbs (loop_p loop)
{
basic_block bb = ifc_bbs[i];
tree cond;
- gimple_stmt_iterator itr;
+ gimple stmt;
/* The loop latch is always executed and has no extra conditions
to be processed: skip it. */
@@ -1007,52 +1105,32 @@ predicate_bbs (loop_p loop)
}
cond = bb_predicate (bb);
-
- for (itr = gsi_start_bb (bb); !gsi_end_p (itr); gsi_next (&itr))
+ stmt = last_stmt (bb);
+ if (stmt && gimple_code (stmt) == GIMPLE_COND)
{
- gimple stmt = gsi_stmt (itr);
-
- switch (gimple_code (stmt))
- {
- case GIMPLE_LABEL:
- case GIMPLE_ASSIGN:
- case GIMPLE_CALL:
- case GIMPLE_DEBUG:
- break;
-
- case GIMPLE_COND:
- {
- tree c2;
- edge true_edge, false_edge;
- location_t loc = gimple_location (stmt);
- tree c = fold_build2_loc (loc, gimple_cond_code (stmt),
- boolean_type_node,
- gimple_cond_lhs (stmt),
- gimple_cond_rhs (stmt));
-
- /* Add new condition into destination's predicate list. */
- extract_true_false_edges_from_block (gimple_bb (stmt),
- &true_edge, &false_edge);
-
- /* If C is true, then TRUE_EDGE is taken. */
- add_to_dst_predicate_list (loop, true_edge,
- unshare_expr (cond),
- unshare_expr (c));
-
- /* If C is false, then FALSE_EDGE is taken. */
- c2 = build1_loc (loc, TRUTH_NOT_EXPR,
- boolean_type_node, unshare_expr (c));
- add_to_dst_predicate_list (loop, false_edge,
- unshare_expr (cond), c2);
-
- cond = NULL_TREE;
- break;
- }
-
- default:
- /* Not handled yet in if-conversion. */
- return false;
- }
+ tree c2;
+ edge true_edge, false_edge;
+ location_t loc = gimple_location (stmt);
+ tree c = fold_build2_loc (loc, gimple_cond_code (stmt),
+ boolean_type_node,
+ gimple_cond_lhs (stmt),
+ gimple_cond_rhs (stmt));
+
+ /* Add new condition into destination's predicate list. */
+ extract_true_false_edges_from_block (gimple_bb (stmt),
+ &true_edge, &false_edge);
+
+ /* If C is true, then TRUE_EDGE is taken. */
+ add_to_dst_predicate_list (loop, true_edge, unshare_expr (cond),
+ unshare_expr (c));
+
+ /* If C is false, then FALSE_EDGE is taken. */
+ c2 = build1_loc (loc, TRUTH_NOT_EXPR, boolean_type_node,
+ unshare_expr (c));
+ add_to_dst_predicate_list (loop, false_edge,
+ unshare_expr (cond), c2);
+
+ cond = NULL_TREE;
}
/* If current bb has only one successor, then consider it as an
@@ -1067,7 +1145,7 @@ predicate_bbs (loop_p loop)
if (cond == NULL_TREE)
cond = boolean_true_node;
- add_to_predicate_list (bb_n, cond);
+ add_to_predicate_list (loop, bb_n, cond);
}
}
@@ -1075,8 +1153,6 @@ predicate_bbs (loop_p loop)
reset_bb_predicate (loop->header);
gcc_assert (bb_predicate_gimplified_stmts (loop->header) == NULL
&& bb_predicate_gimplified_stmts (loop->latch) == NULL);
-
- return true;
}
/* Return true when LOOP is if-convertible. This is a helper function
@@ -1087,7 +1163,7 @@ static bool
if_convertible_loop_p_1 (struct loop *loop,
vec<loop_p> *loop_nest,
vec<data_reference_p> *refs,
- vec<ddr_p> *ddrs)
+ vec<ddr_p> *ddrs, bool *any_mask_load_store)
{
bool res;
unsigned int i;
@@ -1121,9 +1197,24 @@ if_convertible_loop_p_1 (struct loop *loop,
exit_bb = bb;
}
- res = predicate_bbs (loop);
- if (!res)
- return false;
+ for (i = 0; i < loop->num_nodes; i++)
+ {
+ basic_block bb = ifc_bbs[i];
+ gimple_stmt_iterator gsi;
+
+ for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+ switch (gimple_code (gsi_stmt (gsi)))
+ {
+ case GIMPLE_LABEL:
+ case GIMPLE_ASSIGN:
+ case GIMPLE_CALL:
+ case GIMPLE_DEBUG:
+ case GIMPLE_COND:
+ break;
+ default:
+ return false;
+ }
+ }
if (flag_tree_loop_if_convert_stores)
{
@@ -1135,6 +1226,7 @@ if_convertible_loop_p_1 (struct loop *loop,
DR_WRITTEN_AT_LEAST_ONCE (dr) = -1;
DR_RW_UNCONDITIONALLY (dr) = -1;
}
+ predicate_bbs (loop);
}
for (i = 0; i < loop->num_nodes; i++)
@@ -1142,17 +1234,31 @@ if_convertible_loop_p_1 (struct loop *loop,
basic_block bb = ifc_bbs[i];
gimple_stmt_iterator itr;
- for (itr = gsi_start_phis (bb); !gsi_end_p (itr); gsi_next (&itr))
- if (!if_convertible_phi_p (loop, bb, gsi_stmt (itr)))
- return false;
-
/* Check the if-convertibility of statements in predicated BBs. */
- if (is_predicated (bb))
+ if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
for (itr = gsi_start_bb (bb); !gsi_end_p (itr); gsi_next (&itr))
- if (!if_convertible_stmt_p (gsi_stmt (itr), *refs))
+ if (!if_convertible_stmt_p (gsi_stmt (itr), *refs,
+ any_mask_load_store))
return false;
}
+ if (flag_tree_loop_if_convert_stores)
+ for (i = 0; i < loop->num_nodes; i++)
+ free_bb_predicate (ifc_bbs[i]);
+
+ /* Checking PHIs needs to be done after stmts, as the fact whether there
+ are any masked loads or stores affects the tests. */
+ for (i = 0; i < loop->num_nodes; i++)
+ {
+ basic_block bb = ifc_bbs[i];
+ gimple_stmt_iterator itr;
+
+ for (itr = gsi_start_phis (bb); !gsi_end_p (itr); gsi_next (&itr))
+ if (!if_convertible_phi_p (loop, bb, gsi_stmt (itr),
+ *any_mask_load_store))
+ return false;
+ }
+
if (dump_file)
fprintf (dump_file, "Applying if-conversion\n");
@@ -1168,7 +1274,7 @@ if_convertible_loop_p_1 (struct loop *loop,
- if its basic blocks and phi nodes are if convertible. */
static bool
-if_convertible_loop_p (struct loop *loop)
+if_convertible_loop_p (struct loop *loop, bool *any_mask_load_store)
{
edge e;
edge_iterator ei;
@@ -1209,7 +1315,8 @@ if_convertible_loop_p (struct loop *loop)
refs.create (5);
ddrs.create (25);
stack_vec<loop_p, 3> loop_nest;
- res = if_convertible_loop_p_1 (loop, &loop_nest, &refs, &ddrs);
+ res = if_convertible_loop_p_1 (loop, &loop_nest, &refs, &ddrs,
+ any_mask_load_store);
if (flag_tree_loop_if_convert_stores)
{
@@ -1395,7 +1502,7 @@ predicate_all_scalar_phis (struct loop *loop)
gimplification of the predicates. */
static void
-insert_gimplified_predicates (loop_p loop)
+insert_gimplified_predicates (loop_p loop, bool any_mask_load_store)
{
unsigned int i;
@@ -1416,7 +1523,8 @@ insert_gimplified_predicates (loop_p loop)
stmts = bb_predicate_gimplified_stmts (bb);
if (stmts)
{
- if (flag_tree_loop_if_convert_stores)
+ if (flag_tree_loop_if_convert_stores
+ || any_mask_load_store)
{
/* Insert the predicate of the BB just after the label,
as the if-conversion of memory writes will use this
@@ -1575,9 +1683,49 @@ predicate_mem_writes (loop_p loop)
}
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
- if ((stmt = gsi_stmt (gsi))
- && gimple_assign_single_p (stmt)
- && gimple_vdef (stmt))
+ if (!gimple_assign_single_p (stmt = gsi_stmt (gsi)))
+ continue;
+ else if (gimple_plf (stmt, GF_PLF_2))
+ {
+ tree lhs = gimple_assign_lhs (stmt);
+ tree rhs = gimple_assign_rhs1 (stmt);
+ tree ref, addr, ptr, masktype, mask_op0, mask_op1, mask;
+ gimple new_stmt;
+ int bitsize = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (lhs)));
+
+ masktype = build_nonstandard_integer_type (bitsize, 1);
+ mask_op0 = build_int_cst (masktype, swap ? 0 : -1);
+ mask_op1 = build_int_cst (masktype, swap ? -1 : 0);
+ ref = TREE_CODE (lhs) == SSA_NAME ? rhs : lhs;
+ mark_addressable (ref);
+ addr = force_gimple_operand_gsi (&gsi, build_fold_addr_expr (ref),
+ true, NULL_TREE, true,
+ GSI_SAME_STMT);
+ cond = force_gimple_operand_gsi_1 (&gsi, unshare_expr (cond),
+ is_gimple_condexpr, NULL_TREE,
+ true, GSI_SAME_STMT);
+ mask = fold_build_cond_expr (masktype, unshare_expr (cond),
+ mask_op0, mask_op1);
+ mask = ifc_temp_var (masktype, mask, &gsi);
+ ptr = build_int_cst (reference_alias_ptr_type (ref), 0);
+ /* Copy points-to info if possible. */
+ if (TREE_CODE (addr) == SSA_NAME && !SSA_NAME_PTR_INFO (addr))
+ copy_ref_info (build2 (MEM_REF, TREE_TYPE (ref), addr, ptr),
+ ref);
+ if (TREE_CODE (lhs) == SSA_NAME)
+ {
+ new_stmt
+ = gimple_build_call_internal (IFN_MASK_LOAD, 3, addr,
+ ptr, mask);
+ gimple_call_set_lhs (new_stmt, lhs);
+ }
+ else
+ new_stmt
+ = gimple_build_call_internal (IFN_MASK_STORE, 4, addr, ptr,
+ mask, rhs);
+ gsi_replace (&gsi, new_stmt, false);
+ }
+ else if (gimple_vdef (stmt))
{
tree lhs = gimple_assign_lhs (stmt);
tree rhs = gimple_assign_rhs1 (stmt);
@@ -1647,7 +1795,7 @@ remove_conditions_and_labels (loop_p loop)
blocks. Replace PHI nodes with conditional modify expressions. */
static void
-combine_blocks (struct loop *loop)
+combine_blocks (struct loop *loop, bool any_mask_load_store)
{
basic_block bb, exit_bb, merge_target_bb;
unsigned int orig_loop_num_nodes = loop->num_nodes;
@@ -1655,11 +1803,12 @@ combine_blocks (struct loop *loop)
edge e;
edge_iterator ei;
+ predicate_bbs (loop);
remove_conditions_and_labels (loop);
- insert_gimplified_predicates (loop);
+ insert_gimplified_predicates (loop, any_mask_load_store);
predicate_all_scalar_phis (loop);
- if (flag_tree_loop_if_convert_stores)
+ if (flag_tree_loop_if_convert_stores || any_mask_load_store)
predicate_mem_writes (loop);
/* Merge basic blocks: first remove all the edges in the loop,
@@ -1749,28 +1898,76 @@ combine_blocks (struct loop *loop)
ifc_bbs = NULL;
}
-/* If-convert LOOP when it is legal. For the moment this pass has no
- profitability analysis. Returns true when something changed. */
+/* Version LOOP before if-converting it, the original loop
+ will be then if-converted, the new copy of the loop will not,
+ and the LOOP_VECTORIZED internal call will be guarding which
+ loop to execute. The vectorizer pass will fold this
+ internal call into either true or false. */
static bool
+version_loop_for_if_conversion (struct loop *loop)
+{
+ basic_block cond_bb;
+ tree cond = make_ssa_name (boolean_type_node, NULL);
+ struct loop *new_loop;
+ gimple g;
+ gimple_stmt_iterator gsi;
+
+ g = gimple_build_call_internal (IFN_LOOP_VECTORIZED, 2,
+ build_int_cst (integer_type_node, loop->num),
+ integer_zero_node);
+ gimple_call_set_lhs (g, cond);
+
+ initialize_original_copy_tables ();
+ new_loop = loop_version (loop, cond, &cond_bb,
+ REG_BR_PROB_BASE, REG_BR_PROB_BASE,
+ REG_BR_PROB_BASE, true);
+ free_original_copy_tables ();
+ if (new_loop == NULL)
+ return false;
+ new_loop->dont_vectorize = true;
+ new_loop->force_vect = false;
+ gsi = gsi_last_bb (cond_bb);
+ gimple_call_set_arg (g, 1, build_int_cst (integer_type_node, new_loop->num));
+ gsi_insert_before (&gsi, g, GSI_SAME_STMT);
+ update_ssa (TODO_update_ssa);
+ return true;
+}
+
+/* If-convert LOOP when it is legal. For the moment this pass has no
+ profitability analysis. Returns non-zero todo flags when something
+ changed. */
+
+static unsigned int
tree_if_conversion (struct loop *loop)
{
- bool changed = false;
+ unsigned int todo = 0;
ifc_bbs = NULL;
+ bool any_mask_load_store = false;
- if (!if_convertible_loop_p (loop)
+ if (!if_convertible_loop_p (loop, &any_mask_load_store)
|| !dbg_cnt (if_conversion_tree))
goto cleanup;
+ if (any_mask_load_store
+ && ((!flag_tree_loop_vectorize && !loop->force_vect)
+ || loop->dont_vectorize))
+ goto cleanup;
+
+ if (any_mask_load_store && !version_loop_for_if_conversion (loop))
+ goto cleanup;
+
/* Now all statements are if-convertible. Combine all the basic
blocks into one huge basic block doing the if-conversion
on-the-fly. */
- combine_blocks (loop);
-
- if (flag_tree_loop_if_convert_stores)
- mark_virtual_operands_for_renaming (cfun);
+ combine_blocks (loop, any_mask_load_store);
- changed = true;
+ todo |= TODO_cleanup_cfg;
+ if (flag_tree_loop_if_convert_stores || any_mask_load_store)
+ {
+ mark_virtual_operands_for_renaming (cfun);
+ todo |= TODO_update_ssa_only_virtuals;
+ }
cleanup:
if (ifc_bbs)
@@ -1784,7 +1981,7 @@ tree_if_conversion (struct loop *loop)
ifc_bbs = NULL;
}
- return changed;
+ return todo;
}
/* Tree if-conversion pass management. */
@@ -1793,7 +1990,6 @@ static unsigned int
main_tree_if_conversion (void)
{
struct loop *loop;
- bool changed = false;
unsigned todo = 0;
if (number_of_loops (cfun) <= 1)
@@ -1802,20 +1998,14 @@ main_tree_if_conversion (void)
FOR_EACH_LOOP (loop, 0)
if (flag_tree_loop_if_convert == 1
|| flag_tree_loop_if_convert_stores == 1
- || flag_tree_loop_vectorize
- || loop->force_vect)
- changed |= tree_if_conversion (loop);
-
- if (changed)
- todo |= TODO_cleanup_cfg;
-
- if (changed && flag_tree_loop_if_convert_stores)
- todo |= TODO_update_ssa_only_virtuals;
+ || ((flag_tree_loop_vectorize || loop->force_vect)
+ && !loop->dont_vectorize))
+ todo |= tree_if_conversion (loop);
#ifdef ENABLE_CHECKING
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
gcc_assert (!bb->aux);
}
#endif
diff --git a/gcc/tree-inline.c b/gcc/tree-inline.c
index 4ae912a875a..28aac19b9af 100644
--- a/gcc/tree-inline.c
+++ b/gcc/tree-inline.c
@@ -1271,7 +1271,9 @@ remap_gimple_stmt (gimple stmt, copy_body_data *id)
|| ! SSA_NAME_VAR (retval)
|| TREE_CODE (SSA_NAME_VAR (retval)) != RESULT_DECL)))
{
- copy = gimple_build_assign (id->retvar, retval);
+ copy = gimple_build_assign (id->do_not_unshare
+ ? id->retvar : unshare_expr (id->retvar),
+ retval);
/* id->retvar is already substituted. Skip it on later remapping. */
skip_first = true;
}
@@ -1790,7 +1792,7 @@ copy_bb (copy_body_data *id, basic_block bb, int frequency_scale,
{
edge->frequency = new_freq;
if (dump_file
- && profile_status_for_function (cfun) != PROFILE_ABSENT
+ && profile_status_for_fn (cfun) != PROFILE_ABSENT
&& (edge_freq > edge->frequency + 10
|| edge_freq < edge->frequency - 10))
{
@@ -2206,7 +2208,7 @@ initialize_cfun (tree new_fndecl, tree callee_fndecl, gcov_type count)
init_empty_tree_cfg ();
- profile_status_for_function (cfun) = profile_status_for_function (src_cfun);
+ profile_status_for_fn (cfun) = profile_status_for_fn (src_cfun);
ENTRY_BLOCK_PTR_FOR_FN (cfun)->count =
(ENTRY_BLOCK_PTR_FOR_FN (src_cfun)->count * count_scale /
REG_BR_PROB_BASE);
@@ -2486,7 +2488,7 @@ copy_cfg_body (copy_body_data * id, gcov_type count, int frequency_scale,
new_bb->loop_father = entry_block_map->loop_father;
}
- last = last_basic_block;
+ last = last_basic_block_for_fn (cfun);
/* Now that we've duplicated the blocks, duplicate their edges. */
bool can_make_abormal_goto
@@ -2542,15 +2544,16 @@ copy_cfg_body (copy_body_data * id, gcov_type count, int frequency_scale,
/* Zero out AUX fields of newly created block during EH edge
insertion. */
- for (; last < last_basic_block; last++)
+ for (; last < last_basic_block_for_fn (cfun); last++)
{
if (need_debug_cleanup)
- maybe_move_debug_stmts_to_successors (id, BASIC_BLOCK (last));
- BASIC_BLOCK (last)->aux = NULL;
+ maybe_move_debug_stmts_to_successors (id,
+ BASIC_BLOCK_FOR_FN (cfun, last));
+ BASIC_BLOCK_FOR_FN (cfun, last)->aux = NULL;
/* Update call edge destinations. This can not be done before loop
info is updated, because we may split basic blocks. */
if (id->transform_call_graph_edges == CB_CGE_DUPLICATE)
- redirect_all_calls (id, BASIC_BLOCK (last));
+ redirect_all_calls (id, BASIC_BLOCK_FOR_FN (cfun, last));
}
entry_block_map->aux = NULL;
exit_block_map->aux = NULL;
@@ -4441,11 +4444,11 @@ static void
fold_marked_statements (int first, struct pointer_set_t *statements)
{
for (; first < n_basic_blocks_for_fn (cfun); first++)
- if (BASIC_BLOCK (first))
+ if (BASIC_BLOCK_FOR_FN (cfun, first))
{
gimple_stmt_iterator gsi;
- for (gsi = gsi_start_bb (BASIC_BLOCK (first));
+ for (gsi = gsi_start_bb (BASIC_BLOCK_FOR_FN (cfun, first));
!gsi_end_p (gsi);
gsi_next (&gsi))
if (pointer_set_contains (statements, gsi_stmt (gsi)))
@@ -4471,7 +4474,7 @@ fold_marked_statements (int first, struct pointer_set_t *statements)
break;
}
if (gsi_end_p (i2))
- i2 = gsi_start_bb (BASIC_BLOCK (first));
+ i2 = gsi_start_bb (BASIC_BLOCK_FOR_FN (cfun, first));
else
gsi_next (&i2);
while (1)
@@ -4495,7 +4498,8 @@ fold_marked_statements (int first, struct pointer_set_t *statements)
is mood anyway. */
if (maybe_clean_or_replace_eh_stmt (old_stmt,
new_stmt))
- gimple_purge_dead_eh_edges (BASIC_BLOCK (first));
+ gimple_purge_dead_eh_edges (
+ BASIC_BLOCK_FOR_FN (cfun, first));
break;
}
gsi_next (&i2);
@@ -4515,7 +4519,8 @@ fold_marked_statements (int first, struct pointer_set_t *statements)
new_stmt);
if (maybe_clean_or_replace_eh_stmt (old_stmt, new_stmt))
- gimple_purge_dead_eh_edges (BASIC_BLOCK (first));
+ gimple_purge_dead_eh_edges (BASIC_BLOCK_FOR_FN (cfun,
+ first));
}
}
}
@@ -4564,7 +4569,7 @@ optimize_inline_calls (tree fn)
will split id->current_basic_block, and the new blocks will
follow it; we'll trudge through them, processing their CALL_EXPRs
along the way. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
inlined_p |= gimple_expand_calls_inline (bb, &id);
pop_gimplify_context (NULL);
@@ -4607,7 +4612,8 @@ optimize_inline_calls (tree fn)
| TODO_cleanup_cfg
| (gimple_in_ssa_p (cfun) ? TODO_remove_unused_locals : 0)
| (gimple_in_ssa_p (cfun) ? TODO_update_address_taken : 0)
- | (profile_status != PROFILE_ABSENT ? TODO_rebuild_frequencies : 0));
+ | (profile_status_for_fn (cfun) != PROFILE_ABSENT
+ ? TODO_rebuild_frequencies : 0));
}
/* Passed to walk_tree. Copies the node pointed to, if appropriate. */
diff --git a/gcc/tree-inline.h b/gcc/tree-inline.h
index d871fc4e4b6..00c0b0cf738 100644
--- a/gcc/tree-inline.h
+++ b/gcc/tree-inline.h
@@ -36,7 +36,7 @@ enum copy_body_cge_which
/* Data required for function body duplication. */
-typedef struct copy_body_data
+struct copy_body_data
{
/* FUNCTION_DECL for function being inlined, or in general the
source function providing the original trees. */
@@ -135,7 +135,7 @@ typedef struct copy_body_data
/* Cilk keywords currently need to replace some variables that
ordinary nested functions do not. */
bool remap_var_for_cilk;
-} copy_body_data;
+};
/* Weights of constructions for estimate_num_insns. */
diff --git a/gcc/tree-into-ssa.c b/gcc/tree-into-ssa.c
index 0067cfe61e8..8e539f2ebcf 100644
--- a/gcc/tree-into-ssa.c
+++ b/gcc/tree-into-ssa.c
@@ -558,7 +558,7 @@ set_livein_block (tree var, basic_block bb)
if (def_block_index == -1
|| ! dominated_by_p (CDI_DOMINATORS, bb,
- BASIC_BLOCK (def_block_index)))
+ BASIC_BLOCK_FOR_FN (cfun, def_block_index)))
info->need_phi_state = NEED_PHI_STATE_MAYBE;
}
else
@@ -821,7 +821,7 @@ prune_unused_phi_nodes (bitmap phis, bitmap kills, bitmap uses)
adef = 1;
EXECUTE_IF_SET_IN_BITMAP (to_remove, 0, i, bi)
{
- def_bb = BASIC_BLOCK (i);
+ def_bb = BASIC_BLOCK_FOR_FN (cfun, i);
defs[adef].bb_index = i;
defs[adef].dfs_num = bb_dom_dfs_in (CDI_DOMINATORS, def_bb);
defs[adef + 1].bb_index = i;
@@ -895,7 +895,8 @@ prune_unused_phi_nodes (bitmap phis, bitmap kills, bitmap uses)
p = b;
else
{
- use_bb = get_immediate_dominator (CDI_DOMINATORS, BASIC_BLOCK (b));
+ use_bb = get_immediate_dominator (CDI_DOMINATORS,
+ BASIC_BLOCK_FOR_FN (cfun, b));
p = find_dfsnum_interval (defs, n_defs,
bb_dom_dfs_in (CDI_DOMINATORS, use_bb));
if (!bitmap_bit_p (phis, p))
@@ -907,7 +908,7 @@ prune_unused_phi_nodes (bitmap phis, bitmap kills, bitmap uses)
continue;
/* Add the new uses to the worklist. */
- def_bb = BASIC_BLOCK (p);
+ def_bb = BASIC_BLOCK_FOR_FN (cfun, p);
FOR_EACH_EDGE (e, ei, def_bb->preds)
{
u = e->src->index;
@@ -963,7 +964,7 @@ mark_phi_for_rewrite (basic_block bb, gimple phi)
bitmap_set_bit (blocks_with_phis_to_rewrite, idx);
- n = (unsigned) last_basic_block + 1;
+ n = (unsigned) last_basic_block_for_fn (cfun) + 1;
if (phis_to_rewrite.length () < n)
phis_to_rewrite.safe_grow_cleared (n);
@@ -1004,7 +1005,7 @@ insert_phi_nodes_for (tree var, bitmap phi_insertion_points, bool update_p)
/* And insert the PHI nodes. */
EXECUTE_IF_SET_IN_BITMAP (phi_insertion_points, 0, bb_index, bi)
{
- bb = BASIC_BLOCK (bb_index);
+ bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
if (update_p)
mark_block_for_update (bb);
@@ -2314,12 +2315,12 @@ rewrite_into_ssa (void)
/* Initialize the set of interesting blocks. The callback
mark_def_sites will add to this set those blocks that the renamer
should process. */
- interesting_blocks = sbitmap_alloc (last_basic_block);
+ interesting_blocks = sbitmap_alloc (last_basic_block_for_fn (cfun));
bitmap_clear (interesting_blocks);
/* Initialize dominance frontier. */
- dfs = XNEWVEC (bitmap_head, last_basic_block);
- FOR_EACH_BB (bb)
+ dfs = XNEWVEC (bitmap_head, last_basic_block_for_fn (cfun));
+ FOR_EACH_BB_FN (bb, cfun)
bitmap_initialize (&dfs[bb->index], &bitmap_default_obstack);
/* 1- Compute dominance frontiers. */
@@ -2336,7 +2337,7 @@ rewrite_into_ssa (void)
rewrite_blocks (ENTRY_BLOCK_PTR_FOR_FN (cfun), REWRITE_ALL);
/* Free allocated memory. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
bitmap_clear (&dfs[bb->index]);
free (dfs);
@@ -2634,7 +2635,7 @@ prepare_def_site_for (tree name, bool insert_phi_p)
bb = gimple_bb (stmt);
if (bb)
{
- gcc_checking_assert (bb->index < last_basic_block);
+ gcc_checking_assert (bb->index < last_basic_block_for_fn (cfun));
mark_block_for_update (bb);
mark_def_interesting (name, stmt, bb, insert_phi_p);
}
@@ -3021,8 +3022,9 @@ insert_updated_phi_nodes_for (tree var, bitmap_head *dfs, bitmap blocks,
db->def_blocks);
if (entry != ENTRY_BLOCK_PTR_FOR_FN (cfun))
EXECUTE_IF_SET_IN_BITMAP (idf, 0, i, bi)
- if (BASIC_BLOCK (i) != entry
- && dominated_by_p (CDI_DOMINATORS, BASIC_BLOCK (i), entry))
+ if (BASIC_BLOCK_FOR_FN (cfun, i) != entry
+ && dominated_by_p (CDI_DOMINATORS,
+ BASIC_BLOCK_FOR_FN (cfun, i), entry))
bitmap_set_bit (pruned_idf, i);
}
else
@@ -3054,7 +3056,7 @@ insert_updated_phi_nodes_for (tree var, bitmap_head *dfs, bitmap blocks,
{
edge e;
edge_iterator ei;
- basic_block bb = BASIC_BLOCK (i);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
FOR_EACH_EDGE (e, ei, bb->preds)
if (e->src->index >= 0)
@@ -3183,7 +3185,7 @@ update_ssa (unsigned update_flags)
blocks_with_phis_to_rewrite = BITMAP_ALLOC (NULL);
if (!phis_to_rewrite.exists ())
- phis_to_rewrite.create (last_basic_block + 1);
+ phis_to_rewrite.create (last_basic_block_for_fn (cfun) + 1);
blocks_to_update = BITMAP_ALLOC (NULL);
/* Ensure that the dominance information is up-to-date. */
@@ -3267,8 +3269,8 @@ update_ssa (unsigned update_flags)
/* If the caller requested PHI nodes to be added, compute
dominance frontiers. */
- dfs = XNEWVEC (bitmap_head, last_basic_block);
- FOR_EACH_BB (bb)
+ dfs = XNEWVEC (bitmap_head, last_basic_block_for_fn (cfun));
+ FOR_EACH_BB_FN (bb, cfun)
bitmap_initialize (&dfs[bb->index], &bitmap_default_obstack);
compute_dominance_frontiers (dfs);
@@ -3294,7 +3296,7 @@ update_ssa (unsigned update_flags)
insert_updated_phi_nodes_for (sym, dfs, blocks_to_update,
update_flags);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
bitmap_clear (&dfs[bb->index]);
free (dfs);
@@ -3315,7 +3317,7 @@ update_ssa (unsigned update_flags)
get_var_info (sym)->info.current_def = NULL_TREE;
/* Now start the renaming process at START_BB. */
- interesting_blocks = sbitmap_alloc (last_basic_block);
+ interesting_blocks = sbitmap_alloc (last_basic_block_for_fn (cfun));
bitmap_clear (interesting_blocks);
EXECUTE_IF_SET_IN_BITMAP (blocks_to_update, 0, i, bi)
bitmap_set_bit (interesting_blocks, i);
@@ -3338,9 +3340,10 @@ update_ssa (unsigned update_flags)
c = 0;
EXECUTE_IF_SET_IN_BITMAP (blocks_to_update, 0, i, bi)
c++;
- fprintf (dump_file, "Number of blocks in CFG: %d\n", last_basic_block);
+ fprintf (dump_file, "Number of blocks in CFG: %d\n",
+ last_basic_block_for_fn (cfun));
fprintf (dump_file, "Number of blocks to update: %d (%3.0f%%)\n",
- c, PERCENT (c, last_basic_block));
+ c, PERCENT (c, last_basic_block_for_fn (cfun)));
if (dump_flags & TDF_DETAILS)
{
diff --git a/gcc/tree-iterator.h b/gcc/tree-iterator.h
index b5217f77873..105f371feb5 100644
--- a/gcc/tree-iterator.h
+++ b/gcc/tree-iterator.h
@@ -29,10 +29,10 @@ along with GCC; see the file COPYING3. If not see
/* Iterator object for GENERIC or GIMPLE TREE statements. */
-typedef struct {
+struct tree_stmt_iterator {
struct tree_statement_list_node *ptr;
tree container;
-} tree_stmt_iterator;
+};
static inline tree_stmt_iterator
tsi_start (tree t)
diff --git a/gcc/tree-loop-distribution.c b/gcc/tree-loop-distribution.c
index abf69f42669..c16e51fb7c7 100644
--- a/gcc/tree-loop-distribution.c
+++ b/gcc/tree-loop-distribution.c
@@ -588,7 +588,7 @@ copy_loop_before (struct loop *loop)
edge preheader = loop_preheader_edge (loop);
initialize_original_copy_tables ();
- res = slpeel_tree_duplicate_loop_to_edge_cfg (loop, preheader);
+ res = slpeel_tree_duplicate_loop_to_edge_cfg (loop, NULL, preheader);
gcc_assert (res != NULL);
free_original_copy_tables ();
delete_update_ssa ();
@@ -1677,7 +1677,7 @@ tree_loop_distribution (void)
basic_block bb;
control_dependences *cd = NULL;
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
diff --git a/gcc/tree-nrv.c b/gcc/tree-nrv.c
index b42993d727b..e00463dcc60 100644
--- a/gcc/tree-nrv.c
+++ b/gcc/tree-nrv.c
@@ -144,7 +144,7 @@ tree_nrv (void)
return 0;
/* Look through each block for assignments to the RESULT_DECL. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
@@ -238,7 +238,7 @@ tree_nrv (void)
RESULT. */
data.var = found;
data.result = result;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
{
@@ -358,7 +358,7 @@ execute_return_slot_opt (void)
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
diff --git a/gcc/tree-object-size.c b/gcc/tree-object-size.c
index 5bb520fbf83..715ba6abaf2 100644
--- a/gcc/tree-object-size.c
+++ b/gcc/tree-object-size.c
@@ -1211,7 +1211,7 @@ static unsigned int
compute_object_sizes (void)
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator i;
for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
diff --git a/gcc/tree-outof-ssa.c b/gcc/tree-outof-ssa.c
index 8df3026309d..c5bba789637 100644
--- a/gcc/tree-outof-ssa.c
+++ b/gcc/tree-outof-ssa.c
@@ -835,7 +835,7 @@ eliminate_useless_phis (void)
gimple_stmt_iterator gsi;
tree result;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); )
{
@@ -893,7 +893,7 @@ rewrite_trees (var_map map ATTRIBUTE_UNUSED)
/* Search for PHIs where the destination has no partition, but one
or more arguments has a partition. This should not happen and can
create incorrect code. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
@@ -1101,7 +1101,7 @@ insert_backedge_copies (void)
mark_dfs_back_edges ();
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
/* Mark block as possibly needing calculation of UIDs. */
bb->aux = &bb->aux;
diff --git a/gcc/tree-pass.h b/gcc/tree-pass.h
index 4d570b19fc8..b7b43de4423 100644
--- a/gcc/tree-pass.h
+++ b/gcc/tree-pass.h
@@ -106,10 +106,10 @@ protected:
public:
/* A list of sub-passes to run, dependent on gate predicate. */
- struct opt_pass *sub;
+ opt_pass *sub;
/* Next in the list of passes to run, independent of gate predicate. */
- struct opt_pass *next;
+ opt_pass *next;
/* Static pass number, used as a fragment of the dump file name. */
int static_pass_number;
@@ -138,7 +138,7 @@ protected:
}
};
-struct varpool_node;
+class varpool_node;
struct cgraph_node;
struct lto_symtab_encoder_d;
@@ -171,7 +171,7 @@ public:
function body via this hook. */
unsigned int function_transform_todo_flags_start;
unsigned int (*function_transform) (struct cgraph_node *);
- void (*variable_transform) (struct varpool_node *);
+ void (*variable_transform) (varpool_node *);
protected:
ipa_opt_pass_d (const pass_data& data, gcc::context *ctxt,
@@ -183,7 +183,7 @@ protected:
void (*stmt_fixup) (struct cgraph_node *, gimple *),
unsigned int function_transform_todo_flags_start,
unsigned int (*function_transform) (struct cgraph_node *),
- void (*variable_transform) (struct varpool_node *))
+ void (*variable_transform) (varpool_node *))
: opt_pass (data, ctxt),
generate_summary (generate_summary),
write_summary (write_summary),
@@ -321,7 +321,7 @@ enum pass_positioning_ops
struct register_pass_info
{
- struct opt_pass *pass; /* New pass to register. */
+ opt_pass *pass; /* New pass to register. */
const char *reference_pass_name; /* Name of the reference pass for hooking
up the new pass. */
int ref_pass_instance_number; /* Insert the pass at the specified
@@ -583,16 +583,16 @@ extern gimple_opt_pass *make_pass_update_address_taken (gcc::context *ctxt);
extern gimple_opt_pass *make_pass_convert_switch (gcc::context *ctxt);
/* Current optimization pass. */
-extern struct opt_pass *current_pass;
+extern opt_pass *current_pass;
-extern bool execute_one_pass (struct opt_pass *);
-extern void execute_pass_list (struct opt_pass *);
-extern void execute_ipa_pass_list (struct opt_pass *);
-extern void execute_ipa_summary_passes (struct ipa_opt_pass_d *);
+extern bool execute_one_pass (opt_pass *);
+extern void execute_pass_list (opt_pass *);
+extern void execute_ipa_pass_list (opt_pass *);
+extern void execute_ipa_summary_passes (ipa_opt_pass_d *);
extern void execute_all_ipa_transforms (void);
extern void execute_all_ipa_stmt_fixups (struct cgraph_node *, gimple *);
-extern bool pass_init_dump_file (struct opt_pass *);
-extern void pass_fini_dump_file (struct opt_pass *);
+extern bool pass_init_dump_file (opt_pass *);
+extern void pass_fini_dump_file (opt_pass *);
extern const char *get_current_pass_name (void);
extern void print_current_pass (FILE *);
@@ -601,7 +601,7 @@ extern void ipa_write_summaries (void);
extern void ipa_write_optimization_summaries (struct lto_symtab_encoder_d *);
extern void ipa_read_summaries (void);
extern void ipa_read_optimization_summaries (void);
-extern void register_one_dump_file (struct opt_pass *);
+extern void register_one_dump_file (opt_pass *);
extern bool function_called_by_processed_nodes_p (void);
/* Set to true if the pass is called the first time during compilation of the
diff --git a/gcc/tree-predcom.c b/gcc/tree-predcom.c
index 0270e35885c..ab987f4049a 100644
--- a/gcc/tree-predcom.c
+++ b/gcc/tree-predcom.c
@@ -733,6 +733,9 @@ split_data_refs_to_components (struct loop *loop,
just fail. */
goto end;
}
+ /* predcom pass isn't prepared to handle calls with data references. */
+ if (is_gimple_call (DR_STMT (dr)))
+ goto end;
dr->aux = (void *) (size_t) i;
comp_father[i] = i;
comp_size[i] = 1;
diff --git a/gcc/tree-profile.c b/gcc/tree-profile.c
index 537c246c630..51e997ccd4f 100644
--- a/gcc/tree-profile.c
+++ b/gcc/tree-profile.c
@@ -637,7 +637,7 @@ tree_profiling (void)
push_cfun (DECL_STRUCT_FUNCTION (node->decl));
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
diff --git a/gcc/tree-scalar-evolution.c b/gcc/tree-scalar-evolution.c
index ada942df389..27d8158e310 100644
--- a/gcc/tree-scalar-evolution.c
+++ b/gcc/tree-scalar-evolution.c
@@ -280,6 +280,8 @@ along with GCC; see the file COPYING3. If not see
#include "tree-ssa.h"
#include "cfgloop.h"
#include "tree-chrec.h"
+#include "pointer-set.h"
+#include "tree-affine.h"
#include "tree-scalar-evolution.h"
#include "dumpfile.h"
#include "params.h"
@@ -1380,6 +1382,64 @@ follow_ssa_edge (struct loop *loop, gimple def, gimple halting_phi,
}
+/* Simplify PEELED_CHREC represented by (init_cond, arg) in LOOP.
+ Handle below case and return the corresponding POLYNOMIAL_CHREC:
+
+ # i_17 = PHI <i_13(5), 0(3)>
+ # _20 = PHI <_5(5), start_4(D)(3)>
+ ...
+ i_13 = i_17 + 1;
+ _5 = start_4(D) + i_13;
+
+ Though variable _20 appears as a PEELED_CHREC in the form of
+ (start_4, _5)_LOOP, it's a POLYNOMIAL_CHREC like {start_4, 1}_LOOP.
+
+ See PR41488. */
+
+static tree
+simplify_peeled_chrec (struct loop *loop, tree arg, tree init_cond)
+{
+ aff_tree aff1, aff2;
+ tree ev, left, right, type, step_val;
+ pointer_map_t *peeled_chrec_map = NULL;
+
+ ev = instantiate_parameters (loop, analyze_scalar_evolution (loop, arg));
+ if (ev == NULL_TREE || TREE_CODE (ev) != POLYNOMIAL_CHREC)
+ return chrec_dont_know;
+
+ left = CHREC_LEFT (ev);
+ right = CHREC_RIGHT (ev);
+ type = TREE_TYPE (left);
+ step_val = chrec_fold_plus (type, init_cond, right);
+
+ /* Transform (init, {left, right}_LOOP)_LOOP to {init, right}_LOOP
+ if "left" equals to "init + right". */
+ if (operand_equal_p (left, step_val, 0))
+ {
+ if (dump_file && (dump_flags & TDF_SCEV))
+ fprintf (dump_file, "Simplify PEELED_CHREC into POLYNOMIAL_CHREC.\n");
+
+ return build_polynomial_chrec (loop->num, init_cond, right);
+ }
+
+ /* Try harder to check if they are equal. */
+ tree_to_aff_combination_expand (left, type, &aff1, &peeled_chrec_map);
+ tree_to_aff_combination_expand (step_val, type, &aff2, &peeled_chrec_map);
+ free_affine_expand_cache (&peeled_chrec_map);
+ aff_combination_scale (&aff2, double_int_minus_one);
+ aff_combination_add (&aff1, &aff2);
+
+ /* Transform (init, {left, right}_LOOP)_LOOP to {init, right}_LOOP
+ if "left" equals to "init + right". */
+ if (aff_combination_zero_p (&aff1))
+ {
+ if (dump_file && (dump_flags & TDF_SCEV))
+ fprintf (dump_file, "Simplify PEELED_CHREC into POLYNOMIAL_CHREC.\n");
+
+ return build_polynomial_chrec (loop->num, init_cond, right);
+ }
+ return chrec_dont_know;
+}
/* Given a LOOP_PHI_NODE, this function determines the evolution
function from LOOP_PHI_NODE to LOOP_PHI_NODE in the loop. */
@@ -1392,6 +1452,7 @@ analyze_evolution_in_loop (gimple loop_phi_node,
tree evolution_function = chrec_not_analyzed_yet;
struct loop *loop = loop_containing_stmt (loop_phi_node);
basic_block bb;
+ static bool simplify_peeled_chrec_p = true;
if (dump_file && (dump_flags & TDF_SCEV))
{
@@ -1442,7 +1503,19 @@ analyze_evolution_in_loop (gimple loop_phi_node,
all the other iterations it has the value of ARG.
For the moment, PEELED_CHREC nodes are not built. */
if (res != t_true)
- ev_fn = chrec_dont_know;
+ {
+ ev_fn = chrec_dont_know;
+ /* Try to recognize POLYNOMIAL_CHREC which appears in
+ the form of PEELED_CHREC, but guard the process with
+ a bool variable to keep the analyzer from infinite
+ recurrence for real PEELED_RECs. */
+ if (simplify_peeled_chrec_p && TREE_CODE (arg) == SSA_NAME)
+ {
+ simplify_peeled_chrec_p = false;
+ ev_fn = simplify_peeled_chrec (loop, arg, init_cond);
+ simplify_peeled_chrec_p = true;
+ }
+ }
/* When there are multiple back edges of the loop (which in fact never
happens currently, but nevertheless), merge their evolutions. */
@@ -3276,7 +3349,7 @@ scev_const_prop (void)
if (number_of_loops (cfun) <= 1)
return 0;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
loop = bb->loop_father;
diff --git a/gcc/tree-scalar-evolution.h b/gcc/tree-scalar-evolution.h
index 3a656911ac7..d2ef4b16879 100644
--- a/gcc/tree-scalar-evolution.h
+++ b/gcc/tree-scalar-evolution.h
@@ -35,7 +35,7 @@ extern tree resolve_mixers (struct loop *, tree);
extern void gather_stats_on_scev_database (void);
extern unsigned int scev_const_prop (void);
extern bool expression_expensive_p (tree);
-extern bool simple_iv (struct loop *, struct loop *, tree, struct affine_iv_d *,
+extern bool simple_iv (struct loop *, struct loop *, tree, struct affine_iv *,
bool);
extern tree compute_overall_effect_of_inner_loop (struct loop *, tree);
diff --git a/gcc/tree-sra.c b/gcc/tree-sra.c
index 0890613852b..ebd42185c98 100644
--- a/gcc/tree-sra.c
+++ b/gcc/tree-sra.c
@@ -1252,7 +1252,7 @@ scan_function (void)
basic_block bb;
bool ret = false;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
@@ -3311,7 +3311,7 @@ sra_modify_function_body (void)
bool cfg_changed = false;
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi = gsi_start_bb (bb);
while (!gsi_end_p (gsi))
@@ -3793,9 +3793,9 @@ propagate_dereference_distances (void)
{
basic_block bb;
- auto_vec<basic_block> queue (last_basic_block_for_function (cfun));
+ auto_vec<basic_block> queue (last_basic_block_for_fn (cfun));
queue.quick_push (ENTRY_BLOCK_PTR_FOR_FN (cfun));
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
queue.quick_push (bb);
bb->aux = bb;
@@ -4572,7 +4572,7 @@ ipa_sra_modify_function_body (ipa_parm_adjustment_vec adjustments)
bool cfg_changed = false;
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
@@ -4811,7 +4811,7 @@ convert_callers (struct cgraph_node *node, tree old_decl,
if (!encountered_recursive_call)
return;
- FOR_EACH_BB (this_block)
+ FOR_EACH_BB_FN (this_block, cfun)
{
gimple_stmt_iterator gsi;
@@ -4970,7 +4970,7 @@ ipa_early_sra (void)
bb_dereferences = XCNEWVEC (HOST_WIDE_INT,
func_param_count
- * last_basic_block_for_function (cfun));
+ * last_basic_block_for_fn (cfun));
final_bbs = BITMAP_ALLOC (NULL);
scan_function ();
diff --git a/gcc/tree-ssa-address.h b/gcc/tree-ssa-address.h
index 803f9c82fb1..a5a67c43bb2 100644
--- a/gcc/tree-ssa-address.h
+++ b/gcc/tree-ssa-address.h
@@ -25,7 +25,7 @@ extern rtx addr_for_mem_ref (tree exp, addr_space_t as, bool really_expand);
extern void get_address_description (tree, struct mem_address *);
extern tree tree_mem_ref_addr (tree, tree);
tree create_mem_ref (gimple_stmt_iterator *, tree,
- struct affine_tree_combination *, tree, tree, tree, bool);
+ struct aff_tree *, tree, tree, tree, bool);
extern void copy_ref_info (tree, tree);
tree maybe_fold_tmr (tree);
diff --git a/gcc/tree-ssa-alias.h b/gcc/tree-ssa-alias.h
index 44485bdd041..6c54ad90162 100644
--- a/gcc/tree-ssa-alias.h
+++ b/gcc/tree-ssa-alias.h
@@ -64,7 +64,7 @@ struct GTY(()) pt_solution
/* Simplified and cached information about a memory reference tree.
Used by the alias-oracle internally and externally in alternate
interfaces. */
-typedef struct ao_ref_s
+struct ao_ref
{
/* The original full memory reference tree or NULL_TREE if that is
not available. */
@@ -90,7 +90,7 @@ typedef struct ao_ref_s
/* Whether the memory is considered a volatile access. */
bool volatile_p;
-} ao_ref;
+};
/* In tree-ssa-alias.c */
diff --git a/gcc/tree-ssa-ccp.c b/gcc/tree-ssa-ccp.c
index 473ee9227bc..97831c2eecf 100644
--- a/gcc/tree-ssa-ccp.c
+++ b/gcc/tree-ssa-ccp.c
@@ -788,7 +788,7 @@ ccp_initialize (void)
const_val = XCNEWVEC (prop_value_t, n_const_val);
/* Initialize simulation flags for PHI nodes and statements. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator i;
@@ -822,7 +822,7 @@ ccp_initialize (void)
/* Now process PHI nodes. We never clear the simulate_again flag on
phi nodes, since we do not know which edges are executable yet,
except for phi nodes for virtual operands when we do not do store ccp. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator i;
@@ -2546,7 +2546,7 @@ execute_fold_all_builtins (void)
basic_block bb;
unsigned int todoflags = 0;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator i;
for (i = gsi_start_bb (bb); !gsi_end_p (i); )
diff --git a/gcc/tree-ssa-coalesce.c b/gcc/tree-ssa-coalesce.c
index 70158d58d37..38a40787730 100644
--- a/gcc/tree-ssa-coalesce.c
+++ b/gcc/tree-ssa-coalesce.c
@@ -821,7 +821,7 @@ build_ssa_conflict_graph (tree_live_info_p liveinfo)
live = new_live_track (map);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
@@ -929,7 +929,7 @@ create_outofssa_var_map (coalesce_list_p cl, bitmap used_in_copy)
map = init_var_map (num_ssa_names);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
tree arg;
@@ -1183,7 +1183,7 @@ coalesce_partitions (var_map map, ssa_conflicts_p graph, coalesce_list_p cl,
in the coalesce list because they do not need to be sorted, and simply
consume extra memory/compilation time in large programs. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
FOR_EACH_EDGE (e, ei, bb->preds)
if (e->flags & EDGE_ABNORMAL)
diff --git a/gcc/tree-ssa-copy.c b/gcc/tree-ssa-copy.c
index 0dd5e147f3e..11daa5f59cb 100644
--- a/gcc/tree-ssa-copy.c
+++ b/gcc/tree-ssa-copy.c
@@ -469,7 +469,7 @@ init_copy_prop (void)
n_copy_of = num_ssa_names;
copy_of = XCNEWVEC (prop_value_t, n_copy_of);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator si;
int depth = bb_loop_depth (bb);
@@ -567,14 +567,28 @@ fini_copy_prop (void)
if (copy_of[i].value != var
&& TREE_CODE (copy_of[i].value) == SSA_NAME)
{
+ basic_block copy_of_bb
+ = gimple_bb (SSA_NAME_DEF_STMT (copy_of[i].value));
+ basic_block var_bb = gimple_bb (SSA_NAME_DEF_STMT (var));
if (POINTER_TYPE_P (TREE_TYPE (var))
&& SSA_NAME_PTR_INFO (var)
&& !SSA_NAME_PTR_INFO (copy_of[i].value))
- duplicate_ssa_name_ptr_info (copy_of[i].value,
- SSA_NAME_PTR_INFO (var));
+ {
+ duplicate_ssa_name_ptr_info (copy_of[i].value,
+ SSA_NAME_PTR_INFO (var));
+ /* Points-to information is cfg insensitive,
+ but alignment info might be cfg sensitive, if it
+ e.g. is derived from VRP derived non-zero bits.
+ So, do not copy alignment info if the two SSA_NAMEs
+ aren't defined in the same basic block. */
+ if (var_bb != copy_of_bb)
+ mark_ptr_info_alignment_unknown
+ (SSA_NAME_PTR_INFO (copy_of[i].value));
+ }
else if (!POINTER_TYPE_P (TREE_TYPE (var))
&& SSA_NAME_RANGE_INFO (var)
- && !SSA_NAME_RANGE_INFO (copy_of[i].value))
+ && !SSA_NAME_RANGE_INFO (copy_of[i].value)
+ && var_bb == copy_of_bb)
duplicate_ssa_name_range_info (copy_of[i].value,
SSA_NAME_RANGE_TYPE (var),
SSA_NAME_RANGE_INFO (var));
diff --git a/gcc/tree-ssa-copyrename.c b/gcc/tree-ssa-copyrename.c
index 90e070fb995..c7d514fe0ec 100644
--- a/gcc/tree-ssa-copyrename.c
+++ b/gcc/tree-ssa-copyrename.c
@@ -325,7 +325,7 @@ rename_ssa_copies (void)
map = init_var_map (num_ssa_names);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
/* Scan for real copies. */
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
@@ -341,7 +341,7 @@ rename_ssa_copies (void)
}
}
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
/* Treat PHI nodes as copies between the result and each argument. */
for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
diff --git a/gcc/tree-ssa-dce.c b/gcc/tree-ssa-dce.c
index 8fc6fce3ec2..5abef5c1283 100644
--- a/gcc/tree-ssa-dce.c
+++ b/gcc/tree-ssa-dce.c
@@ -374,7 +374,7 @@ find_obviously_necessary_stmts (bool aggressive)
gimple phi, stmt;
int flags;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
/* PHI nodes are never inherently necessary. */
for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
@@ -404,7 +404,7 @@ find_obviously_necessary_stmts (bool aggressive)
struct loop *loop;
scev_initialize ();
if (mark_irreducible_loops ())
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
edge_iterator ei;
FOR_EACH_EDGE (e, ei, bb->succs)
@@ -1325,7 +1325,7 @@ eliminate_unnecessary_stmts (void)
}
}
}
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
/* Remove dead PHI nodes. */
something_changed |= remove_dead_phis (bb);
@@ -1364,9 +1364,9 @@ tree_dce_init (bool aggressive)
if (aggressive)
{
- last_stmt_necessary = sbitmap_alloc (last_basic_block);
+ last_stmt_necessary = sbitmap_alloc (last_basic_block_for_fn (cfun));
bitmap_clear (last_stmt_necessary);
- bb_contains_live_stmts = sbitmap_alloc (last_basic_block);
+ bb_contains_live_stmts = sbitmap_alloc (last_basic_block_for_fn (cfun));
bitmap_clear (bb_contains_live_stmts);
}
@@ -1432,7 +1432,8 @@ perform_tree_ssa_dce (bool aggressive)
calculate_dominance_info (CDI_POST_DOMINATORS);
cd = new control_dependences (create_edge_list ());
- visited_control_parents = sbitmap_alloc (last_basic_block);
+ visited_control_parents =
+ sbitmap_alloc (last_basic_block_for_fn (cfun));
bitmap_clear (visited_control_parents);
mark_dfs_back_edges ();
diff --git a/gcc/tree-ssa-dom.c b/gcc/tree-ssa-dom.c
index 82005afa0c9..2bd2a860dca 100644
--- a/gcc/tree-ssa-dom.c
+++ b/gcc/tree-ssa-dom.c
@@ -795,7 +795,7 @@ free_all_edge_infos (void)
edge_iterator ei;
edge e;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
FOR_EACH_EDGE (e, ei, bb->preds)
{
@@ -866,7 +866,7 @@ tree_ssa_dominator_optimize (void)
{
gimple_stmt_iterator gsi;
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
update_stmt_if_modified (gsi_stmt (gsi));
@@ -902,7 +902,7 @@ tree_ssa_dominator_optimize (void)
iterator. */
EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
{
- basic_block bb = BASIC_BLOCK (i);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
if (bb == NULL)
continue;
while (single_succ_p (bb)
@@ -1793,7 +1793,7 @@ record_edge_info (basic_block bb)
{
int i;
int n_labels = gimple_switch_num_labels (stmt);
- tree *info = XCNEWVEC (tree, last_basic_block);
+ tree *info = XCNEWVEC (tree, last_basic_block_for_fn (cfun));
edge e;
edge_iterator ei;
diff --git a/gcc/tree-ssa-forwprop.c b/gcc/tree-ssa-forwprop.c
index 7557b17bd3a..9161a4048f8 100644
--- a/gcc/tree-ssa-forwprop.c
+++ b/gcc/tree-ssa-forwprop.c
@@ -3388,7 +3388,7 @@ ssa_forward_propagate_and_combine (void)
cfg_changed = false;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
diff --git a/gcc/tree-ssa-live.c b/gcc/tree-ssa-live.c
index 8ad5d9a9b93..a37ef85d6fb 100644
--- a/gcc/tree-ssa-live.c
+++ b/gcc/tree-ssa-live.c
@@ -673,7 +673,7 @@ clear_unused_block_pointer (void)
basic_block bb;
gimple_stmt_iterator gsi;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
unsigned i;
@@ -791,7 +791,7 @@ remove_unused_locals (void)
usedvars = BITMAP_ALLOC (NULL);
/* Walk the CFG marking all referenced symbols. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
size_t i;
@@ -856,7 +856,7 @@ remove_unused_locals (void)
ignores them, and the second pass (if there were any) tries to remove
them. */
if (have_local_clobbers)
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
@@ -960,17 +960,17 @@ new_tree_live_info (var_map map)
live = XNEW (struct tree_live_info_d);
live->map = map;
- live->num_blocks = last_basic_block;
+ live->num_blocks = last_basic_block_for_fn (cfun);
- live->livein = XNEWVEC (bitmap_head, last_basic_block);
- FOR_EACH_BB (bb)
+ live->livein = XNEWVEC (bitmap_head, last_basic_block_for_fn (cfun));
+ FOR_EACH_BB_FN (bb, cfun)
bitmap_initialize (&live->livein[bb->index], &liveness_bitmap_obstack);
- live->liveout = XNEWVEC (bitmap_head, last_basic_block);
- FOR_EACH_BB (bb)
+ live->liveout = XNEWVEC (bitmap_head, last_basic_block_for_fn (cfun));
+ FOR_EACH_BB_FN (bb, cfun)
bitmap_initialize (&live->liveout[bb->index], &liveness_bitmap_obstack);
- live->work_stack = XNEWVEC (int, last_basic_block);
+ live->work_stack = XNEWVEC (int, last_basic_block_for_fn (cfun));
live->stack_top = live->work_stack;
live->global = BITMAP_ALLOC (&liveness_bitmap_obstack);
@@ -1043,21 +1043,21 @@ live_worklist (tree_live_info_p live)
{
unsigned b;
basic_block bb;
- sbitmap visited = sbitmap_alloc (last_basic_block + 1);
+ sbitmap visited = sbitmap_alloc (last_basic_block_for_fn (cfun) + 1);
bitmap tmp = BITMAP_ALLOC (&liveness_bitmap_obstack);
bitmap_clear (visited);
/* Visit all the blocks in reverse order and propagate live on entry values
into the predecessors blocks. */
- FOR_EACH_BB_REVERSE (bb)
+ FOR_EACH_BB_REVERSE_FN (bb, cfun)
loe_visit_block (live, bb, visited, tmp);
/* Process any blocks which require further iteration. */
while (live->stack_top != live->work_stack)
{
b = *--(live->stack_top);
- loe_visit_block (live, BASIC_BLOCK (b), visited, tmp);
+ loe_visit_block (live, BASIC_BLOCK_FOR_FN (cfun, b), visited, tmp);
}
BITMAP_FREE (tmp);
@@ -1149,11 +1149,11 @@ calculate_live_on_exit (tree_live_info_p liveinfo)
edge_iterator ei;
/* live on entry calculations used liveout vectors for defs, clear them. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
bitmap_clear (&liveinfo->liveout[bb->index]);
/* Set all the live-on-exit bits for uses in PHIs. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
size_t i;
@@ -1294,7 +1294,7 @@ dump_live_info (FILE *f, tree_live_info_p live, int flag)
if ((flag & LIVEDUMP_ENTRY) && live->livein)
{
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
fprintf (f, "\nLive on entry to BB%d : ", bb->index);
EXECUTE_IF_SET_IN_BITMAP (&live->livein[bb->index], 0, i, bi)
@@ -1308,7 +1308,7 @@ dump_live_info (FILE *f, tree_live_info_p live, int flag)
if ((flag & LIVEDUMP_EXIT) && live->liveout)
{
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
fprintf (f, "\nLive on exit from BB%d : ", bb->index);
EXECUTE_IF_SET_IN_BITMAP (&live->liveout[bb->index], 0, i, bi)
diff --git a/gcc/tree-ssa-loop-im.c b/gcc/tree-ssa-loop-im.c
index b96899dc24c..dbc93a5fda3 100644
--- a/gcc/tree-ssa-loop-im.c
+++ b/gcc/tree-ssa-loop-im.c
@@ -1601,7 +1601,7 @@ analyze_memory_references (void)
loops postorder. */
i = 0;
bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (bb->loop_father != current_loops->tree_root)
bbs[i++] = bb;
n = i;
@@ -2401,12 +2401,12 @@ fill_always_executed_in_1 (struct loop *loop, sbitmap contains_call)
static void
fill_always_executed_in (void)
{
- sbitmap contains_call = sbitmap_alloc (last_basic_block);
+ sbitmap contains_call = sbitmap_alloc (last_basic_block_for_fn (cfun));
basic_block bb;
struct loop *loop;
bitmap_clear (contains_call);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
@@ -2478,7 +2478,7 @@ tree_ssa_lim_finalize (void)
free_aux_for_edges ();
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
SET_ALWAYS_EXECUTED_IN (bb, NULL);
bitmap_obstack_release (&lim_bitmap_obstack);
diff --git a/gcc/tree-ssa-loop-ivopts.c b/gcc/tree-ssa-loop-ivopts.c
index 54ecfad9210..e513d0f2928 100644
--- a/gcc/tree-ssa-loop-ivopts.c
+++ b/gcc/tree-ssa-loop-ivopts.c
@@ -1074,7 +1074,7 @@ find_bivs (struct ivopts_data *data)
static void
mark_bivs (struct ivopts_data *data)
{
- gimple phi;
+ gimple phi, def;
tree var;
struct iv *iv, *incr_iv;
struct loop *loop = data->current_loop;
@@ -1090,6 +1090,13 @@ mark_bivs (struct ivopts_data *data)
continue;
var = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
+ def = SSA_NAME_DEF_STMT (var);
+ /* Don't mark iv peeled from other one as biv. */
+ if (def
+ && gimple_code (def) == GIMPLE_PHI
+ && gimple_bb (def) == loop->header)
+ continue;
+
incr_iv = get_iv (data, var);
if (!incr_iv)
continue;
@@ -2526,11 +2533,19 @@ add_old_iv_candidates (struct ivopts_data *data, struct iv *iv)
/* Additionally record the possibility of leaving the original iv
untouched. */
def = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (data->current_loop));
- cand = add_candidate_1 (data,
- iv->base, iv->step, true, IP_ORIGINAL, NULL,
- SSA_NAME_DEF_STMT (def));
- cand->var_before = iv->ssa_name;
- cand->var_after = def;
+ /* Don't add candidate if it's from another PHI node because
+ it's an affine iv appearing in the form of PEELED_CHREC. */
+ phi = SSA_NAME_DEF_STMT (def);
+ if (gimple_code (phi) != GIMPLE_PHI)
+ {
+ cand = add_candidate_1 (data,
+ iv->base, iv->step, true, IP_ORIGINAL, NULL,
+ SSA_NAME_DEF_STMT (def));
+ cand->var_before = iv->ssa_name;
+ cand->var_after = def;
+ }
+ else
+ gcc_assert (gimple_bb (phi) == data->current_loop->header);
}
}
@@ -3013,7 +3028,7 @@ determine_common_wider_type (tree *a, tree *b)
static bool
get_computation_aff (struct loop *loop,
struct iv_use *use, struct iv_cand *cand, gimple at,
- struct affine_tree_combination *aff)
+ struct aff_tree *aff)
{
tree ubase = use->iv->base;
tree ustep = use->iv->step;
@@ -4581,7 +4596,7 @@ iv_elimination_compare_lt (struct ivopts_data *data,
struct tree_niter_desc *niter)
{
tree cand_type, a, b, mbz, nit_type = TREE_TYPE (niter->niter), offset;
- struct affine_tree_combination nit, tmpa, tmpb;
+ struct aff_tree nit, tmpa, tmpb;
enum tree_code comp;
HOST_WIDE_INT step;
diff --git a/gcc/tree-ssa-loop-manip.c b/gcc/tree-ssa-loop-manip.c
index e1d55ffbeed..ed30c7b0926 100644
--- a/gcc/tree-ssa-loop-manip.c
+++ b/gcc/tree-ssa-loop-manip.c
@@ -202,7 +202,7 @@ compute_live_loop_exits (bitmap live_exits, bitmap use_blocks,
EXECUTE_IF_SET_IN_BITMAP (use_blocks, 0, i, bi)
{
- basic_block use_bb = BASIC_BLOCK (i);
+ basic_block use_bb = BASIC_BLOCK_FOR_FN (cfun, i);
struct loop *use_loop = use_bb->loop_father;
gcc_checking_assert (def_loop != use_loop
&& ! flow_loop_nested_p (def_loop, use_loop));
@@ -325,7 +325,7 @@ add_exit_phis_var (tree var, bitmap use_blocks, bitmap *loop_exits)
EXECUTE_IF_SET_IN_BITMAP (live_exits, 0, index, bi)
{
- add_exit_phi (BASIC_BLOCK (index), var);
+ add_exit_phi (BASIC_BLOCK_FOR_FN (cfun, index), var);
}
BITMAP_FREE (live_exits);
@@ -461,9 +461,9 @@ find_uses_to_rename (bitmap changed_bbs, bitmap *use_blocks, bitmap need_phis)
if (changed_bbs)
EXECUTE_IF_SET_IN_BITMAP (changed_bbs, 0, index, bi)
- find_uses_to_rename_bb (BASIC_BLOCK (index), use_blocks, need_phis);
+ find_uses_to_rename_bb (BASIC_BLOCK_FOR_FN (cfun, index), use_blocks, need_phis);
else
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
find_uses_to_rename_bb (bb, use_blocks, need_phis);
}
@@ -602,7 +602,7 @@ verify_loop_closed_ssa (bool verify_ssa_p)
timevar_push (TV_VERIFY_LOOP_CLOSED);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
{
@@ -728,14 +728,14 @@ copy_phi_node_args (unsigned first_new_block)
{
unsigned i;
- for (i = first_new_block; i < (unsigned) last_basic_block; i++)
- BASIC_BLOCK (i)->flags |= BB_DUPLICATED;
+ for (i = first_new_block; i < (unsigned) last_basic_block_for_fn (cfun); i++)
+ BASIC_BLOCK_FOR_FN (cfun, i)->flags |= BB_DUPLICATED;
- for (i = first_new_block; i < (unsigned) last_basic_block; i++)
- add_phi_args_after_copy_bb (BASIC_BLOCK (i));
+ for (i = first_new_block; i < (unsigned) last_basic_block_for_fn (cfun); i++)
+ add_phi_args_after_copy_bb (BASIC_BLOCK_FOR_FN (cfun, i));
- for (i = first_new_block; i < (unsigned) last_basic_block; i++)
- BASIC_BLOCK (i)->flags &= ~BB_DUPLICATED;
+ for (i = first_new_block; i < (unsigned) last_basic_block_for_fn (cfun); i++)
+ BASIC_BLOCK_FOR_FN (cfun, i)->flags &= ~BB_DUPLICATED;
}
@@ -772,7 +772,7 @@ gimple_duplicate_loop_to_header_edge (struct loop *loop, edge e,
verify_loop_closed_ssa (true);
#endif
- first_new_block = last_basic_block;
+ first_new_block = last_basic_block_for_fn (cfun);
if (!duplicate_loop_to_header_edge (loop, e, ndupl, wont_exit,
orig, to_remove, flags))
return false;
diff --git a/gcc/tree-ssa-loop-niter.c b/gcc/tree-ssa-loop-niter.c
index 4c151e5bc88..5ae1d1390e1 100644
--- a/gcc/tree-ssa-loop-niter.c
+++ b/gcc/tree-ssa-loop-niter.c
@@ -171,7 +171,15 @@ determine_value_range (struct loop *loop, tree type, tree var, mpz_t off,
{
minv = wi::max (minv, minc, sgn);
maxv = wi::min (maxv, maxc, sgn);
- gcc_assert (wi::le_p (minv, maxv, sgn));
+ /* If the PHI result range are inconsistent with
+ the VAR range, give up on looking at the PHI
+ results. This can happen if VR_UNDEFINED is
+ involved. */
+ if (wi::gt_p (minv, maxv))
+ {
+ rtype = get_range_info (var, &minv, &maxv);
+ break;
+ }
}
}
}
diff --git a/gcc/tree-ssa-loop.h b/gcc/tree-ssa-loop.h
index 0eb8b742f92..2a7448b77a8 100644
--- a/gcc/tree-ssa-loop.h
+++ b/gcc/tree-ssa-loop.h
@@ -24,14 +24,14 @@ along with GCC; see the file COPYING3. If not see
/* Affine iv. */
-typedef struct affine_iv_d
+struct affine_iv
{
/* Iv = BASE + STEP * i. */
tree base, step;
/* True if this iv does not overflow. */
bool no_overflow;
-} affine_iv;
+};
/* Description of number of iterations of a loop. All the expressions inside
the structure can be evaluated at the end of the loop's preheader
diff --git a/gcc/tree-ssa-math-opts.c b/gcc/tree-ssa-math-opts.c
index d1b88d918bb..b2e24cf97f9 100644
--- a/gcc/tree-ssa-math-opts.c
+++ b/gcc/tree-ssa-math-opts.c
@@ -527,7 +527,7 @@ execute_cse_reciprocals (void)
calculate_dominance_info (CDI_POST_DOMINATORS);
#ifdef ENABLE_CHECKING
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
gcc_assert (!bb->aux);
#endif
@@ -540,7 +540,7 @@ execute_cse_reciprocals (void)
execute_cse_reciprocals_1 (NULL, name);
}
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
gimple phi;
@@ -1419,7 +1419,7 @@ execute_cse_sincos (void)
calculate_dominance_info (CDI_DOMINATORS);
memset (&sincos_stats, 0, sizeof (sincos_stats));
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
bool cleanup_eh = false;
@@ -1939,7 +1939,7 @@ execute_optimize_bswap (void)
memset (&bswap_stats, 0, sizeof (bswap_stats));
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
@@ -2785,7 +2785,7 @@ execute_optimize_widening_mul (void)
memset (&widen_mul_stats, 0, sizeof (widen_mul_stats));
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
diff --git a/gcc/tree-ssa-operands.h b/gcc/tree-ssa-operands.h
index 2d838005955..f5a779c94e4 100644
--- a/gcc/tree-ssa-operands.h
+++ b/gcc/tree-ssa-operands.h
@@ -37,7 +37,7 @@ typedef ssa_use_operand_t *use_operand_p;
struct use_optype_d
{
struct use_optype_d *next;
- struct ssa_use_operand_d use_ptr;
+ struct ssa_use_operand_t use_ptr;
};
typedef struct use_optype_d *use_optype_p;
diff --git a/gcc/tree-ssa-phiopt.c b/gcc/tree-ssa-phiopt.c
index 6f8d30e6b34..4032839836c 100644
--- a/gcc/tree-ssa-phiopt.c
+++ b/gcc/tree-ssa-phiopt.c
@@ -69,6 +69,8 @@ static bool minmax_replacement (basic_block, basic_block,
edge, edge, gimple, tree, tree);
static bool abs_replacement (basic_block, basic_block,
edge, edge, gimple, tree, tree);
+static bool neg_replacement (basic_block, basic_block,
+ edge, edge, gimple, tree, tree);
static bool cond_store_replacement (basic_block, basic_block, edge, edge,
struct pointer_set_t *);
static bool cond_if_else_store_replacement (basic_block, basic_block, basic_block);
@@ -336,6 +338,23 @@ tree_ssa_phiopt_worker (bool do_store_elim, bool do_hoist_loads)
/* Calculate the set of non-trapping memory accesses. */
nontrap = get_non_trapping ();
+ /* The replacement of conditional negation with a non-branching
+ sequence is really only a win when optimizing for speed and we
+ can avoid transformations by gimple if-conversion that result
+ in poor RTL generation.
+
+ Ideally either gimple if-conversion or the RTL expanders will
+ be improved and the code to emit branchless conditional negation
+ can be removed. */
+ bool replace_conditional_negation = false;
+ if (!do_store_elim)
+ replace_conditional_negation
+ = ((!optimize_size && optimize >= 2)
+ || (((flag_tree_loop_vectorize || cfun->has_force_vect_loops)
+ && flag_tree_loop_if_convert != 0)
+ || flag_tree_loop_if_convert == 1
+ || flag_tree_loop_if_convert_stores == 1));
+
/* Search every basic block for COND_EXPR we may be able to optimize.
We walk the blocks in order that guarantees that a block with
@@ -489,6 +508,9 @@ tree_ssa_phiopt_worker (bool do_store_elim, bool do_hoist_loads)
cfgchanged = true;
else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
cfgchanged = true;
+ else if (replace_conditional_negation
+ && neg_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
+ cfgchanged = true;
else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
cfgchanged = true;
}
@@ -1285,6 +1307,143 @@ abs_replacement (basic_block cond_bb, basic_block middle_bb,
return true;
}
+/* The function neg_replacement replaces conditional negation with
+ equivalent straight line code. Returns TRUE if replacement is done,
+ otherwise returns FALSE.
+
+ COND_BB branches around negation occuring in MIDDLE_BB.
+
+ E0 and E1 are edges out of COND_BB. E0 reaches MIDDLE_BB and
+ E1 reaches the other successor which should contain PHI with
+ arguments ARG0 and ARG1.
+
+ Assuming negation is to occur when the condition is true,
+ then the non-branching sequence is:
+
+ result = (rhs ^ -cond) + cond
+
+ Inverting the condition or its result gives us negation
+ when the original condition is false. */
+
+static bool
+neg_replacement (basic_block cond_bb, basic_block middle_bb,
+ edge e0 ATTRIBUTE_UNUSED, edge e1,
+ gimple phi, tree arg0, tree arg1)
+{
+ gimple new_stmt, cond;
+ gimple_stmt_iterator gsi;
+ gimple assign;
+ edge true_edge, false_edge;
+ tree rhs, lhs;
+ enum tree_code cond_code;
+ bool invert = false;
+
+ /* This transformation performs logical operations on the
+ incoming arguments. So force them to be integral types. */
+ if (!INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
+ return false;
+
+ /* OTHER_BLOCK must have only one executable statement which must have the
+ form arg0 = -arg1 or arg1 = -arg0. */
+
+ assign = last_and_only_stmt (middle_bb);
+ /* If we did not find the proper negation assignment, then we can not
+ optimize. */
+ if (assign == NULL)
+ return false;
+
+ /* If we got here, then we have found the only executable statement
+ in OTHER_BLOCK. If it is anything other than arg0 = -arg1 or
+ arg1 = -arg0, then we can not optimize. */
+ if (gimple_code (assign) != GIMPLE_ASSIGN)
+ return false;
+
+ lhs = gimple_assign_lhs (assign);
+
+ if (gimple_assign_rhs_code (assign) != NEGATE_EXPR)
+ return false;
+
+ rhs = gimple_assign_rhs1 (assign);
+
+ /* The assignment has to be arg0 = -arg1 or arg1 = -arg0. */
+ if (!(lhs == arg0 && rhs == arg1)
+ && !(lhs == arg1 && rhs == arg0))
+ return false;
+
+ /* The basic sequence assumes we negate when the condition is true.
+ If we need the opposite, then we will either need to invert the
+ condition or its result. */
+ extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
+ invert = false_edge->dest == middle_bb;
+
+ /* Unlike abs_replacement, we can handle arbitrary conditionals here. */
+ cond = last_stmt (cond_bb);
+ cond_code = gimple_cond_code (cond);
+
+ /* If inversion is needed, first try to invert the test since
+ that's cheapest. */
+ if (invert)
+ {
+ bool honor_nans
+ = HONOR_NANS (TYPE_MODE (TREE_TYPE (gimple_cond_lhs (cond))));
+ enum tree_code new_code = invert_tree_comparison (cond_code, honor_nans);
+
+ /* If invert_tree_comparison was successful, then use its return
+ value as the new code and note that inversion is no longer
+ needed. */
+ if (new_code != ERROR_MARK)
+ {
+ cond_code = new_code;
+ invert = false;
+ }
+ }
+
+ tree cond_val = make_ssa_name (boolean_type_node, NULL);
+ new_stmt = gimple_build_assign_with_ops (cond_code, cond_val,
+ gimple_cond_lhs (cond),
+ gimple_cond_rhs (cond));
+ gsi = gsi_last_bb (cond_bb);
+ gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
+
+ /* If we still need inversion, then invert the result of the
+ condition. */
+ if (invert)
+ {
+ tree tmp = make_ssa_name (boolean_type_node, NULL);
+ new_stmt = gimple_build_assign_with_ops (BIT_XOR_EXPR, tmp,
+ cond_val, boolean_true_node);
+ gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
+ cond_val = tmp;
+ }
+
+ /* Get the condition in the right type so that we can perform
+ logical and arithmetic operations on it. */
+ tree cond_val_converted = make_ssa_name (TREE_TYPE (rhs), NULL);
+ new_stmt = gimple_build_assign_with_ops (NOP_EXPR, cond_val_converted,
+ cond_val, NULL_TREE);
+ gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
+
+ tree neg_cond_val_converted = make_ssa_name (TREE_TYPE (rhs), NULL);
+ new_stmt = gimple_build_assign_with_ops (NEGATE_EXPR, neg_cond_val_converted,
+ cond_val_converted, NULL_TREE);
+ gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
+
+ tree tmp = make_ssa_name (TREE_TYPE (rhs), NULL);
+ new_stmt = gimple_build_assign_with_ops (BIT_XOR_EXPR, tmp,
+ rhs, neg_cond_val_converted);
+ gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
+
+ tree new_lhs = make_ssa_name (TREE_TYPE (rhs), NULL);
+ new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, new_lhs,
+ tmp, cond_val_converted);
+ gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
+
+ replace_phi_edge_with_variable (cond_bb, e1, phi, new_lhs);
+
+ /* Note that we optimized this PHI. */
+ return true;
+}
+
/* Auxiliary functions to determine the set of memory accesses which
can't trap because they are preceded by accesses to the same memory
portion. We do that for MEM_REFs, so we only need to track
@@ -1706,7 +1865,7 @@ cond_if_else_store_replacement (basic_block then_bb, basic_block else_bb,
== chrec_dont_know)
|| !then_datarefs.length ()
|| (find_data_references_in_bb (NULL, else_bb, &else_datarefs)
- == chrec_dont_know)
+ == chrec_dont_know)
|| !else_datarefs.length ())
{
free_data_refs (then_datarefs);
@@ -1723,6 +1882,8 @@ cond_if_else_store_replacement (basic_block then_bb, basic_block else_bb,
then_store = DR_STMT (then_dr);
then_lhs = gimple_get_lhs (then_store);
+ if (then_lhs == NULL_TREE)
+ continue;
found = false;
FOR_EACH_VEC_ELT (else_datarefs, j, else_dr)
@@ -1732,6 +1893,8 @@ cond_if_else_store_replacement (basic_block then_bb, basic_block else_bb,
else_store = DR_STMT (else_dr);
else_lhs = gimple_get_lhs (else_store);
+ if (else_lhs == NULL_TREE)
+ continue;
if (operand_equal_p (then_lhs, else_lhs, 0))
{
diff --git a/gcc/tree-ssa-pre.c b/gcc/tree-ssa-pre.c
index ff616f36a32..21ef98c4695 100644
--- a/gcc/tree-ssa-pre.c
+++ b/gcc/tree-ssa-pre.c
@@ -2208,7 +2208,6 @@ compute_antic_aux (basic_block block, bool block_has_abnormal_pred_edge)
BB_VISITED (block) = 0;
BB_DEFERRED (block) = 1;
changed = true;
- worklist.release ();
goto maybe_dump_sets;
}
@@ -2442,10 +2441,10 @@ compute_antic (void)
/* If any predecessor edges are abnormal, we punt, so antic_in is empty.
We pre-build the map of blocks with incoming abnormal edges here. */
- has_abnormal_preds = sbitmap_alloc (last_basic_block);
+ has_abnormal_preds = sbitmap_alloc (last_basic_block_for_fn (cfun));
bitmap_clear (has_abnormal_preds);
- FOR_ALL_BB (block)
+ FOR_ALL_BB_FN (block, cfun)
{
edge_iterator ei;
edge e;
@@ -2471,7 +2470,7 @@ compute_antic (void)
/* At the exit block we anticipate nothing. */
BB_VISITED (EXIT_BLOCK_PTR_FOR_FN (cfun)) = 1;
- changed_blocks = sbitmap_alloc (last_basic_block + 1);
+ changed_blocks = sbitmap_alloc (last_basic_block_for_fn (cfun) + 1);
bitmap_ones (changed_blocks);
while (changed)
{
@@ -2487,7 +2486,7 @@ compute_antic (void)
{
if (bitmap_bit_p (changed_blocks, postorder[i]))
{
- basic_block block = BASIC_BLOCK (postorder[i]);
+ basic_block block = BASIC_BLOCK_FOR_FN (cfun, postorder[i]);
changed |= compute_antic_aux (block,
bitmap_bit_p (has_abnormal_preds,
block->index));
@@ -2516,7 +2515,7 @@ compute_antic (void)
{
if (bitmap_bit_p (changed_blocks, postorder[i]))
{
- basic_block block = BASIC_BLOCK (postorder[i]);
+ basic_block block = BASIC_BLOCK_FOR_FN (cfun, postorder[i]);
changed
|= compute_partial_antic_aux (block,
bitmap_bit_p (has_abnormal_preds,
@@ -3660,7 +3659,7 @@ insert (void)
basic_block bb;
int num_iterations = 0;
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
NEW_SETS (bb) = bitmap_set_new ();
while (new_stuff)
@@ -3673,7 +3672,7 @@ insert (void)
/* Clear the NEW sets before the next iteration. We have already
fully propagated its contents. */
if (new_stuff)
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
bitmap_set_free (NEW_SETS (bb));
}
statistics_histogram_event (cfun, "insert iterations", num_iterations);
@@ -4672,7 +4671,7 @@ init_pre (void)
sizeof (struct bitmap_set), 30);
pre_expr_pool = create_alloc_pool ("pre_expr nodes",
sizeof (struct pre_expr_d), 30);
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
{
EXP_GEN (bb) = bitmap_set_new ();
PHI_GEN (bb) = bitmap_set_new ();
diff --git a/gcc/tree-ssa-propagate.c b/gcc/tree-ssa-propagate.c
index 783b6513e30..fc8041fd1dd 100644
--- a/gcc/tree-ssa-propagate.c
+++ b/gcc/tree-ssa-propagate.c
@@ -495,10 +495,10 @@ ssa_prop_init (void)
vec_alloc (interesting_ssa_edges, 20);
vec_alloc (varying_ssa_edges, 20);
- executable_blocks = sbitmap_alloc (last_basic_block);
+ executable_blocks = sbitmap_alloc (last_basic_block_for_fn (cfun));
bitmap_clear (executable_blocks);
- bb_in_list = sbitmap_alloc (last_basic_block);
+ bb_in_list = sbitmap_alloc (last_basic_block_for_fn (cfun));
bitmap_clear (bb_in_list);
if (dump_file && (dump_flags & TDF_DETAILS))
@@ -509,7 +509,7 @@ ssa_prop_init (void)
/* Initially assume that every edge in the CFG is not executable.
(including the edges coming out of the entry block). */
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
{
gimple_stmt_iterator si;
@@ -1097,7 +1097,7 @@ substitute_and_fold (ssa_prop_get_value_fn get_value_fn,
}
/* Propagate into all uses and fold. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator i;
diff --git a/gcc/tree-ssa-reassoc.c b/gcc/tree-ssa-reassoc.c
index 3e7839fd4cc..48be01f881b 100644
--- a/gcc/tree-ssa-reassoc.c
+++ b/gcc/tree-ssa-reassoc.c
@@ -2028,7 +2028,8 @@ update_range_test (struct range_entry *range, struct range_entry *otherrange,
{
operand_entry_t oe = (*ops)[range->idx];
tree op = oe->op;
- gimple stmt = op ? SSA_NAME_DEF_STMT (op) : last_stmt (BASIC_BLOCK (oe->id));
+ gimple stmt = op ? SSA_NAME_DEF_STMT (op) :
+ last_stmt (BASIC_BLOCK_FOR_FN (cfun, oe->id));
location_t loc = gimple_location (stmt);
tree optype = op ? TREE_TYPE (op) : boolean_type_node;
tree tem = build_range_check (loc, optype, exp, in_p, low, high);
@@ -2072,9 +2073,19 @@ update_range_test (struct range_entry *range, struct range_entry *otherrange,
tem = fold_convert_loc (loc, optype, tem);
gsi = gsi_for_stmt (stmt);
- tem = force_gimple_operand_gsi (&gsi, tem, true, NULL_TREE, true,
- GSI_SAME_STMT);
- for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
+ /* In rare cases range->exp can be equal to lhs of stmt.
+ In that case we have to insert after the stmt rather then before
+ it. */
+ if (op == range->exp)
+ tem = force_gimple_operand_gsi (&gsi, tem, true, NULL_TREE, false,
+ GSI_CONTINUE_LINKING);
+ else
+ {
+ tem = force_gimple_operand_gsi (&gsi, tem, true, NULL_TREE, true,
+ GSI_SAME_STMT);
+ gsi_prev (&gsi);
+ }
+ for (; !gsi_end_p (gsi); gsi_prev (&gsi))
if (gimple_uid (gsi_stmt (gsi)))
break;
else
@@ -2281,7 +2292,8 @@ optimize_range_tests (enum tree_code opcode,
oe = (*ops)[i];
ranges[i].idx = i;
init_range_entry (ranges + i, oe->op,
- oe->op ? NULL : last_stmt (BASIC_BLOCK (oe->id)));
+ oe->op ? NULL :
+ last_stmt (BASIC_BLOCK_FOR_FN (cfun, oe->id)));
/* For | invert it now, we will invert it again before emitting
the optimized expression. */
if (opcode == BIT_IOR_EXPR
@@ -4561,7 +4573,7 @@ init_reassoc (void)
/* Reverse RPO (Reverse Post Order) will give us something where
deeper loops come later. */
pre_and_rev_post_order_compute (NULL, bbs, false);
- bb_rank = XCNEWVEC (long, last_basic_block);
+ bb_rank = XCNEWVEC (long, last_basic_block_for_fn (cfun));
operand_rank = pointer_map_create ();
/* Give each default definition a distinct rank. This includes
diff --git a/gcc/tree-ssa-sccvn.c b/gcc/tree-ssa-sccvn.c
index 454d355ac28..081a5dbbd93 100644
--- a/gcc/tree-ssa-sccvn.c
+++ b/gcc/tree-ssa-sccvn.c
@@ -3982,7 +3982,7 @@ init_scc_vn (void)
shared_lookup_phiargs.create (0);
shared_lookup_references.create (0);
- rpo_numbers = XNEWVEC (int, last_basic_block);
+ rpo_numbers = XNEWVEC (int, last_basic_block_for_fn (cfun));
rpo_numbers_temp =
XNEWVEC (int, n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS);
pre_and_rev_post_order_compute (NULL, rpo_numbers_temp, false);
diff --git a/gcc/tree-ssa-sink.c b/gcc/tree-ssa-sink.c
index 947a58a13d1..ecc1f6b91df 100644
--- a/gcc/tree-ssa-sink.c
+++ b/gcc/tree-ssa-sink.c
@@ -182,10 +182,10 @@ nearest_common_dominator_of_uses (gimple stmt, bool *debug_stmts)
bitmap_set_bit (blocks, useblock->index);
}
}
- commondom = BASIC_BLOCK (bitmap_first_set_bit (blocks));
+ commondom = BASIC_BLOCK_FOR_FN (cfun, bitmap_first_set_bit (blocks));
EXECUTE_IF_SET_IN_BITMAP (blocks, 0, j, bi)
commondom = nearest_common_dominator (CDI_DOMINATORS, commondom,
- BASIC_BLOCK (j));
+ BASIC_BLOCK_FOR_FN (cfun, j));
BITMAP_FREE (blocks);
return commondom;
}
diff --git a/gcc/tree-ssa-structalias.c b/gcc/tree-ssa-structalias.c
index b08a20e2694..bfe76d6cb0c 100644
--- a/gcc/tree-ssa-structalias.c
+++ b/gcc/tree-ssa-structalias.c
@@ -892,14 +892,16 @@ constraint_vec_find (vec<constraint_t> vec,
return found;
}
-/* Union two constraint vectors, TO and FROM. Put the result in TO. */
+/* Union two constraint vectors, TO and FROM. Put the result in TO.
+ Returns true of TO set is changed. */
-static void
+static bool
constraint_set_union (vec<constraint_t> *to,
vec<constraint_t> *from)
{
int i;
constraint_t c;
+ bool any_change = false;
FOR_EACH_VEC_ELT (*from, i, c)
{
@@ -907,18 +909,25 @@ constraint_set_union (vec<constraint_t> *to,
{
unsigned int place = to->lower_bound (c, constraint_less);
to->safe_insert (place, c);
+ any_change = true;
}
}
+ return any_change;
}
/* Expands the solution in SET to all sub-fields of variables included. */
-static void
-solution_set_expand (bitmap set)
+static bitmap
+solution_set_expand (bitmap set, bitmap *expanded)
{
bitmap_iterator bi;
unsigned j;
+ if (*expanded)
+ return *expanded;
+
+ *expanded = BITMAP_ALLOC (&iteration_obstack);
+
/* In a first pass expand to the head of the variables we need to
add all sub-fields off. This avoids quadratic behavior. */
EXECUTE_IF_SET_IN_BITMAP (set, 0, j, bi)
@@ -927,55 +936,52 @@ solution_set_expand (bitmap set)
if (v->is_artificial_var
|| v->is_full_var)
continue;
- bitmap_set_bit (set, v->head);
+ bitmap_set_bit (*expanded, v->head);
}
/* In the second pass now expand all head variables with subfields. */
- EXECUTE_IF_SET_IN_BITMAP (set, 0, j, bi)
+ EXECUTE_IF_SET_IN_BITMAP (*expanded, 0, j, bi)
{
varinfo_t v = get_varinfo (j);
- if (v->is_artificial_var
- || v->is_full_var
- || v->head != j)
+ if (v->head != j)
continue;
for (v = vi_next (v); v != NULL; v = vi_next (v))
- bitmap_set_bit (set, v->id);
+ bitmap_set_bit (*expanded, v->id);
}
+
+ /* And finally set the rest of the bits from SET. */
+ bitmap_ior_into (*expanded, set);
+
+ return *expanded;
}
-/* Union solution sets TO and FROM, and add INC to each member of FROM in the
+/* Union solution sets TO and DELTA, and add INC to each member of DELTA in the
process. */
static bool
-set_union_with_increment (bitmap to, bitmap from, HOST_WIDE_INT inc)
+set_union_with_increment (bitmap to, bitmap delta, HOST_WIDE_INT inc,
+ bitmap *expanded_delta)
{
bool changed = false;
bitmap_iterator bi;
unsigned int i;
- /* If the solution of FROM contains anything it is good enough to transfer
+ /* If the solution of DELTA contains anything it is good enough to transfer
this to TO. */
- if (bitmap_bit_p (from, anything_id))
+ if (bitmap_bit_p (delta, anything_id))
return bitmap_set_bit (to, anything_id);
- /* For zero offset simply union the solution into the destination. */
- if (inc == 0)
- return bitmap_ior_into (to, from);
-
/* If the offset is unknown we have to expand the solution to
all subfields. */
if (inc == UNKNOWN_OFFSET)
{
- bitmap tmp = BITMAP_ALLOC (&iteration_obstack);
- bitmap_copy (tmp, from);
- solution_set_expand (tmp);
- changed |= bitmap_ior_into (to, tmp);
- BITMAP_FREE (tmp);
+ delta = solution_set_expand (delta, expanded_delta);
+ changed |= bitmap_ior_into (to, delta);
return changed;
}
/* For non-zero offset union the offsetted solution into the destination. */
- EXECUTE_IF_SET_IN_BITMAP (from, 0, i, bi)
+ EXECUTE_IF_SET_IN_BITMAP (delta, 0, i, bi)
{
varinfo_t vi = get_varinfo (i);
@@ -1028,22 +1034,24 @@ insert_into_complex (constraint_graph_t graph,
/* Condense two variable nodes into a single variable node, by moving
- all associated info from SRC to TO. */
+ all associated info from FROM to TO. Returns true if TO node's
+ constraint set changes after the merge. */
-static void
+static bool
merge_node_constraints (constraint_graph_t graph, unsigned int to,
unsigned int from)
{
unsigned int i;
constraint_t c;
+ bool any_change = false;
gcc_checking_assert (find (from) == to);
/* Move all complex constraints from src node into to node */
FOR_EACH_VEC_ELT (graph->complex[from], i, c)
{
- /* In complex constraints for node src, we may have either
- a = *src, and *src = a, or an offseted constraint which are
+ /* In complex constraints for node FROM, we may have either
+ a = *FROM, and *FROM = a, or an offseted constraint which are
always added to the rhs node's constraints. */
if (c->rhs.type == DEREF)
@@ -1052,9 +1060,12 @@ merge_node_constraints (constraint_graph_t graph, unsigned int to,
c->lhs.var = to;
else
c->rhs.var = to;
+
}
- constraint_set_union (&graph->complex[to], &graph->complex[from]);
+ any_change = constraint_set_union (&graph->complex[to],
+ &graph->complex[from]);
graph->complex[from].release ();
+ return any_change;
}
@@ -1472,7 +1483,11 @@ unify_nodes (constraint_graph_t graph, unsigned int to, unsigned int from,
stats.unified_vars_static++;
merge_graph_nodes (graph, to, from);
- merge_node_constraints (graph, to, from);
+ if (merge_node_constraints (graph, to, from))
+ {
+ if (update_changed)
+ bitmap_set_bit (changed, to);
+ }
/* Mark TO as changed if FROM was changed. If TO was already marked
as changed, decrease the changed count. */
@@ -1567,7 +1582,7 @@ topo_visit (constraint_graph_t graph, struct topo_info *ti,
static void
do_sd_constraint (constraint_graph_t graph, constraint_t c,
- bitmap delta)
+ bitmap delta, bitmap *expanded_delta)
{
unsigned int lhs = c->lhs.var;
bool flag = false;
@@ -1592,7 +1607,7 @@ do_sd_constraint (constraint_graph_t graph, constraint_t c,
dereferenced at all valid offsets. */
if (roffset == UNKNOWN_OFFSET)
{
- solution_set_expand (delta);
+ delta = solution_set_expand (delta, expanded_delta);
/* No further offset processing is necessary. */
roffset = 0;
}
@@ -1654,7 +1669,7 @@ done:
as the starting solution for x. */
static void
-do_ds_constraint (constraint_t c, bitmap delta)
+do_ds_constraint (constraint_t c, bitmap delta, bitmap *expanded_delta)
{
unsigned int rhs = c->rhs.var;
bitmap sol = get_varinfo (rhs)->solution;
@@ -1690,7 +1705,7 @@ do_ds_constraint (constraint_t c, bitmap delta)
dereferenced at all valid offsets. */
if (loff == UNKNOWN_OFFSET)
{
- solution_set_expand (delta);
+ delta = solution_set_expand (delta, expanded_delta);
loff = 0;
}
@@ -1752,7 +1767,8 @@ do_ds_constraint (constraint_t c, bitmap delta)
constraint (IE *x = &y, x = *y, *x = y, and x = y with offsets involved). */
static void
-do_complex_constraint (constraint_graph_t graph, constraint_t c, bitmap delta)
+do_complex_constraint (constraint_graph_t graph, constraint_t c, bitmap delta,
+ bitmap *expanded_delta)
{
if (c->lhs.type == DEREF)
{
@@ -1763,26 +1779,26 @@ do_complex_constraint (constraint_graph_t graph, constraint_t c, bitmap delta)
else
{
/* *x = y */
- do_ds_constraint (c, delta);
+ do_ds_constraint (c, delta, expanded_delta);
}
}
else if (c->rhs.type == DEREF)
{
/* x = *y */
if (!(get_varinfo (c->lhs.var)->is_special_var))
- do_sd_constraint (graph, c, delta);
+ do_sd_constraint (graph, c, delta, expanded_delta);
}
else
{
bitmap tmp;
- bitmap solution;
bool flag = false;
- gcc_checking_assert (c->rhs.type == SCALAR && c->lhs.type == SCALAR);
- solution = get_varinfo (c->rhs.var)->solution;
+ gcc_checking_assert (c->rhs.type == SCALAR && c->lhs.type == SCALAR
+ && c->rhs.offset != 0 && c->lhs.offset == 0);
tmp = get_varinfo (c->lhs.var)->solution;
- flag = set_union_with_increment (tmp, solution, c->rhs.offset);
+ flag = set_union_with_increment (tmp, delta, c->rhs.offset,
+ expanded_delta);
if (flag)
bitmap_set_bit (changed, c->lhs.var);
@@ -2701,6 +2717,7 @@ solve_graph (constraint_graph_t graph)
solution_empty = bitmap_empty_p (solution);
/* Process the complex constraints */
+ bitmap expanded_pts = NULL;
FOR_EACH_VEC_ELT (complex, j, c)
{
/* XXX: This is going to unsort the constraints in
@@ -2715,8 +2732,9 @@ solve_graph (constraint_graph_t graph)
is a constraint where the lhs side is receiving
some set from elsewhere. */
if (!solution_empty || c->lhs.type != DEREF)
- do_complex_constraint (graph, c, pts);
+ do_complex_constraint (graph, c, pts, &expanded_pts);
}
+ BITMAP_FREE (expanded_pts);
solution_empty = bitmap_empty_p (solution);
@@ -2900,7 +2918,7 @@ get_constraint_for_ssa_var (tree t, vec<ce_s> *results, bool address_p)
if (TREE_CODE (t) == VAR_DECL
&& (TREE_STATIC (t) || DECL_EXTERNAL (t)))
{
- struct varpool_node *node = varpool_get_node (t);
+ varpool_node *node = varpool_get_node (t);
if (node && node->alias && node->analyzed)
{
node = varpool_variable_node (node, NULL);
@@ -3698,15 +3716,6 @@ make_transitive_closure_constraints (varinfo_t vi)
lhs.offset = 0;
rhs.type = DEREF;
rhs.var = vi->id;
- rhs.offset = 0;
- process_constraint (new_constraint (lhs, rhs));
-
- /* VAR = VAR + UNKNOWN; */
- lhs.type = SCALAR;
- lhs.var = vi->id;
- lhs.offset = 0;
- rhs.type = SCALAR;
- rhs.var = vi->id;
rhs.offset = UNKNOWN_OFFSET;
process_constraint (new_constraint (lhs, rhs));
}
@@ -5732,7 +5741,7 @@ create_variable_info_for (tree decl, const char *name)
for it. */
else
{
- struct varpool_node *vnode = varpool_get_node (decl);
+ varpool_node *vnode = varpool_get_node (decl);
/* For escaped variables initialize them from nonlocal. */
if (!varpool_all_refs_explicit_p (vnode))
@@ -6764,7 +6773,7 @@ compute_points_to_sets (void)
intra_create_variable_infos ();
/* Now walk all statements and build the constraint set. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
@@ -6811,7 +6820,7 @@ compute_points_to_sets (void)
}
/* Compute the call-used/clobbered sets. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
@@ -7065,7 +7074,7 @@ static unsigned int
ipa_pta_execute (void)
{
struct cgraph_node *node;
- struct varpool_node *var;
+ varpool_node *var;
int from;
in_ipa_mode = 1;
diff --git a/gcc/tree-ssa-tail-merge.c b/gcc/tree-ssa-tail-merge.c
index d722a9bedfb..4e05246762d 100644
--- a/gcc/tree-ssa-tail-merge.c
+++ b/gcc/tree-ssa-tail-merge.c
@@ -454,7 +454,7 @@ same_succ_hash (const_same_succ e)
int flags;
unsigned int i;
unsigned int first = bitmap_first_set_bit (e->bbs);
- basic_block bb = BASIC_BLOCK (first);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, first);
int size = 0;
gimple_stmt_iterator gsi;
gimple stmt;
@@ -502,8 +502,8 @@ same_succ_hash (const_same_succ e)
EXECUTE_IF_SET_IN_BITMAP (e->succs, 0, s, bs)
{
- int n = find_edge (bb, BASIC_BLOCK (s))->dest_idx;
- for (gsi = gsi_start_phis (BASIC_BLOCK (s)); !gsi_end_p (gsi);
+ int n = find_edge (bb, BASIC_BLOCK_FOR_FN (cfun, s))->dest_idx;
+ for (gsi = gsi_start_phis (BASIC_BLOCK_FOR_FN (cfun, s)); !gsi_end_p (gsi);
gsi_next (&gsi))
{
gimple phi = gsi_stmt (gsi);
@@ -572,8 +572,8 @@ same_succ_def::equal (const value_type *e1, const compare_type *e2)
first1 = bitmap_first_set_bit (e1->bbs);
first2 = bitmap_first_set_bit (e2->bbs);
- bb1 = BASIC_BLOCK (first1);
- bb2 = BASIC_BLOCK (first2);
+ bb1 = BASIC_BLOCK_FOR_FN (cfun, first1);
+ bb2 = BASIC_BLOCK_FOR_FN (cfun, first2);
if (BB_SIZE (bb1) != BB_SIZE (bb2))
return 0;
@@ -754,7 +754,7 @@ find_same_succ (void)
same_succ same = same_succ_alloc ();
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
find_same_succ_bb (bb, &same);
if (same == NULL)
@@ -771,7 +771,7 @@ init_worklist (void)
{
alloc_aux_for_blocks (sizeof (struct aux_bb_info));
same_succ_htab.create (n_basic_blocks_for_fn (cfun));
- same_succ_edge_flags = XCNEWVEC (int, last_basic_block);
+ same_succ_edge_flags = XCNEWVEC (int, last_basic_block_for_fn (cfun));
deleted_bbs = BITMAP_ALLOC (NULL);
deleted_bb_preds = BITMAP_ALLOC (NULL);
worklist.create (n_basic_blocks_for_fn (cfun));
@@ -834,7 +834,7 @@ same_succ_flush_bbs (bitmap bbs)
bitmap_iterator bi;
EXECUTE_IF_SET_IN_BITMAP (bbs, 0, i, bi)
- same_succ_flush_bb (BASIC_BLOCK (i));
+ same_succ_flush_bb (BASIC_BLOCK_FOR_FN (cfun, i));
}
/* Release the last vdef in BB, either normal or phi result. */
@@ -887,7 +887,7 @@ update_worklist (void)
same = same_succ_alloc ();
EXECUTE_IF_SET_IN_BITMAP (deleted_bb_preds, 0, i, bi)
{
- bb = BASIC_BLOCK (i);
+ bb = BASIC_BLOCK_FOR_FN (cfun, i);
gcc_assert (bb != NULL);
find_same_succ_bb (bb, &same);
if (same == NULL)
@@ -1015,7 +1015,7 @@ reset_cluster_vectors (void)
for (i = 0; i < all_clusters.length (); ++i)
delete_cluster (all_clusters[i]);
all_clusters.truncate (0);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
BB_CLUSTER (bb) = NULL;
}
@@ -1075,7 +1075,7 @@ set_cluster (basic_block bb1, basic_block bb2)
merge = BB_CLUSTER (bb1);
merge_clusters (merge, old);
EXECUTE_IF_SET_IN_BITMAP (old->bbs, 0, i, bi)
- BB_CLUSTER (BASIC_BLOCK (i)) = merge;
+ BB_CLUSTER (BASIC_BLOCK_FOR_FN (cfun, i)) = merge;
all_clusters[old->index] = NULL;
update_rep_bb (merge, old->rep_bb);
delete_cluster (old);
@@ -1320,7 +1320,7 @@ same_phi_alternatives (same_succ same_succ, basic_block bb1, basic_block bb2)
EXECUTE_IF_SET_IN_BITMAP (same_succ->succs, 0, s, bs)
{
- succ = BASIC_BLOCK (s);
+ succ = BASIC_BLOCK_FOR_FN (cfun, s);
e1 = find_edge (bb1, succ);
e2 = find_edge (bb2, succ);
if (e1->flags & EDGE_COMPLEX
@@ -1406,7 +1406,7 @@ find_clusters_1 (same_succ same_succ)
EXECUTE_IF_SET_IN_BITMAP (same_succ->bbs, 0, i, bi)
{
- bb1 = BASIC_BLOCK (i);
+ bb1 = BASIC_BLOCK_FOR_FN (cfun, i);
/* TODO: handle blocks with phi-nodes. We'll have to find corresponding
phi-nodes in bb1 and bb2, with the same alternatives for the same
@@ -1417,7 +1417,7 @@ find_clusters_1 (same_succ same_succ)
nr_comparisons = 0;
EXECUTE_IF_SET_IN_BITMAP (same_succ->bbs, i + 1, j, bj)
{
- bb2 = BASIC_BLOCK (j);
+ bb2 = BASIC_BLOCK_FOR_FN (cfun, j);
if (bb_has_non_vop_phi (bb2))
continue;
@@ -1573,7 +1573,7 @@ apply_clusters (void)
bitmap_clear_bit (c->bbs, bb2->index);
EXECUTE_IF_SET_IN_BITMAP (c->bbs, 0, j, bj)
{
- bb1 = BASIC_BLOCK (j);
+ bb1 = BASIC_BLOCK_FOR_FN (cfun, j);
bitmap_clear_bit (update_bbs, bb1->index);
replace_block_by (bb1, bb2);
@@ -1633,7 +1633,7 @@ update_debug_stmts (void)
gimple stmt;
gimple_stmt_iterator gsi;
- bb = BASIC_BLOCK (i);
+ bb = BASIC_BLOCK_FOR_FN (cfun, i);
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
stmt = gsi_stmt (gsi);
diff --git a/gcc/tree-ssa-ter.c b/gcc/tree-ssa-ter.c
index fa6a248c5ac..22ae47b766b 100644
--- a/gcc/tree-ssa-ter.c
+++ b/gcc/tree-ssa-ter.c
@@ -683,7 +683,7 @@ find_replaceable_exprs (var_map map)
bitmap_obstack_initialize (&ter_bitmap_obstack);
table = new_temp_expr_table (map);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
find_replaceable_in_bb (table, bb);
gcc_checking_assert (bitmap_empty_p (table->partition_in_use));
diff --git a/gcc/tree-ssa-threadupdate.c b/gcc/tree-ssa-threadupdate.c
index ad727a1afec..af8fd850835 100644
--- a/gcc/tree-ssa-threadupdate.c
+++ b/gcc/tree-ssa-threadupdate.c
@@ -1412,7 +1412,7 @@ mark_threaded_blocks (bitmap threaded_blocks)
{
EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
{
- bb = BASIC_BLOCK (i);
+ bb = BASIC_BLOCK_FOR_FN (cfun, i);
if (EDGE_COUNT (bb->preds) > 1
&& !redirection_block_p (bb))
{
@@ -1442,51 +1442,39 @@ mark_threaded_blocks (bitmap threaded_blocks)
by trimming off the end of the jump thread path. */
EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
{
- basic_block bb = BASIC_BLOCK (i);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
FOR_EACH_EDGE (e, ei, bb->preds)
{
if (e->aux)
{
vec<jump_thread_edge *> *path = THREAD_PATH (e);
- /* Basically we're looking for a situation where we can see
- 3 or more loop structures on a jump threading path. */
-
- struct loop *first_father = (*path)[0]->e->src->loop_father;
- struct loop *second_father = NULL;
- for (unsigned int i = 0; i < path->length (); i++)
+ for (unsigned int i = 0, crossed_headers = 0;
+ i < path->length ();
+ i++)
{
- /* See if this is a loop father we have not seen before. */
- if ((*path)[i]->e->dest->loop_father != first_father
- && (*path)[i]->e->dest->loop_father != second_father)
+ basic_block dest = (*path)[i]->e->dest;
+ crossed_headers += (dest == dest->loop_father->header);
+ if (crossed_headers > 1)
{
- /* We've already seen two loop fathers, so we
- need to trim this jump threading path. */
- if (second_father != NULL)
- {
- /* Trim from entry I onwards. */
- for (unsigned int j = i; j < path->length (); j++)
- delete (*path)[j];
- path->truncate (i);
-
- /* Now that we've truncated the path, make sure
- what's left is still valid. We need at least
- two edges on the path and the last edge can not
- be a joiner. This should never happen, but let's
- be safe. */
- if (path->length () < 2
- || (path->last ()->type
- == EDGE_COPY_SRC_JOINER_BLOCK))
- {
- delete_jump_thread_path (path);
- e->aux = NULL;
- }
- break;
- }
- else
+ /* Trim from entry I onwards. */
+ for (unsigned int j = i; j < path->length (); j++)
+ delete (*path)[j];
+ path->truncate (i);
+
+ /* Now that we've truncated the path, make sure
+ what's left is still valid. We need at least
+ two edges on the path and the last edge can not
+ be a joiner. This should never happen, but let's
+ be safe. */
+ if (path->length () < 2
+ || (path->last ()->type
+ == EDGE_COPY_SRC_JOINER_BLOCK))
{
- second_father = (*path)[i]->e->dest->loop_father;
+ delete_jump_thread_path (path);
+ e->aux = NULL;
}
+ break;
}
}
}
@@ -1512,7 +1500,7 @@ mark_threaded_blocks (bitmap threaded_blocks)
we have to iterate on those rather than the threaded_edges vector. */
EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
{
- bb = BASIC_BLOCK (i);
+ bb = BASIC_BLOCK_FOR_FN (cfun, i);
FOR_EACH_EDGE (e, ei, bb->preds)
{
if (e->aux)
@@ -1592,7 +1580,7 @@ thread_through_all_blocks (bool may_peel_loop_headers)
loop structure. */
EXECUTE_IF_SET_IN_BITMAP (threaded_blocks, 0, i, bi)
{
- basic_block bb = BASIC_BLOCK (i);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
if (EDGE_COUNT (bb->preds) > 0)
retval |= thread_block (bb, true);
@@ -1631,7 +1619,7 @@ thread_through_all_blocks (bool may_peel_loop_headers)
ahead and thread it, else ignore it. */
basic_block bb;
edge e;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
/* If we do end up threading here, we can remove elements from
BB->preds. Thus we can not use the FOR_EACH_EDGE iterator. */
diff --git a/gcc/tree-ssa-uncprop.c b/gcc/tree-ssa-uncprop.c
index 44194b83ae6..63a2e10472c 100644
--- a/gcc/tree-ssa-uncprop.c
+++ b/gcc/tree-ssa-uncprop.c
@@ -65,7 +65,7 @@ associate_equivalences_with_edges (void)
/* Walk over each block. If the block ends with a control statement,
then it might create a useful equivalence. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi = gsi_last_bb (bb);
gimple stmt;
@@ -179,7 +179,7 @@ associate_equivalences_with_edges (void)
&& !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (cond))
{
int i, n_labels = gimple_switch_num_labels (stmt);
- tree *info = XCNEWVEC (tree, last_basic_block);
+ tree *info = XCNEWVEC (tree, last_basic_block_for_fn (cfun));
/* Walk over the case label vector. Record blocks
which are reached by a single case label which represents
@@ -214,7 +214,8 @@ associate_equivalences_with_edges (void)
equivalency = XNEW (struct edge_equivalency);
equivalency->rhs = x;
equivalency->lhs = cond;
- find_edge (bb, BASIC_BLOCK (i))->aux = equivalency;
+ find_edge (bb, BASIC_BLOCK_FOR_FN (cfun, i))->aux =
+ equivalency;
}
}
free (info);
@@ -405,7 +406,7 @@ tree_ssa_uncprop (void)
/* we just need to empty elements out of the hash table, and cleanup the
AUX field on the edges. */
val_ssa_equiv.dispose ();
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
edge e;
edge_iterator ei;
diff --git a/gcc/tree-ssa-uninit.c b/gcc/tree-ssa-uninit.c
index e456e26ae61..93c95588719 100644
--- a/gcc/tree-ssa-uninit.c
+++ b/gcc/tree-ssa-uninit.c
@@ -176,7 +176,7 @@ warn_uninitialized_vars (bool warn_possibly_uninitialized)
gimple_stmt_iterator gsi;
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
bool always_executed = dominated_by_p (CDI_POST_DOMINATORS,
single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)), bb);
@@ -2129,7 +2129,7 @@ execute_late_warn_uninitialized (void)
added_to_worklist = pointer_set_create ();
/* Initialize worklist */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
gimple phi = gsi_stmt (gsi);
diff --git a/gcc/tree-ssa.c b/gcc/tree-ssa.c
index 9b31a712935..4a12e614538 100644
--- a/gcc/tree-ssa.c
+++ b/gcc/tree-ssa.c
@@ -999,7 +999,7 @@ verify_ssa (bool check_modified_stmt)
/* Now verify all the uses and make sure they agree with the definitions
found in the previous pass. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
edge e;
gimple phi;
@@ -1456,7 +1456,7 @@ execute_update_addresses_taken (void)
/* Collect into ADDRESSES_TAKEN all variables whose address is taken within
the function body. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
@@ -1558,7 +1558,7 @@ execute_update_addresses_taken (void)
variables and operands need to be rewritten to expose bare symbols. */
if (!bitmap_empty_p (suitable_for_renaming))
{
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);)
{
gimple stmt = gsi_stmt (gsi);
diff --git a/gcc/tree-stdarg.c b/gcc/tree-stdarg.c
index 5a22cfd6d96..dc82340c99e 100644
--- a/gcc/tree-stdarg.c
+++ b/gcc/tree-stdarg.c
@@ -72,7 +72,7 @@ reachable_at_most_once (basic_block va_arg_bb, basic_block va_start_bb)
if (! dominated_by_p (CDI_DOMINATORS, va_arg_bb, va_start_bb))
return false;
- visited = sbitmap_alloc (last_basic_block);
+ visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
bitmap_clear (visited);
ret = true;
@@ -536,7 +536,7 @@ check_all_va_list_escapes (struct stdarg_info *si)
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator i;
@@ -703,7 +703,7 @@ execute_optimize_stdarg (void)
|| TREE_TYPE (cfun_va_list) == char_type_node);
gcc_assert (is_gimple_reg_type (cfun_va_list) == va_list_simple_ptr);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator i;
@@ -813,7 +813,7 @@ execute_optimize_stdarg (void)
memset (&wi, 0, sizeof (wi));
wi.info = si.va_list_vars;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator i;
diff --git a/gcc/tree-switch-conversion.c b/gcc/tree-switch-conversion.c
index 93a3bff6595..07d490b2317 100644
--- a/gcc/tree-switch-conversion.c
+++ b/gcc/tree-switch-conversion.c
@@ -1426,7 +1426,7 @@ do_switchconv (void)
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
const char *failure_reason;
gimple stmt = last_stmt (bb);
diff --git a/gcc/tree-vect-data-refs.c b/gcc/tree-vect-data-refs.c
index f7f95b3492c..ccdd7e47b33 100644
--- a/gcc/tree-vect-data-refs.c
+++ b/gcc/tree-vect-data-refs.c
@@ -497,31 +497,17 @@ vect_slp_analyze_data_ref_dependence (struct data_dependence_relation *ddr)
/* Unknown data dependence. */
if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
{
- gimple earlier_stmt;
-
- if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "can't determine dependence between ");
- dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
- dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
- dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
+ if (dump_enabled_p ())
+ {
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "can't determine dependence between ");
+ dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
+ dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
+ dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
- }
-
- /* We do not vectorize basic blocks with write-write dependencies. */
- if (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))
- return true;
-
- /* Check that it's not a load-after-store dependence. */
- earlier_stmt = get_earlier_stmt (DR_STMT (dra), DR_STMT (drb));
- if (DR_IS_WRITE (STMT_VINFO_DATA_REF (vinfo_for_stmt (earlier_stmt))))
- return true;
-
- return false;
+ }
}
-
- if (dump_enabled_p ())
+ else if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"determined dependence between ");
@@ -531,49 +517,23 @@ vect_slp_analyze_data_ref_dependence (struct data_dependence_relation *ddr)
dump_printf (MSG_NOTE, "\n");
}
- /* Do not vectorize basic blocks with write-write dependences. */
+ /* We do not vectorize basic blocks with write-write dependencies. */
if (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))
return true;
- /* Check dependence between DRA and DRB for basic block vectorization.
- If the accesses share same bases and offsets, we can compare their initial
- constant offsets to decide whether they differ or not. In case of a read-
- write dependence we check that the load is before the store to ensure that
- vectorization will not change the order of the accesses. */
-
- HOST_WIDE_INT type_size_a, type_size_b, init_a, init_b;
- gimple earlier_stmt;
-
- /* Check that the data-refs have same bases and offsets. If not, we can't
- determine if they are dependent. */
- if (!operand_equal_p (DR_BASE_ADDRESS (dra), DR_BASE_ADDRESS (drb), 0)
- || !dr_equal_offsets_p (dra, drb))
- return true;
-
- /* Check the types. */
- type_size_a = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))));
- type_size_b = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb))));
-
- if (type_size_a != type_size_b
- || !types_compatible_p (TREE_TYPE (DR_REF (dra)),
- TREE_TYPE (DR_REF (drb))))
- return true;
-
- init_a = TREE_INT_CST_LOW (DR_INIT (dra));
- init_b = TREE_INT_CST_LOW (DR_INIT (drb));
-
- /* Two different locations - no dependence. */
- if (init_a != init_b)
- return false;
-
- /* We have a read-write dependence. Check that the load is before the store.
+ /* If we have a read-write dependence, check that the load is before the store.
When we vectorize basic blocks, vector load can be only before
corresponding scalar load, and vector store can be only after its
corresponding scalar store. So the order of the acceses is preserved in
case the load is before the store. */
- earlier_stmt = get_earlier_stmt (DR_STMT (dra), DR_STMT (drb));
+ gimple earlier_stmt = get_earlier_stmt (DR_STMT (dra), DR_STMT (drb));
if (DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (earlier_stmt))))
- return false;
+ {
+ /* That only holds for load-store pairs taking part in vectorization. */
+ if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dra)))
+ && STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (drb))))
+ return false;
+ }
return true;
}
@@ -2957,6 +2917,24 @@ vect_check_gather (gimple stmt, loop_vec_info loop_vinfo, tree *basep,
enum machine_mode pmode;
int punsignedp, pvolatilep;
+ base = DR_REF (dr);
+ /* For masked loads/stores, DR_REF (dr) is an artificial MEM_REF,
+ see if we can use the def stmt of the address. */
+ if (is_gimple_call (stmt)
+ && gimple_call_internal_p (stmt)
+ && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
+ || gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
+ && TREE_CODE (base) == MEM_REF
+ && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME
+ && integer_zerop (TREE_OPERAND (base, 1))
+ && !expr_invariant_in_loop_p (loop, TREE_OPERAND (base, 0)))
+ {
+ gimple def_stmt = SSA_NAME_DEF_STMT (TREE_OPERAND (base, 0));
+ if (is_gimple_assign (def_stmt)
+ && gimple_assign_rhs_code (def_stmt) == ADDR_EXPR)
+ base = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 0);
+ }
+
/* The gather builtins need address of the form
loop_invariant + vector * {1, 2, 4, 8}
or
@@ -2969,7 +2947,7 @@ vect_check_gather (gimple stmt, loop_vec_info loop_vinfo, tree *basep,
vectorized. The following code attempts to find such a preexistng
SSA_NAME OFF and put the loop invariants into a tree BASE
that can be gimplified before the loop. */
- base = get_inner_reference (DR_REF (dr), &pbitsize, &pbitpos, &off,
+ base = get_inner_reference (base, &pbitsize, &pbitpos, &off,
&pmode, &punsignedp, &pvolatilep, false);
gcc_assert (base != NULL_TREE && (pbitpos % BITS_PER_UNIT) == 0);
@@ -3466,7 +3444,10 @@ again:
offset = unshare_expr (DR_OFFSET (dr));
init = unshare_expr (DR_INIT (dr));
- if (is_gimple_call (stmt))
+ if (is_gimple_call (stmt)
+ && (!gimple_call_internal_p (stmt)
+ || (gimple_call_internal_fn (stmt) != IFN_MASK_LOAD
+ && gimple_call_internal_fn (stmt) != IFN_MASK_STORE)))
{
if (dump_enabled_p ())
{
@@ -5117,6 +5098,14 @@ vect_supportable_dr_alignment (struct data_reference *dr,
if (aligned_access_p (dr) && !check_aligned_accesses)
return dr_aligned;
+ /* For now assume all conditional loads/stores support unaligned
+ access without any special code. */
+ if (is_gimple_call (stmt)
+ && gimple_call_internal_p (stmt)
+ && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
+ || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
+ return dr_unaligned_supported;
+
if (loop_vinfo)
{
vect_loop = LOOP_VINFO_LOOP (loop_vinfo);
diff --git a/gcc/tree-vect-generic.c b/gcc/tree-vect-generic.c
index 8bb04cf9f11..f289fdcf71b 100644
--- a/gcc/tree-vect-generic.c
+++ b/gcc/tree-vect-generic.c
@@ -1537,7 +1537,7 @@ expand_vector_operations (void)
basic_block bb;
bool cfg_changed = false;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
diff --git a/gcc/tree-vect-loop-manip.c b/gcc/tree-vect-loop-manip.c
index 63e7025fc6c..7fdb08205a8 100644
--- a/gcc/tree-vect-loop-manip.c
+++ b/gcc/tree-vect-loop-manip.c
@@ -703,12 +703,42 @@ slpeel_make_loop_iterate_ntimes (struct loop *loop, tree niters)
loop->nb_iterations = niters;
}
+/* Helper routine of slpeel_tree_duplicate_loop_to_edge_cfg.
+ For all PHI arguments in FROM->dest and TO->dest from those
+ edges ensure that TO->dest PHI arguments have current_def
+ set to that in FROM. */
+
+static void
+slpeel_duplicate_current_defs_from_edges (edge from, edge to)
+{
+ gimple_stmt_iterator gsi_from, gsi_to;
+
+ for (gsi_from = gsi_start_phis (from->dest),
+ gsi_to = gsi_start_phis (to->dest);
+ !gsi_end_p (gsi_from) && !gsi_end_p (gsi_to);
+ gsi_next (&gsi_from), gsi_next (&gsi_to))
+ {
+ gimple from_phi = gsi_stmt (gsi_from);
+ gimple to_phi = gsi_stmt (gsi_to);
+ tree from_arg = PHI_ARG_DEF_FROM_EDGE (from_phi, from);
+ tree to_arg = PHI_ARG_DEF_FROM_EDGE (to_phi, to);
+ if (TREE_CODE (from_arg) == SSA_NAME
+ && TREE_CODE (to_arg) == SSA_NAME
+ && get_current_def (to_arg) == NULL_TREE)
+ set_current_def (to_arg, get_current_def (from_arg));
+ }
+}
+
/* Given LOOP this function generates a new copy of it and puts it
- on E which is either the entry or exit of LOOP. */
+ on E which is either the entry or exit of LOOP. If SCALAR_LOOP is
+ non-NULL, assume LOOP and SCALAR_LOOP are equivalent and copy the
+ basic blocks from SCALAR_LOOP instead of LOOP, but to either the
+ entry or exit of LOOP. */
struct loop *
-slpeel_tree_duplicate_loop_to_edge_cfg (struct loop *loop, edge e)
+slpeel_tree_duplicate_loop_to_edge_cfg (struct loop *loop,
+ struct loop *scalar_loop, edge e)
{
struct loop *new_loop;
basic_block *new_bbs, *bbs;
@@ -722,19 +752,22 @@ slpeel_tree_duplicate_loop_to_edge_cfg (struct loop *loop, edge e)
if (!at_exit && e != loop_preheader_edge (loop))
return NULL;
- bbs = XNEWVEC (basic_block, loop->num_nodes + 1);
- get_loop_body_with_size (loop, bbs, loop->num_nodes);
+ if (scalar_loop == NULL)
+ scalar_loop = loop;
+
+ bbs = XNEWVEC (basic_block, scalar_loop->num_nodes + 1);
+ get_loop_body_with_size (scalar_loop, bbs, scalar_loop->num_nodes);
/* Check whether duplication is possible. */
- if (!can_copy_bbs_p (bbs, loop->num_nodes))
+ if (!can_copy_bbs_p (bbs, scalar_loop->num_nodes))
{
free (bbs);
return NULL;
}
/* Generate new loop structure. */
- new_loop = duplicate_loop (loop, loop_outer (loop));
- duplicate_subloops (loop, new_loop);
+ new_loop = duplicate_loop (scalar_loop, loop_outer (scalar_loop));
+ duplicate_subloops (scalar_loop, new_loop);
exit_dest = exit->dest;
was_imm_dom = (get_immediate_dominator (CDI_DOMINATORS,
@@ -744,35 +777,80 @@ slpeel_tree_duplicate_loop_to_edge_cfg (struct loop *loop, edge e)
/* Also copy the pre-header, this avoids jumping through hoops to
duplicate the loop entry PHI arguments. Create an empty
pre-header unconditionally for this. */
- basic_block preheader = split_edge (loop_preheader_edge (loop));
+ basic_block preheader = split_edge (loop_preheader_edge (scalar_loop));
edge entry_e = single_pred_edge (preheader);
- bbs[loop->num_nodes] = preheader;
- new_bbs = XNEWVEC (basic_block, loop->num_nodes + 1);
+ bbs[scalar_loop->num_nodes] = preheader;
+ new_bbs = XNEWVEC (basic_block, scalar_loop->num_nodes + 1);
- copy_bbs (bbs, loop->num_nodes + 1, new_bbs,
+ exit = single_exit (scalar_loop);
+ copy_bbs (bbs, scalar_loop->num_nodes + 1, new_bbs,
&exit, 1, &new_exit, NULL,
e->src, true);
- basic_block new_preheader = new_bbs[loop->num_nodes];
+ exit = single_exit (loop);
+ basic_block new_preheader = new_bbs[scalar_loop->num_nodes];
+
+ add_phi_args_after_copy (new_bbs, scalar_loop->num_nodes + 1, NULL);
- add_phi_args_after_copy (new_bbs, loop->num_nodes + 1, NULL);
+ if (scalar_loop != loop)
+ {
+ /* If we copied from SCALAR_LOOP rather than LOOP, SSA_NAMEs from
+ SCALAR_LOOP will have current_def set to SSA_NAMEs in the new_loop,
+ but LOOP will not. slpeel_update_phi_nodes_for_guard{1,2} expects
+ the LOOP SSA_NAMEs (on the exit edge and edge from latch to
+ header) to have current_def set, so copy them over. */
+ slpeel_duplicate_current_defs_from_edges (single_exit (scalar_loop),
+ exit);
+ slpeel_duplicate_current_defs_from_edges (EDGE_SUCC (scalar_loop->latch,
+ 0),
+ EDGE_SUCC (loop->latch, 0));
+ }
if (at_exit) /* Add the loop copy at exit. */
{
+ if (scalar_loop != loop)
+ {
+ gimple_stmt_iterator gsi;
+ new_exit = redirect_edge_and_branch (new_exit, exit_dest);
+
+ for (gsi = gsi_start_phis (exit_dest); !gsi_end_p (gsi);
+ gsi_next (&gsi))
+ {
+ gimple phi = gsi_stmt (gsi);
+ tree orig_arg = PHI_ARG_DEF_FROM_EDGE (phi, e);
+ location_t orig_locus
+ = gimple_phi_arg_location_from_edge (phi, e);
+
+ add_phi_arg (phi, orig_arg, new_exit, orig_locus);
+ }
+ }
redirect_edge_and_branch_force (e, new_preheader);
flush_pending_stmts (e);
set_immediate_dominator (CDI_DOMINATORS, new_preheader, e->src);
if (was_imm_dom)
- set_immediate_dominator (CDI_DOMINATORS, exit_dest, new_loop->header);
+ set_immediate_dominator (CDI_DOMINATORS, exit_dest, new_exit->src);
/* And remove the non-necessary forwarder again. Keep the other
one so we have a proper pre-header for the loop at the exit edge. */
- redirect_edge_pred (single_succ_edge (preheader), single_pred (preheader));
+ redirect_edge_pred (single_succ_edge (preheader),
+ single_pred (preheader));
delete_basic_block (preheader);
- set_immediate_dominator (CDI_DOMINATORS, loop->header,
- loop_preheader_edge (loop)->src);
+ set_immediate_dominator (CDI_DOMINATORS, scalar_loop->header,
+ loop_preheader_edge (scalar_loop)->src);
}
else /* Add the copy at entry. */
{
+ if (scalar_loop != loop)
+ {
+ /* Remove the non-necessary forwarder of scalar_loop again. */
+ redirect_edge_pred (single_succ_edge (preheader),
+ single_pred (preheader));
+ delete_basic_block (preheader);
+ set_immediate_dominator (CDI_DOMINATORS, scalar_loop->header,
+ loop_preheader_edge (scalar_loop)->src);
+ preheader = split_edge (loop_preheader_edge (loop));
+ entry_e = single_pred_edge (preheader);
+ }
+
redirect_edge_and_branch_force (entry_e, new_preheader);
flush_pending_stmts (entry_e);
set_immediate_dominator (CDI_DOMINATORS, new_preheader, entry_e->src);
@@ -783,15 +861,39 @@ slpeel_tree_duplicate_loop_to_edge_cfg (struct loop *loop, edge e)
/* And remove the non-necessary forwarder again. Keep the other
one so we have a proper pre-header for the loop at the exit edge. */
- redirect_edge_pred (single_succ_edge (new_preheader), single_pred (new_preheader));
+ redirect_edge_pred (single_succ_edge (new_preheader),
+ single_pred (new_preheader));
delete_basic_block (new_preheader);
set_immediate_dominator (CDI_DOMINATORS, new_loop->header,
loop_preheader_edge (new_loop)->src);
}
- for (unsigned i = 0; i < loop->num_nodes+1; i++)
+ for (unsigned i = 0; i < scalar_loop->num_nodes + 1; i++)
rename_variables_in_bb (new_bbs[i]);
+ if (scalar_loop != loop)
+ {
+ /* Update new_loop->header PHIs, so that on the preheader
+ edge they are the ones from loop rather than scalar_loop. */
+ gimple_stmt_iterator gsi_orig, gsi_new;
+ edge orig_e = loop_preheader_edge (loop);
+ edge new_e = loop_preheader_edge (new_loop);
+
+ for (gsi_orig = gsi_start_phis (loop->header),
+ gsi_new = gsi_start_phis (new_loop->header);
+ !gsi_end_p (gsi_orig) && !gsi_end_p (gsi_new);
+ gsi_next (&gsi_orig), gsi_next (&gsi_new))
+ {
+ gimple orig_phi = gsi_stmt (gsi_orig);
+ gimple new_phi = gsi_stmt (gsi_new);
+ tree orig_arg = PHI_ARG_DEF_FROM_EDGE (orig_phi, orig_e);
+ location_t orig_locus
+ = gimple_phi_arg_location_from_edge (orig_phi, orig_e);
+
+ add_phi_arg (new_phi, orig_arg, new_e, orig_locus);
+ }
+ }
+
free (new_bbs);
free (bbs);
@@ -1002,6 +1104,8 @@ set_prologue_iterations (basic_block bb_before_first_loop,
Input:
- LOOP: the loop to be peeled.
+ - SCALAR_LOOP: if non-NULL, the alternate loop from which basic blocks
+ should be copied.
- E: the exit or entry edge of LOOP.
If it is the entry edge, we peel the first iterations of LOOP. In this
case first-loop is LOOP, and second-loop is the newly created loop.
@@ -1043,8 +1147,8 @@ set_prologue_iterations (basic_block bb_before_first_loop,
FORNOW the resulting code will not be in loop-closed-ssa form.
*/
-static struct loop*
-slpeel_tree_peel_loop_to_edge (struct loop *loop,
+static struct loop *
+slpeel_tree_peel_loop_to_edge (struct loop *loop, struct loop *scalar_loop,
edge e, tree *first_niters,
tree niters, bool update_first_loop_count,
unsigned int th, bool check_profitability,
@@ -1061,7 +1165,6 @@ slpeel_tree_peel_loop_to_edge (struct loop *loop,
gimple_stmt_iterator gsi;
edge exit_e = single_exit (loop);
source_location loop_loc;
- tree cost_pre_condition = NULL_TREE;
/* There are many aspects to how likely the first loop is going to be executed.
Without histogram we can't really do good job. Simply set it to
2/3, so the first loop is not reordered to the end of function and
@@ -1129,7 +1232,8 @@ slpeel_tree_peel_loop_to_edge (struct loop *loop,
orig_exit_bb:
*/
- if (!(new_loop = slpeel_tree_duplicate_loop_to_edge_cfg (loop, e)))
+ if (!(new_loop = slpeel_tree_duplicate_loop_to_edge_cfg (loop, scalar_loop,
+ e)))
{
loop_loc = find_loop_location (loop);
dump_printf_loc (MSG_MISSED_OPTIMIZATION, loop_loc,
@@ -1263,21 +1367,17 @@ slpeel_tree_peel_loop_to_edge (struct loop *loop,
/* Epilogue peeling. */
if (!update_first_loop_count)
{
+ loop_vec_info loop_vinfo = loop_vec_info_for_loop (loop);
+ tree scalar_loop_iters = LOOP_VINFO_NITERSM1 (loop_vinfo);
+ unsigned limit = LOOP_VINFO_VECT_FACTOR (loop_vinfo) - 1;
+ if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
+ limit = limit + 1;
+ if (check_profitability
+ && th > limit)
+ limit = th;
pre_condition =
- fold_build2 (LE_EXPR, boolean_type_node, *first_niters,
- build_int_cst (TREE_TYPE (*first_niters), 0));
- if (check_profitability)
- {
- tree scalar_loop_iters
- = unshare_expr (LOOP_VINFO_NITERS_UNCHANGED
- (loop_vec_info_for_loop (loop)));
- cost_pre_condition =
- fold_build2 (LE_EXPR, boolean_type_node, scalar_loop_iters,
- build_int_cst (TREE_TYPE (scalar_loop_iters), th));
-
- pre_condition = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
- cost_pre_condition, pre_condition);
- }
+ fold_build2 (LT_EXPR, boolean_type_node, scalar_loop_iters,
+ build_int_cst (TREE_TYPE (scalar_loop_iters), limit));
if (cond_expr)
{
pre_condition =
@@ -1625,6 +1725,7 @@ vect_do_peeling_for_loop_bound (loop_vec_info loop_vinfo,
unsigned int th, bool check_profitability)
{
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ struct loop *scalar_loop = LOOP_VINFO_SCALAR_LOOP (loop_vinfo);
struct loop *new_loop;
edge update_e;
basic_block preheader;
@@ -1641,11 +1742,12 @@ vect_do_peeling_for_loop_bound (loop_vec_info loop_vinfo,
loop_num = loop->num;
- new_loop = slpeel_tree_peel_loop_to_edge (loop, single_exit (loop),
- &ratio_mult_vf_name, ni_name, false,
- th, check_profitability,
- cond_expr, cond_expr_stmt_list,
- 0, LOOP_VINFO_VECT_FACTOR (loop_vinfo));
+ new_loop
+ = slpeel_tree_peel_loop_to_edge (loop, scalar_loop, single_exit (loop),
+ &ratio_mult_vf_name, ni_name, false,
+ th, check_profitability,
+ cond_expr, cond_expr_stmt_list,
+ 0, LOOP_VINFO_VECT_FACTOR (loop_vinfo));
gcc_assert (new_loop);
gcc_assert (loop_num == loop->num);
#ifdef ENABLE_CHECKING
@@ -1878,6 +1980,7 @@ vect_do_peeling_for_alignment (loop_vec_info loop_vinfo, tree ni_name,
unsigned int th, bool check_profitability)
{
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ struct loop *scalar_loop = LOOP_VINFO_SCALAR_LOOP (loop_vinfo);
tree niters_of_prolog_loop;
tree wide_prolog_niters;
struct loop *new_loop;
@@ -1899,11 +2002,11 @@ vect_do_peeling_for_alignment (loop_vec_info loop_vinfo, tree ni_name,
/* Peel the prolog loop and iterate it niters_of_prolog_loop. */
new_loop =
- slpeel_tree_peel_loop_to_edge (loop, loop_preheader_edge (loop),
+ slpeel_tree_peel_loop_to_edge (loop, scalar_loop,
+ loop_preheader_edge (loop),
&niters_of_prolog_loop, ni_name, true,
th, check_profitability, NULL_TREE, NULL,
- bound,
- 0);
+ bound, 0);
gcc_assert (new_loop);
#ifdef ENABLE_CHECKING
@@ -1922,6 +2025,9 @@ vect_do_peeling_for_alignment (loop_vec_info loop_vinfo, tree ni_name,
/* Update number of times loop executes. */
LOOP_VINFO_NITERS (loop_vinfo) = fold_build2 (MINUS_EXPR,
TREE_TYPE (ni_name), ni_name, niters_of_prolog_loop);
+ LOOP_VINFO_NITERSM1 (loop_vinfo) = fold_build2 (MINUS_EXPR,
+ TREE_TYPE (ni_name),
+ LOOP_VINFO_NITERSM1 (loop_vinfo), niters_of_prolog_loop);
if (types_compatible_p (sizetype, TREE_TYPE (niters_of_prolog_loop)))
wide_prolog_niters = niters_of_prolog_loop;
@@ -2187,6 +2293,7 @@ vect_loop_versioning (loop_vec_info loop_vinfo,
unsigned int th, bool check_profitability)
{
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ struct loop *scalar_loop = LOOP_VINFO_SCALAR_LOOP (loop_vinfo);
basic_block condition_bb;
gimple_stmt_iterator gsi, cond_exp_gsi;
basic_block merge_bb;
@@ -2222,8 +2329,43 @@ vect_loop_versioning (loop_vec_info loop_vinfo,
gimple_seq_add_seq (&cond_expr_stmt_list, gimplify_stmt_list);
initialize_original_copy_tables ();
- loop_version (loop, cond_expr, &condition_bb,
- prob, prob, REG_BR_PROB_BASE - prob, true);
+ if (scalar_loop)
+ {
+ edge scalar_e;
+ basic_block preheader, scalar_preheader;
+
+ /* We don't want to scale SCALAR_LOOP's frequencies, we need to
+ scale LOOP's frequencies instead. */
+ loop_version (scalar_loop, cond_expr, &condition_bb,
+ prob, REG_BR_PROB_BASE, REG_BR_PROB_BASE - prob, true);
+ scale_loop_frequencies (loop, prob, REG_BR_PROB_BASE);
+ /* CONDITION_BB was created above SCALAR_LOOP's preheader,
+ while we need to move it above LOOP's preheader. */
+ e = loop_preheader_edge (loop);
+ scalar_e = loop_preheader_edge (scalar_loop);
+ gcc_assert (empty_block_p (e->src)
+ && single_pred_p (e->src));
+ gcc_assert (empty_block_p (scalar_e->src)
+ && single_pred_p (scalar_e->src));
+ gcc_assert (single_pred_p (condition_bb));
+ preheader = e->src;
+ scalar_preheader = scalar_e->src;
+ scalar_e = find_edge (condition_bb, scalar_preheader);
+ e = single_pred_edge (preheader);
+ redirect_edge_and_branch_force (single_pred_edge (condition_bb),
+ scalar_preheader);
+ redirect_edge_and_branch_force (scalar_e, preheader);
+ redirect_edge_and_branch_force (e, condition_bb);
+ set_immediate_dominator (CDI_DOMINATORS, condition_bb,
+ single_pred (condition_bb));
+ set_immediate_dominator (CDI_DOMINATORS, scalar_preheader,
+ single_pred (scalar_preheader));
+ set_immediate_dominator (CDI_DOMINATORS, preheader,
+ condition_bb);
+ }
+ else
+ loop_version (loop, cond_expr, &condition_bb,
+ prob, prob, REG_BR_PROB_BASE - prob, true);
if (LOCATION_LOCUS (vect_location) != UNKNOWN_LOCATION
&& dump_enabled_p ())
@@ -2246,24 +2388,29 @@ vect_loop_versioning (loop_vec_info loop_vinfo,
basic block (i.e. it has two predecessors). Just in order to simplify
following transformations in the vectorizer, we fix this situation
here by adding a new (empty) block on the exit-edge of the loop,
- with the proper loop-exit phis to maintain loop-closed-form. */
+ with the proper loop-exit phis to maintain loop-closed-form.
+ If loop versioning wasn't done from loop, but scalar_loop instead,
+ merge_bb will have already just a single successor. */
merge_bb = single_exit (loop)->dest;
- gcc_assert (EDGE_COUNT (merge_bb->preds) == 2);
- new_exit_bb = split_edge (single_exit (loop));
- new_exit_e = single_exit (loop);
- e = EDGE_SUCC (new_exit_bb, 0);
-
- for (gsi = gsi_start_phis (merge_bb); !gsi_end_p (gsi); gsi_next (&gsi))
+ if (scalar_loop == NULL || EDGE_COUNT (merge_bb->preds) >= 2)
{
- tree new_res;
- orig_phi = gsi_stmt (gsi);
- new_res = copy_ssa_name (PHI_RESULT (orig_phi), NULL);
- new_phi = create_phi_node (new_res, new_exit_bb);
- arg = PHI_ARG_DEF_FROM_EDGE (orig_phi, e);
- add_phi_arg (new_phi, arg, new_exit_e,
- gimple_phi_arg_location_from_edge (orig_phi, e));
- adjust_phi_and_debug_stmts (orig_phi, e, PHI_RESULT (new_phi));
+ gcc_assert (EDGE_COUNT (merge_bb->preds) >= 2);
+ new_exit_bb = split_edge (single_exit (loop));
+ new_exit_e = single_exit (loop);
+ e = EDGE_SUCC (new_exit_bb, 0);
+
+ for (gsi = gsi_start_phis (merge_bb); !gsi_end_p (gsi); gsi_next (&gsi))
+ {
+ tree new_res;
+ orig_phi = gsi_stmt (gsi);
+ new_res = copy_ssa_name (PHI_RESULT (orig_phi), NULL);
+ new_phi = create_phi_node (new_res, new_exit_bb);
+ arg = PHI_ARG_DEF_FROM_EDGE (orig_phi, e);
+ add_phi_arg (new_phi, arg, new_exit_e,
+ gimple_phi_arg_location_from_edge (orig_phi, e));
+ adjust_phi_and_debug_stmts (orig_phi, e, PHI_RESULT (new_phi));
+ }
}
diff --git a/gcc/tree-vect-loop.c b/gcc/tree-vect-loop.c
index 1037cac1dca..819392a56ea 100644
--- a/gcc/tree-vect-loop.c
+++ b/gcc/tree-vect-loop.c
@@ -374,7 +374,11 @@ vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
analyze_pattern_stmt = false;
}
- if (gimple_get_lhs (stmt) == NULL_TREE)
+ if (gimple_get_lhs (stmt) == NULL_TREE
+ /* MASK_STORE has no lhs, but is ok. */
+ && (!is_gimple_call (stmt)
+ || !gimple_call_internal_p (stmt)
+ || gimple_call_internal_fn (stmt) != IFN_MASK_STORE))
{
if (is_gimple_call (stmt))
{
@@ -426,7 +430,12 @@ vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
else
{
gcc_assert (!STMT_VINFO_DATA_REF (stmt_info));
- scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
+ if (is_gimple_call (stmt)
+ && gimple_call_internal_p (stmt)
+ && gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
+ scalar_type = TREE_TYPE (gimple_call_arg (stmt, 3));
+ else
+ scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
@@ -791,12 +800,14 @@ vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
/* Function vect_get_loop_niters.
Determine how many iterations the loop is executed and place it
- in NUMBER_OF_ITERATIONS.
+ in NUMBER_OF_ITERATIONS. Place the number of latch iterations
+ in NUMBER_OF_ITERATIONSM1.
Return the loop exit condition. */
static gimple
-vect_get_loop_niters (struct loop *loop, tree *number_of_iterations)
+vect_get_loop_niters (struct loop *loop, tree *number_of_iterations,
+ tree *number_of_iterationsm1)
{
tree niters;
@@ -805,12 +816,14 @@ vect_get_loop_niters (struct loop *loop, tree *number_of_iterations)
"=== get_loop_niters ===\n");
niters = number_of_latch_executions (loop);
+ *number_of_iterationsm1 = niters;
+
/* We want the number of loop header executions which is the number
of latch executions plus one.
??? For UINT_MAX latch executions this number overflows to zero
for loops like do { n++; } while (n != 0); */
if (niters && !chrec_contains_undetermined (niters))
- niters = fold_build2 (PLUS_EXPR, TREE_TYPE (niters), niters,
+ niters = fold_build2 (PLUS_EXPR, TREE_TYPE (niters), unshare_expr (niters),
build_int_cst (TREE_TYPE (niters), 1));
*number_of_iterations = niters;
@@ -916,6 +929,7 @@ new_loop_vec_info (struct loop *loop)
gcc_assert (nbbs == loop->num_nodes);
LOOP_VINFO_BBS (res) = bbs;
+ LOOP_VINFO_NITERSM1 (res) = NULL;
LOOP_VINFO_NITERS (res) = NULL;
LOOP_VINFO_NITERS_UNCHANGED (res) = NULL;
LOOP_VINFO_COST_MODEL_MIN_ITERS (res) = 0;
@@ -1071,7 +1085,7 @@ vect_analyze_loop_form (struct loop *loop)
{
loop_vec_info loop_vinfo;
gimple loop_cond;
- tree number_of_iterations = NULL;
+ tree number_of_iterations = NULL, number_of_iterationsm1 = NULL;
loop_vec_info inner_loop_vinfo = NULL;
if (dump_enabled_p ())
@@ -1246,7 +1260,8 @@ vect_analyze_loop_form (struct loop *loop)
}
}
- loop_cond = vect_get_loop_niters (loop, &number_of_iterations);
+ loop_cond = vect_get_loop_niters (loop, &number_of_iterations,
+ &number_of_iterationsm1);
if (!loop_cond)
{
if (dump_enabled_p ())
@@ -1280,6 +1295,7 @@ vect_analyze_loop_form (struct loop *loop)
}
loop_vinfo = new_loop_vec_info (loop);
+ LOOP_VINFO_NITERSM1 (loop_vinfo) = number_of_iterationsm1;
LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations;
LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations;
@@ -5637,12 +5653,11 @@ vect_generate_tmps_on_preheader (loop_vec_info loop_vinfo,
tree var;
tree ratio_name;
tree ratio_mult_vf_name;
- tree ni = LOOP_VINFO_NITERS (loop_vinfo);
int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
edge pe = loop_preheader_edge (LOOP_VINFO_LOOP (loop_vinfo));
tree log_vf;
- log_vf = build_int_cst (TREE_TYPE (ni), exact_log2 (vf));
+ log_vf = build_int_cst (TREE_TYPE (ni_name), exact_log2 (vf));
/* If epilogue loop is required because of data accesses with gaps, we
subtract one iteration from the total number of iterations here for
@@ -5654,7 +5669,7 @@ vect_generate_tmps_on_preheader (loop_vec_info loop_vinfo,
build_one_cst (TREE_TYPE (ni_name)));
if (!is_gimple_val (ni_minus_gap_name))
{
- var = create_tmp_var (TREE_TYPE (ni), "ni_gap");
+ var = create_tmp_var (TREE_TYPE (ni_name), "ni_gap");
gimple stmts = NULL;
ni_minus_gap_name = force_gimple_operand (ni_minus_gap_name, &stmts,
true, var);
@@ -5665,12 +5680,22 @@ vect_generate_tmps_on_preheader (loop_vec_info loop_vinfo,
ni_minus_gap_name = ni_name;
/* Create: ratio = ni >> log2(vf) */
-
- ratio_name = fold_build2 (RSHIFT_EXPR, TREE_TYPE (ni_minus_gap_name),
- ni_minus_gap_name, log_vf);
+ /* ??? As we have ni == number of latch executions + 1, ni could
+ have overflown to zero. So avoid computing ratio based on ni
+ but compute it using the fact that we know ratio will be at least
+ one, thus via (ni - vf) >> log2(vf) + 1. */
+ ratio_name
+ = fold_build2 (PLUS_EXPR, TREE_TYPE (ni_name),
+ fold_build2 (RSHIFT_EXPR, TREE_TYPE (ni_name),
+ fold_build2 (MINUS_EXPR, TREE_TYPE (ni_name),
+ ni_minus_gap_name,
+ build_int_cst
+ (TREE_TYPE (ni_name), vf)),
+ log_vf),
+ build_int_cst (TREE_TYPE (ni_name), 1));
if (!is_gimple_val (ratio_name))
{
- var = create_tmp_var (TREE_TYPE (ni), "bnd");
+ var = create_tmp_var (TREE_TYPE (ni_name), "bnd");
gimple stmts = NULL;
ratio_name = force_gimple_operand (ratio_name, &stmts, true, var);
gsi_insert_seq_on_edge_immediate (pe, stmts);
@@ -5685,7 +5710,7 @@ vect_generate_tmps_on_preheader (loop_vec_info loop_vinfo,
ratio_name, log_vf);
if (!is_gimple_val (ratio_mult_vf_name))
{
- var = create_tmp_var (TREE_TYPE (ni), "ratio_mult_vf");
+ var = create_tmp_var (TREE_TYPE (ni_name), "ratio_mult_vf");
gimple stmts = NULL;
ratio_mult_vf_name = force_gimple_operand (ratio_mult_vf_name, &stmts,
true, var);
diff --git a/gcc/tree-vect-stmts.c b/gcc/tree-vect-stmts.c
index 5d0ccb6bcff..2e2a56afa44 100644
--- a/gcc/tree-vect-stmts.c
+++ b/gcc/tree-vect-stmts.c
@@ -235,7 +235,7 @@ vect_mark_relevant (vec<gimple> *worklist, gimple stmt,
/* This use is out of pattern use, if LHS has other uses that are
pattern uses, we should mark the stmt itself, and not the pattern
stmt. */
- if (TREE_CODE (lhs) == SSA_NAME)
+ if (lhs && TREE_CODE (lhs) == SSA_NAME)
FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
{
if (is_gimple_debug (USE_STMT (use_p)))
@@ -393,7 +393,27 @@ exist_non_indexing_operands_for_use_p (tree use, gimple stmt)
first case, and whether var corresponds to USE. */
if (!gimple_assign_copy_p (stmt))
- return false;
+ {
+ if (is_gimple_call (stmt)
+ && gimple_call_internal_p (stmt))
+ switch (gimple_call_internal_fn (stmt))
+ {
+ case IFN_MASK_STORE:
+ operand = gimple_call_arg (stmt, 3);
+ if (operand == use)
+ return true;
+ /* FALLTHRU */
+ case IFN_MASK_LOAD:
+ operand = gimple_call_arg (stmt, 2);
+ if (operand == use)
+ return true;
+ break;
+ default:
+ break;
+ }
+ return false;
+ }
+
if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
return false;
operand = gimple_assign_rhs1 (stmt);
@@ -1696,6 +1716,413 @@ vectorizable_function (gimple call, tree vectype_out, tree vectype_in)
vectype_in);
}
+
+static tree permute_vec_elements (tree, tree, tree, gimple,
+ gimple_stmt_iterator *);
+
+
+/* Function vectorizable_mask_load_store.
+
+ Check if STMT performs a conditional load or store that can be vectorized.
+ If VEC_STMT is also passed, vectorize the STMT: create a vectorized
+ stmt to replace it, put it in VEC_STMT, and insert it at GSI.
+ Return FALSE if not a vectorizable STMT, TRUE otherwise. */
+
+static bool
+vectorizable_mask_load_store (gimple stmt, gimple_stmt_iterator *gsi,
+ gimple *vec_stmt, slp_tree slp_node)
+{
+ tree vec_dest = NULL;
+ stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+ stmt_vec_info prev_stmt_info;
+ loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
+ struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ bool nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
+ struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
+ tree vectype = STMT_VINFO_VECTYPE (stmt_info);
+ tree elem_type;
+ gimple new_stmt;
+ tree dummy;
+ tree dataref_ptr = NULL_TREE;
+ gimple ptr_incr;
+ int nunits = TYPE_VECTOR_SUBPARTS (vectype);
+ int ncopies;
+ int i, j;
+ bool inv_p;
+ tree gather_base = NULL_TREE, gather_off = NULL_TREE;
+ tree gather_off_vectype = NULL_TREE, gather_decl = NULL_TREE;
+ int gather_scale = 1;
+ enum vect_def_type gather_dt = vect_unknown_def_type;
+ bool is_store;
+ tree mask;
+ gimple def_stmt;
+ tree def;
+ enum vect_def_type dt;
+
+ if (slp_node != NULL)
+ return false;
+
+ ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
+ gcc_assert (ncopies >= 1);
+
+ is_store = gimple_call_internal_fn (stmt) == IFN_MASK_STORE;
+ mask = gimple_call_arg (stmt, 2);
+ if (TYPE_PRECISION (TREE_TYPE (mask))
+ != GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (vectype))))
+ return false;
+
+ /* FORNOW. This restriction should be relaxed. */
+ if (nested_in_vect_loop && ncopies > 1)
+ {
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "multiple types in nested loop.");
+ return false;
+ }
+
+ if (!STMT_VINFO_RELEVANT_P (stmt_info))
+ return false;
+
+ if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
+ return false;
+
+ if (!STMT_VINFO_DATA_REF (stmt_info))
+ return false;
+
+ elem_type = TREE_TYPE (vectype);
+
+ if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
+ return false;
+
+ if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
+ return false;
+
+ if (STMT_VINFO_GATHER_P (stmt_info))
+ {
+ gimple def_stmt;
+ tree def;
+ gather_decl = vect_check_gather (stmt, loop_vinfo, &gather_base,
+ &gather_off, &gather_scale);
+ gcc_assert (gather_decl);
+ if (!vect_is_simple_use_1 (gather_off, NULL, loop_vinfo, NULL,
+ &def_stmt, &def, &gather_dt,
+ &gather_off_vectype))
+ {
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "gather index use not simple.");
+ return false;
+ }
+ }
+ else if (tree_int_cst_compare (nested_in_vect_loop
+ ? STMT_VINFO_DR_STEP (stmt_info)
+ : DR_STEP (dr), size_zero_node) <= 0)
+ return false;
+ else if (!VECTOR_MODE_P (TYPE_MODE (vectype))
+ || !can_vec_mask_load_store_p (TYPE_MODE (vectype), !is_store))
+ return false;
+
+ if (TREE_CODE (mask) != SSA_NAME)
+ return false;
+
+ if (!vect_is_simple_use (mask, stmt, loop_vinfo, NULL,
+ &def_stmt, &def, &dt))
+ return false;
+
+ if (is_store)
+ {
+ tree rhs = gimple_call_arg (stmt, 3);
+ if (!vect_is_simple_use (rhs, stmt, loop_vinfo, NULL,
+ &def_stmt, &def, &dt))
+ return false;
+ }
+
+ if (!vec_stmt) /* transformation not required. */
+ {
+ STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
+ if (is_store)
+ vect_model_store_cost (stmt_info, ncopies, false, dt,
+ NULL, NULL, NULL);
+ else
+ vect_model_load_cost (stmt_info, ncopies, false, NULL, NULL, NULL);
+ return true;
+ }
+
+ /** Transform. **/
+
+ if (STMT_VINFO_GATHER_P (stmt_info))
+ {
+ tree vec_oprnd0 = NULL_TREE, op;
+ tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
+ tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
+ tree ptr, vec_mask = NULL_TREE, mask_op, var, scale;
+ tree perm_mask = NULL_TREE, prev_res = NULL_TREE;
+ edge pe = loop_preheader_edge (loop);
+ gimple_seq seq;
+ basic_block new_bb;
+ enum { NARROW, NONE, WIDEN } modifier;
+ int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gather_off_vectype);
+
+ if (nunits == gather_off_nunits)
+ modifier = NONE;
+ else if (nunits == gather_off_nunits / 2)
+ {
+ unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits);
+ modifier = WIDEN;
+
+ for (i = 0; i < gather_off_nunits; ++i)
+ sel[i] = i | nunits;
+
+ perm_mask = vect_gen_perm_mask (gather_off_vectype, sel);
+ gcc_assert (perm_mask != NULL_TREE);
+ }
+ else if (nunits == gather_off_nunits * 2)
+ {
+ unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
+ modifier = NARROW;
+
+ for (i = 0; i < nunits; ++i)
+ sel[i] = i < gather_off_nunits
+ ? i : i + nunits - gather_off_nunits;
+
+ perm_mask = vect_gen_perm_mask (vectype, sel);
+ gcc_assert (perm_mask != NULL_TREE);
+ ncopies *= 2;
+ }
+ else
+ gcc_unreachable ();
+
+ rettype = TREE_TYPE (TREE_TYPE (gather_decl));
+ srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
+ ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
+ idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
+ masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
+ scaletype = TREE_VALUE (arglist);
+ gcc_checking_assert (types_compatible_p (srctype, rettype)
+ && types_compatible_p (srctype, masktype));
+
+ vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
+
+ ptr = fold_convert (ptrtype, gather_base);
+ if (!is_gimple_min_invariant (ptr))
+ {
+ ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
+ new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
+ gcc_assert (!new_bb);
+ }
+
+ scale = build_int_cst (scaletype, gather_scale);
+
+ prev_stmt_info = NULL;
+ for (j = 0; j < ncopies; ++j)
+ {
+ if (modifier == WIDEN && (j & 1))
+ op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
+ perm_mask, stmt, gsi);
+ else if (j == 0)
+ op = vec_oprnd0
+ = vect_get_vec_def_for_operand (gather_off, stmt, NULL);
+ else
+ op = vec_oprnd0
+ = vect_get_vec_def_for_stmt_copy (gather_dt, vec_oprnd0);
+
+ if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
+ {
+ gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
+ == TYPE_VECTOR_SUBPARTS (idxtype));
+ var = vect_get_new_vect_var (idxtype, vect_simple_var, NULL);
+ var = make_ssa_name (var, NULL);
+ op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
+ new_stmt
+ = gimple_build_assign_with_ops (VIEW_CONVERT_EXPR, var,
+ op, NULL_TREE);
+ vect_finish_stmt_generation (stmt, new_stmt, gsi);
+ op = var;
+ }
+
+ if (j == 0)
+ vec_mask = vect_get_vec_def_for_operand (mask, stmt, NULL);
+ else
+ {
+ vect_is_simple_use (vec_mask, NULL, loop_vinfo, NULL, &def_stmt,
+ &def, &dt);
+ vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
+ }
+
+ mask_op = vec_mask;
+ if (!useless_type_conversion_p (masktype, TREE_TYPE (vec_mask)))
+ {
+ gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op))
+ == TYPE_VECTOR_SUBPARTS (masktype));
+ var = vect_get_new_vect_var (masktype, vect_simple_var, NULL);
+ var = make_ssa_name (var, NULL);
+ mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op);
+ new_stmt
+ = gimple_build_assign_with_ops (VIEW_CONVERT_EXPR, var,
+ mask_op, NULL_TREE);
+ vect_finish_stmt_generation (stmt, new_stmt, gsi);
+ mask_op = var;
+ }
+
+ new_stmt
+ = gimple_build_call (gather_decl, 5, mask_op, ptr, op, mask_op,
+ scale);
+
+ if (!useless_type_conversion_p (vectype, rettype))
+ {
+ gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
+ == TYPE_VECTOR_SUBPARTS (rettype));
+ var = vect_get_new_vect_var (rettype, vect_simple_var, NULL);
+ op = make_ssa_name (var, new_stmt);
+ gimple_call_set_lhs (new_stmt, op);
+ vect_finish_stmt_generation (stmt, new_stmt, gsi);
+ var = make_ssa_name (vec_dest, NULL);
+ op = build1 (VIEW_CONVERT_EXPR, vectype, op);
+ new_stmt
+ = gimple_build_assign_with_ops (VIEW_CONVERT_EXPR, var, op,
+ NULL_TREE);
+ }
+ else
+ {
+ var = make_ssa_name (vec_dest, new_stmt);
+ gimple_call_set_lhs (new_stmt, var);
+ }
+
+ vect_finish_stmt_generation (stmt, new_stmt, gsi);
+
+ if (modifier == NARROW)
+ {
+ if ((j & 1) == 0)
+ {
+ prev_res = var;
+ continue;
+ }
+ var = permute_vec_elements (prev_res, var,
+ perm_mask, stmt, gsi);
+ new_stmt = SSA_NAME_DEF_STMT (var);
+ }
+
+ if (prev_stmt_info == NULL)
+ STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
+ else
+ STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
+ prev_stmt_info = vinfo_for_stmt (new_stmt);
+ }
+ return true;
+ }
+ else if (is_store)
+ {
+ tree vec_rhs = NULL_TREE, vec_mask = NULL_TREE;
+ prev_stmt_info = NULL;
+ for (i = 0; i < ncopies; i++)
+ {
+ unsigned align, misalign;
+
+ if (i == 0)
+ {
+ tree rhs = gimple_call_arg (stmt, 3);
+ vec_rhs = vect_get_vec_def_for_operand (rhs, stmt, NULL);
+ vec_mask = vect_get_vec_def_for_operand (mask, stmt, NULL);
+ /* We should have catched mismatched types earlier. */
+ gcc_assert (useless_type_conversion_p (vectype,
+ TREE_TYPE (vec_rhs)));
+ dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
+ NULL_TREE, &dummy, gsi,
+ &ptr_incr, false, &inv_p);
+ gcc_assert (!inv_p);
+ }
+ else
+ {
+ vect_is_simple_use (vec_rhs, NULL, loop_vinfo, NULL, &def_stmt,
+ &def, &dt);
+ vec_rhs = vect_get_vec_def_for_stmt_copy (dt, vec_rhs);
+ vect_is_simple_use (vec_mask, NULL, loop_vinfo, NULL, &def_stmt,
+ &def, &dt);
+ vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
+ dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
+ TYPE_SIZE_UNIT (vectype));
+ }
+
+ align = TYPE_ALIGN_UNIT (vectype);
+ if (aligned_access_p (dr))
+ misalign = 0;
+ else if (DR_MISALIGNMENT (dr) == -1)
+ {
+ align = TYPE_ALIGN_UNIT (elem_type);
+ misalign = 0;
+ }
+ else
+ misalign = DR_MISALIGNMENT (dr);
+ set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
+ misalign);
+ new_stmt
+ = gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr,
+ gimple_call_arg (stmt, 1),
+ vec_mask, vec_rhs);
+ vect_finish_stmt_generation (stmt, new_stmt, gsi);
+ if (i == 0)
+ STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
+ else
+ STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
+ prev_stmt_info = vinfo_for_stmt (new_stmt);
+ }
+ }
+ else
+ {
+ tree vec_mask = NULL_TREE;
+ prev_stmt_info = NULL;
+ vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
+ for (i = 0; i < ncopies; i++)
+ {
+ unsigned align, misalign;
+
+ if (i == 0)
+ {
+ vec_mask = vect_get_vec_def_for_operand (mask, stmt, NULL);
+ dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
+ NULL_TREE, &dummy, gsi,
+ &ptr_incr, false, &inv_p);
+ gcc_assert (!inv_p);
+ }
+ else
+ {
+ vect_is_simple_use (vec_mask, NULL, loop_vinfo, NULL, &def_stmt,
+ &def, &dt);
+ vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
+ dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
+ TYPE_SIZE_UNIT (vectype));
+ }
+
+ align = TYPE_ALIGN_UNIT (vectype);
+ if (aligned_access_p (dr))
+ misalign = 0;
+ else if (DR_MISALIGNMENT (dr) == -1)
+ {
+ align = TYPE_ALIGN_UNIT (elem_type);
+ misalign = 0;
+ }
+ else
+ misalign = DR_MISALIGNMENT (dr);
+ set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
+ misalign);
+ new_stmt
+ = gimple_build_call_internal (IFN_MASK_LOAD, 3, dataref_ptr,
+ gimple_call_arg (stmt, 1),
+ vec_mask);
+ gimple_call_set_lhs (new_stmt, make_ssa_name (vec_dest, NULL));
+ vect_finish_stmt_generation (stmt, new_stmt, gsi);
+ if (i == 0)
+ STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
+ else
+ STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
+ prev_stmt_info = vinfo_for_stmt (new_stmt);
+ }
+ }
+
+ return true;
+}
+
+
/* Function vectorizable_call.
Check if STMT performs a function call that can be vectorized.
@@ -1738,6 +2165,12 @@ vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
if (!is_gimple_call (stmt))
return false;
+ if (gimple_call_internal_p (stmt)
+ && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
+ || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
+ return vectorizable_mask_load_store (stmt, gsi, vec_stmt,
+ slp_node);
+
if (gimple_call_lhs (stmt) == NULL_TREE
|| TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
return false;
@@ -4049,10 +4482,6 @@ vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
}
-static tree permute_vec_elements (tree, tree, tree, gimple,
- gimple_stmt_iterator *);
-
-
/* Function vectorizable_operation.
Check if STMT performs a binary, unary or ternary operation that can
@@ -6565,6 +6994,10 @@ vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
case call_vec_info_type:
done = vectorizable_call (stmt, gsi, &vec_stmt, slp_node);
stmt = gsi_stmt (*gsi);
+ if (is_gimple_call (stmt)
+ && gimple_call_internal_p (stmt)
+ && gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
+ is_store = true;
break;
case call_simd_clone_vec_info_type:
diff --git a/gcc/tree-vectorizer.c b/gcc/tree-vectorizer.c
index c11f8a8c1b7..1c411c4dec6 100644
--- a/gcc/tree-vectorizer.c
+++ b/gcc/tree-vectorizer.c
@@ -75,11 +75,13 @@ along with GCC; see the file COPYING3. If not see
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "tree-ssa-loop-manip.h"
+#include "tree-cfg.h"
#include "cfgloop.h"
#include "tree-vectorizer.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
#include "dbgcnt.h"
+#include "gimple-fold.h"
/* Loop or bb location. */
source_location vect_location;
@@ -157,7 +159,7 @@ adjust_simduid_builtins (hash_table <simduid_to_vf> &htab)
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator i;
@@ -265,7 +267,7 @@ note_simd_array_uses (hash_table <simd_array_to_simduid> *htab)
wi.info = &ns;
ns.htab = htab;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
gimple stmt = gsi_stmt (gsi);
@@ -317,6 +319,60 @@ vect_destroy_datarefs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
}
+/* If LOOP has been versioned during ifcvt, return the internal call
+ guarding it. */
+
+static gimple
+vect_loop_vectorized_call (struct loop *loop)
+{
+ basic_block bb = loop_preheader_edge (loop)->src;
+ gimple g;
+ do
+ {
+ g = last_stmt (bb);
+ if (g)
+ break;
+ if (!single_pred_p (bb))
+ break;
+ bb = single_pred (bb);
+ }
+ while (1);
+ if (g && gimple_code (g) == GIMPLE_COND)
+ {
+ gimple_stmt_iterator gsi = gsi_for_stmt (g);
+ gsi_prev (&gsi);
+ if (!gsi_end_p (gsi))
+ {
+ g = gsi_stmt (gsi);
+ if (is_gimple_call (g)
+ && gimple_call_internal_p (g)
+ && gimple_call_internal_fn (g) == IFN_LOOP_VECTORIZED
+ && (tree_to_shwi (gimple_call_arg (g, 0)) == loop->num
+ || tree_to_shwi (gimple_call_arg (g, 1)) == loop->num))
+ return g;
+ }
+ }
+ return NULL;
+}
+
+/* Fold LOOP_VECTORIZED internal call G to VALUE and
+ update any immediate uses of it's LHS. */
+
+static void
+fold_loop_vectorized_call (gimple g, tree value)
+{
+ tree lhs = gimple_call_lhs (g);
+ use_operand_p use_p;
+ imm_use_iterator iter;
+ gimple use_stmt;
+ gimple_stmt_iterator gsi = gsi_for_stmt (g);
+
+ update_call_from_tree (&gsi, value);
+ FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
+ FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
+ SET_USE (use_p, value);
+}
+
/* Function vectorize_loops.
Entry point to loop vectorization phase. */
@@ -330,6 +386,8 @@ vectorize_loops (void)
struct loop *loop;
hash_table <simduid_to_vf> simduid_to_vf_htab;
hash_table <simd_array_to_simduid> simd_array_to_simduid_htab;
+ bool any_ifcvt_loops = false;
+ unsigned ret = 0;
vect_loops_num = number_of_loops (cfun);
@@ -352,8 +410,11 @@ vectorize_loops (void)
than all previously defined loops. This fact allows us to run
only over initial loops skipping newly generated ones. */
FOR_EACH_LOOP (loop, 0)
- if ((flag_tree_loop_vectorize && optimize_loop_nest_for_speed_p (loop))
- || loop->force_vect)
+ if (loop->dont_vectorize)
+ any_ifcvt_loops = true;
+ else if ((flag_tree_loop_vectorize
+ && optimize_loop_nest_for_speed_p (loop))
+ || loop->force_vect)
{
loop_vec_info loop_vinfo;
vect_location = find_loop_location (loop);
@@ -372,6 +433,39 @@ vectorize_loops (void)
if (!dbg_cnt (vect_loop))
break;
+ gimple loop_vectorized_call = vect_loop_vectorized_call (loop);
+ if (loop_vectorized_call)
+ {
+ tree arg = gimple_call_arg (loop_vectorized_call, 1);
+ basic_block *bbs;
+ unsigned int i;
+ struct loop *scalar_loop = get_loop (cfun, tree_to_shwi (arg));
+
+ LOOP_VINFO_SCALAR_LOOP (loop_vinfo) = scalar_loop;
+ gcc_checking_assert (vect_loop_vectorized_call
+ (LOOP_VINFO_SCALAR_LOOP (loop_vinfo))
+ == loop_vectorized_call);
+ bbs = get_loop_body (scalar_loop);
+ for (i = 0; i < scalar_loop->num_nodes; i++)
+ {
+ basic_block bb = bbs[i];
+ gimple_stmt_iterator gsi;
+ for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
+ gsi_next (&gsi))
+ {
+ gimple phi = gsi_stmt (gsi);
+ gimple_set_uid (phi, 0);
+ }
+ for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
+ gsi_next (&gsi))
+ {
+ gimple stmt = gsi_stmt (gsi);
+ gimple_set_uid (stmt, 0);
+ }
+ }
+ free (bbs);
+ }
+
if (LOCATION_LOCUS (vect_location) != UNKNOWN_LOCATION
&& dump_enabled_p ())
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
@@ -392,6 +486,12 @@ vectorize_loops (void)
*simduid_to_vf_htab.find_slot (simduid_to_vf_data, INSERT)
= simduid_to_vf_data;
}
+
+ if (loop_vectorized_call)
+ {
+ fold_loop_vectorized_call (loop_vectorized_call, boolean_true_node);
+ ret |= TODO_cleanup_cfg;
+ }
}
vect_location = UNKNOWN_LOCATION;
@@ -405,6 +505,21 @@ vectorize_loops (void)
/* ----------- Finalize. ----------- */
+ if (any_ifcvt_loops)
+ for (i = 1; i < vect_loops_num; i++)
+ {
+ loop = get_loop (cfun, i);
+ if (loop && loop->dont_vectorize)
+ {
+ gimple g = vect_loop_vectorized_call (loop);
+ if (g)
+ {
+ fold_loop_vectorized_call (g, boolean_false_node);
+ ret |= TODO_cleanup_cfg;
+ }
+ }
+ }
+
for (i = 1; i < vect_loops_num; i++)
{
loop_vec_info loop_vinfo;
@@ -462,7 +577,7 @@ vectorize_loops (void)
return TODO_cleanup_cfg;
}
- return 0;
+ return ret;
}
@@ -475,7 +590,7 @@ execute_vect_slp (void)
init_stmt_vec_info_vec ();
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
vect_location = find_bb_location (bb);
@@ -551,7 +666,7 @@ make_pass_slp_vectorize (gcc::context *ctxt)
static unsigned int
increase_alignment (void)
{
- struct varpool_node *vnode;
+ varpool_node *vnode;
vect_location = UNKNOWN_LOCATION;
diff --git a/gcc/tree-vectorizer.h b/gcc/tree-vectorizer.h
index 4427d6a7b33..54e73c8c9a0 100644
--- a/gcc/tree-vectorizer.h
+++ b/gcc/tree-vectorizer.h
@@ -250,8 +250,11 @@ typedef struct _loop_vec_info {
/* The loop basic blocks. */
basic_block *bbs;
+ /* Number of latch executions. */
+ tree num_itersm1;
/* Number of iterations. */
tree num_iters;
+ /* Number of iterations of the original loop. */
tree num_iters_unchanged;
/* Minimum number of iterations below which vectorization is expected to
@@ -344,14 +347,20 @@ typedef struct _loop_vec_info {
fix it up. */
bool operands_swapped;
+ /* If if-conversion versioned this loop before conversion, this is the
+ loop version without if-conversion. */
+ struct loop *scalar_loop;
+
} *loop_vec_info;
/* Access Functions. */
#define LOOP_VINFO_LOOP(L) (L)->loop
#define LOOP_VINFO_BBS(L) (L)->bbs
+#define LOOP_VINFO_NITERSM1(L) (L)->num_itersm1
#define LOOP_VINFO_NITERS(L) (L)->num_iters
-/* Since LOOP_VINFO_NITERS can change after prologue peeling
- retain total unchanged scalar loop iterations for cost model. */
+/* Since LOOP_VINFO_NITERS and LOOP_VINFO_NITERSM1 can change after
+ prologue peeling retain total unchanged scalar loop iterations for
+ cost model. */
#define LOOP_VINFO_NITERS_UNCHANGED(L) (L)->num_iters_unchanged
#define LOOP_VINFO_COST_MODEL_MIN_ITERS(L) (L)->min_profitable_iters
#define LOOP_VINFO_VECTORIZABLE_P(L) (L)->vectorizable
@@ -376,6 +385,7 @@ typedef struct _loop_vec_info {
#define LOOP_VINFO_PEELING_FOR_GAPS(L) (L)->peeling_for_gaps
#define LOOP_VINFO_OPERANDS_SWAPPED(L) (L)->operands_swapped
#define LOOP_VINFO_PEELING_FOR_NITER(L) (L)->peeling_for_niter
+#define LOOP_VINFO_SCALAR_LOOP(L) (L)->scalar_loop
#define LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT(L) \
(L)->may_misalign_stmts.length () > 0
@@ -934,7 +944,8 @@ extern source_location vect_location;
in tree-vect-loop-manip.c. */
extern void slpeel_make_loop_iterate_ntimes (struct loop *, tree);
extern bool slpeel_can_duplicate_loop_p (const struct loop *, const_edge);
-struct loop *slpeel_tree_duplicate_loop_to_edge_cfg (struct loop *, edge);
+struct loop *slpeel_tree_duplicate_loop_to_edge_cfg (struct loop *,
+ struct loop *, edge);
extern void vect_loop_versioning (loop_vec_info, unsigned int, bool);
extern void vect_do_peeling_for_loop_bound (loop_vec_info, tree, tree,
unsigned int, bool);
diff --git a/gcc/tree-vrp.c b/gcc/tree-vrp.c
index 49231d1dd87..731a6debff6 100644
--- a/gcc/tree-vrp.c
+++ b/gcc/tree-vrp.c
@@ -3682,6 +3682,47 @@ extract_range_basic (value_range_t *vr, gimple stmt)
break;
}
}
+ else if (is_gimple_call (stmt)
+ && gimple_call_internal_p (stmt))
+ {
+ enum tree_code subcode = ERROR_MARK;
+ switch (gimple_call_internal_fn (stmt))
+ {
+ case IFN_UBSAN_CHECK_ADD:
+ subcode = PLUS_EXPR;
+ break;
+ case IFN_UBSAN_CHECK_SUB:
+ subcode = MINUS_EXPR;
+ break;
+ case IFN_UBSAN_CHECK_MUL:
+ subcode = MULT_EXPR;
+ break;
+ default:
+ break;
+ }
+ if (subcode != ERROR_MARK)
+ {
+ bool saved_flag_wrapv = flag_wrapv;
+ /* Pretend the arithmetics is wrapping. If there is
+ any overflow, we'll complain, but will actually do
+ wrapping operation. */
+ flag_wrapv = 1;
+ extract_range_from_binary_expr (vr, subcode, type,
+ gimple_call_arg (stmt, 0),
+ gimple_call_arg (stmt, 1));
+ flag_wrapv = saved_flag_wrapv;
+
+ /* If for both arguments vrp_valueize returned non-NULL,
+ this should have been already folded and if not, it
+ wasn't folded because of overflow. Avoid removing the
+ UBSAN_CHECK_* calls in that case. */
+ if (vr->type == VR_RANGE
+ && (vr->min == vr->max
+ || operand_equal_p (vr->min, vr->max, 0)))
+ set_value_range_to_varying (vr);
+ return;
+ }
+ }
if (INTEGRAL_TYPE_P (type)
&& gimple_stmt_nonnegative_warnv_p (stmt, &sop))
set_value_range_to_nonnegative (vr, type,
@@ -4423,7 +4464,7 @@ infer_value_range (gimple stmt, tree op, enum tree_code *comp_code_p, tree *val_
if (stmt_ends_bb_p (stmt) && EDGE_COUNT (gimple_bb (stmt)->succs) == 0)
return false;
- if (infer_nonnull_range (stmt, op))
+ if (infer_nonnull_range (stmt, op, true, true))
{
*val_p = build_int_cst (TREE_TYPE (op), 0);
*comp_code_p = NE_EXPR;
@@ -5818,13 +5859,13 @@ find_assert_locations_1 (basic_block bb, sbitmap live)
static bool
find_assert_locations (void)
{
- int *rpo = XNEWVEC (int, last_basic_block);
- int *bb_rpo = XNEWVEC (int, last_basic_block);
- int *last_rpo = XCNEWVEC (int, last_basic_block);
+ int *rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
+ int *bb_rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
+ int *last_rpo = XCNEWVEC (int, last_basic_block_for_fn (cfun));
int rpo_cnt, i;
bool need_asserts;
- live = XCNEWVEC (sbitmap, last_basic_block);
+ live = XCNEWVEC (sbitmap, last_basic_block_for_fn (cfun));
rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
for (i = 0; i < rpo_cnt; ++i)
bb_rpo[rpo[i]] = i;
@@ -5859,7 +5900,7 @@ find_assert_locations (void)
need_asserts = false;
for (i = rpo_cnt - 1; i >= 0; --i)
{
- basic_block bb = BASIC_BLOCK (rpo[i]);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
edge e;
edge_iterator ei;
@@ -5918,7 +5959,7 @@ find_assert_locations (void)
XDELETEVEC (rpo);
XDELETEVEC (bb_rpo);
XDELETEVEC (last_rpo);
- for (i = 0; i < last_basic_block; ++i)
+ for (i = 0; i < last_basic_block_for_fn (cfun); ++i)
if (live[i])
sbitmap_free (live[i]);
XDELETEVEC (live);
@@ -6315,7 +6356,7 @@ check_all_array_refs (void)
basic_block bb;
gimple_stmt_iterator si;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
edge_iterator ei;
edge e;
@@ -6476,7 +6517,7 @@ remove_range_assertions (void)
/* Note that the BSI iterator bump happens at the bottom of the
loop and no bump is necessary if we're removing the statement
referenced by the current BSI. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);)
{
gimple stmt = gsi_stmt (si);
@@ -6591,7 +6632,7 @@ vrp_initialize (void)
vr_value = XCNEWVEC (value_range_t *, num_vr_values);
vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator si;
@@ -9443,7 +9484,7 @@ identify_jump_threads (void)
I doubt it's worth the effort for the classes of jump
threading opportunities we are trying to identify at this
point in compilation. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple last;
diff --git a/gcc/tree.c b/gcc/tree.c
index 8fa75425949..124f3c93505 100644
--- a/gcc/tree.c
+++ b/gcc/tree.c
@@ -552,6 +552,8 @@ initialize_tree_contains_struct (void)
gcc_assert (tree_contains_struct[FUNCTION_DECL][TS_FUNCTION_DECL]);
gcc_assert (tree_contains_struct[IMPORTED_DECL][TS_DECL_MINIMAL]);
gcc_assert (tree_contains_struct[IMPORTED_DECL][TS_DECL_COMMON]);
+ gcc_assert (tree_contains_struct[NAMELIST_DECL][TS_DECL_MINIMAL]);
+ gcc_assert (tree_contains_struct[NAMELIST_DECL][TS_DECL_COMMON]);
}
@@ -5490,7 +5492,7 @@ find_decls_types_in_node (struct cgraph_node *n, struct free_lang_data_d *fld)
NAMESPACE_DECLs, etc). */
static void
-find_decls_types_in_var (struct varpool_node *v, struct free_lang_data_d *fld)
+find_decls_types_in_var (varpool_node *v, struct free_lang_data_d *fld)
{
find_decls_types (v->decl, fld);
}
@@ -5544,7 +5546,7 @@ static void
free_lang_data_in_cgraph (void)
{
struct cgraph_node *n;
- struct varpool_node *v;
+ varpool_node *v;
struct free_lang_data_d fld;
tree t;
unsigned i;
diff --git a/gcc/tree.def b/gcc/tree.def
index 96649fb372c..04d068c179d 100644
--- a/gcc/tree.def
+++ b/gcc/tree.def
@@ -383,6 +383,16 @@ DEFTREECODE (NAMESPACE_DECL, "namespace_decl", tcc_declaration, 0)
IMPORTED_DECL_ASSOCIATED_DECL (NODE) accesses the imported declaration. */
DEFTREECODE (IMPORTED_DECL, "imported_decl", tcc_declaration, 0)
+/* A namelist declaration.
+ The Fortran FE uses this to represent a namelist statement, e.g.:
+ NAMELIST /namelist-group-name/ namelist-group-object-list.
+ Whenever a declaration import appears in a lexical block, the BLOCK node
+ representing that lexical block in GIMPLE will contain an NAMELIST_DECL
+ node, linked via BLOCK_VARS accessor of the said BLOCK.
+ For a given NODE which code is NAMELIST_DECL,
+ NAMELIST_DECL_ASSOCIATED_DECL (NODE) accesses the imported declaration. */
+DEFTREECODE (NAMELIST_DECL, "namelist_decl", tcc_declaration, 0)
+
/* A translation unit. This is not technically a declaration, since it
can't be looked up, but it's close enough. */
DEFTREECODE (TRANSLATION_UNIT_DECL, "translation_unit_decl",\
diff --git a/gcc/tree.h b/gcc/tree.h
index 0aa05c5ce0c..a270c8e8c3d 100644
--- a/gcc/tree.h
+++ b/gcc/tree.h
@@ -2667,6 +2667,11 @@ extern vec<tree, va_gc> **decl_debug_args_insert (tree);
#define IMPORTED_DECL_ASSOCIATED_DECL(NODE) \
(DECL_INITIAL (IMPORTED_DECL_CHECK (NODE)))
+/* Getter of the symbol declaration associated with the
+ NAMELIST_DECL node. */
+#define NAMELIST_DECL_ASSOCIATED_DECL(NODE) \
+ (DECL_INITIAL (NODE))
+
/* A STATEMENT_LIST chains statements together in GENERIC and GIMPLE.
To reduce overhead, the nodes containing the statements are not trees.
This avoids the overhead of tree_common on all linked list elements.
diff --git a/gcc/tsan.c b/gcc/tsan.c
index 4efcfe565aa..d12459fbfbf 100644
--- a/gcc/tsan.c
+++ b/gcc/tsan.c
@@ -640,7 +640,7 @@ instrument_memory_accesses (void)
gimple_stmt_iterator gsi;
bool fentry_exit_instrument = false;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
fentry_exit_instrument |= instrument_gimple (&gsi);
return fentry_exit_instrument;
diff --git a/gcc/ubsan.c b/gcc/ubsan.c
index e33e62a028a..51b4f8dd7bf 100644
--- a/gcc/ubsan.c
+++ b/gcc/ubsan.c
@@ -27,6 +27,7 @@ along with GCC; see the file COPYING3. If not see
#include "cgraph.h"
#include "tree-pass.h"
#include "tree-ssa-alias.h"
+#include "tree-pretty-print.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "gimple.h"
@@ -40,9 +41,8 @@ along with GCC; see the file COPYING3. If not see
#include "cfgloop.h"
#include "ubsan.h"
#include "c-family/c-common.h"
-
-/* From trans-mem.c. */
-#define PROB_VERY_UNLIKELY (REG_BR_PROB_BASE / 2000 - 1)
+#include "rtl.h"
+#include "expr.h"
/* Map from a tree to a VAR_DECL tree. */
@@ -105,45 +105,53 @@ decl_for_type_insert (tree type, tree decl)
/* Helper routine, which encodes a value in the pointer_sized_int_node.
Arguments with precision <= POINTER_SIZE are passed directly,
- the rest is passed by reference. T is a value we are to encode. */
+ the rest is passed by reference. T is a value we are to encode.
+ IN_EXPAND_P is true if this function is called during expansion. */
tree
-ubsan_encode_value (tree t)
+ubsan_encode_value (tree t, bool in_expand_p)
{
tree type = TREE_TYPE (t);
- switch (TREE_CODE (type))
- {
- case INTEGER_TYPE:
- if (TYPE_PRECISION (type) <= POINTER_SIZE)
+ const unsigned int bitsize = GET_MODE_BITSIZE (TYPE_MODE (type));
+ if (bitsize <= POINTER_SIZE)
+ switch (TREE_CODE (type))
+ {
+ case BOOLEAN_TYPE:
+ case ENUMERAL_TYPE:
+ case INTEGER_TYPE:
return fold_build1 (NOP_EXPR, pointer_sized_int_node, t);
+ case REAL_TYPE:
+ {
+ tree itype = build_nonstandard_integer_type (bitsize, true);
+ t = fold_build1 (VIEW_CONVERT_EXPR, itype, t);
+ return fold_convert (pointer_sized_int_node, t);
+ }
+ default:
+ gcc_unreachable ();
+ }
+ else
+ {
+ if (!DECL_P (t) || !TREE_ADDRESSABLE (t))
+ {
+ /* The reason for this is that we don't want to pessimize
+ code by making vars unnecessarily addressable. */
+ tree var = create_tmp_var (type, NULL);
+ tree tem = build2 (MODIFY_EXPR, void_type_node, var, t);
+ if (in_expand_p)
+ {
+ rtx mem
+ = assign_stack_temp_for_type (TYPE_MODE (type),
+ GET_MODE_SIZE (TYPE_MODE (type)),
+ type);
+ SET_DECL_RTL (var, mem);
+ expand_assignment (var, t, false);
+ return build_fold_addr_expr (var);
+ }
+ t = build_fold_addr_expr (var);
+ return build2 (COMPOUND_EXPR, TREE_TYPE (t), tem, t);
+ }
else
return build_fold_addr_expr (t);
- case REAL_TYPE:
- {
- unsigned int bitsize = GET_MODE_BITSIZE (TYPE_MODE (type));
- if (bitsize <= POINTER_SIZE)
- {
- tree itype = build_nonstandard_integer_type (bitsize, true);
- t = fold_build1 (VIEW_CONVERT_EXPR, itype, t);
- return fold_convert (pointer_sized_int_node, t);
- }
- else
- {
- if (!TREE_ADDRESSABLE (t))
- {
- /* The reason for this is that we don't want to pessimize
- code by making vars unnecessarily addressable. */
- tree var = create_tmp_var (TREE_TYPE (t), NULL);
- tree tem = build2 (MODIFY_EXPR, void_type_node, var, t);
- t = build_fold_addr_expr (var);
- return build2 (COMPOUND_EXPR, TREE_TYPE (t), tem, t);
- }
- else
- return build_fold_addr_expr (t);
- }
- }
- default:
- gcc_unreachable ();
}
}
@@ -632,6 +640,99 @@ instrument_null (gimple_stmt_iterator gsi, bool is_lhs)
instrument_member_call (&gsi);
}
+/* Build an ubsan builtin call for the signed-integer-overflow
+ sanitization. CODE says what kind of builtin are we building,
+ LOC is a location, LHSTYPE is the type of LHS, OP0 and OP1
+ are operands of the binary operation. */
+
+tree
+ubsan_build_overflow_builtin (tree_code code, location_t loc, tree lhstype,
+ tree op0, tree op1)
+{
+ tree data = ubsan_create_data ("__ubsan_overflow_data", loc, NULL,
+ ubsan_type_descriptor (lhstype, false),
+ NULL_TREE);
+ enum built_in_function fn_code;
+
+ switch (code)
+ {
+ case PLUS_EXPR:
+ fn_code = BUILT_IN_UBSAN_HANDLE_ADD_OVERFLOW;
+ break;
+ case MINUS_EXPR:
+ fn_code = BUILT_IN_UBSAN_HANDLE_SUB_OVERFLOW;
+ break;
+ case MULT_EXPR:
+ fn_code = BUILT_IN_UBSAN_HANDLE_MUL_OVERFLOW;
+ break;
+ case NEGATE_EXPR:
+ fn_code = BUILT_IN_UBSAN_HANDLE_NEGATE_OVERFLOW;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ tree fn = builtin_decl_explicit (fn_code);
+ return build_call_expr_loc (loc, fn, 2 + (code != NEGATE_EXPR),
+ build_fold_addr_expr_loc (loc, data),
+ ubsan_encode_value (op0, true),
+ op1 ? ubsan_encode_value (op1, true)
+ : NULL_TREE);
+}
+
+/* Perform the signed integer instrumentation. GSI is the iterator
+ pointing at statement we are trying to instrument. */
+
+static void
+instrument_si_overflow (gimple_stmt_iterator gsi)
+{
+ gimple stmt = gsi_stmt (gsi);
+ tree_code code = gimple_assign_rhs_code (stmt);
+ tree lhs = gimple_assign_lhs (stmt);
+ tree lhstype = TREE_TYPE (lhs);
+ tree a, b;
+ gimple g;
+
+ /* If this is not a signed operation, don't instrument anything here.
+ Also punt on bit-fields. */
+ if (!INTEGRAL_TYPE_P (lhstype)
+ || TYPE_OVERFLOW_WRAPS (lhstype)
+ || GET_MODE_BITSIZE (TYPE_MODE (lhstype)) != TYPE_PRECISION (lhstype))
+ return;
+
+ switch (code)
+ {
+ case MINUS_EXPR:
+ case PLUS_EXPR:
+ case MULT_EXPR:
+ /* Transform
+ i = u {+,-,*} 5;
+ into
+ i = UBSAN_CHECK_{ADD,SUB,MUL} (u, 5); */
+ a = gimple_assign_rhs1 (stmt);
+ b = gimple_assign_rhs2 (stmt);
+ g = gimple_build_call_internal (code == PLUS_EXPR
+ ? IFN_UBSAN_CHECK_ADD
+ : code == MINUS_EXPR
+ ? IFN_UBSAN_CHECK_SUB
+ : IFN_UBSAN_CHECK_MUL, 2, a, b);
+ gimple_call_set_lhs (g, lhs);
+ gsi_replace (&gsi, g, false);
+ break;
+ case NEGATE_EXPR:
+ /* Represent i = -u;
+ as
+ i = UBSAN_CHECK_SUB (0, u); */
+ a = build_int_cst (lhstype, 0);
+ b = gimple_assign_rhs1 (stmt);
+ g = gimple_build_call_internal (IFN_UBSAN_CHECK_SUB, 2, a, b);
+ gimple_call_set_lhs (g, lhs);
+ gsi_replace (&gsi, g, false);
+ break;
+ default:
+ break;
+ }
+}
+
/* Gate and execute functions for ubsan pass. */
static unsigned int
@@ -640,7 +741,7 @@ ubsan_pass (void)
basic_block bb;
gimple_stmt_iterator gsi;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);)
{
@@ -651,6 +752,10 @@ ubsan_pass (void)
continue;
}
+ if ((flag_sanitize & SANITIZE_SI_OVERFLOW)
+ && is_gimple_assign (stmt))
+ instrument_si_overflow (gsi);
+
if (flag_sanitize & SANITIZE_NULL)
{
if (gimple_store_p (stmt))
@@ -668,7 +773,7 @@ ubsan_pass (void)
static bool
gate_ubsan (void)
{
- return flag_sanitize & SANITIZE_NULL;
+ return flag_sanitize & (SANITIZE_NULL | SANITIZE_SI_OVERFLOW);
}
namespace {
diff --git a/gcc/ubsan.h b/gcc/ubsan.h
index 666e5fe15ab..fa7698509c4 100644
--- a/gcc/ubsan.h
+++ b/gcc/ubsan.h
@@ -41,8 +41,9 @@ extern tree ubsan_instrument_unreachable (location_t);
extern tree ubsan_create_data (const char *, location_t,
const struct ubsan_mismatch_data *, ...);
extern tree ubsan_type_descriptor (tree, bool);
-extern tree ubsan_encode_value (tree);
+extern tree ubsan_encode_value (tree, bool = false);
extern bool is_ubsan_builtin_p (tree);
+extern tree ubsan_build_overflow_builtin (tree_code, location_t, tree, tree, tree);
#endif /* GCC_UBSAN_H */
diff --git a/gcc/value-prof.c b/gcc/value-prof.c
index e40b0ada7f1..f90dc4ec4b7 100644
--- a/gcc/value-prof.c
+++ b/gcc/value-prof.c
@@ -542,7 +542,7 @@ verify_histograms (void)
error_found = false;
visited_hists = pointer_set_create ();
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
gimple stmt = gsi_stmt (gsi);
@@ -648,7 +648,7 @@ gimple_value_profile_transformations (void)
gimple_stmt_iterator gsi;
bool changed = false;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
@@ -1961,7 +1961,7 @@ gimple_find_values_to_profile (histogram_values *values)
histogram_value hist = NULL;
values->create (0);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
gimple_values_to_profile (gsi_stmt (gsi), values);
diff --git a/gcc/var-tracking.c b/gcc/var-tracking.c
index b0cf3fb3069..c6cb6393cfa 100644
--- a/gcc/var-tracking.c
+++ b/gcc/var-tracking.c
@@ -6945,7 +6945,7 @@ vt_find_locations (void)
/* Compute reverse completion order of depth first search of the CFG
so that the data-flow runs faster. */
rc_order = XNEWVEC (int, n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS);
- bb_order = XNEWVEC (int, last_basic_block);
+ bb_order = XNEWVEC (int, last_basic_block_for_fn (cfun));
pre_and_rev_post_order_compute (NULL, rc_order, false);
for (i = 0; i < n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS; i++)
bb_order[rc_order[i]] = i;
@@ -6953,12 +6953,12 @@ vt_find_locations (void)
worklist = fibheap_new ();
pending = fibheap_new ();
- visited = sbitmap_alloc (last_basic_block);
- in_worklist = sbitmap_alloc (last_basic_block);
- in_pending = sbitmap_alloc (last_basic_block);
+ visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
+ in_worklist = sbitmap_alloc (last_basic_block_for_fn (cfun));
+ in_pending = sbitmap_alloc (last_basic_block_for_fn (cfun));
bitmap_clear (in_worklist);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
fibheap_insert (pending, bb_order[bb->index], bb);
bitmap_ones (in_pending);
@@ -7118,7 +7118,7 @@ vt_find_locations (void)
}
if (success && MAY_HAVE_DEBUG_INSNS)
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
gcc_assert (VTI (bb)->flooded);
free (bb_order);
@@ -7246,7 +7246,7 @@ dump_dataflow_sets (void)
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
fprintf (dump_file, "\nBasic block %d:\n", bb->index);
fprintf (dump_file, "IN:\n");
@@ -9419,7 +9419,7 @@ vt_emit_notes (void)
/* Free memory occupied by the out hash tables, as they aren't used
anymore. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
dataflow_set_clear (&VTI (bb)->out);
/* Enable emitting notes by functions (mainly by set_variable_part and
@@ -9435,7 +9435,7 @@ vt_emit_notes (void)
dataflow_set_init (&cur);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
/* Emit the notes for changes of variable locations between two
subsequent basic blocks. */
@@ -9864,7 +9864,7 @@ vt_initialize (void)
changed_variables.create (10);
/* Init the IN and OUT sets. */
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
{
VTI (bb)->visited = false;
VTI (bb)->flooded = false;
@@ -10012,7 +10012,7 @@ vt_initialize (void)
vt_add_function_parameters ();
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn;
HOST_WIDE_INT pre, post = 0;
@@ -10155,7 +10155,7 @@ delete_debug_insns (void)
if (!MAY_HAVE_DEBUG_INSNS)
return;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
FOR_BB_INSNS_SAFE (bb, insn, next)
if (DEBUG_INSN_P (insn))
@@ -10198,12 +10198,12 @@ vt_finalize (void)
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
VTI (bb)->mos.release ();
}
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
{
dataflow_set_destroy (&VTI (bb)->in);
dataflow_set_destroy (&VTI (bb)->out);
diff --git a/gcc/varasm.c b/gcc/varasm.c
index b6ebd9c1295..dba1c69c6ce 100644
--- a/gcc/varasm.c
+++ b/gcc/varasm.c
@@ -2366,7 +2366,7 @@ mark_decl_referenced (tree decl)
}
else if (TREE_CODE (decl) == VAR_DECL)
{
- struct varpool_node *node = varpool_node_for_decl (decl);
+ varpool_node *node = varpool_node_for_decl (decl);
/* C++ frontend use mark_decl_references to force COMDAT variables
to be output that might appear dead otherwise. */
node->force_output = true;
@@ -6718,7 +6718,7 @@ default_binds_local_p_1 (const_tree exp, int shlib)
if (TREE_CODE (exp) == VAR_DECL && TREE_PUBLIC (exp)
&& (TREE_STATIC (exp) || DECL_EXTERNAL (exp)))
{
- struct varpool_node *vnode = varpool_get_node (exp);
+ varpool_node *vnode = varpool_get_node (exp);
if (vnode && resolution_local_p (vnode->resolution))
resolved_locally = true;
if (vnode
@@ -6811,7 +6811,7 @@ decl_binds_to_current_def_p (tree decl)
if (TREE_CODE (decl) == VAR_DECL
&& (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
{
- struct varpool_node *vnode = varpool_get_node (decl);
+ varpool_node *vnode = varpool_get_node (decl);
if (vnode
&& vnode->resolution != LDPR_UNKNOWN)
return resolution_to_local_definition_p (vnode->resolution);
diff --git a/gcc/varpool.c b/gcc/varpool.c
index 1e469b3a253..0f36cd139aa 100644
--- a/gcc/varpool.c
+++ b/gcc/varpool.c
@@ -78,7 +78,7 @@ varpool_remove_node_removal_hook (struct varpool_node_hook_list *entry)
/* Call all node removal hooks. */
static void
-varpool_call_node_removal_hooks (struct varpool_node *node)
+varpool_call_node_removal_hooks (varpool_node *node)
{
struct varpool_node_hook_list *entry = first_varpool_node_removal_hook;
while (entry)
@@ -119,7 +119,7 @@ varpool_remove_variable_insertion_hook (struct varpool_node_hook_list *entry)
/* Call all node insertion hooks. */
void
-varpool_call_variable_insertion_hooks (struct varpool_node *node)
+varpool_call_variable_insertion_hooks (varpool_node *node)
{
struct varpool_node_hook_list *entry = first_varpool_variable_insertion_hook;
while (entry)
@@ -131,19 +131,19 @@ varpool_call_variable_insertion_hooks (struct varpool_node *node)
/* Allocate new callgraph node and insert it into basic data structures. */
-struct varpool_node *
+varpool_node *
varpool_create_empty_node (void)
{
- struct varpool_node *node = ggc_alloc_cleared_varpool_node ();
+ varpool_node *node = ggc_alloc_cleared_varpool_node ();
node->type = SYMTAB_VARIABLE;
return node;
}
/* Return varpool node assigned to DECL. Create new one when needed. */
-struct varpool_node *
+varpool_node *
varpool_node_for_decl (tree decl)
{
- struct varpool_node *node = varpool_get_node (decl);
+ varpool_node *node = varpool_get_node (decl);
gcc_checking_assert (TREE_CODE (decl) == VAR_DECL);
if (node)
return node;
@@ -156,7 +156,7 @@ varpool_node_for_decl (tree decl)
/* Remove node from the varpool. */
void
-varpool_remove_node (struct varpool_node *node)
+varpool_remove_node (varpool_node *node)
{
tree init;
varpool_call_node_removal_hooks (node);
@@ -174,7 +174,7 @@ varpool_remove_node (struct varpool_node *node)
/* Renove node initializer when it is no longer needed. */
void
-varpool_remove_initializer (struct varpool_node *node)
+varpool_remove_initializer (varpool_node *node)
{
if (DECL_INITIAL (node->decl)
&& !DECL_IN_CONSTANT_POOL (node->decl)
@@ -192,7 +192,7 @@ varpool_remove_initializer (struct varpool_node *node)
/* Dump given cgraph node. */
void
-dump_varpool_node (FILE *f, struct varpool_node *node)
+dump_varpool_node (FILE *f, varpool_node *node)
{
dump_symtab_base (f, node);
fprintf (f, " Availability: %s\n",
@@ -215,7 +215,7 @@ dump_varpool_node (FILE *f, struct varpool_node *node)
void
dump_varpool (FILE *f)
{
- struct varpool_node *node;
+ varpool_node *node;
fprintf (f, "variable pool:\n\n");
FOR_EACH_VARIABLE (node)
@@ -231,7 +231,7 @@ debug_varpool (void)
}
/* Given an assembler name, lookup node. */
-struct varpool_node *
+varpool_node *
varpool_node_for_asm (tree asmname)
{
if (symtab_node *node = symtab_node_for_asm (asmname))
@@ -247,7 +247,7 @@ varpool_node_for_asm (tree asmname)
tree
ctor_for_folding (tree decl)
{
- struct varpool_node *node, *real_node;
+ varpool_node *node, *real_node;
tree real_decl;
if (TREE_CODE (decl) != VAR_DECL
@@ -337,7 +337,7 @@ ctor_for_folding (tree decl)
void
varpool_add_new_variable (tree decl)
{
- struct varpool_node *node;
+ varpool_node *node;
varpool_finalize_decl (decl);
node = varpool_node_for_decl (decl);
varpool_call_variable_insertion_hooks (node);
@@ -348,7 +348,7 @@ varpool_add_new_variable (tree decl)
/* Return variable availability. See cgraph.h for description of individual
return values. */
enum availability
-cgraph_variable_initializer_availability (struct varpool_node *node)
+cgraph_variable_initializer_availability (varpool_node *node)
{
gcc_assert (cgraph_function_flags_ready);
if (!node->definition)
@@ -376,7 +376,7 @@ cgraph_variable_initializer_availability (struct varpool_node *node)
}
void
-varpool_analyze_node (struct varpool_node *node)
+varpool_analyze_node (varpool_node *node)
{
tree decl = node->decl;
@@ -401,14 +401,14 @@ varpool_analyze_node (struct varpool_node *node)
/* Assemble thunks and aliases associated to NODE. */
static void
-assemble_aliases (struct varpool_node *node)
+assemble_aliases (varpool_node *node)
{
int i;
struct ipa_ref *ref;
for (i = 0; ipa_ref_list_referring_iterate (&node->ref_list, i, ref); i++)
if (ref->use == IPA_REF_ALIAS)
{
- struct varpool_node *alias = ipa_ref_referring_varpool_node (ref);
+ varpool_node *alias = ipa_ref_referring_varpool_node (ref);
do_assemble_alias (alias->decl,
DECL_ASSEMBLER_NAME (node->decl));
assemble_aliases (alias);
@@ -418,7 +418,7 @@ assemble_aliases (struct varpool_node *node)
/* Output one variable, if necessary. Return whether we output it. */
bool
-varpool_assemble_decl (struct varpool_node *node)
+varpool_assemble_decl (varpool_node *node)
{
tree decl = node->decl;
@@ -465,7 +465,7 @@ varpool_assemble_decl (struct varpool_node *node)
The queue is linked via AUX pointers and terminated by pointer to 1. */
static void
-enqueue_node (struct varpool_node *node, struct varpool_node **first)
+enqueue_node (varpool_node *node, varpool_node **first)
{
if (node->aux)
return;
@@ -482,8 +482,8 @@ enqueue_node (struct varpool_node *node, struct varpool_node **first)
static void
varpool_remove_unreferenced_decls (void)
{
- struct varpool_node *next, *node;
- struct varpool_node *first = (struct varpool_node *)(void *)1;
+ varpool_node *next, *node;
+ varpool_node *first = (varpool_node *)(void *)1;
int i;
struct ipa_ref *ref;
@@ -505,10 +505,10 @@ varpool_remove_unreferenced_decls (void)
fprintf (cgraph_dump_file, " %s", node->asm_name ());
}
}
- while (first != (struct varpool_node *)(void *)1)
+ while (first != (varpool_node *)(void *)1)
{
node = first;
- first = (struct varpool_node *)first->aux;
+ first = (varpool_node *)first->aux;
if (node->same_comdat_group)
{
@@ -553,7 +553,7 @@ varpool_remove_unreferenced_decls (void)
conflicts between read-only and read-only requiring relocations
sections can be resolved. */
void
-varpool_finalize_named_section_flags (struct varpool_node *node)
+varpool_finalize_named_section_flags (varpool_node *node)
{
if (!TREE_ASM_WRITTEN (node->decl)
&& !node->alias
@@ -570,7 +570,7 @@ bool
varpool_output_variables (void)
{
bool changed = false;
- struct varpool_node *node;
+ varpool_node *node;
if (seen_error ())
return false;
@@ -594,7 +594,7 @@ tree
add_new_static_var (tree type)
{
tree new_decl;
- struct varpool_node *new_node;
+ varpool_node *new_node;
new_decl = create_tmp_var_raw (type, NULL);
DECL_NAME (new_decl) = create_tmp_var_name (NULL);
@@ -613,10 +613,10 @@ add_new_static_var (tree type)
/* Attempt to mark ALIAS as an alias to DECL. Return TRUE if successful.
Extra name aliases are output whenever DECL is output. */
-struct varpool_node *
+varpool_node *
varpool_create_variable_alias (tree alias, tree decl)
{
- struct varpool_node *alias_node;
+ varpool_node *alias_node;
gcc_assert (TREE_CODE (decl) == VAR_DECL);
gcc_assert (TREE_CODE (alias) == VAR_DECL);
@@ -632,10 +632,10 @@ varpool_create_variable_alias (tree alias, tree decl)
/* Attempt to mark ALIAS as an alias to DECL. Return TRUE if successful.
Extra name aliases are output whenever DECL is output. */
-struct varpool_node *
+varpool_node *
varpool_extra_name_alias (tree alias, tree decl)
{
- struct varpool_node *alias_node;
+ varpool_node *alias_node;
#ifndef ASM_OUTPUT_DEF
/* If aliases aren't supported by the assembler, fail. */
@@ -659,8 +659,8 @@ varpool_extra_name_alias (tree alias, tree decl)
skipped. */
bool
-varpool_for_node_and_aliases (struct varpool_node *node,
- bool (*callback) (struct varpool_node *, void *),
+varpool_for_node_and_aliases (varpool_node *node,
+ bool (*callback) (varpool_node *, void *),
void *data,
bool include_overwritable)
{
@@ -672,7 +672,7 @@ varpool_for_node_and_aliases (struct varpool_node *node,
for (i = 0; ipa_ref_list_referring_iterate (&node->ref_list, i, ref); i++)
if (ref->use == IPA_REF_ALIAS)
{
- struct varpool_node *alias = ipa_ref_referring_varpool_node (ref);
+ varpool_node *alias = ipa_ref_referring_varpool_node (ref);
if (include_overwritable
|| cgraph_variable_initializer_availability (alias) > AVAIL_OVERWRITABLE)
if (varpool_for_node_and_aliases (alias, callback, data,
diff --git a/gcc/vtable-verify.c b/gcc/vtable-verify.c
index 46c5621509d..af61e930b20 100644
--- a/gcc/vtable-verify.c
+++ b/gcc/vtable-verify.c
@@ -513,10 +513,10 @@ var_is_used_for_virtual_call_p (tree lhs, int *mem_ref_depth)
{
gimple stmt2 = USE_STMT (use_p);
- if (gimple_code (stmt2) == GIMPLE_CALL)
+ if (is_gimple_call (stmt2))
{
tree fncall = gimple_call_fn (stmt2);
- if (TREE_CODE (fncall) == OBJ_TYPE_REF)
+ if (fncall && TREE_CODE (fncall) == OBJ_TYPE_REF)
found_vcall = true;
else
return false;
@@ -527,7 +527,7 @@ var_is_used_for_virtual_call_p (tree lhs, int *mem_ref_depth)
(gimple_phi_result (stmt2),
mem_ref_depth);
}
- else if (gimple_code (stmt2) == GIMPLE_ASSIGN)
+ else if (is_gimple_assign (stmt2))
{
tree rhs = gimple_assign_rhs1 (stmt2);
if (TREE_CODE (rhs) == ADDR_EXPR
@@ -586,10 +586,10 @@ verify_bb_vtables (basic_block bb)
stmt = gsi_stmt (gsi_virtual_call);
/* Count virtual calls. */
- if (gimple_code (stmt) == GIMPLE_CALL)
+ if (is_gimple_call (stmt))
{
tree fncall = gimple_call_fn (stmt);
- if (TREE_CODE (fncall) == OBJ_TYPE_REF)
+ if (fncall && TREE_CODE (fncall) == OBJ_TYPE_REF)
total_num_virtual_calls++;
}
@@ -646,9 +646,6 @@ verify_bb_vtables (basic_block bb)
if (vtable_map_node && vtable_map_node->vtbl_map_decl)
{
- use_operand_p use_p;
- ssa_op_iter iter;
-
vtable_map_node->is_used = true;
vtbl_var_decl = vtable_map_node->vtbl_map_decl;
@@ -695,35 +692,27 @@ verify_bb_vtables (basic_block bb)
gimple_call_set_lhs (call_stmt, tmp0);
update_stmt (call_stmt);
- /* Find the next stmt, after the vptr assignment
- statememt, which should use the result of the
- vptr assignment statement value. */
- gsi_next (&gsi_vtbl_assign);
- gimple next_stmt = gsi_stmt (gsi_vtbl_assign);
-
- if (!next_stmt)
- return;
-
- /* Find any/all uses of 'lhs' in next_stmt, and
- replace them with 'tmp0'. */
+ /* Replace all uses of lhs with tmp0. */
found = false;
- FOR_EACH_PHI_OR_STMT_USE (use_p, next_stmt, iter,
- SSA_OP_ALL_USES)
+ imm_use_iterator iterator;
+ gimple use_stmt;
+ FOR_EACH_IMM_USE_STMT (use_stmt, iterator, lhs)
{
- tree op = USE_FROM_PTR (use_p);
- if (op == lhs)
- {
- SET_USE (use_p, tmp0);
- found = true;
- }
+ use_operand_p use_p;
+ if (use_stmt == call_stmt)
+ continue;
+ FOR_EACH_IMM_USE_ON_STMT (use_p, iterator)
+ SET_USE (use_p, tmp0);
+ update_stmt (use_stmt);
+ found = true;
}
- update_stmt (next_stmt);
+
gcc_assert (found);
/* Insert the new verification call just after the
statement that gets the vtable pointer out of the
object. */
- gsi_vtbl_assign = gsi_for_stmt (stmt);
+ gcc_assert (gsi_stmt (gsi_vtbl_assign) == stmt);
gsi_insert_after (&gsi_vtbl_assign, call_stmt,
GSI_NEW_STMT);
@@ -746,7 +735,7 @@ vtable_verify_main (void)
unsigned int ret = 1;
basic_block bb;
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
verify_bb_vtables (bb);
return ret;
diff --git a/gcc/web.c b/gcc/web.c
index 8e8c4658fd2..d281f45b230 100644
--- a/gcc/web.c
+++ b/gcc/web.c
@@ -351,7 +351,7 @@ web_main (void)
df_set_flags (DF_DEFER_INSN_RESCAN);
/* Assign ids to the uses. */
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
FOR_BB_INSNS (bb, insn)
{
unsigned int uid = INSN_UID (insn);
@@ -379,7 +379,7 @@ web_main (void)
use_entry = XCNEWVEC (struct web_entry, uses_num);
/* Produce the web. */
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
FOR_BB_INSNS (bb, insn)
{
unsigned int uid = INSN_UID (insn);
@@ -404,7 +404,7 @@ web_main (void)
/* Update the instruction stream, allocating new registers for split pseudos
in progress. */
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
FOR_BB_INSNS (bb, insn)
{
unsigned int uid = INSN_UID (insn);