summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--ChangeLog9
-rw-r--r--MAINTAINERS4
-rw-r--r--config/ChangeLog5
-rw-r--r--config/bootstrap-debug-lean.mk1
-rw-r--r--contrib/ChangeLog4
-rwxr-xr-xcontrib/analyze_brprob.py2
-rw-r--r--gcc/ChangeLog1864
-rw-r--r--gcc/DATESTAMP2
-rw-r--r--gcc/Makefile.in5
-rw-r--r--gcc/ada/ChangeLog788
-rw-r--r--gcc/ada/adadecode.c2
-rw-r--r--gcc/ada/adaint.c4
-rw-r--r--gcc/ada/argv.c4
-rw-r--r--gcc/ada/binde.adb1
-rw-r--r--gcc/ada/checks.adb16
-rw-r--r--gcc/ada/contracts.adb303
-rw-r--r--gcc/ada/contracts.ads14
-rw-r--r--gcc/ada/doc/gnat_rm/standard_and_implementation_defined_restrictions.rst7
-rw-r--r--gcc/ada/doc/gnat_ugn/platform_specific_information.rst30
-rw-r--r--gcc/ada/doc/gnat_ugn/the_gnat_compilation_model.rst2
-rw-r--r--gcc/ada/einfo.adb23
-rw-r--r--gcc/ada/einfo.ads15
-rw-r--r--gcc/ada/errout.adb2
-rw-r--r--gcc/ada/errout.ads4
-rw-r--r--gcc/ada/exp_aggr.adb6
-rw-r--r--gcc/ada/exp_attr.adb2
-rw-r--r--gcc/ada/exp_ch11.adb4
-rw-r--r--gcc/ada/exp_ch3.adb164
-rw-r--r--gcc/ada/exp_ch4.adb15
-rw-r--r--gcc/ada/exp_ch5.adb4
-rw-r--r--gcc/ada/exp_ch6.adb4
-rw-r--r--gcc/ada/exp_ch9.adb15
-rw-r--r--gcc/ada/exp_disp.adb180
-rw-r--r--gcc/ada/exp_imgv.adb2
-rw-r--r--gcc/ada/exp_intr.adb3
-rw-r--r--gcc/ada/exp_prag.adb2
-rw-r--r--gcc/ada/exp_spark.adb2
-rw-r--r--gcc/ada/exp_util.adb79
-rw-r--r--gcc/ada/fname.adb5
-rw-r--r--gcc/ada/freeze.adb4
-rw-r--r--gcc/ada/gcc-interface/Make-lang.in2
-rw-r--r--gcc/ada/gcc-interface/Makefile.in33
-rw-r--r--gcc/ada/gcc-interface/misc.c3
-rw-r--r--gcc/ada/gcc-interface/trans.c8
-rw-r--r--gcc/ada/gcc-interface/utils.c21
-rw-r--r--gcc/ada/get_spark_xrefs.adb493
-rw-r--r--gcc/ada/get_spark_xrefs.ads60
-rw-r--r--gcc/ada/gnat1drv.adb13
-rw-r--r--gcc/ada/gnat_rm.texi686
-rw-r--r--gcc/ada/gnat_ugn.texi266
-rw-r--r--gcc/ada/gnatbind.adb4
-rw-r--r--gcc/ada/init.c104
-rw-r--r--gcc/ada/inline.adb6
-rw-r--r--gcc/ada/lib-writ.adb8
-rw-r--r--gcc/ada/lib-xref-spark_specific.adb1280
-rw-r--r--gcc/ada/lib-xref.adb1
-rw-r--r--gcc/ada/lib-xref.ads32
-rw-r--r--gcc/ada/libgnarl/a-intnam__qnx.ads146
-rw-r--r--gcc/ada/libgnarl/g-thread.adb10
-rw-r--r--gcc/ada/libgnarl/g-thread.ads11
-rw-r--r--gcc/ada/libgnarl/s-intman__qnx.adb298
-rw-r--r--gcc/ada/libgnarl/s-osinte__qnx.adb109
-rw-r--r--gcc/ada/libgnarl/s-osinte__qnx.ads617
-rw-r--r--gcc/ada/libgnarl/s-qnx.ads122
-rw-r--r--gcc/ada/libgnarl/s-taprop__linux.adb6
-rw-r--r--gcc/ada/libgnarl/s-taprop__mingw.adb14
-rw-r--r--gcc/ada/libgnarl/s-taprop__posix.adb6
-rw-r--r--gcc/ada/libgnarl/s-taprop__qnx.adb1355
-rw-r--r--gcc/ada/libgnarl/s-tasini.adb4
-rw-r--r--gcc/ada/libgnarl/s-taskin.ads2
-rw-r--r--gcc/ada/libgnarl/s-tasren.adb2
-rw-r--r--gcc/ada/libgnarl/s-tassta.adb58
-rw-r--r--gcc/ada/libgnarl/s-tassta.ads2
-rw-r--r--gcc/ada/libgnarl/s-tasuti.adb6
-rw-r--r--gcc/ada/libgnarl/s-tasuti.ads6
-rw-r--r--gcc/ada/libgnarl/s-tporft.adb4
-rw-r--r--gcc/ada/libgnat/a-tags.adb8
-rw-r--r--gcc/ada/libgnat/g-altive.ads12
-rw-r--r--gcc/ada/libgnat/s-rident.ads1
-rw-r--r--gcc/ada/libgnat/s-spsufi.adb4
-rw-r--r--gcc/ada/libgnat/system-qnx-aarch64.ads157
-rw-r--r--gcc/ada/link.c1
-rw-r--r--gcc/ada/namet.adb151
-rw-r--r--gcc/ada/namet.ads79
-rw-r--r--gcc/ada/opt.ads22
-rw-r--r--gcc/ada/par-ch3.adb19
-rw-r--r--gcc/ada/par-ch6.adb1
-rw-r--r--gcc/ada/par-ch7.adb4
-rw-r--r--gcc/ada/par-ch9.adb8
-rw-r--r--gcc/ada/par-endh.adb2
-rw-r--r--gcc/ada/par-util.adb8
-rw-r--r--gcc/ada/put_spark_xrefs.adb194
-rw-r--r--gcc/ada/put_spark_xrefs.ads62
-rw-r--r--gcc/ada/rtsfind.ads2
-rw-r--r--gcc/ada/s-oscons-tmplt.c9
-rw-r--r--gcc/ada/sem.adb8
-rw-r--r--gcc/ada/sem_aggr.adb337
-rw-r--r--gcc/ada/sem_attr.adb14
-rw-r--r--gcc/ada/sem_ch12.adb31
-rw-r--r--gcc/ada/sem_ch13.adb109
-rw-r--r--gcc/ada/sem_ch2.adb6
-rw-r--r--gcc/ada/sem_ch3.adb28
-rw-r--r--gcc/ada/sem_ch4.adb98
-rw-r--r--gcc/ada/sem_ch5.adb3
-rw-r--r--gcc/ada/sem_ch6.adb62
-rw-r--r--gcc/ada/sem_ch7.adb18
-rw-r--r--gcc/ada/sem_ch8.adb64
-rw-r--r--gcc/ada/sem_ch9.adb26
-rw-r--r--gcc/ada/sem_dim.adb16
-rw-r--r--gcc/ada/sem_disp.adb22
-rw-r--r--gcc/ada/sem_elab.adb1703
-rw-r--r--gcc/ada/sem_elab.ads9
-rw-r--r--gcc/ada/sem_eval.adb2
-rw-r--r--gcc/ada/sem_intr.adb2
-rw-r--r--gcc/ada/sem_prag.adb86
-rw-r--r--gcc/ada/sem_prag.ads2
-rw-r--r--gcc/ada/sem_res.adb96
-rw-r--r--gcc/ada/sem_spark.adb1
-rw-r--r--gcc/ada/sem_spark.ads8
-rw-r--r--gcc/ada/sem_util.adb232
-rw-r--r--gcc/ada/sem_util.ads9
-rw-r--r--gcc/ada/sem_warn.adb3
-rwxr-xr-xgcc/ada/set_targ.adb3
-rw-r--r--gcc/ada/sigtramp-qnx.c273
-rw-r--r--gcc/ada/sinfo.adb44
-rw-r--r--gcc/ada/sinfo.ads116
-rw-r--r--gcc/ada/spark_xrefs.adb189
-rw-r--r--gcc/ada/spark_xrefs.ads354
-rw-r--r--gcc/ada/spark_xrefs_test.adb321
-rw-r--r--gcc/ada/sprint.adb19
-rw-r--r--gcc/ada/style.adb2
-rw-r--r--gcc/ada/stylesw.adb9
-rw-r--r--gcc/ada/switch-c.adb14
-rw-r--r--gcc/ada/terminals.c4
-rw-r--r--gcc/ada/tracebak.c14
-rw-r--r--gcc/asan.c16
-rw-r--r--gcc/auto-profile.c4
-rw-r--r--gcc/bb-reorder.c197
-rw-r--r--gcc/builtins.c75
-rw-r--r--gcc/builtins.h1
-rw-r--r--gcc/c-family/ChangeLog20
-rw-r--r--gcc/c-family/c-common.c7
-rw-r--r--gcc/c-family/c-common.h1
-rw-r--r--gcc/c-family/c-opts.c3
-rw-r--r--gcc/c-family/c-warn.c24
-rw-r--r--gcc/c-family/c.opt6
-rw-r--r--gcc/c/ChangeLog7
-rw-r--r--gcc/c/c-parser.c564
-rw-r--r--gcc/cfgexpand.c2
-rw-r--r--gcc/cgraph.c161
-rw-r--r--gcc/cgraph.h41
-rw-r--r--gcc/cgraphbuild.c14
-rw-r--r--gcc/cgraphclones.c72
-rw-r--r--gcc/cgraphunit.c8
-rw-r--r--gcc/collect2.c41
-rw-r--r--gcc/common/config/i386/i386-common.c32
-rw-r--r--gcc/compare-elim.c2
-rw-r--r--gcc/config.gcc4
-rw-r--r--gcc/config/aarch64/aarch64-modes.def36
-rw-r--r--gcc/config/aarch64/aarch64-simd.md176
-rw-r--r--gcc/config/aarch64/aarch64-sve.md609
-rw-r--r--gcc/config/aarch64/aarch64.c383
-rw-r--r--gcc/config/aarch64/aarch64.h2
-rw-r--r--gcc/config/aarch64/aarch64.md21
-rw-r--r--gcc/config/aarch64/constraints.md18
-rw-r--r--gcc/config/aarch64/iterators.md419
-rw-r--r--gcc/config/aarch64/predicates.md13
-rw-r--r--gcc/config/arc/arc.h2
-rw-r--r--gcc/config/arm/arm-cpus.in20
-rw-r--r--gcc/config/arm/arm-fixed.md8
-rw-r--r--gcc/config/arm/arm-protos.h2
-rw-r--r--gcc/config/arm/arm-tables.opt13
-rw-r--r--gcc/config/arm/arm.c303
-rw-r--r--gcc/config/arm/arm.h5
-rw-r--r--gcc/config/arm/arm.md159
-rw-r--r--gcc/config/arm/ldmstm.md72
-rw-r--r--gcc/config/arm/sync.md36
-rw-r--r--gcc/config/arm/thumb2.md26
-rw-r--r--gcc/config/arm/vfp.md56
-rw-r--r--gcc/config/arm/xgene1.md124
-rw-r--r--gcc/config/cr16/cr16.h2
-rw-r--r--gcc/config/darwin-c.c22
-rw-r--r--gcc/config/elfos.h8
-rw-r--r--gcc/config/i386/cpuid.h1
-rw-r--r--gcc/config/i386/driver-i386.c7
-rw-r--r--gcc/config/i386/gfniintrin.h189
-rw-r--r--gcc/config/i386/i386-builtin-types.def1
-rw-r--r--gcc/config/i386/i386-builtin.def28
-rw-r--r--gcc/config/i386/i386-c.c6
-rw-r--r--gcc/config/i386/i386.c217
-rw-r--r--gcc/config/i386/i386.h7
-rw-r--r--gcc/config/i386/i386.md33
-rw-r--r--gcc/config/i386/i386.opt12
-rw-r--r--gcc/config/i386/predicates.md3
-rw-r--r--gcc/config/i386/sse.md126
-rw-r--r--gcc/config/i386/x86-tune.def4
-rw-r--r--gcc/config/m68k/m68kelf.h2
-rw-r--r--gcc/config/mips/mips.h19
-rw-r--r--gcc/config/powerpcspe/aix43.h2
-rw-r--r--gcc/config/powerpcspe/aix51.h2
-rw-r--r--gcc/config/powerpcspe/aix52.h2
-rw-r--r--gcc/config/powerpcspe/aix53.h2
-rw-r--r--gcc/config/powerpcspe/aix61.h2
-rw-r--r--gcc/config/powerpcspe/aix71.h2
-rw-r--r--gcc/config/powerpcspe/xcoff.h2
-rw-r--r--gcc/config/riscv/linux.h11
-rw-r--r--gcc/config/riscv/riscv-protos.h3
-rw-r--r--gcc/config/riscv/riscv.c173
-rw-r--r--gcc/config/riscv/riscv.h43
-rw-r--r--gcc/config/riscv/riscv.md49
-rw-r--r--gcc/config/rs6000/aix43.h2
-rw-r--r--gcc/config/rs6000/aix51.h2
-rw-r--r--gcc/config/rs6000/aix52.h2
-rw-r--r--gcc/config/rs6000/aix53.h2
-rw-r--r--gcc/config/rs6000/aix61.h2
-rw-r--r--gcc/config/rs6000/aix71.h2
-rw-r--r--gcc/config/rs6000/altivec.h8
-rw-r--r--gcc/config/rs6000/altivec.md47
-rw-r--r--gcc/config/rs6000/power9.md8
-rw-r--r--gcc/config/rs6000/rs6000-builtin.def192
-rw-r--r--gcc/config/rs6000/rs6000-c.c339
-rw-r--r--gcc/config/rs6000/rs6000-protos.h2
-rw-r--r--gcc/config/rs6000/rs6000.c432
-rw-r--r--gcc/config/rs6000/rs6000.md198
-rw-r--r--gcc/config/rs6000/vsx.md469
-rw-r--r--gcc/config/rs6000/xcoff.h2
-rw-r--r--gcc/config/sh/sh-mem.cc8
-rw-r--r--gcc/config/sol2.h4
-rw-r--r--gcc/config/v850/v850.h2
-rwxr-xr-xgcc/configure6
-rw-r--r--gcc/configure.ac6
-rw-r--r--gcc/coverage.c10
-rw-r--r--gcc/cp/ChangeLog103
-rw-r--r--gcc/cp/Make-lang.in2
-rw-r--r--gcc/cp/call.c24
-rw-r--r--gcc/cp/class.c16
-rw-r--r--gcc/cp/constexpr.c74
-rw-r--r--gcc/cp/cp-gimplify.c29
-rw-r--r--gcc/cp/cp-objcp-common.c61
-rw-r--r--gcc/cp/cp-tree.h3
-rw-r--r--gcc/cp/cp-ubsan.c5
-rw-r--r--gcc/cp/decl.c29
-rw-r--r--gcc/cp/decl2.c9
-rw-r--r--gcc/cp/except.c2
-rw-r--r--gcc/cp/expr.c16
-rw-r--r--gcc/cp/init.c31
-rw-r--r--gcc/cp/lambda.c127
-rw-r--r--gcc/cp/parser.c7
-rw-r--r--gcc/cp/pt.c14
-rw-r--r--gcc/cp/rtti.c9
-rw-r--r--gcc/cp/semantics.c33
-rw-r--r--gcc/cp/tree.c4
-rw-r--r--gcc/cp/typeck.c78
-rw-r--r--gcc/cp/typeck2.c33
-rw-r--r--gcc/cselib.c39
-rw-r--r--gcc/debug.h3
-rw-r--r--gcc/defaults.h2
-rw-r--r--gcc/diagnostic.c65
-rw-r--r--gcc/doc/cpp.texi46
-rw-r--r--gcc/doc/extend.texi139
-rw-r--r--gcc/doc/gcov.texi331
-rw-r--r--gcc/doc/generic.texi27
-rw-r--r--gcc/doc/invoke.texi100
-rw-r--r--gcc/doc/md.texi246
-rw-r--r--gcc/doc/rtl.texi3
-rw-r--r--gcc/doc/sourcebuild.texi13
-rw-r--r--gcc/doc/tm.texi31
-rw-r--r--gcc/doc/tm.texi.in4
-rw-r--r--gcc/dumpfile.h1
-rw-r--r--gcc/dwarf2cfi.c18
-rw-r--r--gcc/dwarf2out.c60
-rw-r--r--gcc/early-remat.c22
-rw-r--r--gcc/emit-rtl.c26
-rw-r--r--gcc/emit-rtl.h1
-rw-r--r--gcc/explow.c14
-rw-r--r--gcc/expmed.c10
-rw-r--r--gcc/expmed.h2
-rw-r--r--gcc/expr.c9
-rw-r--r--gcc/final.c2
-rw-r--r--gcc/fold-const.c10
-rw-r--r--gcc/fortran/ChangeLog273
-rw-r--r--gcc/fortran/arith.c1
-rw-r--r--gcc/fortran/check.c25
-rw-r--r--gcc/fortran/convert.c22
-rw-r--r--gcc/fortran/decl.c43
-rw-r--r--gcc/fortran/expr.c42
-rw-r--r--gcc/fortran/frontend-passes.c214
-rw-r--r--gcc/fortran/gfortran.h5
-rw-r--r--gcc/fortran/interface.c9
-rw-r--r--gcc/fortran/intrinsic.c26
-rw-r--r--gcc/fortran/intrinsic.h4
-rw-r--r--gcc/fortran/invoke.texi32
-rw-r--r--gcc/fortran/iresolve.c60
-rw-r--r--gcc/fortran/lang.opt8
-rw-r--r--gcc/fortran/options.c5
-rw-r--r--gcc/fortran/parse.c12
-rw-r--r--gcc/fortran/resolve.c28
-rw-r--r--gcc/fortran/simplify.c5
-rw-r--r--gcc/fortran/trans-array.c161
-rw-r--r--gcc/fortran/trans-decl.c24
-rw-r--r--gcc/fortran/trans-expr.c96
-rw-r--r--gcc/fortran/trans-intrinsic.c289
-rw-r--r--gcc/fortran/trans-io.c12
-rw-r--r--gcc/fortran/trans-openmp.c26
-rw-r--r--gcc/fortran/trans-stmt.c63
-rw-r--r--gcc/fortran/trans-types.c12
-rw-r--r--gcc/fortran/trans-types.h14
-rw-r--r--gcc/fortran/trans.c58
-rw-r--r--gcc/gcov-dump.c13
-rw-r--r--gcc/gcov.c1169
-rw-r--r--gcc/genmodes.c34
-rw-r--r--gcc/gimple-fold.c153
-rw-r--r--gcc/gimple-iterator.c9
-rw-r--r--gcc/gimple-pretty-print.c2
-rw-r--r--gcc/gimple-ssa-evrp.c624
-rw-r--r--gcc/gimple-ssa-store-merging.c601
-rw-r--r--gcc/gimple-streamer-in.c7
-rw-r--r--gcc/gimple.c1
-rw-r--r--gcc/gimple.h17
-rw-r--r--gcc/gimplify.c4
-rw-r--r--gcc/ginclude/tgmath.h82
-rw-r--r--gcc/go/ChangeLog4
-rw-r--r--gcc/go/go-gcc.cc4
-rw-r--r--gcc/go/gofrontend/MERGE2
-rw-r--r--gcc/go/gofrontend/backend.h2
-rw-r--r--gcc/go/gofrontend/escape.cc6
-rw-r--r--gcc/go/gofrontend/expressions.cc45
-rw-r--r--gcc/go/gofrontend/expressions.h14
-rw-r--r--gcc/go/gofrontend/gogo.cc7
-rw-r--r--gcc/go/gofrontend/names.cc4
-rw-r--r--gcc/go/gofrontend/operator.h6
-rw-r--r--gcc/go/gofrontend/statements.cc97
-rw-r--r--gcc/go/gofrontend/types.cc6
-rw-r--r--gcc/go/gofrontend/wb.cc1
-rw-r--r--gcc/graphite-scop-detection.c2
-rw-r--r--gcc/hash-map-traits.h7
-rw-r--r--gcc/hash-map.h21
-rw-r--r--gcc/hash-table.h7
-rw-r--r--gcc/hash-traits.h9
-rw-r--r--gcc/hooks.c12
-rw-r--r--gcc/hooks.h1
-rw-r--r--gcc/internal-fn.c407
-rw-r--r--gcc/internal-fn.def48
-rw-r--r--gcc/internal-fn.h11
-rw-r--r--gcc/ipa-chkp.c2
-rw-r--r--gcc/ipa-cp.c28
-rw-r--r--gcc/ipa-devirt.c5
-rw-r--r--gcc/ipa-fnsummary.c26
-rw-r--r--gcc/ipa-fnsummary.h2
-rw-r--r--gcc/ipa-inline-analysis.c8
-rw-r--r--gcc/ipa-inline-transform.c54
-rw-r--r--gcc/ipa-inline.c208
-rw-r--r--gcc/ipa-inline.h3
-rw-r--r--gcc/ipa-param-manipulation.c766
-rw-r--r--gcc/ipa-param-manipulation.h120
-rw-r--r--gcc/ipa-profile.c12
-rw-r--r--gcc/ipa-prop.c726
-rw-r--r--gcc/ipa-prop.h94
-rw-r--r--gcc/ipa-split.c43
-rw-r--r--gcc/ipa-utils.c40
-rw-r--r--gcc/lto-cgraph.c18
-rw-r--r--gcc/lto-opts.c67
-rw-r--r--gcc/lto-streamer-in.c2
-rw-r--r--gcc/lto/ChangeLog4
-rw-r--r--gcc/lto/lto-partition.c4
-rw-r--r--gcc/machmode.def15
-rw-r--r--gcc/machmode.h16
-rw-r--r--gcc/match.pd94
-rw-r--r--gcc/objc/ChangeLog5
-rw-r--r--gcc/objc/objc-encoding.c2
-rw-r--r--gcc/omp-expand.c2
-rw-r--r--gcc/omp-simd-clone.c6
-rw-r--r--gcc/optabs-query.c12
-rw-r--r--gcc/optabs-tree.c30
-rw-r--r--gcc/optabs-tree.h1
-rw-r--r--gcc/optabs.c4
-rw-r--r--gcc/optabs.def26
-rw-r--r--gcc/passes.def1
-rw-r--r--gcc/poly-int-types.h12
-rw-r--r--gcc/poly-int.h236
-rw-r--r--gcc/predict.c93
-rw-r--r--gcc/predict.h2
-rw-r--r--gcc/prefix.c2
-rw-r--r--gcc/profile-count.c53
-rw-r--r--gcc/profile-count.h10
-rw-r--r--gcc/profile.c8
-rw-r--r--gcc/reg-stack.c5
-rw-r--r--gcc/sbitmap.h2
-rw-r--r--gcc/shrink-wrap.c18
-rw-r--r--gcc/simplify-rtx.c53
-rw-r--r--gcc/stor-layout.c3
-rw-r--r--gcc/target.def35
-rw-r--r--gcc/target.h11
-rw-r--r--gcc/targhooks.c8
-rw-r--r--gcc/targhooks.h1
-rw-r--r--gcc/testsuite/ChangeLog1755
-rw-r--r--gcc/testsuite/c-c++-common/Wimplicit-fallthrough-8.c12
-rw-r--r--gcc/testsuite/c-c++-common/Wsizeof-pointer-memaccess2.c15
-rw-r--r--gcc/testsuite/c-c++-common/Wsizeof-pointer-memaccess3.c132
-rw-r--r--gcc/testsuite/c-c++-common/Wstringop-overflow.c158
-rw-r--r--gcc/testsuite/c-c++-common/Wstringop-truncation.c449
-rw-r--r--gcc/testsuite/c-c++-common/asan/pr63638.c2
-rw-r--r--gcc/testsuite/c-c++-common/attr-nonstring-1.c60
-rw-r--r--gcc/testsuite/c-c++-common/attr-nonstring-2.c123
-rw-r--r--gcc/testsuite/c-c++-common/cilk-plus/AN/pr57541-2.c4
-rwxr-xr-xgcc/testsuite/c-c++-common/cilk-plus/AN/pr57541.c9
-rw-r--r--gcc/testsuite/c-c++-common/cilk-plus/CK/cilk_for_grain_errors.c2
-rw-r--r--gcc/testsuite/c-c++-common/cilk-plus/CK/errors.c2
-rw-r--r--gcc/testsuite/c-c++-common/cilk-plus/CK/pr60197.c2
-rw-r--r--gcc/testsuite/c-c++-common/cilk-plus/CK/spawn_in_return.c2
-rw-r--r--gcc/testsuite/c-c++-common/cpp/pr58844-1.c4
-rw-r--r--gcc/testsuite/c-c++-common/cpp/pr58844-2.c4
-rw-r--r--gcc/testsuite/c-c++-common/cpp/va-opt-error.c28
-rw-r--r--gcc/testsuite/c-c++-common/cpp/va-opt-pedantic.c5
-rw-r--r--gcc/testsuite/c-c++-common/cpp/va-opt.c42
-rw-r--r--gcc/testsuite/c-c++-common/cpp/warning-zero-location.c2
-rw-r--r--gcc/testsuite/c-c++-common/dfp/call-by-value.c6
-rw-r--r--gcc/testsuite/c-c++-common/fold-masked-cmp-1.c4
-rw-r--r--gcc/testsuite/c-c++-common/fold-masked-cmp-2.c2
-rw-r--r--gcc/testsuite/c-c++-common/goacc/parallel-1.c2
-rw-r--r--gcc/testsuite/c-c++-common/gomp/sink-1.c2
-rw-r--r--gcc/testsuite/c-c++-common/missing-symbol.c2
-rw-r--r--gcc/testsuite/c-c++-common/pr36513-2.c1
-rw-r--r--gcc/testsuite/c-c++-common/pr36513.c2
-rw-r--r--gcc/testsuite/c-c++-common/pr49706-2.c2
-rw-r--r--gcc/testsuite/c-c++-common/pr65120.c4
-rw-r--r--gcc/testsuite/c-c++-common/tm/volatile-1.c2
-rw-r--r--gcc/testsuite/c-c++-common/torture/aarch64-vect-lane-2.c2
-rw-r--r--gcc/testsuite/c-c++-common/vector-1.c2
-rw-r--r--gcc/testsuite/c-c++-common/vector-2.c2
-rw-r--r--gcc/testsuite/g++.dg/abi/abi-tag14.C8
-rw-r--r--gcc/testsuite/g++.dg/abi/abi-tag18.C2
-rw-r--r--gcc/testsuite/g++.dg/abi/abi-tag18a.C2
-rw-r--r--gcc/testsuite/g++.dg/abi/covariant2.C4
-rw-r--r--gcc/testsuite/g++.dg/abi/covariant3.C2
-rw-r--r--gcc/testsuite/g++.dg/abi/mangle7.C2
-rw-r--r--gcc/testsuite/g++.dg/asan/pr81340.C4
-rw-r--r--gcc/testsuite/g++.dg/asan/pr82792.C32
-rw-r--r--gcc/testsuite/g++.dg/bprob/g++-bprob-1.C2
-rw-r--r--gcc/testsuite/g++.dg/cilk-plus/AN/builtin_fn_mutating_tplt.cc2
-rw-r--r--gcc/testsuite/g++.dg/cilk-plus/CK/pr68997.cc2
-rw-r--r--gcc/testsuite/g++.dg/concepts/fn8.C2
-rw-r--r--gcc/testsuite/g++.dg/concepts/pr65575.C2
-rw-r--r--gcc/testsuite/g++.dg/concepts/template-parm11.C2
-rw-r--r--gcc/testsuite/g++.dg/conversion/op6.C6
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/Wunused-variable-1.C1
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/access01.C2
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/alignas3.C2
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/auto2.C5
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/constexpr-array17.C1
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/constexpr-defarg2.C6
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/constexpr-memfn1.C4
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/constexpr-template11.C16
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/dc1.C2
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/dc3.C2
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/decltype12.C2
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/decltype17.C3
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/decltype3.C2
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/decltype41.C8
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/defaulted28.C2
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/enum_base3.C2
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/gen-attrs-4.C2
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/initlist96.C1
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/lambda/lambda-58566.C1
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/lambda/lambda-conv10.C1
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/lambda/lambda-conv12.C1
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/lambda/lambda-defarg3.C2
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/lambda/lambda-ice3.C2
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/lambda/lambda-ice5.C1
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/lambda/lambda-nested2.C2
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/lambda/lambda-switch.C4
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/lambda/lambda-template.C2
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/lambda/lambda-template12.C1
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/lambda/lambda-template2.C2
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/lambda/lambda-this12.C2
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/nolinkage1.C2
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/nolinkage1a.cc2
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/nsdmi-template5.C6
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/parse1.C2
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/pr34054.C2
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/pr47416.C1
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/pr58781.C2
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/pr70538.C2
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/pr81325.C2
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/range-for13.C2
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/range-for14.C2
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/range-for6.C2
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/rv-trivial-bug.C1
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/rv2n.C8
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/rv3n.C1
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/static_assert10.C1
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/static_assert11.C1
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/static_assert12.C2
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/static_assert13.C2
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/trailing1.C4
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/trailing5.C4
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/udlit-macros.C31
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/udlit-template.C2
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/variadic114.C1
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/variadic57.C4
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/variadic65.C2
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/variadic66.C2
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/variadic97.C2
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/variadic98.C2
-rw-r--r--gcc/testsuite/g++.dg/cpp1y/auto-fn11.C1
-rw-r--r--gcc/testsuite/g++.dg/cpp1y/auto-fn29.C1
-rw-r--r--gcc/testsuite/g++.dg/cpp1y/auto-fn38.C1
-rw-r--r--gcc/testsuite/g++.dg/cpp1y/constexpr-return2.C1
-rw-r--r--gcc/testsuite/g++.dg/cpp1y/lambda-init7.C2
-rw-r--r--gcc/testsuite/g++.dg/cpp1y/pr63996.C1
-rw-r--r--gcc/testsuite/g++.dg/cpp1y/pr65202.C1
-rw-r--r--gcc/testsuite/g++.dg/cpp1y/pr66443-cxx14.C1
-rw-r--r--gcc/testsuite/g++.dg/cpp1y/pr79253.C2
-rw-r--r--gcc/testsuite/g++.dg/cpp1y/pr81574.C13
-rw-r--r--gcc/testsuite/g++.dg/cpp1y/static_assert1.C1
-rw-r--r--gcc/testsuite/g++.dg/cpp1y/static_assert2.C1
-rw-r--r--gcc/testsuite/g++.dg/cpp1y/var-templ44.C1
-rw-r--r--gcc/testsuite/g++.dg/cpp1z/eval-order3.C4
-rw-r--r--gcc/testsuite/g++.dg/cpp1z/fold6.C2
-rw-r--r--gcc/testsuite/g++.dg/cpp1z/inline-var2.C1
-rw-r--r--gcc/testsuite/g++.dg/cpp1z/lambda-this1.C2
-rw-r--r--gcc/testsuite/g++.dg/cpp1z/static_assert-nomsg.C2
-rw-r--r--gcc/testsuite/g++.dg/cpp2a/ptrmem1a.C24
-rw-r--r--gcc/testsuite/g++.dg/debug/dwarf-eh-personality-1.C2
-rw-r--r--gcc/testsuite/g++.dg/debug/dwarf2/dwarf4-typedef.C2
-rw-r--r--gcc/testsuite/g++.dg/debug/dwarf2/icf.C4
-rw-r--r--gcc/testsuite/g++.dg/debug/dwarf2/pr61433.C2
-rw-r--r--gcc/testsuite/g++.dg/debug/nullptr01.C1
-rw-r--r--gcc/testsuite/g++.dg/debug/pr16792.C2
-rw-r--r--gcc/testsuite/g++.dg/debug/pr46241.C2
-rw-r--r--gcc/testsuite/g++.dg/debug/pr46338.C1
-rw-r--r--gcc/testsuite/g++.dg/debug/pr47106.C6
-rw-r--r--gcc/testsuite/g++.dg/debug/pr71057.C2
-rw-r--r--gcc/testsuite/g++.dg/debug/pr71432.C13
-rw-r--r--gcc/testsuite/g++.dg/debug/pr80461.C2
-rw-r--r--gcc/testsuite/g++.dg/dfp/44473-1.C2
-rw-r--r--gcc/testsuite/g++.dg/dfp/44473-2.C4
-rw-r--r--gcc/testsuite/g++.dg/diagnostic/pr77949.C2
-rw-r--r--gcc/testsuite/g++.dg/eh/builtin1.C4
-rw-r--r--gcc/testsuite/g++.dg/eh/builtin2.C4
-rw-r--r--gcc/testsuite/g++.dg/eh/builtin3.C2
-rw-r--r--gcc/testsuite/g++.dg/eh/pr45569.C2
-rw-r--r--gcc/testsuite/g++.dg/eh/sighandle.C1
-rw-r--r--gcc/testsuite/g++.dg/eh/unwind2.C2
-rw-r--r--gcc/testsuite/g++.dg/expr/bitfield11.C2
-rw-r--r--gcc/testsuite/g++.dg/expr/cond12.C8
-rw-r--r--gcc/testsuite/g++.dg/expr/static_cast7.C2
-rw-r--r--gcc/testsuite/g++.dg/ext/altivec-14.C2
-rw-r--r--gcc/testsuite/g++.dg/ext/asm13.C2
-rw-r--r--gcc/testsuite/g++.dg/ext/builtin-object-size3.C4
-rw-r--r--gcc/testsuite/g++.dg/ext/has_nothrow_assign_odr.C5
-rw-r--r--gcc/testsuite/g++.dg/ext/label7.C2
-rw-r--r--gcc/testsuite/g++.dg/ext/label8.C2
-rw-r--r--gcc/testsuite/g++.dg/ext/pr57735.C2
-rw-r--r--gcc/testsuite/g++.dg/ext/pr81706.C4
-rw-r--r--gcc/testsuite/g++.dg/ext/tmplattr7.C2
-rw-r--r--gcc/testsuite/g++.dg/ext/vector14.C8
-rw-r--r--gcc/testsuite/g++.dg/ext/vector8.C2
-rw-r--r--gcc/testsuite/g++.dg/ext/visibility/anon1.C2
-rw-r--r--gcc/testsuite/g++.dg/ext/visibility/anon2.C2
-rw-r--r--gcc/testsuite/g++.dg/ext/visibility/namespace1.C2
-rw-r--r--gcc/testsuite/g++.dg/ext/vla16.C2
-rw-r--r--gcc/testsuite/g++.dg/gcov/gcov-1.C2
-rw-r--r--gcc/testsuite/g++.dg/gcov/gcov-threads-1.C2
-rw-r--r--gcc/testsuite/g++.dg/goacc/reference.C4
-rw-r--r--gcc/testsuite/g++.dg/gomp/macro-4.C8
-rw-r--r--gcc/testsuite/g++.dg/gomp/pr37189.C2
-rw-r--r--gcc/testsuite/g++.dg/gomp/pr39495-1.C4
-rw-r--r--gcc/testsuite/g++.dg/gomp/pr39495-2.C2
-rw-r--r--gcc/testsuite/g++.dg/gomp/pr82054.C2
-rw-r--r--gcc/testsuite/g++.dg/graphite/pr41305.C2
-rw-r--r--gcc/testsuite/g++.dg/graphite/pr42930.C2
-rw-r--r--gcc/testsuite/g++.dg/inherit/covariant10.C4
-rw-r--r--gcc/testsuite/g++.dg/inherit/covariant11.C8
-rw-r--r--gcc/testsuite/g++.dg/inherit/protected1.C2
-rw-r--r--gcc/testsuite/g++.dg/init/inline1.C2
-rw-r--r--gcc/testsuite/g++.dg/init/new18.C1
-rw-r--r--gcc/testsuite/g++.dg/init/pr35878_1.C2
-rw-r--r--gcc/testsuite/g++.dg/init/pr35878_4.C23
-rw-r--r--gcc/testsuite/g++.dg/init/reference2.C2
-rw-r--r--gcc/testsuite/g++.dg/init/reference3.C1
-rw-r--r--gcc/testsuite/g++.dg/init/switch1.C2
-rw-r--r--gcc/testsuite/g++.dg/ipa/devirt-10.C2
-rw-r--r--gcc/testsuite/g++.dg/ipa/devirt-13.C3
-rw-r--r--gcc/testsuite/g++.dg/ipa/devirt-14.C3
-rw-r--r--gcc/testsuite/g++.dg/ipa/devirt-15.C2
-rw-r--r--gcc/testsuite/g++.dg/ipa/devirt-16.C2
-rw-r--r--gcc/testsuite/g++.dg/ipa/devirt-17.C2
-rw-r--r--gcc/testsuite/g++.dg/ipa/devirt-18.C2
-rw-r--r--gcc/testsuite/g++.dg/ipa/devirt-19.C2
-rw-r--r--gcc/testsuite/g++.dg/ipa/devirt-21.C4
-rw-r--r--gcc/testsuite/g++.dg/ipa/devirt-23.C4
-rw-r--r--gcc/testsuite/g++.dg/ipa/devirt-34.C2
-rw-r--r--gcc/testsuite/g++.dg/ipa/devirt-38.C2
-rw-r--r--gcc/testsuite/g++.dg/ipa/devirt-40.C4
-rw-r--r--gcc/testsuite/g++.dg/ipa/devirt-41.C2
-rw-r--r--gcc/testsuite/g++.dg/ipa/devirt-42.C2
-rw-r--r--gcc/testsuite/g++.dg/ipa/devirt-44.C4
-rw-r--r--gcc/testsuite/g++.dg/ipa/devirt-45.C4
-rw-r--r--gcc/testsuite/g++.dg/ipa/devirt-48.C4
-rw-r--r--gcc/testsuite/g++.dg/ipa/devirt-52.C1
-rw-r--r--gcc/testsuite/g++.dg/ipa/nothrow-1.C2
-rw-r--r--gcc/testsuite/g++.dg/ipa/pr43812.C2
-rw-r--r--gcc/testsuite/g++.dg/ipa/pr44372.C1
-rw-r--r--gcc/testsuite/g++.dg/ipa/pr45572-1.C4
-rw-r--r--gcc/testsuite/g++.dg/ipa/pr58371.C1
-rw-r--r--gcc/testsuite/g++.dg/ipa/pr59176.C2
-rw-r--r--gcc/testsuite/g++.dg/ipa/pr60640-1.C2
-rw-r--r--gcc/testsuite/g++.dg/ipa/pr61540.C2
-rw-r--r--gcc/testsuite/g++.dg/ipa/pr63470.C1
-rw-r--r--gcc/testsuite/g++.dg/ipa/pr63587-1.C3
-rw-r--r--gcc/testsuite/g++.dg/ipa/pr63587-2.C2
-rw-r--r--gcc/testsuite/g++.dg/ipa/pr63838.C2
-rw-r--r--gcc/testsuite/g++.dg/ipa/pr63894.C1
-rw-r--r--gcc/testsuite/g++.dg/ipa/pr64068.C6
-rw-r--r--gcc/testsuite/g++.dg/ipa/pr64896.C4
-rw-r--r--gcc/testsuite/g++.dg/ipa/pr65002.C4
-rw-r--r--gcc/testsuite/g++.dg/ipa/pr65008.C2
-rw-r--r--gcc/testsuite/g++.dg/ipa/pr65465.C2
-rw-r--r--gcc/testsuite/g++.dg/ipa/pr66896.C2
-rw-r--r--gcc/testsuite/g++.dg/ipa/pr68851.C6
-rw-r--r--gcc/testsuite/g++.dg/ipa/pr78211.C3
-rw-r--r--gcc/testsuite/g++.dg/ipa/pr79931.C1
-rw-r--r--gcc/testsuite/g++.dg/ipa/pure-const-1.C2
-rw-r--r--gcc/testsuite/g++.dg/ipa/pure-const-2.C2
-rw-r--r--gcc/testsuite/g++.dg/ipa/pure-const-3.C2
-rw-r--r--gcc/testsuite/g++.dg/ipa/remref-1.C2
-rw-r--r--gcc/testsuite/g++.dg/ipa/remref-2.C2
-rw-r--r--gcc/testsuite/g++.dg/lookup/builtin2.C2
-rw-r--r--gcc/testsuite/g++.dg/lookup/crash3.C6
-rw-r--r--gcc/testsuite/g++.dg/lookup/friend20.C1
-rw-r--r--gcc/testsuite/g++.dg/lookup/pr80891-5.C1
-rw-r--r--gcc/testsuite/g++.dg/lookup/struct2.C2
-rw-r--r--gcc/testsuite/g++.dg/lto/20080709_0.C1
-rw-r--r--gcc/testsuite/g++.dg/lto/20080907_0.C2
-rw-r--r--gcc/testsuite/g++.dg/lto/20080915_0.C4
-rw-r--r--gcc/testsuite/g++.dg/lto/20080916_0.C2
-rw-r--r--gcc/testsuite/g++.dg/lto/20081022_0.C2
-rw-r--r--gcc/testsuite/g++.dg/lto/20081023_0.C2
-rw-r--r--gcc/testsuite/g++.dg/lto/20081118_0.C1
-rw-r--r--gcc/testsuite/g++.dg/lto/20081118_1.C1
-rw-r--r--gcc/testsuite/g++.dg/lto/20081120-1_0.C1
-rw-r--r--gcc/testsuite/g++.dg/lto/20081120-1_1.C1
-rw-r--r--gcc/testsuite/g++.dg/lto/20081127_1.C2
-rw-r--r--gcc/testsuite/g++.dg/lto/20081217-2_0.C3
-rw-r--r--gcc/testsuite/g++.dg/lto/20090303_0.C2
-rw-r--r--gcc/testsuite/g++.dg/lto/20090311-1_0.C2
-rw-r--r--gcc/testsuite/g++.dg/lto/20090312_0.C1
-rw-r--r--gcc/testsuite/g++.dg/lto/20090315_0.C2
-rw-r--r--gcc/testsuite/g++.dg/lto/20091002-1_0.C2
-rw-r--r--gcc/testsuite/g++.dg/lto/20091002-2_0.C2
-rw-r--r--gcc/testsuite/g++.dg/lto/20091002-3_0.C2
-rw-r--r--gcc/testsuite/g++.dg/lto/20091004-1_0.C2
-rw-r--r--gcc/testsuite/g++.dg/lto/20091004-2_0.C2
-rw-r--r--gcc/testsuite/g++.dg/lto/20091004-3_1.C1
-rw-r--r--gcc/testsuite/g++.dg/lto/20100721-1_0.C2
-rw-r--r--gcc/testsuite/g++.dg/lto/20101010-1_0.C1
-rw-r--r--gcc/testsuite/g++.dg/lto/20101010-2_0.C1
-rw-r--r--gcc/testsuite/g++.dg/lto/pr45679-1_0.C2
-rw-r--r--gcc/testsuite/g++.dg/lto/pr45679-1_1.C2
-rw-r--r--gcc/testsuite/g++.dg/lto/pr45679-2_0.C3
-rw-r--r--gcc/testsuite/g++.dg/lto/pr48042_0.C2
-rw-r--r--gcc/testsuite/g++.dg/lto/pr51650-1_0.C3
-rw-r--r--gcc/testsuite/g++.dg/lto/pr51650-3_0.C3
-rw-r--r--gcc/testsuite/g++.dg/lto/pr63270_1.C1
-rw-r--r--gcc/testsuite/g++.dg/lto/pr65193_0.C2
-rw-r--r--gcc/testsuite/g++.dg/lto/pr65302_0.C2
-rw-r--r--gcc/testsuite/g++.dg/lto/pr65316_0.C3
-rw-r--r--gcc/testsuite/g++.dg/lto/pr65475c_0.C3
-rw-r--r--gcc/testsuite/g++.dg/lto/pr65549_0.C6
-rw-r--r--gcc/testsuite/g++.dg/lto/pr69077_0.C2
-rw-r--r--gcc/testsuite/g++.dg/lto/pr69589_0.C2
-rw-r--r--gcc/testsuite/g++.dg/missing-return.C8
-rw-r--r--gcc/testsuite/g++.dg/opt/20050511-1.C2
-rw-r--r--gcc/testsuite/g++.dg/opt/combine.C2
-rw-r--r--gcc/testsuite/g++.dg/opt/complex3.C2
-rw-r--r--gcc/testsuite/g++.dg/opt/const3.C4
-rw-r--r--gcc/testsuite/g++.dg/opt/covariant1.C2
-rw-r--r--gcc/testsuite/g++.dg/opt/declone3.C2
-rw-r--r--gcc/testsuite/g++.dg/opt/dump1.C3
-rw-r--r--gcc/testsuite/g++.dg/opt/inline15.C3
-rw-r--r--gcc/testsuite/g++.dg/opt/local1.C2
-rw-r--r--gcc/testsuite/g++.dg/opt/memcpy1.C2
-rw-r--r--gcc/testsuite/g++.dg/opt/new1.C4
-rw-r--r--gcc/testsuite/g++.dg/opt/nrv8.C2
-rw-r--r--gcc/testsuite/g++.dg/opt/pr23299.C8
-rw-r--r--gcc/testsuite/g++.dg/opt/pr27826.C2
-rw-r--r--gcc/testsuite/g++.dg/opt/pr44919.C4
-rw-r--r--gcc/testsuite/g++.dg/opt/pr46640.C3
-rw-r--r--gcc/testsuite/g++.dg/opt/pr47615.C5
-rw-r--r--gcc/testsuite/g++.dg/opt/pr55329.C4
-rw-r--r--gcc/testsuite/g++.dg/opt/pr61456.C1
-rw-r--r--gcc/testsuite/g++.dg/opt/pr65003.C4
-rw-r--r--gcc/testsuite/g++.dg/opt/pr65554.C2
-rw-r--r--gcc/testsuite/g++.dg/opt/pr69432.C2
-rw-r--r--gcc/testsuite/g++.dg/opt/pr78373.C2
-rw-r--r--gcc/testsuite/g++.dg/opt/pr79267.C4
-rw-r--r--gcc/testsuite/g++.dg/opt/pr82159-2.C1
-rw-r--r--gcc/testsuite/g++.dg/opt/pr82929.C30
-rw-r--r--gcc/testsuite/g++.dg/other/array3.C1
-rw-r--r--gcc/testsuite/g++.dg/other/copy2.C10
-rw-r--r--gcc/testsuite/g++.dg/other/crash-5.C2
-rw-r--r--gcc/testsuite/g++.dg/other/crash-8.C2
-rw-r--r--gcc/testsuite/g++.dg/other/error34.C2
-rw-r--r--gcc/testsuite/g++.dg/other/i386-8.C2
-rw-r--r--gcc/testsuite/g++.dg/other/pr22003.C1
-rw-r--r--gcc/testsuite/g++.dg/other/pr24623.C2
-rw-r--r--gcc/testsuite/g++.dg/other/pr29610.C5
-rw-r--r--gcc/testsuite/g++.dg/other/pr42645-1.C2
-rw-r--r--gcc/testsuite/g++.dg/other/pr42645-2.C3
-rw-r--r--gcc/testsuite/g++.dg/other/pr52048.C1
-rw-r--r--gcc/testsuite/g++.dg/other/typedef3.C2
-rw-r--r--gcc/testsuite/g++.dg/overload/addr1.C2
-rw-r--r--gcc/testsuite/g++.dg/overload/defarg4.C5
-rw-r--r--gcc/testsuite/g++.dg/overload/operator5.C2
-rw-r--r--gcc/testsuite/g++.dg/overload/ref-conv1.C1
-rw-r--r--gcc/testsuite/g++.dg/overload/template5.C2
-rw-r--r--gcc/testsuite/g++.dg/parse/crash40.C4
-rw-r--r--gcc/testsuite/g++.dg/parse/crash61.C1
-rw-r--r--gcc/testsuite/g++.dg/parse/crash67.C1
-rw-r--r--gcc/testsuite/g++.dg/parse/ctor5.C1
-rw-r--r--gcc/testsuite/g++.dg/parse/defarg4.C2
-rw-r--r--gcc/testsuite/g++.dg/parse/defarg6.C2
-rw-r--r--gcc/testsuite/g++.dg/parse/error5.C12
-rw-r--r--gcc/testsuite/g++.dg/parse/expr2.C2
-rw-r--r--gcc/testsuite/g++.dg/parse/friend7.C1
-rw-r--r--gcc/testsuite/g++.dg/parse/namespace1.C2
-rw-r--r--gcc/testsuite/g++.dg/parse/namespace9.C2
-rw-r--r--gcc/testsuite/g++.dg/parse/ret-type2.C2
-rw-r--r--gcc/testsuite/g++.dg/parse/typedef8.C2
-rw-r--r--gcc/testsuite/g++.dg/pch/static-1.C2
-rw-r--r--gcc/testsuite/g++.dg/plugin/diagnostic-test-expressions-1.C10
-rw-r--r--gcc/testsuite/g++.dg/plugin/dumb-plugin-test-1.C2
-rw-r--r--gcc/testsuite/g++.dg/plugin/self-assign-test-1.C2
-rw-r--r--gcc/testsuite/g++.dg/plugin/self-assign-test-2.C2
-rw-r--r--gcc/testsuite/g++.dg/plugin/self-assign-test-3.C2
-rw-r--r--gcc/testsuite/g++.dg/pr45788.C2
-rw-r--r--gcc/testsuite/g++.dg/pr48484.C3
-rw-r--r--gcc/testsuite/g++.dg/pr50763-3.C2
-rw-r--r--gcc/testsuite/g++.dg/pr55513.C2
-rw-r--r--gcc/testsuite/g++.dg/pr55604.C2
-rw-r--r--gcc/testsuite/g++.dg/pr57662.C1
-rw-r--r--gcc/testsuite/g++.dg/pr57878.C2
-rw-r--r--gcc/testsuite/g++.dg/pr58389.C2
-rw-r--r--gcc/testsuite/g++.dg/pr59510.C1
-rw-r--r--gcc/testsuite/g++.dg/pr64688.C5
-rw-r--r--gcc/testsuite/g++.dg/pr65032.C2
-rw-r--r--gcc/testsuite/g++.dg/pr67989.C1
-rw-r--r--gcc/testsuite/g++.dg/pr70590-2.C4
-rw-r--r--gcc/testsuite/g++.dg/pr70590.C4
-rw-r--r--gcc/testsuite/g++.dg/pr70965.C2
-rw-r--r--gcc/testsuite/g++.dg/pr71633.C1
-rw-r--r--gcc/testsuite/g++.dg/pr77550.C2
-rw-r--r--gcc/testsuite/g++.dg/pr80287.C2
-rw-r--r--gcc/testsuite/g++.dg/pr80707.C2
-rw-r--r--gcc/testsuite/g++.dg/pr81194.C1
-rw-r--r--gcc/testsuite/g++.dg/spellcheck-identifiers.C2
-rw-r--r--gcc/testsuite/g++.dg/stackprotectexplicit2.C3
-rw-r--r--gcc/testsuite/g++.dg/tc1/dr152.C2
-rw-r--r--gcc/testsuite/g++.dg/template/aggr-init1.C2
-rw-r--r--gcc/testsuite/g++.dg/template/anon1.C4
-rw-r--r--gcc/testsuite/g++.dg/template/array29.C1
-rw-r--r--gcc/testsuite/g++.dg/template/array7.C1
-rw-r--r--gcc/testsuite/g++.dg/template/canon-type-8.C1
-rw-r--r--gcc/testsuite/g++.dg/template/cast5.C8
-rw-r--r--gcc/testsuite/g++.dg/template/conv1.C4
-rw-r--r--gcc/testsuite/g++.dg/template/crash107.C1
-rw-r--r--gcc/testsuite/g++.dg/template/crash23.C2
-rw-r--r--gcc/testsuite/g++.dg/template/crash8.C2
-rw-r--r--gcc/testsuite/g++.dg/template/defarg4.C2
-rw-r--r--gcc/testsuite/g++.dg/template/dependent-expr9.C2
-rw-r--r--gcc/testsuite/g++.dg/template/error10.C1
-rw-r--r--gcc/testsuite/g++.dg/template/friend32.C1
-rw-r--r--gcc/testsuite/g++.dg/template/init6.C2
-rw-r--r--gcc/testsuite/g++.dg/template/memfriend7.C2
-rw-r--r--gcc/testsuite/g++.dg/template/new10.C1
-rw-r--r--gcc/testsuite/g++.dg/template/nontype12.C2
-rw-r--r--gcc/testsuite/g++.dg/template/overload12.C2
-rw-r--r--gcc/testsuite/g++.dg/template/overload5.C1
-rw-r--r--gcc/testsuite/g++.dg/template/overload8.C2
-rw-r--r--gcc/testsuite/g++.dg/template/partial10.C3
-rw-r--r--gcc/testsuite/g++.dg/template/partial9.C2
-rw-r--r--gcc/testsuite/g++.dg/template/qual1.C2
-rw-r--r--gcc/testsuite/g++.dg/template/show-template-tree-3.C1
-rw-r--r--gcc/testsuite/g++.dg/template/sizeof8.C2
-rw-r--r--gcc/testsuite/g++.dg/template/sizeof9.C2
-rw-r--r--gcc/testsuite/g++.dg/template/spec6.C2
-rw-r--r--gcc/testsuite/g++.dg/template/spec7.C2
-rw-r--r--gcc/testsuite/g++.dg/template/typedef8.C1
-rw-r--r--gcc/testsuite/g++.dg/template/using20.C1
-rw-r--r--gcc/testsuite/g++.dg/template/vla1.C1
-rw-r--r--gcc/testsuite/g++.dg/tls/thread_local3.C1
-rw-r--r--gcc/testsuite/g++.dg/tls/thread_local3g.C1
-rw-r--r--gcc/testsuite/g++.dg/tls/thread_local5.C1
-rw-r--r--gcc/testsuite/g++.dg/tls/thread_local5g.C1
-rw-r--r--gcc/testsuite/g++.dg/tls/thread_local6.C1
-rw-r--r--gcc/testsuite/g++.dg/tls/thread_local6g.C1
-rw-r--r--gcc/testsuite/g++.dg/tm/cgraph_edge.C1
-rw-r--r--gcc/testsuite/g++.dg/tm/pr46646.C1
-rw-r--r--gcc/testsuite/g++.dg/tm/pr47554.C2
-rw-r--r--gcc/testsuite/g++.dg/tm/pr47573.C1
-rw-r--r--gcc/testsuite/g++.dg/tm/unsafe1.C2
-rw-r--r--gcc/testsuite/g++.dg/tm/unsafe2.C2
-rw-r--r--gcc/testsuite/g++.dg/torture/20070621-1.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/20090329-1.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/20141013.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/Wsizeof-pointer-memaccess1.C15
-rw-r--r--gcc/testsuite/g++.dg/torture/Wsizeof-pointer-memaccess2.C11
-rw-r--r--gcc/testsuite/g++.dg/torture/pr33134.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr33340.C4
-rw-r--r--gcc/testsuite/g++.dg/torture/pr33627.C6
-rw-r--r--gcc/testsuite/g++.dg/torture/pr34222.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr34241.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr34641.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr34850.C3
-rw-r--r--gcc/testsuite/g++.dg/torture/pr35164-1.C2
-rw-r--r--gcc/testsuite/g++.dg/torture/pr36745.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr38705.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr38811.C4
-rw-r--r--gcc/testsuite/g++.dg/torture/pr39362.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr39732.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr40991.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr41775.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr42183.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr42450.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr42704.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr42760.C2
-rw-r--r--gcc/testsuite/g++.dg/torture/pr42773.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr42883.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr43905.C2
-rw-r--r--gcc/testsuite/g++.dg/torture/pr44148.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr44295.C2
-rw-r--r--gcc/testsuite/g++.dg/torture/pr44357.C2
-rw-r--r--gcc/testsuite/g++.dg/torture/pr44813.C2
-rw-r--r--gcc/testsuite/g++.dg/torture/pr45580.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr45874.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr45877.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr46383.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr46469.C2
-rw-r--r--gcc/testsuite/g++.dg/torture/pr47313.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr48271.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr48695.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr49615.C3
-rw-r--r--gcc/testsuite/g++.dg/torture/pr49770.C2
-rw-r--r--gcc/testsuite/g++.dg/torture/pr49938.C2
-rw-r--r--gcc/testsuite/g++.dg/torture/pr51436.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr51482.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr51737.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr51959.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr52772.C2
-rw-r--r--gcc/testsuite/g++.dg/torture/pr52918-2.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr53011.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr53602.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr53752.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr54838.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr54902.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr56029.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr56768.C2
-rw-r--r--gcc/testsuite/g++.dg/torture/pr57107.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr57140.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr57235.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr58252.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr58555.C2
-rw-r--r--gcc/testsuite/g++.dg/torture/pr59208.C2
-rw-r--r--gcc/testsuite/g++.dg/torture/pr60438-1.C2
-rw-r--r--gcc/testsuite/g++.dg/torture/pr60746.C2
-rw-r--r--gcc/testsuite/g++.dg/torture/pr61554.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr63419.C2
-rw-r--r--gcc/testsuite/g++.dg/torture/pr63476.C2
-rw-r--r--gcc/testsuite/g++.dg/torture/pr63512.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr64282.C4
-rw-r--r--gcc/testsuite/g++.dg/torture/pr64378.C2
-rw-r--r--gcc/testsuite/g++.dg/torture/pr64565.C2
-rw-r--r--gcc/testsuite/g++.dg/torture/pr64568-2.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr64669.C2
-rw-r--r--gcc/testsuite/g++.dg/torture/pr64686.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr64978.C2
-rw-r--r--gcc/testsuite/g++.dg/torture/pr64995.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr65655.C2
-rw-r--r--gcc/testsuite/g++.dg/torture/pr65851.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr67055.C2
-rw-r--r--gcc/testsuite/g++.dg/torture/pr67191.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr68852.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr69264.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr70971.C2
-rw-r--r--gcc/testsuite/g++.dg/torture/pr77674.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr77947.C3
-rw-r--r--gcc/testsuite/g++.dg/torture/pr78268.C2
-rw-r--r--gcc/testsuite/g++.dg/torture/pr78507.C2
-rw-r--r--gcc/testsuite/g++.dg/torture/pr78692.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr80171.C1
-rw-r--r--gcc/testsuite/g++.dg/torture/pr82154.C2
-rw-r--r--gcc/testsuite/g++.dg/torture/pr82902.C21
-rw-r--r--gcc/testsuite/g++.dg/torture/pr82985.C458
-rw-r--r--gcc/testsuite/g++.dg/tree-prof/pr79259.C2
-rw-r--r--gcc/testsuite/g++.dg/tree-ssa/copyprop.C2
-rw-r--r--gcc/testsuite/g++.dg/tree-ssa/pr22444.C1
-rw-r--r--gcc/testsuite/g++.dg/tree-ssa/pr23948.C2
-rw-r--r--gcc/testsuite/g++.dg/tree-ssa/pr24172.C2
-rw-r--r--gcc/testsuite/g++.dg/tree-ssa/pr24351-3.C1
-rw-r--r--gcc/testsuite/g++.dg/tree-ssa/pr27283.C1
-rw-r--r--gcc/testsuite/g++.dg/tree-ssa/pr27291.C1
-rw-r--r--gcc/testsuite/g++.dg/tree-ssa/pr27548.C1
-rw-r--r--gcc/testsuite/g++.dg/tree-ssa/pr31146-2.C7
-rw-r--r--gcc/testsuite/g++.dg/tree-ssa/pr33604.C2
-rw-r--r--gcc/testsuite/g++.dg/tree-ssa/pr34355.C1
-rw-r--r--gcc/testsuite/g++.dg/tree-ssa/pr41428.C7
-rw-r--r--gcc/testsuite/g++.dg/tree-ssa/pr42337.C1
-rw-r--r--gcc/testsuite/g++.dg/tree-ssa/pr81408.C12
-rw-r--r--gcc/testsuite/g++.dg/tree-ssa/pred-1.C4
-rw-r--r--gcc/testsuite/g++.dg/ubsan/pr65019.C2
-rw-r--r--gcc/testsuite/g++.dg/ubsan/pr65583.C1
-rw-r--r--gcc/testsuite/g++.dg/ubsan/vptr-12.C22
-rw-r--r--gcc/testsuite/g++.dg/vect/pr60836.cc2
-rw-r--r--gcc/testsuite/g++.dg/vect/pr68145.cc1
-rw-r--r--gcc/testsuite/g++.dg/vect/pr70729-nest.cc2
-rw-r--r--gcc/testsuite/g++.dg/vect/pr70729.cc2
-rw-r--r--gcc/testsuite/g++.dg/warn/Waddress-3.C2
-rw-r--r--gcc/testsuite/g++.dg/warn/Wconversion-null-2.C2
-rw-r--r--gcc/testsuite/g++.dg/warn/Wnull-conversion-2.C3
-rw-r--r--gcc/testsuite/g++.dg/warn/Wparentheses-10.C2
-rw-r--r--gcc/testsuite/g++.dg/warn/Wparentheses-11.C2
-rw-r--r--gcc/testsuite/g++.dg/warn/Wparentheses-12.C2
-rw-r--r--gcc/testsuite/g++.dg/warn/Wparentheses-25.C4
-rw-r--r--gcc/testsuite/g++.dg/warn/Wparentheses-6.C2
-rw-r--r--gcc/testsuite/g++.dg/warn/Wparentheses-7.C2
-rw-r--r--gcc/testsuite/g++.dg/warn/Wparentheses-8.C2
-rw-r--r--gcc/testsuite/g++.dg/warn/Wparentheses-9.C2
-rw-r--r--gcc/testsuite/g++.dg/warn/Wshadow-5.C2
-rw-r--r--gcc/testsuite/g++.dg/warn/Wtype-limits-Wextra.C4
-rw-r--r--gcc/testsuite/g++.dg/warn/Wtype-limits-no.C4
-rw-r--r--gcc/testsuite/g++.dg/warn/Wtype-limits.C4
-rw-r--r--gcc/testsuite/g++.dg/warn/Wunused-local-typedefs.C2
-rw-r--r--gcc/testsuite/g++.dg/warn/Wzero-as-null-pointer-constant-5.C2
-rw-r--r--gcc/testsuite/g++.dg/warn/pmf1.C1
-rw-r--r--gcc/testsuite/g++.dg/warn/string1.C2
-rw-r--r--gcc/testsuite/g++.old-deja/g++.benjamin/p13417.C2
-rw-r--r--gcc/testsuite/g++.old-deja/g++.brendan/asm-extn1.C4
-rw-r--r--gcc/testsuite/g++.old-deja/g++.brendan/crash24.C3
-rw-r--r--gcc/testsuite/g++.old-deja/g++.ext/constructor.C1
-rw-r--r--gcc/testsuite/g++.old-deja/g++.ext/namedret1.C2
-rw-r--r--gcc/testsuite/g++.old-deja/g++.ext/namedret3.C2
-rw-r--r--gcc/testsuite/g++.old-deja/g++.ext/return1.C2
-rw-r--r--gcc/testsuite/g++.old-deja/g++.jason/anon4.C2
-rw-r--r--gcc/testsuite/g++.old-deja/g++.jason/enum6.C1
-rw-r--r--gcc/testsuite/g++.old-deja/g++.jason/lineno2.C4
-rw-r--r--gcc/testsuite/g++.old-deja/g++.jason/lineno3.C2
-rw-r--r--gcc/testsuite/g++.old-deja/g++.jason/lineno4.C2
-rw-r--r--gcc/testsuite/g++.old-deja/g++.jason/new2.C2
-rw-r--r--gcc/testsuite/g++.old-deja/g++.jason/new4.C3
-rw-r--r--gcc/testsuite/g++.old-deja/g++.jason/shadow1.C2
-rw-r--r--gcc/testsuite/g++.old-deja/g++.jason/tempcons.C2
-rw-r--r--gcc/testsuite/g++.old-deja/g++.jason/thunk2.C3
-rw-r--r--gcc/testsuite/g++.old-deja/g++.law/builtin1.C4
-rw-r--r--gcc/testsuite/g++.old-deja/g++.law/enum9.C1
-rw-r--r--gcc/testsuite/g++.old-deja/g++.law/except3.C1
-rw-r--r--gcc/testsuite/g++.old-deja/g++.law/init6.C1
-rw-r--r--gcc/testsuite/g++.old-deja/g++.law/profile1.C3
-rw-r--r--gcc/testsuite/g++.old-deja/g++.law/shadow2.C2
-rw-r--r--gcc/testsuite/g++.old-deja/g++.law/temps4.C3
-rw-r--r--gcc/testsuite/g++.old-deja/g++.law/weak.C2
-rw-r--r--gcc/testsuite/g++.old-deja/g++.mike/bool2.C4
-rw-r--r--gcc/testsuite/g++.old-deja/g++.mike/eh1.C1
-rw-r--r--gcc/testsuite/g++.old-deja/g++.mike/eh10.C1
-rw-r--r--gcc/testsuite/g++.old-deja/g++.mike/eh13.C2
-rw-r--r--gcc/testsuite/g++.old-deja/g++.mike/eh16.C1
-rw-r--r--gcc/testsuite/g++.old-deja/g++.mike/eh17.C1
-rw-r--r--gcc/testsuite/g++.old-deja/g++.mike/eh2.C4
-rw-r--r--gcc/testsuite/g++.old-deja/g++.mike/eh23.C1
-rw-r--r--gcc/testsuite/g++.old-deja/g++.mike/eh24.C1
-rw-r--r--gcc/testsuite/g++.old-deja/g++.mike/eh25.C1
-rw-r--r--gcc/testsuite/g++.old-deja/g++.mike/eh26.C1
-rw-r--r--gcc/testsuite/g++.old-deja/g++.mike/eh27.C1
-rw-r--r--gcc/testsuite/g++.old-deja/g++.mike/eh28.C1
-rw-r--r--gcc/testsuite/g++.old-deja/g++.mike/eh29.C1
-rw-r--r--gcc/testsuite/g++.old-deja/g++.mike/eh30.C1
-rw-r--r--gcc/testsuite/g++.old-deja/g++.mike/eh31.C1
-rw-r--r--gcc/testsuite/g++.old-deja/g++.mike/eh35.C1
-rw-r--r--gcc/testsuite/g++.old-deja/g++.mike/eh36.C1
-rw-r--r--gcc/testsuite/g++.old-deja/g++.mike/eh37.C1
-rw-r--r--gcc/testsuite/g++.old-deja/g++.mike/eh38.C1
-rw-r--r--gcc/testsuite/g++.old-deja/g++.mike/eh39.C1
-rw-r--r--gcc/testsuite/g++.old-deja/g++.mike/eh40.C1
-rw-r--r--gcc/testsuite/g++.old-deja/g++.mike/eh47.C1
-rw-r--r--gcc/testsuite/g++.old-deja/g++.mike/eh50.C1
-rw-r--r--gcc/testsuite/g++.old-deja/g++.mike/eh51.C1
-rw-r--r--gcc/testsuite/g++.old-deja/g++.mike/eh7.C1
-rw-r--r--gcc/testsuite/g++.old-deja/g++.mike/eh8.C1
-rw-r--r--gcc/testsuite/g++.old-deja/g++.mike/eh9.C1
-rw-r--r--gcc/testsuite/g++.old-deja/g++.mike/mangle1.C1
-rw-r--r--gcc/testsuite/g++.old-deja/g++.mike/p5958.C1
-rw-r--r--gcc/testsuite/g++.old-deja/g++.mike/p6004.C1
-rw-r--r--gcc/testsuite/g++.old-deja/g++.mike/p700.C3
-rw-r--r--gcc/testsuite/g++.old-deja/g++.mike/p7912.C1
-rw-r--r--gcc/testsuite/g++.old-deja/g++.mike/p811.C3
-rw-r--r--gcc/testsuite/g++.old-deja/g++.mike/virt4.C2
-rw-r--r--gcc/testsuite/g++.old-deja/g++.oliva/nameret1.C2
-rw-r--r--gcc/testsuite/g++.old-deja/g++.oliva/nameret2.C2
-rw-r--r--gcc/testsuite/g++.old-deja/g++.other/decl1.C1
-rw-r--r--gcc/testsuite/g++.old-deja/g++.other/expr1.C3
-rw-r--r--gcc/testsuite/g++.old-deja/g++.other/inline8.C4
-rw-r--r--gcc/testsuite/g++.old-deja/g++.other/loop1.C1
-rw-r--r--gcc/testsuite/g++.old-deja/g++.other/syntax1.C2
-rw-r--r--gcc/testsuite/g++.old-deja/g++.pt/repo3.C2
-rw-r--r--gcc/testsuite/g++.old-deja/g++.robertl/eb27.C2
-rw-r--r--gcc/testsuite/g++.old-deja/g++.robertl/eb83.C2
-rw-r--r--gcc/testsuite/gcc.c-torture/compile/pr82838.c12
-rw-r--r--gcc/testsuite/gcc.c-torture/compile/pr82879.c11
-rw-r--r--gcc/testsuite/gcc.c-torture/compile/pr82913.c23
-rw-r--r--gcc/testsuite/gcc.c-torture/execute/pr23135.c3
-rw-r--r--gcc/testsuite/gcc.c-torture/execute/pr82954.c22
-rw-r--r--gcc/testsuite/gcc.dg/Walloca-1.c2
-rw-r--r--gcc/testsuite/gcc.dg/Wunknownprag.c8
-rw-r--r--gcc/testsuite/gcc.dg/builtin-redefine.c18
-rw-r--r--gcc/testsuite/gcc.dg/builtin-stpncpy.c9
-rw-r--r--gcc/testsuite/gcc.dg/builtin-tgmath-1.c322
-rw-r--r--gcc/testsuite/gcc.dg/builtin-tgmath-2.c51
-rw-r--r--gcc/testsuite/gcc.dg/builtin-tgmath-err-1.c76
-rw-r--r--gcc/testsuite/gcc.dg/builtin-tgmath-err-2.c19
-rw-r--r--gcc/testsuite/gcc.dg/cpp/Wunknown-pragmas-1.c18
-rw-r--r--gcc/testsuite/gcc.dg/cpp/Wunused.c6
-rw-r--r--gcc/testsuite/gcc.dg/cpp/macsyntx.c6
-rw-r--r--gcc/testsuite/gcc.dg/cpp/misspelled-directive-1.c2
-rw-r--r--gcc/testsuite/gcc.dg/cpp/redef2.c20
-rw-r--r--gcc/testsuite/gcc.dg/cpp/redef3.c14
-rw-r--r--gcc/testsuite/gcc.dg/cpp/redef4.c520
-rw-r--r--gcc/testsuite/gcc.dg/cpp/sysmac1.c2
-rw-r--r--gcc/testsuite/gcc.dg/cpp/trad/Wunused.c6
-rw-r--r--gcc/testsuite/gcc.dg/cpp/trad/argcount.c24
-rw-r--r--gcc/testsuite/gcc.dg/cpp/trad/comment-3.c2
-rw-r--r--gcc/testsuite/gcc.dg/cpp/trad/comment.c2
-rw-r--r--gcc/testsuite/gcc.dg/cpp/trad/defined.c2
-rw-r--r--gcc/testsuite/gcc.dg/cpp/trad/directive.c2
-rw-r--r--gcc/testsuite/gcc.dg/cpp/trad/funlike-3.c4
-rw-r--r--gcc/testsuite/gcc.dg/cpp/trad/funlike.c2
-rw-r--r--gcc/testsuite/gcc.dg/cpp/trad/literals-2.c4
-rw-r--r--gcc/testsuite/gcc.dg/cpp/trad/macro.c2
-rw-r--r--gcc/testsuite/gcc.dg/cpp/trad/pr65238-4.c12
-rw-r--r--gcc/testsuite/gcc.dg/cpp/trad/recurse-1.c4
-rw-r--r--gcc/testsuite/gcc.dg/cpp/trad/recurse-2.c6
-rw-r--r--gcc/testsuite/gcc.dg/cpp/trad/redef2.c36
-rw-r--r--gcc/testsuite/gcc.dg/cpp/ucnid-11.c12
-rw-r--r--gcc/testsuite/gcc.dg/cpp/unc1.c2
-rw-r--r--gcc/testsuite/gcc.dg/cpp/unc2.c2
-rw-r--r--gcc/testsuite/gcc.dg/cpp/unc3.c2
-rw-r--r--gcc/testsuite/gcc.dg/cpp/unc4.c2
-rw-r--r--gcc/testsuite/gcc.dg/cpp/undef2.c10
-rw-r--r--gcc/testsuite/gcc.dg/cpp/warn-redefined-2.c10
-rw-r--r--gcc/testsuite/gcc.dg/cpp/warn-redefined.c10
-rw-r--r--gcc/testsuite/gcc.dg/cpp/warn-unused-macros-2.c2
-rw-r--r--gcc/testsuite/gcc.dg/cpp/warn-unused-macros.c2
-rw-r--r--gcc/testsuite/gcc.dg/debug/dwarf2/pr82837.c29
-rw-r--r--gcc/testsuite/gcc.dg/dfp/builtin-tgmath-dfp-err.c33
-rw-r--r--gcc/testsuite/gcc.dg/dfp/builtin-tgmath-dfp.c263
-rw-r--r--gcc/testsuite/gcc.dg/div_neg.c10
-rw-r--r--gcc/testsuite/gcc.dg/empty-source-2.c2
-rw-r--r--gcc/testsuite/gcc.dg/empty-source-3.c2
-rw-r--r--gcc/testsuite/gcc.dg/gomp/macro-4.c8
-rw-r--r--gcc/testsuite/gcc.dg/noncompile/pr35447-1.c2
-rw-r--r--gcc/testsuite/gcc.dg/plugin/location-overflow-test-1.c4
-rw-r--r--gcc/testsuite/gcc.dg/pr20245-1.c2
-rw-r--r--gcc/testsuite/gcc.dg/pr28419.c1
-rw-r--r--gcc/testsuite/gcc.dg/pr44545.c2
-rw-r--r--gcc/testsuite/gcc.dg/pr80131-1.c31
-rw-r--r--gcc/testsuite/gcc.dg/pr82788.c4
-rw-r--r--gcc/testsuite/gcc.dg/pr82863.c12
-rw-r--r--gcc/testsuite/gcc.dg/pr82916.c47
-rw-r--r--gcc/testsuite/gcc.dg/pr82929.c18
-rw-r--r--gcc/testsuite/gcc.dg/rtl/truncated-rtl-file.c2
-rw-r--r--gcc/testsuite/gcc.dg/store_merging_13.c141
-rw-r--r--gcc/testsuite/gcc.dg/store_merging_14.c62
-rw-r--r--gcc/testsuite/gcc.dg/store_merging_15.c56
-rw-r--r--gcc/testsuite/gcc.dg/strlenopt-33g.c1
-rw-r--r--gcc/testsuite/gcc.dg/strncpy-fix-1.c2
-rw-r--r--gcc/testsuite/gcc.dg/torture/Wsizeof-pointer-memaccess1.c15
-rw-r--r--gcc/testsuite/gcc.dg/torture/pr60092.c1
-rw-r--r--gcc/testsuite/gcc.dg/torture/pr63554.c5
-rw-r--r--gcc/testsuite/gcc.dg/torture/pr78305.c2
-rw-r--r--gcc/testsuite/gcc.dg/tree-ssa/bitops-1.c72
-rw-r--r--gcc/testsuite/gcc.dg/tree-ssa/fnsplit-1.c1
-rw-r--r--gcc/testsuite/gcc.dg/tree-ssa/fnsplit-2.c33
-rw-r--r--gcc/testsuite/gcc.dg/tree-ssa/negminus.c21
-rw-r--r--gcc/testsuite/gcc.dg/tree-ssa/pr82726.c26
-rw-r--r--gcc/testsuite/gcc.dg/tree-ssa/vrp101.c2
-rw-r--r--gcc/testsuite/gcc.dg/unclosed-init.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/bb-slp-pr65935.c4
-rw-r--r--gcc/testsuite/gcc.dg/vect/fast-math-slp-38.c3
-rw-r--r--gcc/testsuite/gcc.dg/vect/no-fast-math-vect16.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/pr25413a.c3
-rw-r--r--gcc/testsuite/gcc.dg/vect/pr45752.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/pr65947-1.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/pr65947-10.c4
-rw-r--r--gcc/testsuite/gcc.dg/vect/pr65947-12.c4
-rw-r--r--gcc/testsuite/gcc.dg/vect/pr65947-13.c4
-rw-r--r--gcc/testsuite/gcc.dg/vect/pr65947-14.c5
-rw-r--r--gcc/testsuite/gcc.dg/vect/pr65947-2.c4
-rw-r--r--gcc/testsuite/gcc.dg/vect/pr65947-3.c4
-rw-r--r--gcc/testsuite/gcc.dg/vect/pr65947-4.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/pr65947-5.c10
-rw-r--r--gcc/testsuite/gcc.dg/vect/pr65947-6.c4
-rw-r--r--gcc/testsuite/gcc.dg/vect/pr65947-9.c10
-rw-r--r--gcc/testsuite/gcc.dg/vect/pr79920.c4
-rw-r--r--gcc/testsuite/gcc.dg/vect/slp-13-big-array.c3
-rw-r--r--gcc/testsuite/gcc.dg/vect/slp-13.c3
-rw-r--r--gcc/testsuite/gcc.dg/vect/slp-16.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/slp-19c.c6
-rw-r--r--gcc/testsuite/gcc.dg/vect/slp-23.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/slp-35.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/slp-37.c3
-rw-r--r--gcc/testsuite/gcc.dg/vect/slp-perm-4.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/vect-71.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/vect-reduc-6.c4
-rw-r--r--gcc/testsuite/gcc.dg/vect/vect-reduc-or_1.c10
-rw-r--r--gcc/testsuite/gcc.dg/vect/vect-reduc-or_2.c10
-rw-r--r--gcc/testsuite/gcc.misc-tests/gcov-3.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/bsl-idiom.c88
-rw-r--r--gcc/testsuite/gcc.target/aarch64/construct_lane_zero_1.c37
-rw-r--r--gcc/testsuite/gcc.target/aarch64/copysign-bsl.c13
-rw-r--r--gcc/testsuite/gcc.target/aarch64/dwarf-cfa-reg.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/load_v2vec_lanes_1.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/store_v2vec_lanes.c31
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_cap_4.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_clastb_1.c6
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_clastb_1_run.c16
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_clastb_2.c11
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_clastb_2_run.c18
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_clastb_3.c7
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_clastb_3_run.c18
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_clastb_4.c7
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_clastb_4_run.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_clastb_5.c7
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_clastb_5_run.c18
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_clastb_6.c8
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_clastb_6_run.c18
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_clastb_7.c6
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_clastb_7_run.c18
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_const_pred_1.C14
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_const_pred_2.C10
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_const_pred_3.C8
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_const_pred_4.C8
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_dup_lane_1.c64
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_ext_1.c64
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_ext_2.c8
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_extract_1.c80
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_extract_2.c80
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_extract_3.c122
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_fdiv_1.c12
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_fmad_1.c12
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_fmla_1.c12
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_fmls_1.c12
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_fmsb_1.c12
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_fnmad_1.c12
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_fnmla_1.c12
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_fnmls_1.c12
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_fnmsb_1.c12
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_gather_load_1.c102
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_gather_load_10.c72
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_gather_load_11.c14
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_gather_load_2.c72
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_gather_load_3.c63
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_gather_load_3_run.c41
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_gather_load_4.c20
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_gather_load_4_run.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_gather_load_5.c130
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_gather_load_5_run.c161
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_gather_load_6.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_gather_load_7.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_gather_load_8.c19
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_gather_load_9.c18
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_index_offset_1.c54
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_index_offset_1_run.c34
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_indexoffset_1.c49
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_indexoffset_1_run.c48
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_ld1r_1.C56
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_ld1r_1_run.C64
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_ld1r_2.c61
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_ld1r_2_run.c38
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_live_1.c52
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_live_1_run.c52
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_live_2.c19
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_live_2_run.c32
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_load_const_offset_1.c16
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_load_const_offset_2.c6
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_load_const_offset_3.c9
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_load_scalar_offset_1.c24
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_loop_add_4_run.c7
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_mad_1.c16
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_1.c83
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_1_run.c72
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_2.c69
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_2_run.c98
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_3.c65
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_3_run.c47
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_4.c27
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_4_run.c37
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_5.c156
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_5_run.c177
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_6.c38
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_7.c53
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_mask_scatter_store_1.c173
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_mask_scatter_store_1_run.c186
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_mask_scatter_store_2.c17
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_1.c9
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_1_run.c9
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_2.c9
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_2_run.c9
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_3.c9
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_3_run.c9
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_4.c9
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_5.c9
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_6.c5
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_7.c5
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_8.c5
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_mask_struct_store_1.c22
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_mask_struct_store_1_run.c13
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_mask_struct_store_2.c24
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_mask_struct_store_2_run.c13
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_mask_struct_store_3.c28
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_mask_struct_store_3_run.c13
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_mask_struct_store_4.c3
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_mla_1.c16
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_mls_1.c16
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_mov_rr_1.c6
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_msb_1.c16
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_nopeel_1.c33
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_peel_ind_1.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_peel_ind_1_run.c8
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_peel_ind_2.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_peel_ind_2_run.c8
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_peel_ind_3.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_peel_ind_3_run.c8
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_peel_ind_4.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_peel_ind_4_run.c15
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_reduc_strict_1.C48
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_reduc_strict_1.c28
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_reduc_strict_1_run.C47
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_reduc_strict_1_run.c29
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_reduc_strict_2.C48
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_reduc_strict_2.c28
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_reduc_strict_2_run.C59
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_reduc_strict_2_run.c31
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_reduc_strict_3.c (renamed from gcc/testsuite/gcc.target/aarch64/sve_reduc_strict_3.C)23
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_rev_1.c49
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_revb_1.c10
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_revh_1.c14
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_revw_1.c10
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_scatter_store_1.c134
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_scatter_store_1_run.c155
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_scatter_store_2.c10
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_scatter_store_3.c32
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_scatter_store_4.c10
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_scatter_store_5.c23
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_scatter_store_6.c36
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_scatter_store_7.c15
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_slp_1.c18
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_slp_10.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_slp_10_run.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_slp_11.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_slp_11_run.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_slp_12.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_slp_12_run.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_slp_13.c8
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_slp_13_run.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_slp_1_run.c7
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_slp_2.c11
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_slp_2_run.c7
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_slp_3.c11
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_slp_3_run.c7
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_slp_4.c13
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_slp_4_run.c7
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_slp_5.c13
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_slp_5_run.c39
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_slp_6.c3
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_slp_6_run.c43
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_slp_7.c13
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_slp_7_run.c47
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_slp_8.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_slp_8_run.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_slp_9.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_slp_9_run.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_speculative_3.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_speculative_6.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_store_scalar_offset_1.c24
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_strided_load_1.c40
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_strided_load_2.c18
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_strided_load_3.c32
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_strided_load_4.c33
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_strided_load_5.c34
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_strided_load_6.c7
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_strided_load_7.c34
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_strided_load_8.c15
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_strided_store_1.c40
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_strided_store_2.c18
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_strided_store_3.c33
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_strided_store_4.c33
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_strided_store_5.c34
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_strided_store_6.c7
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_strided_store_7.c34
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_move_1.c93
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_move_2.c84
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_move_3.c87
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_move_4.c116
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_move_5.c111
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_move_6.c129
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_vect_1.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_vect_10.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_vect_10_run.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_vect_11.c20
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_vect_11_run.c6
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_vect_12.c20
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_vect_12_run.c6
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_vect_13.c75
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_vect_13_run.c6
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_vect_14.c58
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_vect_15.c18
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_vect_16.c18
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_vect_17.c69
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_vect_17_run.c32
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_vect_18.c12
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_vect_18_run.c22
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_vect_19.c12
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_vect_19_run.c56
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_vect_1_run.c8
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_vect_2.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_vect_20.c24
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_vect_20_run.c41
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_vect_21.c24
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_vect_21_run.c49
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_vect_22.c24
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_vect_22_run.c41
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_vect_23.c47
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_vect_23_run.c45
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_vect_2_run.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_vect_3.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_vect_3_run.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_vect_4.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_vect_4_run.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_vect_5.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_vect_5_run.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_vect_6.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_vect_6_run.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_vect_7.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_vect_7_run.c8
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_vect_8.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_vect_8_run.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_vect_9.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_struct_vect_9_run.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_trn1_1.c36
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_unpack_signed_1.c12
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_unpack_signed_1_run.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_unpack_unsigned_1.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_unpack_unsigned_1_run.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_uzp1_1.c38
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_uzp1_1_run.c86
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_uzp2_1.c38
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_uzp2_1_run.c86
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_var_stride_2.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_var_stride_4.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_vcond_1.C24
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_vec_bool_cmp_1.c60
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_vec_bool_cmp_1_run.c59
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_vec_init_2.c6
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_vec_perm_1.c28
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_vec_perm_1_overrange_run.c176
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_vec_perm_1_run.c112
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_vec_perm_2.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_vec_perm_2_run.c7
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_vec_perm_3.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_vec_perm_3_run.c3
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_vec_perm_4.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_vec_perm_4_run.c7
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_vec_perm_const_1.c38
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_vec_perm_const_1_overrun.c102
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_vec_perm_const_1_run.c88
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_vec_perm_const_single_1.c38
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_vec_perm_const_single_1_run.c88
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_vec_perm_single_1.c28
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_vec_perm_single_1_run.c86
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_while_1.c16
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_while_2.c16
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_while_3.c16
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_while_4.c16
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_while_maxiter_1.c16
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_while_maxiter_2.c16
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_while_maxiter_3.c18
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve_zip1_1.c36
-rw-r--r--gcc/testsuite/gcc.target/aarch64/vector_initialization_nostack.c4
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-4.c41
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-5.c37
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-6.c46
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-7.c38
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-8.c40
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-9.c43
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-and-union-1.c96
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-and-union.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/baseline/cmse-11.c2
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/baseline/cmse-13.c10
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/baseline/cmse-2.c2
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/baseline/cmse-6.c2
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/baseline/softfp.c2
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/baseline/union-1.c55
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/baseline/union-2.c68
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/bitfield-4.x40
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/bitfield-5.x36
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/bitfield-6.x45
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/bitfield-7.x36
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/bitfield-8.x39
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/bitfield-9.x42
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/bitfield-and-union.x (renamed from gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-and-union-1.c)19
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/cmse-13.x7
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/cmse-5.x7
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/cmse-7.x7
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/cmse-8.x7
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-4.c41
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-5.c37
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-6.c46
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-7.c38
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-8.c40
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-9.c43
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-and-union.c20
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/hard-sp/cmse-13.c11
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/hard-sp/cmse-5.c13
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/hard-sp/cmse-7.c10
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/hard-sp/cmse-8.c10
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/hard/cmse-13.c11
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/hard/cmse-5.c13
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/hard/cmse-7.c10
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/hard/cmse-8.c10
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/soft/cmse-13.c10
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/soft/cmse-5.c14
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/soft/cmse-7.c10
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/soft/cmse-8.c10
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/softfp-sp/cmse-5.c13
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/softfp-sp/cmse-7.c10
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/softfp-sp/cmse-8.c10
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/softfp/cmse-13.c10
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/softfp/cmse-5.c13
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/softfp/cmse-7.c10
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/softfp/cmse-8.c10
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/union-1.c55
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/union-2.c68
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/union-1.x54
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/union-2.x67
-rw-r--r--gcc/testsuite/gcc.target/arm/copysign_softfloat_1.c1
-rw-r--r--gcc/testsuite/gcc.target/arm/lp1189445.c2
-rw-r--r--gcc/testsuite/gcc.target/arm/pr54300.C2
-rw-r--r--gcc/testsuite/gcc.target/arm/pr67989.C3
-rw-r--r--gcc/testsuite/gcc.target/i386/avx-1.c10
-rw-r--r--gcc/testsuite/gcc.target/i386/avx-2.c2
-rw-r--r--gcc/testsuite/gcc.target/i386/avx512dq-pr82855.c14
-rw-r--r--gcc/testsuite/gcc.target/i386/avx512f-gf2p8affineqb-2.c74
-rw-r--r--gcc/testsuite/gcc.target/i386/avx512f-gf2p8mulb-2.c76
-rw-r--r--gcc/testsuite/gcc.target/i386/avx512vl-gf2p8affineqb-2.c17
-rw-r--r--gcc/testsuite/gcc.target/i386/avx512vl-gf2p8mulb-2.c17
-rw-r--r--gcc/testsuite/gcc.target/i386/force-indirect-call-1.c23
-rw-r--r--gcc/testsuite/gcc.target/i386/force-indirect-call-2.c6
-rw-r--r--gcc/testsuite/gcc.target/i386/force-indirect-call-3.c6
-rw-r--r--gcc/testsuite/gcc.target/i386/gfni-1.c12
-rw-r--r--gcc/testsuite/gcc.target/i386/gfni-2.c24
-rw-r--r--gcc/testsuite/gcc.target/i386/gfni-3.c8
-rw-r--r--gcc/testsuite/gcc.target/i386/gfni-4.c6
-rw-r--r--gcc/testsuite/gcc.target/i386/pr80425-3.c14
-rw-r--r--gcc/testsuite/gcc.target/i386/pr81706.c4
-rw-r--r--gcc/testsuite/gcc.target/i386/pr82002-2a.c2
-rw-r--r--gcc/testsuite/gcc.target/i386/pr82002-2b.c2
-rw-r--r--gcc/testsuite/gcc.target/i386/pr82941-1.c14
-rw-r--r--gcc/testsuite/gcc.target/i386/pr82941-2.c6
-rw-r--r--gcc/testsuite/gcc.target/i386/pr82942-1.c6
-rw-r--r--gcc/testsuite/gcc.target/i386/pr82942-2.c6
-rw-r--r--gcc/testsuite/gcc.target/i386/pr82990-1.c14
-rw-r--r--gcc/testsuite/gcc.target/i386/pr82990-2.c6
-rw-r--r--gcc/testsuite/gcc.target/i386/pr82990-3.c6
-rw-r--r--gcc/testsuite/gcc.target/i386/pr82990-4.c6
-rw-r--r--gcc/testsuite/gcc.target/i386/pr82990-5.c14
-rw-r--r--gcc/testsuite/gcc.target/i386/pr82990-6.c6
-rw-r--r--gcc/testsuite/gcc.target/i386/pr82990-7.c6
-rw-r--r--gcc/testsuite/gcc.target/i386/sse-13.c7
-rw-r--r--gcc/testsuite/gcc.target/i386/sse-14.c3
-rw-r--r--gcc/testsuite/gcc.target/i386/sse-23.c6
-rw-r--r--gcc/testsuite/gcc.target/i386/stack-check-12.c2
-rw-r--r--gcc/testsuite/gcc.target/mips/pr82981.c13
-rw-r--r--gcc/testsuite/gcc.target/powerpc/builtin-vec-sums-be-int.c16
-rw-r--r--gcc/testsuite/gcc.target/powerpc/builtins-3-p9.c13
-rw-r--r--gcc/testsuite/gcc.target/powerpc/builtins-6-p9-runnable.c1046
-rw-r--r--gcc/testsuite/gcc.target/powerpc/builtins-revb-runnable.c342
-rw-r--r--gcc/testsuite/gcc.target/powerpc/float128-hw4.c135
-rw-r--r--gcc/testsuite/gcc.target/powerpc/float128-minmax.c15
-rw-r--r--gcc/testsuite/gcc.target/powerpc/p9-xxbr-1.c11
-rw-r--r--gcc/testsuite/gcc.target/powerpc/p9-xxbr-3.c99
-rw-r--r--gcc/testsuite/gcc.target/powerpc/pr82748-1.c82
-rw-r--r--gcc/testsuite/gcc.target/powerpc/pr82748-2.c46
-rw-r--r--gcc/testsuite/gcc.target/powerpc/sad-vectorize-1.c36
-rw-r--r--gcc/testsuite/gcc.target/powerpc/sad-vectorize-2.c36
-rw-r--r--gcc/testsuite/gcc.target/powerpc/sad-vectorize-3.c57
-rw-r--r--gcc/testsuite/gcc.target/powerpc/sad-vectorize-4.c57
-rw-r--r--gcc/testsuite/gcc.target/powerpc/swaps-p8-26.c6
-rw-r--r--gcc/testsuite/gcc.target/powerpc/vec-cmp-sel.c5
-rw-r--r--gcc/testsuite/gcc.target/powerpc/vsu/vec-cmpne-0.c2
-rw-r--r--gcc/testsuite/gcc.target/powerpc/vsu/vec-cmpne-1.c2
-rw-r--r--gcc/testsuite/gcc.target/powerpc/vsu/vec-cmpne-2.c2
-rw-r--r--gcc/testsuite/gcc.target/powerpc/vsu/vec-cmpne-3.c2
-rw-r--r--gcc/testsuite/gcc.target/powerpc/vsu/vec-cmpne-4.c2
-rw-r--r--gcc/testsuite/gcc.target/powerpc/vsu/vec-cmpne-5.c2
-rw-r--r--gcc/testsuite/gcc.target/powerpc/vsu/vec-cmpne-6.c2
-rw-r--r--gcc/testsuite/gcc.target/powerpc/vsu/vec-cnttz-lsbb-2.c2
-rw-r--r--gcc/testsuite/gfortran.dg/alloc_comp_basics_1.f902
-rw-r--r--gcc/testsuite/gfortran.dg/allocatable_scalar_9.f9016
-rw-r--r--gcc/testsuite/gfortran.dg/allocate_assumed_charlen_1.f9028
-rw-r--r--gcc/testsuite/gfortran.dg/auto_dealloc_1.f904
-rw-r--r--gcc/testsuite/gfortran.dg/class_65.f9041
-rw-r--r--gcc/testsuite/gfortran.dg/class_66.f9028
-rw-r--r--gcc/testsuite/gfortran.dg/coarray/send_char_array_1.f9054
-rw-r--r--gcc/testsuite/gfortran.dg/coarray_lib_realloc_1.f908
-rw-r--r--gcc/testsuite/gfortran.dg/dec_structure_23.f9019
-rw-r--r--gcc/testsuite/gfortran.dg/finalize_28.f902
-rw-r--r--gcc/testsuite/gfortran.dg/hollerith_character_array_constructor.f9011
-rw-r--r--gcc/testsuite/gfortran.dg/init_flag_16.f0325
-rw-r--r--gcc/testsuite/gfortran.dg/interface_40.f908
-rw-r--r--gcc/testsuite/gfortran.dg/logical_temp_io.f9013
-rw-r--r--gcc/testsuite/gfortran.dg/logical_temp_io_kind8.f9014
-rw-r--r--gcc/testsuite/gfortran.dg/loop_interchange_1.f9022
-rw-r--r--gcc/testsuite/gfortran.dg/minmaxloc_8.f9048
-rw-r--r--gcc/testsuite/gfortran.dg/move_alloc_15.f904
-rw-r--r--gcc/testsuite/gfortran.dg/pr69739.f9039
-rw-r--r--gcc/testsuite/gfortran.dg/pr70330.f907
-rw-r--r--gcc/testsuite/gfortran.dg/pr78240.f9015
-rw-r--r--gcc/testsuite/gfortran.dg/pr78619.f9021
-rw-r--r--gcc/testsuite/gfortran.dg/transfer_simplify_11.f908
-rw-r--r--gcc/testsuite/gfortran.dg/typebound_call_29.f9046
-rw-r--r--gcc/testsuite/gfortran.dg/typebound_proc_27.f038
-rw-r--r--gcc/testsuite/gfortran.dg/vect/vect-8.f904
-rw-r--r--gcc/testsuite/gnat.dg/controlled2.adb3
-rw-r--r--gcc/testsuite/gnat.dg/controlled4.adb3
-rw-r--r--gcc/testsuite/gnat.dg/delta_aggr.adb51
-rw-r--r--gcc/testsuite/gnat.dg/elab3.adb9
-rw-r--r--gcc/testsuite/gnat.dg/elab3.ads3
-rw-r--r--gcc/testsuite/gnat.dg/elab3_pkg.adb11
-rw-r--r--gcc/testsuite/gnat.dg/elab3_pkg.ads7
-rw-r--r--gcc/testsuite/gnat.dg/finalized.adb1
-rw-r--r--gcc/testsuite/gnat.dg/gcov/check.adb27
-rw-r--r--gcc/testsuite/gnat.dg/gcov/gcov.exp44
-rw-r--r--gcc/testsuite/gnat.dg/opt69.adb28
-rw-r--r--gcc/testsuite/gnat.dg/out_param.adb21
-rw-r--r--gcc/testsuite/gnat.dg/overriding_ops2.adb8
-rw-r--r--gcc/testsuite/gnat.dg/overriding_ops2.ads12
-rw-r--r--gcc/testsuite/gnat.dg/overriding_ops2_pkg-high.ads5
-rw-r--r--gcc/testsuite/gnat.dg/overriding_ops2_pkg.ads9
-rw-r--r--gcc/testsuite/gnat.dg/unreferenced.adb11
-rw-r--r--gcc/testsuite/gnat.dg/vect18.adb2
-rw-r--r--gcc/testsuite/lib/gcc-dg.exp17
-rw-r--r--gcc/testsuite/lib/scanasm.exp44
-rw-r--r--gcc/testsuite/lib/scandump.exp20
-rw-r--r--gcc/testsuite/lib/target-supports.exp74
-rw-r--r--gcc/testsuite/obj-c++.dg/comp-types-8.mm1
-rw-r--r--gcc/testsuite/obj-c++.dg/demangle-3.mm1
-rw-r--r--gcc/testsuite/obj-c++.dg/super-class-1.mm1
-rw-r--r--gcc/toplev.c9
-rw-r--r--gcc/tracer.c2
-rw-r--r--gcc/trans-mem.c8
-rw-r--r--gcc/tree-cfg.c21
-rw-r--r--gcc/tree-cfgcleanup.c89
-rw-r--r--gcc/tree-chkp.c8
-rw-r--r--gcc/tree-data-ref.c6
-rw-r--r--gcc/tree-data-ref.h5
-rw-r--r--gcc/tree-emutls.c2
-rw-r--r--gcc/tree-inline.c108
-rw-r--r--gcc/tree-parloops.c2
-rw-r--r--gcc/tree-pass.h1
-rw-r--r--gcc/tree-predcom.c364
-rw-r--r--gcc/tree-pretty-print.c18
-rw-r--r--gcc/tree-sra.c1
-rw-r--r--gcc/tree-ssa-alias.c8
-rw-r--r--gcc/tree-ssa-coalesce.c3
-rw-r--r--gcc/tree-ssa-loop-im.c2
-rw-r--r--gcc/tree-ssa-loop-ivopts.c16
-rw-r--r--gcc/tree-ssa-phiprop.c2
-rw-r--r--gcc/tree-ssa-sink.c6
-rw-r--r--gcc/tree-ssa-strlen.c395
-rw-r--r--gcc/tree-ssa-tail-merge.c65
-rw-r--r--gcc/tree-ssa-threadupdate.c48
-rw-r--r--gcc/tree-vect-data-refs.c458
-rw-r--r--gcc/tree-vect-loop-manip.c99
-rw-r--r--gcc/tree-vect-loop.c526
-rw-r--r--gcc/tree-vect-patterns.c206
-rw-r--r--gcc/tree-vect-slp.c71
-rw-r--r--gcc/tree-vect-stmts.c3343
-rw-r--r--gcc/tree-vectorizer.h110
-rw-r--r--gcc/tree-vrp.c4972
-rw-r--r--gcc/tree-vrp.h68
-rw-r--r--gcc/tree.c10
-rw-r--r--gcc/tree.def2
-rw-r--r--gcc/tree.h7
-rw-r--r--gcc/ubsan.c4
-rw-r--r--gcc/value-prof.c5
-rw-r--r--gcc/vr-values.c4183
-rw-r--r--gcc/vr-values.h121
-rw-r--r--include/ChangeLog5
-rw-r--r--include/plugin-api.h18
-rw-r--r--intl/ChangeLog5
-rwxr-xr-xintl/configure40
-rw-r--r--intl/configure.ac2
-rw-r--r--libcpp/ChangeLog38
-rw-r--r--libcpp/identifiers.c2
-rw-r--r--libcpp/include/cpplib.h3
-rw-r--r--libcpp/include/line-map.h46
-rw-r--r--libcpp/init.c44
-rw-r--r--libcpp/internal.h3
-rw-r--r--libcpp/lex.c42
-rw-r--r--libcpp/macro.c170
-rw-r--r--libcpp/pch.c1
-rw-r--r--libgcc/ChangeLog31
-rw-r--r--libgcc/config.host2
-rw-r--r--libgcc/config/aarch64/sfp-machine.h2
-rw-r--r--libgcc/config/i386/freebsd-unwind.h43
-rw-r--r--libgcc/config/i386/sfp-machine.h2
-rw-r--r--libgcc/config/ia64/sfp-machine.h2
-rw-r--r--libgcc/config/mips/sfp-machine.h2
-rw-r--r--libgcc/config/rs6000/aix-unwind.h2
-rw-r--r--libgcc/config/rs6000/sfp-machine.h2
-rw-r--r--libgcc/config/sol2/crtpg.c9
-rw-r--r--libgcc/configure2
-rw-r--r--libgcc/configure.ac2
-rw-r--r--libgo/Makefile.am6
-rw-r--r--libgo/Makefile.in6
-rwxr-xr-xlibgo/configure4
-rw-r--r--libgo/configure.ac2
-rw-r--r--libgo/go/runtime/internal/atomic/atomic.c14
-rw-r--r--libgo/go/runtime/panic.go1
-rw-r--r--libgo/go/sync/atomic/atomic.c12
-rwxr-xr-xlibgo/mkrsysinfo.sh8
-rwxr-xr-xlibgo/mksysinfo.sh8
-rw-r--r--libgo/runtime/runtime.h2
-rw-r--r--libgomp/ChangeLog30
-rw-r--r--libgomp/testsuite/libgomp.c++/loop-2.C1
-rw-r--r--libgomp/testsuite/libgomp.c++/loop-4.C1
-rw-r--r--libgomp/testsuite/libgomp.c++/parallel-1.C1
-rw-r--r--libgomp/testsuite/libgomp.c++/pr82835.C34
-rw-r--r--libgomp/testsuite/libgomp.c++/shared-1.C1
-rw-r--r--libgomp/testsuite/libgomp.c++/single-1.C1
-rw-r--r--libgomp/testsuite/libgomp.c++/single-2.C1
-rw-r--r--libgomp/testsuite/libgomp.oacc-c-c++-common/asyncwait-1.c16
-rw-r--r--libgomp/testsuite/libgomp.oacc-c-c++-common/f-asyncwait-1.c297
-rw-r--r--libgomp/testsuite/libgomp.oacc-c-c++-common/f-asyncwait-2.c61
-rw-r--r--libgomp/testsuite/libgomp.oacc-c-c++-common/f-asyncwait-3.c63
-rw-r--r--libquadmath/ChangeLog6
-rw-r--r--libquadmath/printf/gmp-impl.h2
-rw-r--r--libsanitizer/ChangeLog14
-rw-r--r--libsanitizer/asan/asan_poisoning.cc2
-rw-r--r--libsanitizer/lsan/lsan_common.cc4
-rw-r--r--libsanitizer/ubsan/Makefile.am5
-rw-r--r--libsanitizer/ubsan/Makefile.in11
-rw-r--r--libstdc++-v3/ChangeLog79
-rw-r--r--libstdc++-v3/doc/xml/manual/abi.xml4
-rw-r--r--libstdc++-v3/include/bits/locale_conv.h2
-rw-r--r--libstdc++-v3/include/bits/range_access.h14
-rw-r--r--libstdc++-v3/include/experimental/numeric20
-rw-r--r--libstdc++-v3/include/std/fstream6
-rw-r--r--libstdc++-v3/include/std/numeric20
-rw-r--r--libstdc++-v3/src/filesystem/ops-common.h2
-rw-r--r--libstdc++-v3/testsuite/20_util/optional/cons/deduction.cc4
-rw-r--r--libstdc++-v3/testsuite/20_util/pair/cons/deduction.cc4
-rw-r--r--libstdc++-v3/testsuite/20_util/pair/traits.cc4
-rw-r--r--libstdc++-v3/testsuite/20_util/tuple/cons/deduction.cc4
-rw-r--r--libstdc++-v3/testsuite/20_util/variant/compile.cc92
-rw-r--r--libstdc++-v3/testsuite/22_locale/conversions/buffer/3.cc58
-rw-r--r--libstdc++-v3/testsuite/23_containers/map/modifiers/try_emplace/1.cc1
-rw-r--r--libstdc++-v3/testsuite/23_containers/unordered_map/modifiers/try_emplace.cc1
-rw-r--r--libstdc++-v3/testsuite/26_numerics/gcd/gcd_neg.cc17
-rw-r--r--libstdc++-v3/testsuite/26_numerics/lcm/lcm_neg.cc23
-rw-r--r--libstdc++-v3/testsuite/27_io/basic_fstream/cons/char/path.cc1
-rw-r--r--libstdc++-v3/testsuite/27_io/basic_ifstream/cons/char/path.cc1
-rw-r--r--libstdc++-v3/testsuite/27_io/basic_ofstream/open/char/path.cc1
-rw-r--r--libstdc++-v3/testsuite/27_io/filesystem/iterators/directory_iterator.cc1
-rw-r--r--libstdc++-v3/testsuite/27_io/filesystem/iterators/recursive_directory_iterator.cc2
-rw-r--r--libstdc++-v3/testsuite/experimental/filesystem/iterators/recursive_directory_iterator.cc2
-rw-r--r--libstdc++-v3/testsuite/libstdc++-prettyprinters/tr1.cc2
-rw-r--r--libstdc++-v3/testsuite/util/testsuite_tr1.h2
1677 files changed, 43039 insertions, 25246 deletions
diff --git a/ChangeLog b/ChangeLog
index abf3341c68c..13b0321b7b1 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,12 @@
+2017-11-06 Palmer Dabbelt <palmer@sifive.com>
+
+ * MAINTAINERS (RISC-V): Add Jim Wilson as a maintainer.
+ Use my SiFive email address.
+
+2017-11-15 Sebastian Peryt <sebastian.peryt@intel.com>
+
+ * MAINTAINERS (write after approval): Add myself.
+
2017-10-27 Martin Liska <mliska@suse.cz>
* Makefile.tpl: Use proper name of folder as it was renamed
diff --git a/MAINTAINERS b/MAINTAINERS
index 9c3a56ea094..d207b58d1fc 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -93,8 +93,9 @@ pdp11 port Paul Koning <ni1d@arrl.net>
picochip port Daniel Towner <dant@picochip.com>
powerpcspe port Andrew Jenner <andrew@codesourcery.com>
riscv port Kito Cheng <kito.cheng@gmail.com>
-riscv port Palmer Dabbelt <palmer@dabbelt.com>
+riscv port Palmer Dabbelt <palmer@sifive.com>
riscv port Andrew Waterman <andrew@sifive.com>
+riscv port Jim Wilson <jimw@sifive.com>
rl78 port DJ Delorie <dj@redhat.com>
rs6000/powerpc port David Edelsohn <dje.gcc@gmail.com>
rs6000/powerpc port Segher Boessenkool <segher@kernel.crashing.org>
@@ -532,6 +533,7 @@ Patrick Palka <ppalka@gcc.gnu.org>
Devang Patel <dpatel@apple.com>
Andris Pavenis <andris.pavenis@iki.fi>
Fernando Pereira <pronesto@gmail.com>
+Sebastian Peryt <sebastian.peryt@intel.com>
Kaushik Phatak <kaushik.phatak@kpitcummins.com>
Nicolas Pitre <nico@cam.org>
Paul Pluzhnikov <ppluzhnikov@google.com>
diff --git a/config/ChangeLog b/config/ChangeLog
index 2e3ead0a26b..90e1af4779d 100644
--- a/config/ChangeLog
+++ b/config/ChangeLog
@@ -1,3 +1,8 @@
+2017-11-15 Alexandre Oliva <aoliva@redhat.com>
+
+ * bootstrap-debug-lean.mk (do-compare): Use the
+ contrib/compare-debug script.
+
2017-10-24 H.J. Lu <hongjiu.lu@intel.com>
* bootstrap-cet.mk: New file.
diff --git a/config/bootstrap-debug-lean.mk b/config/bootstrap-debug-lean.mk
index e215280b09f..5f2db80687f 100644
--- a/config/bootstrap-debug-lean.mk
+++ b/config/bootstrap-debug-lean.mk
@@ -9,3 +9,4 @@
STAGE2_CFLAGS += -fcompare-debug=
STAGE3_CFLAGS += -fcompare-debug
+do-compare = $(SHELL) $(srcdir)/contrib/compare-debug $$f1 $$f2
diff --git a/contrib/ChangeLog b/contrib/ChangeLog
index df4b1bc0b80..a2828b8d793 100644
--- a/contrib/ChangeLog
+++ b/contrib/ChangeLog
@@ -1,3 +1,7 @@
+2017-11-08 Martin Liska <mliska@suse.cz>
+
+ * analyze_brprob.py: Fix abbreviations for SI units.
+
2017-10-02 Thomas Schwinge <thomas@codesourcery.com>
* gcc_update (files_and_dependencies): Handle libbacktrace.
diff --git a/contrib/analyze_brprob.py b/contrib/analyze_brprob.py
index 8f83b5a0221..e03d1da1cde 100755
--- a/contrib/analyze_brprob.py
+++ b/contrib/analyze_brprob.py
@@ -149,7 +149,7 @@ class Summary:
def count_formatted(self):
v = self.count
- for unit in ['','K','M','G','T','P','E','Z']:
+ for unit in ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']:
if v < 1000:
return "%3.2f%s" % (v, unit)
v /= 1000.0
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 806732359b6..0068c53d062 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,1865 @@
+2017-11-16 Julia Koval <julia.koval@intel.com>
+
+ PR target/82983
+ * config/i386/gfniintrin.h: Add sse check.
+ * config/i386/i386.c (ix86_expand_builtin): Fix gfni check.
+
+2017-11-16 Julia Koval <julia.koval@intel.com>
+
+ * common/config/i386/i386-common.c (OPTION_MASK_ISA_AVX512VBMI2_SET,
+ OPTION_MASK_ISA_AVX512VBMI2_UNSET): New.
+ (ix86_handle_option): Handle -mavx512vbmi2.
+ * config/i386/cpuid.h: Add bit_AVX512VBMI2.
+ * config/i386/driver-i386.c (host_detect_local_cpu): Handle new bit.
+ * config/i386/i386-c.c (__AVX512VBMI2__): New.
+ * config/i386/i386.c (ix86_target_string): Handle -mavx512vbmi2.
+ (ix86_valid_target_attribute_inner_p): Ditto.
+ * config/i386/i386.h (TARGET_AVX512VBMI2, TARGET_AVX512VBMI2_P): New.
+ * config/i386/i386.opt (mavx512vbmi2): New option.
+ * doc/invoke.texi: Add new option.
+
+[2017-11-16 Julia Koval <julia.koval@intel.com>
+
+ * config/i386/gfniintrin.h (_mm_gf2p8mul_epi8, _mm256_gf2p8mul_epi8,
+ _mm_mask_gf2p8mul_epi8, _mm_maskz_gf2p8mul_epi8,
+ _mm256_mask_gf2p8mul_epi8, _mm256_maskz_gf2p8mul_epi8,
+ _mm512_mask_gf2p8mul_epi8, _mm512_maskz_gf2p8mul_epi8,
+ _mm512_gf2p8mul_epi8): New intrinsics.
+ * config/i386/i386-builtin-types.def
+ (V64QI_FTYPE_V64QI_V64QI): New type.
+ * config/i386/i386-builtin.def (__builtin_ia32_vgf2p8mulb_v64qi,
+ __builtin_ia32_vgf2p8mulb_v64qi_mask, __builtin_ia32_vgf2p8mulb_v32qi,
+ __builtin_ia32_vgf2p8mulb_v32qi_mask, __builtin_ia32_vgf2p8mulb_v16qi,
+ __builtin_ia32_vgf2p8mulb_v16qi_mask): New builtins.
+ * config/i386/sse.md (vgf2p8mulb_*): New pattern.
+ * config/i386/i386.c (ix86_expand_args_builtin): Handle new type.
+
+2017-11-15 Uros Bizjak <ubizjak@gmail.com>
+
+ * config/i386/i386.c (x86_print_call_or_nop): Emit 5 byte nop
+ explicitly as a stream of bytes.
+
+2017-11-15 Bill Schmidt <wschmidt@linux.vnet.ibm.com>
+
+ * config/rs6000/altivec.h (vec_xst_be): New #define.
+ * config/rs6000/altivec.md (altivec_vperm_<mode>_direct): Rename
+ and externalize from *altivec_vperm_<mode>_internal.
+ * config/rs6000/rs6000-builtin.def (XL_BE_V16QI): Remove macro
+ instantiation.
+ (XL_BE_V8HI): Likewise.
+ (XL_BE_V4SI): Likewise.
+ (XL_BE_V4SI): Likewise.
+ (XL_BE_V2DI): Likewise.
+ (XL_BE_V4SF): Likewise.
+ (XL_BE_V2DF): Likewise.
+ (XST_BE): Add BU_VSX_OVERLOAD_X macro instantiation.
+ * config/rs6000/rs6000-c.c (altivec_overloaded_builtins): Correct
+ all array entries with these keys: VSX_BUILTIN_VEC_XL,
+ VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_VEC_XST. Add entries for key
+ VSX_BUILTIN_VEC_XST_BE.
+ * config/rs6000/rs6000.c (altivec_expand_xl_be_builtin): Remove.
+ (altivec_expand_builtin): Remove handling for VSX_BUILTIN_XL_BE_*
+ built-ins.
+ (altivec_init_builtins): Replace conditional calls to def_builtin
+ for __builtin_vsx_ld_elemrev_{v8hi,v16qi} and
+ __builtin_vsx_st_elemrev_{v8hi,v16qi} based on TARGET_P9_VECTOR
+ with unconditional calls. Remove calls to def_builtin for
+ __builtin_vsx_le_be_<mode>. Add a call to def_builtin for
+ __builtin_vec_xst_be.
+ * config/rs6000/vsx.md (vsx_ld_elemrev_v8hi): Convert define_insn
+ to define_expand, and add alternate RTL generation for P8.
+ (*vsx_ld_elemrev_v8hi_internal): New define_insn based on
+ vsx_ld_elemrev_v8hi.
+ (vsx_ld_elemrev_v16qi): Convert define_insn to define_expand, and
+ add alternate RTL generation for P8.
+ (*vsx_ld_elemrev_v16qi_internal): New define_insn based on
+ vsx_ld_elemrev_v16qi.
+ (vsx_st_elemrev_v8hi): Convert define_insn
+ to define_expand, and add alternate RTL generation for P8.
+ (*vsx_st_elemrev_v8hi_internal): New define_insn based on
+ vsx_st_elemrev_v8hi.
+ (vsx_st_elemrev_v16qi): Convert define_insn to define_expand, and
+ add alternate RTL generation for P8.
+ (*vsx_st_elemrev_v16qi_internal): New define_insn based on
+ vsx_st_elemrev_v16qi.
+
+2017-11-15 H.J. Lu <hongjiu.lu@intel.com>
+
+ PR target/82990
+ * config/i386/i386.c (pass_insert_vzeroupper::gate): Remove
+ TARGET_AVX512ER check.
+ (ix86_option_override_internal): Set MASK_VZEROUPPER if
+ neither -mzeroupper nor -mno-zeroupper is used and
+ TARGET_EMIT_VZEROUPPER is set.
+ * config/i386/i386.h (TARGET_EMIT_VZEROUPPER): New.
+ * config/i386/x86-tune.def: Add X86_TUNE_EMIT_VZEROUPPER.
+
+2017-11-15 Will Schmidt <will_schmidt@vnet.ibm.com>
+
+ * config/rs6000/rs6000.c (rs6000_gimple_fold_builtin): Add support for
+ folding of vector compares.
+ (fold_build_vec_cmp): New helper function.
+ (fold_compare_helper): New helper function.
+ (builtin_function_type): Add compare builtins to the list of functions
+ having unsigned arguments. Cosmetic updates to comment indentation.
+ * config/rs6000/vsx.md (vcmpneb, vcmpneh, vcmpnew): Update to specify
+ the not+eq combination.
+
+2017-11-15 Bin Cheng <bin.cheng@arm.com>
+
+ PR tree-optimization/82726
+ PR tree-optimization/70754
+ * tree-predcom.c (order_drefs_by_pos): New function.
+ (combine_chains): Move code setting has_max_use_after to...
+ (try_combine_chains): ...here. New parameter. Sort combined chains
+ according to position information.
+ (tree_predictive_commoning_loop): Update call to above function.
+ (update_pos_for_combined_chains, pcom_stmt_dominates_stmt_p): New.
+
+2017-11-15 Bin Cheng <bin.cheng@arm.com>
+
+ PR tree-optimization/82726
+ Revert
+ 2017-01-23 Bin Cheng <bin.cheng@arm.com>
+
+ PR tree-optimization/70754
+ * tree-predcom.c (stmt_combining_refs): New parameter INSERT_BEFORE.
+ (reassociate_to_the_same_stmt): New parameter INSERT_BEFORE. Insert
+ combined stmt before it if not NULL.
+ (combine_chains): Process refs reversely and compute dominance point
+ for root ref.
+
+ Revert
+ 2017-02-23 Bin Cheng <bin.cheng@arm.com>
+
+ PR tree-optimization/79663
+ * tree-predcom.c (combine_chains): Process refs in reverse order
+ only for ZERO length chains, and add explaining comment.
+
+2017-11-15 Tamar Christina <tamar.christina@arm.com>
+
+ * config/arm/arm-cpus.in (armv8_3, ARMv8_3a, armv8.3-a): New
+ * config/arm/arm-tables.opt (armv8.3-a): Regenerated.
+ * doc/invoke.texi (ARM Options): Add armv8.3-a.
+
+2017-11-15 Tamar Christina <tamar.christina@arm.com>
+
+ * config/arm/arm.h (TARGET_DOTPROD): Add arm_arch8_2.
+
+2017-11-15 Martin Liska <mliska@suse.cz>
+
+ * tree-cfg.c (pass_warn_function_return::execute):
+ Compare warn_return_type for greater than zero.
+
+2017-11-15 Sebastian Peryt <sebastian.peryt@intel.com>
+
+ PR target/82941
+ PR target/82942
+ * config/i386/i386.c (pass_insert_vzeroupper): Modify gate condition
+ to return true on Xeon and not on Xeon Phi.
+ (ix86_check_avx256_register): Changed to ...
+ (ix86_check_avx_upper_register): ... this. Add extra check for
+ VALID_AVX512F_REG_OR_XI_MODE.
+ (ix86_avx_u128_mode_needed): Changed
+ ix86_check_avx256_register to ix86_check_avx_upper_register.
+ (ix86_check_avx256_stores): Changed to ...
+ (ix86_check_avx_upper_stores): ... this. Changed
+ ix86_check_avx256_register to ix86_check_avx_upper_register.
+ (ix86_avx_u128_mode_after): Changed
+ avx_reg256_found to avx_upper_reg_found. Changed
+ ix86_check_avx256_stores to ix86_check_avx_upper_stores.
+ (ix86_avx_u128_mode_entry): Changed
+ ix86_check_avx256_register to ix86_check_avx_upper_register.
+ (ix86_avx_u128_mode_exit): Ditto.
+ * config/i386/i386.h: (host_detect_local_cpu): New define.
+
+2017-11-15 Dominik Infuehr <dominik.infuehr@theobroma-systems.com>
+
+ * config/arm/xgene1.md (xgene1): Split into automatons
+ xgene1_main, xgene1_decoder, xgene1_div, xgene1_simd.
+ (xgene1_f_load): Adjust reservations and/or types.
+ (xgene1_f_store): Likewise.
+ (xgene1_load_pair): Likewise.
+ (xgene1_store_pair): Likewise.
+ (xgene1_fp_load1): Likewise.
+ (xgene1_load1): Likewise.
+ (xgene1_store1): Likewise.
+ (xgene1_move): Likewise.
+ (xgene1_alu): Likewise.
+ (xgene1_simd): Likewise.
+ (xgene1_bfm): Likewise.
+ (xgene1_neon_load1): Likewise.
+ (xgene1_neon_store1): Likewise.
+ (xgene1_neon_logic): Likewise.
+ (xgene1_neon_st1): Likewise.
+ (xgene1_neon_ld1r): Likewise.
+ (xgene1_alu_cond): Added.
+ (xgene1_shift_reg): Likwise.
+ (xgene1_bfx): Likewise.
+ (xgene1_mul): Split into xgene1_mul32, xgene1_mul64.
+
+2017-11-15 Jakub Jelinek <jakub@redhat.com>
+
+ PR target/82981
+ * internal-fn.c: Include gimple-ssa.h, tree-phinodes.h and
+ ssa-iterators.h.
+ (can_widen_mult_without_libcall): New function.
+ (expand_mul_overflow): If only checking unsigned mul overflow,
+ not result, and can do efficiently MULT_HIGHPART_EXPR, emit that.
+ Don't use WIDEN_MULT_EXPR if it would involve a libcall, unless
+ no other way works. Add MULT_HIGHPART_EXPR + MULT_EXPR support.
+ (expand_DIVMOD): Formatting fix.
+ * expmed.h (expand_mult): Add NO_LIBCALL argument.
+ * expmed.c (expand_mult): Likewise. Use OPTAB_WIDEN rather
+ than OPTAB_LIB_WIDEN if NO_LIBCALL is true, and allow it to fail.
+
+ PR tree-optimization/82977
+ * tree-ssa-strlen.c (strlen_optimize_stmt): Pass a reference to a copy
+ constructed temporary to strlen_to_stridx.put.
+
+2017-11-15 Martin Liska <mliska@suse.cz>
+
+ * configure.ac: Remove -fkeep-inline-functions from coverage_flags.
+ * configure: Regenerate.
+
+2017-11-15 Martin Liska <mliska@suse.cz>
+
+ PR target/82927
+ * config/sh/sh-mem.cc: Use proper probability for
+ REG_BR_PROB_NOTE.
+
+2017-11-14 Jeff Law <law@redhat.com>
+
+ * explow.c (anti_adjust_stack_and_probe_stack_clash): Avoid probing
+ the red zone for stack_clash_protection_final_dynamic_probe targets
+ when the total dynamic stack size is zero bytes.
+
+ * tree-ssa-threadupdate.c (thread_through_all_blocks): Thread
+ blocks is post order.
+
+2017-11-15 Alexandre Oliva <aoliva@redhat.com>
+
+ * dumpfile.h (TDF_COMPARE_DEBUG): New.
+ * final.c (rest_of_clean_state): Set it for the
+ -fcompare-debug dump.
+ * tree-pretty-print.c (dump_generic_node): Omit OBJ_TYPE_REF
+ class when TDF_COMPARE_DEBUG is set.
+
+ * dwarf2out.c (gen_producer_string): Discard
+ OPT_fcompare_debug.
+
+2017-11-15 Joseph Myers <joseph@codesourcery.com>
+
+ PR c/81156
+ * doc/extend.texi (Other Builtins): Document __builtin_tgmath.
+ * ginclude/tgmath.h (__tg_cplx, __tg_ldbl, __tg_dbl, __tg_choose)
+ (__tg_choose_2, __tg_choose_3, __TGMATH_REAL_1_2)
+ (__TGMATH_REAL_2_3): Remove macros.
+ (__TGMATH_CPLX, __TGMATH_CPLX_2, __TGMATH_REAL, __TGMATH_REAL_2)
+ (__TGMATH_REAL_3, __TGMATH_CPLX_ONLY): Define using
+ __builtin_tgmath.
+ (frexp, ldexp, nexttoward, scalbn, scalbln): Define using
+ __TGMATH_REAL_2.
+ (remquo): Define using __TGMATH_REAL_3.
+
+2017-11-14 Jeff Law <law@redhat.com>
+
+ * vr-values.c: New file with contents extracted from tree-vrp.c.
+ * Makefile.in (OBJS): Add vr-values.o
+ * tree-vrp.h (set_value_range_to_nonnull): Prototype.
+ (set_value_range, set_and_canonicalize_value_range): Likewise.
+ (vrp_bitmap_equal_p, range_is_nonnull): Likewise.
+ (value_range_constant_singleton, symbolic_range_p): Likewise.
+ (compare_values, compare_values_warnv, vrp_val_is_min): Likewise.
+ (vrp_val_is_max, copy_value_range, set_value_range_to_value): Likewise.
+ (extract_range_from_binary_expr_1, vrp_val_min, vrp_val_max): Likewise.
+ (set_value_range_to_null, range_int_cst_p, opreand_less_p): Likewise.
+ (find_case_label_range, find_case_label_index): Likewise.
+ (zero_nonzero_bits_from_vr, overflow_comparison_p): Likewise.
+ (range_int_cst_singleton_p, value_inside_range): Likewise.
+ (get_single_symbol): Likewise.
+ (switch_update): Move structure definition here.
+ (to_remove_edges, to_update_switch_stmts): Provide externs.
+ * tree-vrp.c: Move all methods for vr-values class to vr-values.c
+ (vrp_val_max, vrp_val_min, vrp_val_is_max): Make externally visible.
+ (vrp_val_is_min, set_value_range): Likewise.
+ (set_and_canonicalize_value_range, copy_value_range): Likewise.
+ (set_value_range_to_value, set_value_range_to_nonnull): Likewise.
+ (set_value_range_to_null, vrp_bitmap_equal_p): Likewise.
+ (range_is_nonnull, range_int_cst_p): Likewwise.
+ (range_int_cst_singleton_p, symbolic_range_p): Likewise.
+ (get_single_symbol, operand_less_p): Likewise
+ (compare_values_warnv, compare_values): Likewise.
+ (value_inside_range, value_range_constant_singleton): Likewise.
+ (zero_nonzero_bitgs_from_vr): Likewise.
+ (extract_range_from_binary_expr_1): Likewise.
+ (overflow_comparison_p): Likewise.
+ (to_remove_edges, to_update_switch_stmts): Likewise.
+ (find_case_label-index, find_case_label_range): Likewise.
+ (switch_update, set_value_range_to_nonnegative): Remove.
+ (set_value_range_to_truthvalue): Likewise.
+ (symbolic_range_based_on_p, gimple_assign_nonzero_p): Likewise.
+ (gimple_stmt_nonzero_p, compare_ranges): Likewise.
+ (compare_range_with_value, vrp_valueize, vrp_valueize_1): Likewise.
+ (find_case_label_ranges, test_for_singularity): Likewise.
+ (range_fits_type_p, simplify_conversion_using_ranges): LIkewise.
+ (x_vr_values): Move to its remaining use site.
+
+2017-11-10 Jeff Law <law@redhat.com>
+
+ * vr-values.h (VR_INITIALIZER): Move #define here.
+ * gimple-ssa-evrp.c: New file with contents extracted from tree-vrp.c
+ * Makefile.in (OBJS): Add tree-evrp.o
+ * tree-vrp.h (assert_info): Move structure definition here.
+ (set_value_range_to_varying): Prototype.
+ (vrp_operand_equal_p, range_includes_zero_p): Likewise.
+ (infer_value_range, register_edge_assert_for): Likewise.
+ (stmt_interesting_for_vrp): Likewise.
+ * tree-vrp.c: Move all methods for evrp class into gimple-ssa-evrp.c.
+ (set_value_range_to_varying): No longer static.
+ (vrp_operand_equal_p, range_includes_zero_p): Likewise.
+ (infer_value_range, register_edge_assert_for): Likewise.
+
+2017-11-14 Michael Meissner <meissner@linux.vnet.ibm.com>
+
+ * config/rs6000/rs6000.md (bswapdi2): On 32-bit ISA 3.0, don't
+ generate the XXBRD instruction.
+
+ * config/rs6000/rs6000-c.c (is_float128_p): New helper function.
+ (rs6000_builtin_type_compatible): Treat _Float128 and long double
+ as being compatible if -mabi=ieeelongdouble.
+ * config/rs6000/rs6000-builtin.def (BU_FLOAT128_HW_1): New macros
+ to setup float128 built-ins with hardware support.
+ (BU_FLOAT128_HW_2): Likewise.
+ (BU_FLOAT128_HW_3): Likewise.
+ (BU_FLOAT128_HW_VSX_1): Likewise.
+ (BU_FLOAT128_HW_VSX_2): Likewise.
+ (scalar_extract_expq): Change float128 built-in functions to
+ accommodate having both KFmode and TFmode functions. Use the
+ KFmode variant as the default.
+ (scalar_extract_sigq): Likewise.
+ (scalar_test_neg_qp): Likewise.
+ (scalar_insert_exp_q): Likewise.
+ (scalar_insert_exp_qp): Likewise.
+ (scalar_test_data_class_qp): Likewise.
+ (sqrtf128_round_to_odd): Delete processing the round to odd
+ built-in functions as special built-in functions, and define them
+ as float128 built-ins. Use the KFmode variant as the default.
+ (truncf128_round_to_odd): Likewise.
+ (addf128_round_to_odd): Likewise.
+ (subf128_round_to_odd): Likewise.
+ (mulf128_round_to_odd): Likewise.
+ (divf128_round_to_odd): Likewise.
+ (fmaf128_round_to_odd): Likewise.
+ * config/rs6000/rs6000.c (rs6000_expand_binop_builtin): Add
+ support for KFmode and TFmode xststdcqp calls.
+ (rs6000_expand_builtin): If long double is IEEE 128-bit floating
+ point, switch the built-in handlers for the get/set float128
+ exponent, get float128 mantissa, float128 test built-ins, and the
+ float128 round to odd built-in functions. Eliminate creating the
+ float128 round to odd built-in functions as special built-ins.
+ (rs6000_init_builtins): Eliminate special creation of the float128
+ round to odd built-in functions.
+ * config/rs6000/vsx.md (xsxexpqp_<mode>): Change float128 built-in
+ function insns to support both TFmode and KFmode variants.
+ (xsxsigqp_<mode>): Likewise.
+ (xsiexpqpf_<mode>): Likewise.
+ (xsiexpqp_<mode>): Likewise.
+ (xststdcqp_<mode>): Likewise.
+ (xststdcnegqp_<mode>): Likewise.
+ (xststdcqp_<mode>): Likewise.
+
+2017-11-14 Jan Hubicka <hubicka@ucw.cz>
+
+ * tree-ssa-threadupdate.c (compute_path_counts): Remove
+ unused path_in_freq_ptr parameter.
+ (ssa_fix_duplicate_block_edges): Do not pass around path_in_freq
+
+2017-11-14 Jan Hubicka <hubicka@ucw.cz>
+
+ * ipa-inline.c (edge_badness): Dump sreal frequency.
+ (compute_inlined_call_time): Match natural implementaiton ...
+ * ipa-fnsummary.c (estimate_edge_size_and_time): ... here; remove
+ forgotten division by CGRAPH_FREQ_BASE.
+
+2017-11-14 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
+
+ * config.gcc (*-*-solaris2*): Enable default_use_cxa_atexit since
+ Solaris 11. Update comment.
+ * configure.ac (gcc_cv_ld_pid): Adapt comment for Solaris 12
+ renaming.
+ * config/sol2.h (STARTFILE_SPEC): Likewise.
+ * configure: Regenerate.
+
+2017-11-14 Carl Love <cel@us.ibm.com>
+
+ * config/rs6000/rs6000.c (swap_endian_selector_for_mode): Remove
+ le_ and be_ prefixes to swap* variables. Remove
+ if (VECTOR_ELT_ORDER_BIG) statement. Remove E_V16QImode case
+ statements.
+
+2017-11-14 Jason Merrill <jason@redhat.com>
+
+ Support GTY((cache)) on hash_map.
+ * hash-traits.h (ggc_remove): Add ggc_maybe_mx member function.
+ (ggc_cache_remove): Override it instead of ggc_mx.
+ * hash-table.h (gt_ggc_mx): Call it instead of ggc_mx.
+ (gt_cleare_cache): Call ggc_mx instead of gt_ggc_mx.
+ * hash-map-traits.h (simple_hashmap_traits): Add maybe_mx member.
+ (simple_cache_map_traits): Override maybe_mx.
+ * hash-map.h (hash_entry): Add ggc_maybe_mx and keep_cache_entry.
+ (hash_map): Friend gt_cleare_cache.
+ (gt_cleare_cache): New.
+ * tree.h (tree_cache_traits): New hash_map traits class.
+ (tree_cache_map): New typedef.
+
+2017-11-14 Richard Biener <rguenther@suse.de>
+
+ * tree-cfgcleanup.c (cleanup_control_expr_graph): Remove first_p
+ paramter and handling.
+ (cleanup_control_flow_bb): Likewise.
+ (cleanup_control_flow_pre): New helper performing a DFS walk
+ to call cleanup_control_flow_bb in PRE order.
+ (cleanup_tree_cfg_1): Do the first phase of cleanup_control_flow_bb
+ via cleanup_control_flow_pre.
+
+2017-11-14 James Greenhalgh <james.greenhalgh@arm.com>
+
+ * config/aarch64/aarch64-simd.md
+ (aarch64_simd_bsl<mode>_internal): Remove DImode.
+ (*aarch64_simd_bsl<mode>_alt): Likewise.
+ (aarch64_simd_bsldi_internal): New.
+ (aarch64_simd_bsldi_alt): Likewise.
+
+2017-11-13 Jan Hubicka <hubicka@ucw.cz>
+
+ * tracer.c (better_p): Do not compare frequencies.
+ * reg-stack.c (better_edge): Likewise.
+ * shrink-wrap.c (try_shrink_wrapping): Do not convert to gcov counts
+ and back.
+
+2017-11-13 Jan Hubicka <hubicka@ucw.cz>
+
+ * auto-profile.c (afdo_annotate_cfg): Use update_max_bb_count.
+ * cgraphunit.c (cgraph_node::expand_thunk): Use update_max_bb_count.
+ * ipa-utils.c (ipa_merge_profiles): Use update_max_bb_count.
+ * lto-streamer-in.c (input_function): Use update_max_bb_count.
+ * omp-expand.c (expand_omp_taskreg): Use update_max_bb_count.
+ * predict.c (maybe_hot_frequency_p): Inline to ...
+ (maybe_hot_count_p): ... here; rewrite to counts.
+ (counts_to_freqs): Rename to ...
+ (update_max_bb_count): ... this one.
+ (expensive_function_p): Use counts.
+ (estimate_bb_frequencies): Update.
+ (rebuild_frequencies): Update.
+ * predict.h (counts_to_freqs): Rename to ...
+ (update_max_bb_count): ... this one.
+ * profile.c (compute_branch_probabilities): Add debug info
+ * tree-inline.c (expand_call_inline): Update debug info.
+ (optimize_inline_calls): Use update_max_bb_count..
+ (tree_function_versioning): Use update_max_bb_count..
+ * value-prof.c (gimple_value_profile_transformations):
+ Do not use update_max_bb_count.
+
+2017-11-13 Jan Hubicka <hubicka@ucw.cz>
+
+ * ipa-inline.c (compute_uninlined_call_time, compute_inlined_call_time):
+ always use frequencies.
+
+2017-11-13 Jan Hubicka <hubicka@ucw.cz>
+
+ * bb-reorder.c: Remove frequencies from comments.
+ (better_edge_p): Use profile counts.
+ (find_traces): Dump profile counts.
+ (rotate_loop): Use profile counts.
+ (find_traces_1_round): Likewise.
+ (connect_better_edge_p): Use counts instead of probabilities for
+ reverse walk.
+ (copy_bb_p): Drop early check for non-0 frequency.
+ (sanitize_hot_paths): Update comments.
+
+2017-11-13 Jan Hubicka <hubicka@ucw.cz>
+
+ * ipa-split.c (struct split_point): Add count.
+ (consider_split): Do not compute incoming frequency; compute incoming
+ count and store it to split_point.
+ (split_function): Set count of the call to split part correctly.
+
+2017-11-13 Carl Love <cel@us.ibm.com>
+
+ * config/rs6000/altivec.md (altivec_vsumsws_be): Add define_expand.
+
+2017-11-13 Tom Tromey <tom@tromey.com>
+
+ * doc/cpp.texi (Variadic Macros): Document __VA_OPT__.
+
+2017-11-13 Carl Love <cel@us.ibm.com>
+
+ * config/rs6000/rs6000-c.c (altivec_overloaded_builtins):
+ Add support for builtins:
+ unsigned int vec_first_{,miss}_match_{,or_eos}index,
+ vector {un,}signed {char,int,short},
+ vector {un,}signed {char,int,short}) arguments.
+ * config/rs6000/rs6000-builtin.def (VFIRSTMATCHINDEX,
+ VFIRSTMATCHOREOSINDEX, VFIRSTMISMATCHINDEX, VFIRSTMISMATCHOREOSINDEX):
+ Add BU_P9V_AV_2 expansions for the builtins.
+ * config/rs6000/altivec.h (vec_first_match_index,
+ vec_first_mismatch_index, vec_first_match_or_eos_index,
+ vec_first_mismatch_or_eos_index): Add #defines for the builtins.
+ * config/rs6000/rs6000-protos.h (bytes_in_mode): Add
+ new extern declaration.
+ * config/rs6000/rs6000.c (bytes_in_mode): Add new function.
+ * config/rs6000/vsx.md (first_match_index_<mode>,
+ first_match_or_eos_index_<mode>, first_mismatch_index_<mode>,
+ first_mismatch_or_eos_index_<mode>): Add define expand.
+ (vctzlsbb_<mode>): Add mode field to define_insn for vctzlsbb.
+ * doc/extend.texi: Update the built-in documenation file for the new
+ built-in functions.
+
+2017-11-13 Michael Meissner <meissner@linux.vnet.ibm.com>
+
+ * match.pd: Convert fminf<N>, fminf<N>x, fmax<N>, and fmax<N>x
+ into the min/max operations for _Float<N> and _Float<N>X types.
+
+2017-11-13 Eric Botcazou <ebotcazou@adacore.com>
+
+ PR lto/81351
+ * dwarf2out.c (do_eh_frame): New static variable.
+ (dwarf2out_begin_prologue): Set it.
+ (dwarf2out_frame_finish): Test it instead of dwarf2out_do_eh_frame.
+
+2017-11-13 Jan Hubicka <hubicka@ucw.cz>
+
+ * tree-ssa-coalesce.c (coalesce_cost): Fix formating.
+
+ * tree-ssa-sink.c (select_best_block): Do not use frequencies.
+
+2017-11-13 Eric Botcazou <ebotcazou@adacore.com>
+
+ PR lto/81351
+ * debug.h (dwarf2out_do_eh_frame): Declare.
+ * dwarf2cfi.c (dwarf2out_do_eh_frame): New predicate.
+ (dwarf2out_do_frame): Use it.
+ (dwarf2out_do_cfi_asm): Likewise.
+ * dwarf2out.c (dwarf2out_frame_finish): Likewise.
+ (dwarf2out_assembly_start): Likewise.
+ (dwarf2out_begin_prologue): Fix comment.
+ * toplev.c (compile_file): Always call dwarf2out_frame_finish
+ if the target needs either debug or unwind DWARF2 info.
+ * lto-opts.c (lto_write_options): Do not save -fexceptions,
+ -fnon-call-exceptions, -ffp-contract, -fmath-errno, -fsigned-zeros,
+ -ftrapping-math, -ftrapv and -fwrapv.
+
+2017-11-13 Jan Hubicka <hubicka@ucw.cz>
+
+ * cgraph.c (cgraph_edge::sreal_frequency): New function.
+ * cgraph.h (cgraph_edge::sreal_frequency): Declare.
+ * ipa-fnsummary.c (dump_ipa_call_summary): Use sreal_frequency.
+ (estimate_edge_size_and_time): Likewise.
+ (ipa_merge_fn_summary_after_inlining): Likewise.
+ * ipa-inline.c (cgraph_freq_base_rec): Remove.
+ (compute_uninlined_call_time): Use sreal_frequency.
+ (compute_inlined_call_time): Likewise.
+ (ipa_inline): Do not initialize cgraph_freq_base_rec.
+ * profile-count.c: Include sreal.h.
+ (profile_count::to_sreal_scale): New.
+ * profile-count.h: Forward declare sreal.
+ (profile_count::to_sreal_scale): Declare.
+
+2017-11-13 Nathan Sidwell <nathan@acm.org>
+
+ * diagnostic.c (maybe_line_and_column): New.
+ (diagnostic_get_location_text): Use it.
+ (diagnostic_report_current_module): Likewise.
+ (test_diagnostic_get_location_text): Add tests.
+
+2017-11-13 Luis Machado <luis.machado@linaro.org>
+
+ * doc/md.texi (Specifying processor pipeline description): Fix
+ incorrect latency for the div instruction example.
+
+2017-11-13 Jakub Jelinek <jakub@redhat.com>
+
+ PR tree-optimization/78821
+ * gimple-ssa-store-merging.c (compatible_load_p): Don't require
+ that bit_not_p is the same.
+ (imm_store_chain_info::coalesce_immediate_stores): Likewise.
+ (split_group): Count precisely bit_not_p bits in each statement.
+ (invert_op): New function.
+ (imm_store_chain_info::output_merged_store): Use invert_op to
+ emit BIT_XOR_EXPR with a xor_mask instead of BIT_NOT_EXPR if some
+ but not all orig_stores have BIT_NOT_EXPR in the corresponding spots.
+
+2017-11-13 Martin Liska <mliska@suse.cz>
+
+ * gcov.c (struct coverage_info): Remove typedef of coverage_t.
+ (struct source_info): Likewise.
+ (add_branch_counts): Likewise.
+ (add_line_counts): Likewise.
+ (function_summary): Likewise.
+ (output_intermediate_line): Likewise.
+ (generate_results): Likewise.
+
+2017-11-13 Martin Liska <mliska@suse.cz>
+
+ * gcov.c (struct block_info): Remove typedef for block_t.
+ (struct line_info): Likewise.
+ (line_info::has_block): Likewise.
+ (EXIT_BLOCK): Likewise.
+ (unblock): Likewise.
+ (circuit): Likewise.
+ (get_cycles_count): Likewise.
+ (process_file): Likewise.
+ (read_graph_file): Likewise.
+ (solve_flow_graph): Likewise.
+ (find_exception_blocks): Likewise.
+ (add_line_counts): Likewise.
+ (accumulate_line_info): Likewise.
+ (output_line_details): Likewise.
+
+2017-11-13 Martin Liska <mliska@suse.cz>
+
+ * gcov.c (struct arc_info): Remove typedef for arc_t.
+ (struct line_info): Likewise.
+ (add_branch_counts): Likewise.
+ (output_branch_count): Likewise.
+ (function_info::~function_info): Likewise.
+ (circuit): Likewise.
+ (output_intermediate_line): Likewise.
+ (read_graph_file): Likewise.
+ (solve_flow_graph): Likewise.
+ (find_exception_blocks): Likewise.
+ (add_line_counts): Likewise.
+ (accumulate_line_info): Likewise.
+ (output_line_details): Likewise.
+ (output_function_details): Likewise.
+
+2017-11-13 Martin Liska <mliska@suse.cz>
+
+ * gcov.c (struct function_info): Remove typedef for function_t.
+ (struct source_info): Likewise.
+ (source_info::get_functions_at_location): Likewise.
+ (solve_flow_graph): Likewise.
+ (find_exception_blocks): Likewise.
+ (add_line_counts): Likewise.
+ (output_intermediate_file): Likewise.
+ (process_file): Likewise.
+ (generate_results): Likewise.
+ (release_structures): Likewise.
+ (read_graph_file): Likewise.
+ (read_count_file): Likewise.
+ (accumulate_line_counts): Likewise.
+ (output_lines): Likewise.
+
+2017-11-13 Martin Liska <mliska@suse.cz>
+
+ * gcov.c (function_info::function_info): Remove num_counts
+ and add vector<gcov_type>.
+ (function_info::~function_info): Use the vector.
+ (process_file): Likewise.
+ (read_graph_file): Likewise.
+ (read_count_file): Likewise.
+ (solve_flow_graph): Likewise.
+
+2017-11-13 Martin Liska <mliska@suse.cz>
+
+ * gcov.c (function_info::is_artificial): New function.
+ (process_file): Erase all artificial early.
+ (generate_results): Skip as all artificial are already
+ removed.
+
+2017-11-13 Martin Liska <mliska@suse.cz>
+
+ * gcov.c (read_graph_file): Store to global vector of functions.
+ (read_count_file): Iterate the vector.
+ (process_file): Likewise.
+ (generate_results): Likewise.
+ (release_structures): Likewise.
+
+2017-11-13 Jakub Jelinek <jakub@redhat.com>
+
+ PR tree-optimization/82954
+ * gimple-ssa-store-merging.c
+ (imm_store_chain_info::coalesce_immediate_stores): If
+ !infof->ops[N].base_addr, split group if info->ops[N].base_addr.
+
+2017-11-13 Richard Sandiford <richard.sandiford@linaro.org>
+
+ * config/aarch64/aarch64-simd.md (aarch64_store_lane0<mode>):
+ Upddate call to ENDIAN_LANE_N.
+ (aarch64_<sur>dot_lane<vsi2qi>): Use aarch64_endian_lane_rtx.
+ (aarch64_<sur>dot_laneq<vsi2qi>): Likewise.
+ (*aarch64_simd_vec_copy_lane<mode>): Update calls to ENDIAN_LANE_N
+ and use aarch64_endian_lane_rtx.
+ (*aarch64_simd_vec_copy_lane_<vswap_width_name><mode>): Likewise.
+
+2017-11-12 Tom de Vries <tom@codesourcery.com>
+
+ * config/riscv/riscv.h (ASM_OUTPUT_LABELREF): Wrap in do {} while (0).
+
+2017-11-12 Tom de Vries <tom@codesourcery.com>
+
+ * config/elfos.h (ASM_OUTPUT_ASCII): Remove semicolon after macro body.
+
+2017-11-12 Tom de Vries <tom@codesourcery.com>
+
+ * config/cr16/cr16.h (ASM_OUTPUT_LABELREF): Remove semicolon after macro
+ body.
+ * config/powerpcspe/xcoff.h (ASM_OUTPUT_LABELREF): Same.
+ * config/rs6000/xcoff.h (ASM_OUTPUT_LABELREF): Same.
+ * defaults.h (ASM_OUTPUT_LABELREF): Same.
+
+2017-11-11 Martin Sebor <msebor@redhat.com>
+
+ PR c/81117
+ * doc/extend.texi (attribute nonstring): Remove spurious argument.
+
+ PR bootstrap/82948
+ * prefic.c (translate_name): Replace strncpy with memcpy to
+ avoid -Wstringop-truncation.
+
+2017-11-10 Jan Hubicka <hubicka@ucw.cz>
+
+ * tree-ssa-loop-im.c (execute_sm_if_changed): Do not compute freq_sum.
+
+2017-11-10 Jan Hubicka <hubicka@ucw.cz>
+
+ * predict.c (maybe_hot_frequency_p): Do not use cfun.
+
+2017-11-10 Jan Hubicka <hubicka@ucw.cz>
+
+ * tree-ssa-tail-merge.c (replace_block_by): Fix and re-enable profile
+ merging.
+
+2017-11-10 Michael Meissner <meissner@linux.vnet.ibm.com>
+
+ * config/rs6000/rs6000.md (bswaphi2_reg): On ISA 3.0 systems,
+ enable generating XXBRH if the value is in a vector register.
+ (bswapsi2_reg): On ISA 3.0 systems, enable generating XXBRW if the
+ value is in a vector register.
+ (bswapdi2_reg): On ISA 3.0 systems, always use XXBRD to do
+ register to register bswap64's instead of doing the GPR sequence
+ used on previous machines.
+ (bswapdi2_xxbrd): New insn.
+ (bswapdi2_reg): Disallow on ISA 3.0.
+ (register to register bswap64 splitter): Do not split the insn on
+ ISA 3.0 systems that use XXBRD.
+
+2017-11-10 Martin Sebor <msebor@redhat.com>
+
+ PR c/81117
+ * config/darwin-c.c (framework_construct_pathname): Replace strncpy
+ with memcpy.
+ (find_subframework_file): Same.
+
+2017-11-10 Jan Hubicka <hubicka@ucw.cz>
+
+ * auto-profile.c (afdo_indirect_call): Drop frequency.
+ * cgraph.c (symbol_table::create_edge): Drop frequency argument.
+ (cgraph_node::create_edge): Drop frequency argument.
+ (cgraph_node::create_indirect_edge): Drop frequency argument.
+ (cgraph_edge::make_speculative): Drop frequency arguments.
+ (cgraph_edge::resolve_speculation): Do not update frequencies
+ (cgraph_edge::dump_edge_flags): Do not dump frequency.
+ (cgraph_node::dump): Check consistency in IPA mode.
+ (cgraph_edge::maybe_hot_p): Use IPA counter.
+ (cgraph_edge::verify_count_and_frequency): Rename to ...
+ (cgraph_edge::verify_count): ... this one; drop frequency checking.
+ (cgraph_node::verify_node): Update.
+ * cgraph.h (struct cgraph_edge): Drop frequency.
+ (cgraph_edge::frequency): New function.
+ * cgraphbuild.c (pass_build_cgraph_edges::execute): Donot pass
+ frequencies.
+ (cgraph_edge::rebuild_edges): Likewise.
+ * cgraphclones.c (cgraph_edge::clone): Scale only counts.
+ (duplicate_thunk_for_node): Do not pass frequency.
+ (cgraph_node::create_clone): Scale only counts.
+ (cgraph_node::create_virtual_clone): Do not pass frequency.
+ (cgraph_node::create_edge_including_clones): Do not pass frequency.
+ (cgraph_node::create_version_clone): Do not pass frequency.
+ * cgraphunit.c (cgraph_node::analyze): Do not pass frequency.
+ (cgraph_node::expand_thunk): Do not pass frequency.
+ (cgraph_node::create_wrapper): Do not pass frequency.
+ * gimple-iterator.c (update_call_edge_frequencies): Do not pass
+ frequency.
+ * gimple-streamer-in.c (input_bb): Scale only IPA counts.
+ * ipa-chkp.c (chkp_produce_thunks): Do not pass frequency.
+ * ipa-cp.c (ipcp_lattice::print): Use frequency function.
+ (gather_caller_stats): Use frequency function.
+ (ipcp_cloning_candidate_p): Use frequency function.
+ (ipcp_propagate_stage): Use frequency function.
+ (get_info_about_necessary_edges): Use frequency function.
+ (update_profiling_info): Update only IPA profile.
+ (update_specialized_profile): Use frequency functoin.
+ (perhaps_add_new_callers): Update only IPA profile.
+ * ipa-devirt.c (ipa_devirt): Use IPA profile.
+ * ipa-fnsummary.c (redirect_to_unreachable): Do not set frequrency.
+ (dump_ipa_call_summary): Use frequency function.
+ (estimate_edge_size_and_time): Use frequency function.
+ (ipa_merge_fn_summary_after_inlining): Use frequency function.
+ * ipa-inline-analysis.c (do_estimate_edge_time): Use IPA profile.
+ * ipa-inline-transform.c (update_noncloned_frequencies): Rename to ..
+ (update_noncloned_counts): ... ths one; scale counts only.
+ (clone_inlined_nodes): Do not scale frequency.
+ (inline_call): Do not pass frequency.
+ * ipa-inline.c (compute_uninlined_call_time): Use IPA profile.
+ (compute_inlined_call_time): Use IPA profile.
+ (want_inline_small_function_p): Use IPA profile.
+ (want_inline_self_recursive_call_p): Use IPA profile.
+ (edge_badness): Use IPA profile.
+ (lookup_recursive_calls): Use IPA profile.
+ (recursive_inlining): Do not pass frequency.
+ (resolve_noninline_speculation): Do not update frequency.
+ (inline_small_functions): Collect max of IPA profile.
+ (dump_overall_stats): Dump IPA porfile.
+ (dump_inline_stats): Dump IPA porfile.
+ (ipa_inline): Collect IPA stats.
+ * ipa-inline.h (clone_inlined_nodes): Update prototype.
+ * ipa-profile.c (ipa_propagate_frequency_1): Use frequency function.
+ (ipa_propagate_frequency): Use frequency function.
+ (ipa_profile): Cleanup.
+ * ipa-prop.c (ipa_make_edge_direct_to_target): Do not pass frequency
+ * ipa-utils.c (ipa_merge_profiles): Merge all profiles.
+ * lto-cgraph.c (lto_output_edge): Do not stream frequency.
+ (input_node): Do not stream frequency.
+ (input_edge): Do not stream frequency.
+ (merge_profile_summaries): Scale only IPA profiles.
+ * omp-simd-clone.c (simd_clone_adjust): Do not pass frequency.
+ * predict.c (drop_profile): Do not recompute frequency.
+ * trans-mem.c (ipa_tm_insert_irr_call): Do not pass frequency.
+ (ipa_tm_insert_gettmclone_call): Do not pass frequency.
+ * tree-cfg.c (execute_fixup_cfg): Drop profile to global0 if needed.
+ * tree-chkp.c (chkp_copy_bounds_for_assign): Do not pass frequency.
+ * tree-emutls.c (gen_emutls_addr): Do not pass frequency.
+ * tree-inline.c (copy_bb): Do not scale frequency.
+ (expand_call_inline): Do not scale frequency.
+ (tree_function_versioning): Do not scale frequency.
+ * ubsan.c (ubsan_create_edge): Do not pass frequency.
+
+2017-11-10 Julia Koval <julia.koval@intel.com>
+
+ * config/i386/gfniintrin.h (_mm_gf2p8affine_epi64_epi8)
+ (_mm256_gf2p8affine_epi64_epi8, _mm_mask_gf2p8affine_epi64_epi8)
+ (_mm_maskz_gf2p8affine_epi64_epi8, _mm256_mask_gf2p8affine_epi64_epi8)
+ (_mm256_maskz_gf2p8affine_epi64_epi8)
+ (_mm512_mask_gf2p8affine_epi64_epi8, _mm512_gf2p8affine_epi64_epi8)
+ (_mm512_maskz_gf2p8affine_epi64_epi8): New intrinsics.
+ * config/i386/i386-builtin.def (__builtin_ia32_vgf2p8affineqb_v64qi)
+ (__builtin_ia32_vgf2p8affineqb_v32qi)
+ (__builtin_ia32_vgf2p8affineqb_v16qi): New builtins.
+ * config/i386/sse.md (vgf2p8affineqb_<mode><mask_name>): New pattern.
+
+2017-11-10 Tamar Christina <tamar.christina@arm.com>
+
+ PR target/82641
+ * config/arm/arm.c
+ (arm_option_override): Refactor.
+ (arm_option_reconfigure_globals): New.
+ (arm_options_perform_arch_sanity_checks): New.
+ * config/arm/arm-protos.h (arm_option_reconfigure_globals):
+ New prototype.
+ (arm_options_perform_arch_sanity_checks): Likewise
+
+2017-11-10 Pat Haugen <pthaugen@us.ibm.com>
+
+ * rs6000/power9.md (power9-qpdiv): Correct DFU pipe usage.
+ (power9-qpmul): New.
+ * rs6000/rs6000.md ("type" attr): Add qmul.
+ (mul<mode>3, fma<mode>4_hw, *fms<mode>4_hw, *nfma<mode>4_hw,
+ *nfms<mode>4_hw, mul<mode>3_odd, fma<mode>4_odd, *fms<mode>4_odd,
+ *nfma<mode>4_odd, *nfms<mode>4_odd): Change type to qmul.
+
+2017-11-10 Martin Sebor <msebor@redhat.com>
+
+ PR c/81117
+ * builtins.c (compute_objsize): Handle arrays that
+ compute_builtin_object_size likes to fail for. Make extern.
+ * builtins.h (compute_objsize): Declare.
+ (check_strncpy_sizes): New function.
+ (expand_builtin_strncpy): Call check_strncpy_sizes.
+ * gimple-fold.c (gimple_fold_builtin_strncpy): Implement
+ -Wstringop-truncation.
+ (gimple_fold_builtin_strncat): Same.
+ * gimple.c (gimple_build_call_from_tree): Set call location.
+ * tree-ssa-strlen.c (strlen_to_stridx): New global variable.
+ (maybe_diag_bound_equal_length, is_strlen_related_p): New functions.
+ (handle_builtin_stxncpy, handle_builtin_strncat): Same.
+ (handle_builtin_strlen): Use strlen_to_stridx.
+ (strlen_optimize_stmt): Handle flavors of strncat, strncpy, and
+ stpncpy.
+ Use strlen_to_stridx.
+ (pass_strlen::execute): Release strlen_to_stridx.
+ * doc/invoke.texi (-Wsizeof-pointer-memaccess): Document enhancement.
+ (-Wstringop-truncation): Document new option.
+
+2017-11-10 Martin Liska <mliska@suse.cz>
+
+ PR gcov-profile/82702
+ * gcov.c (main): Handle intermediate files in a different
+ way.
+ (get_gcov_intermediate_filename): New function.
+ (output_gcov_file): Remove support of intermediate files.
+ (generate_results): Allocate intermediate file.
+ (release_structures): Clean-up properly fn_end.
+ (output_intermediate_file): Start iterating with line 1.
+
+2017-11-10 Jakub Jelinek <jakub@redhat.com>
+
+ PR tree-optimization/82929
+ * gimple-ssa-store-merging.c (struct store_immediate_info): Add
+ ops_swapped_p non-static data member.
+ (store_immediate_info::store_immediate_info): Clear it.
+ (imm_store_chain_info::coalesce_immediate_stores): If swapping
+ ops set ops_swapped_p.
+ (count_multiple_uses): Handle ops_swapped_p.
+
+2017-11-10 Martin Liska <mliska@suse.cz>
+
+ * coverage.c (coverage_init): Stream information about
+ support of has_unexecuted_blocks.
+ * doc/gcov.texi: Document that.
+ * gcov-dump.c (dump_gcov_file): Support it in gcov_dump tool.
+ * gcov.c (read_graph_file): Likewise.
+ (output_line_beginning): Fix a small issue with
+ color output.
+
+2017-11-10 Bin Cheng <bin.cheng@arm.com>
+
+ * tree-predcom.c (determine_roots_comp): Avoid memory leak by freeing
+ reference of trivial component.
+
+2017-11-10 Jakub Jelinek <jakub@redhat.com>
+
+ PR bootstrap/82916
+ * gimple-ssa-store-merging.c
+ (pass_store_merging::terminate_all_aliasing_chains): For
+ gimple_store_p stmts also call refs_output_dependent_p.
+
+ PR rtl-optimization/82913
+ * compare-elim.c (try_merge_compare): Punt if def_insn is not
+ single set.
+
+2017-11-09 Jeff Law <law@redhat.com>
+
+ * vr-values.h: New file with vr_values class.
+ * tree-vrp.c: Include vr-values.h
+ (vrp_value_range_pool, vrp_equiv_obstack, num_vr_values): Move static
+ data objects into the vr_values class.
+ (vr_value, values_propagated, vr_phi_edge_counts): Likewise.
+ (get_value_range): Make it a member function within vr_values class.
+ (set_defs_to_varying, update_value_range, add_equivalence): Likewise.
+ (vrp_stmt_computes_nonzero_p, op_with_boolean_value_range_p): Likewise.
+ (op_with_constant_singleton_value_range): Likewise.
+ (extract_range_for_var_from_comparison_expr): Likewise.
+ (extract_range_from_assert, extract_range_from_ssa_name): Likewise.
+ (extract_range_from_binary_expr): Likewise.
+ (extract_range_from_unary_expr): Likewise.
+ (extract_range_from_cond_expr, extrat_range_from_comparison): Likewise.
+ (check_for_binary_op_overflow, extract_range_basic): Likewise.
+ (extract_range_from_assignment, adjust_range_with_scev): Likewise.
+ (dump_all_value_ranges, get_vr_for_comparison): Likewise.
+ (compare_name_with_value, compare_names): Likewise.
+ (vrp_evaluate_conditional_warnv_with_ops_using_ranges): Likewise.
+ (vrp_evaluate_conditional_warnv_with_ops): Likewise. Remove prototype.
+ (vrp_evaluate_conditional, vrp_visit_cond_stmt): Likewise.
+ (vrp_visit_switch_stmt, extract_range_from_stmt): Likewise.
+ (extract_range_from_phi_node): Likewise.
+ (simplify_truth_ops_using_ranges): Likewise.
+ (simplify_div_or_mod_using_ranges): Likewise.
+ (simplify_min_or_max_using_ranges, simplify_abs_using_ranges): Likewise.
+ (simplify_bit_ops_using_ranges, simplify_cond_using_ranges_1): Likewise.
+ (simplify_cond_using_ranges_2, simplify_switch_using_ranges): Likewise.
+ (simplify_float_conversion_using_ranges): Likewise.
+ (simplify_internal_call_using_ranges): Likewise.
+ (two_valued_val_range_p, simplify_stmt_using_ranges): Likewise.
+ (vrp_visit_assignment_or_call): Likewise. Smuggle class instance
+ poitner via x_vr_values for calls into gimple folder.
+ (vrp_initialize_lattice): Make this the vr_values ctor.
+ (vrp_free_lattice): Make this the vr_values dtor.
+ (set_vr_value): New function.
+ (class vrp_prop): Add vr_values data member. Add various member
+ functions as well as member functions that delegate to vr_values.
+ (check_array_ref): Make a member function within vrp_prop class.
+ (search_for_addr_array, vrp_initialize): Likewise.
+ (vrp_finalize): Likewise. Revamp to avoid direct access to
+ vr_value, values_propagated, etc.
+ (check_array_bounds): Extract vrp_prop class instance pointer from
+ walk info structure. Use it to call member functions.
+ (check_all_array_refs): Make a member function within vrp_prop class.
+ Smuggle class instance pointer via walk info structure.
+ (x_vr_values): New local static.
+ (vrp_valueize): Use x_vr_values to get class instance.
+ (vr_valueize_1): Likewise.
+ (class vrp_folder): Add vr_values data member. Add various member
+ functions as well as member functions that delegate to vr_values.
+ (fold_predicate_in): Make a mber fucntion within vrp_folder class.
+ (simplify_stmt_for_jump_threading): Extract smuggled vr_values
+ class instance from vr_values. Use it to call member functions.
+ (vrp_dom_walker): Add vr_values data member.
+ (vrp_dom_walker::after_dom_children): Smuggle vr_values class
+ instance via x_vr_values.
+ (identify_jump_threads): Accept vr_values as argument. Store
+ it into the walker structure.
+ (evrp_dom_walker): Add vr_values class data member. Add various
+ delegators.
+ (evrp_dom_walker::try_find_new_range): Use vr_values data
+ member to access the memory allocator.
+ (evrp_dom_walker::before_dom_children): Store vr_values class
+ instance into the vrp_folder class.
+ (evrp_dom_walker::push_value_range): Rework to avoid direct
+ access to num_vr_values and vr_value.
+ (evrp_dom_walker::pop_value_range): Likewise.
+ (execute_early_vrp): Remove call to vrp_initialize_lattice.
+ Use vr_values to get to dump_all_value_ranges member function.
+ Remove call to vrp_free_lattice. Call vrp_initialize, vrp_finalize,
+ and simplify_cond_using_ranges_2 via vrp_prop class instance.
+ Pass vr_values class instance down to identify_jump_threads.
+ Remove call to vrp_free_lattice.
+ (debug_all_value_ranges): Remove.
+
+ * tree-vrp.c (vrp_prop): Move class to earlier point in the file.
+ (vrp_folder): Likewise.
+
+ * tree-vrp.c (set_value_range): Do not reference vrp_equiv_obstack.
+ Get it from the existing bitmap instead.
+ (vrp_intersect_ranges_1): Likewise.
+
+2017-11-09 Jakub Jelinek <jakub@redhat.com>
+
+ * gimple-ssa-store-merging.c (struct store_immediate_info): Add
+ bit_not_p field.
+ (store_immediate_info::store_immediate_info): Add bitnotp argument,
+ set bit_not_p to it.
+ (imm_store_chain_info::coalesce_immediate_stores): Break group
+ if bit_not_p is different.
+ (count_multiple_uses, split_group,
+ imm_store_chain_info::output_merged_store): Handle info->bit_not_p.
+ (handled_load): Avoid multiple chained BIT_NOT_EXPRs.
+ (pass_store_merging::process_store): Handle BIT_{AND,IOR,XOR}_EXPR
+ result inverted using BIT_NOT_EXPR, compute bit_not_p, pass it
+ to store_immediate_info ctor.
+
+2017-11-09 Jim Wilson <jimw@sifive.com>
+
+ * collect2.c (OBJECT_FORMAT_COFF): Remove EXTENDED_COFF support.
+ (scan_prog_file): Likewise.
+
+2017-11-09 Jan Hubicka <hubicka@ucw.cz>
+
+ * bb-reorder.c (max_entry_frequency): Remove.
+ (find_traces, rotate_loop, mark_bb_visited, connect_better_edge_p,
+ connect_traces, push_to_next_round_p): Remove prototypes.
+ (find_traces_1_round): Use counts only.
+ (push_to_next_round_p): Likewise.
+ (find_traces): Likewise.
+ (rotate_loop): Likewise.
+ (find_traces_1_round): Likewise.
+ (connect_traces): Likewise.
+ (edge_order): Likewise.
+
+2017-11-09 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * config/arm/arm.c (output_return_instruction): Add comments to
+ indicate requirement for cmse_nonsecure_entry return to account
+ for the size of clearing instruction output here.
+ (thumb_exit): Likewise.
+ * config/arm/thumb2.md (thumb2_cmse_entry_return): Fix length for
+ return in hardfloat mode.
+
+2017-11-09 Segher Boessenkool <segher@kernel.crashing.org>
+
+ * config/rs6000/rs6000.c (machine_function): Add a bool,
+ "toc_is_wrapped_separately".
+ (rs6000_option_override_internal): Enable OPTION_MASK_SAVE_TOC_INDIRECT
+ if it wasn't explicitly set or unset, we are optimizing for speed, and
+ doing separate shrink-wrapping.
+ (rs6000_get_separate_components): Enable the TOC component if
+ saving the TOC register in the prologue.
+ (rs6000_components_for_bb): Handle the TOC component.
+ (rs6000_emit_prologue_components): Store the TOC register where needed.
+ (rs6000_set_handled_components): Mark TOC as handled, if handled.
+ (rs6000_emit_prologue): Don't save the TOC if that is already done.
+
+2017-11-09 Martin Jambor <mjambor@suse.cz>
+
+ * ipa-param-manipulation.c: New file.
+ * ipa-param-manipulation.h: Likewise.
+ * Makefile.in (OBJS): Add ipa-param-manipulation.o.
+ (PLUGIN_HEADERS): Addded ipa-param-manipulation.h
+ * ipa-param.h (ipa_parm_op): Moved to ipa-param-manipulation.h.
+ (ipa_parm_adjustment): Likewise.
+ (ipa_parm_adjustment_vec): Likewise.
+ (ipa_get_vector_of_formal_parms): Moved declaration to
+ ipa-param-manipulation.h.
+ (ipa_get_vector_of_formal_parm_types): Likewise.
+ (ipa_modify_formal_parameters): Likewise.
+ (ipa_modify_call_arguments): Likewise.
+ (ipa_combine_adjustments): Likewise.
+ (ipa_dump_param_adjustments): Likewise.
+ (ipa_modify_expr): Likewise.
+ (ipa_get_adjustment_candidate): Likewise.
+ * ipa-prop.c (ipa_get_vector_of_formal_parms): Moved to
+ ipa-param-manipulation.c.
+ (ipa_get_vector_of_formal_parm_types): Likewise.
+ (ipa_modify_formal_parameters): Likewise.
+ (ipa_modify_call_arguments): Likewise.
+ (ipa_modify_expr): Likewise.
+ (get_ssa_base_param): Likewise.
+ (ipa_get_adjustment_candidate): Likewise.
+ (index_in_adjustments_multiple_times_p): Likewise.
+ (ipa_combine_adjustments): Likewise.
+ (ipa_dump_param_adjustments): Likewise.
+ * tree-sra.c: Also include ipa-param-manipulation.h
+ * omp-simd-clone.c: Include ipa-param-manipulation.h instead of
+ ipa-param.h.
+
+2017-11-09 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * doc/sourcebuild.texi (vect_masked_store): Document.
+
+2017-11-09 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * doc/sourcebuild.texi (vect_align_stack_vars): Document.
+
+2017-11-09 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * doc/sourcebuild.texi (vect_variable_length): Document.
+
+2017-11-09 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * doc/sourcebuild.texi (vect_unaligned_possible): Document.
+
+2017-11-09 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * doc/sourcebuild.texi (vect_element_align_preferred): Document.
+
+2017-11-09 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * doc/sourcebuild.texi (vect_perm_short, vect_perm_byte): Document
+ previously undocumented selectors.
+ (vect_perm3_byte, vect_perm3_short, vect_perm3_int): Document.
+
+2017-11-09 Richard Sandiford <richard.sandiford@linaro.org>
+
+ * doc/rtl.texi (const_vector): Say that elements can be
+ const_wide_ints too.
+ * emit-rtl.h (valid_for_const_vec_duplicate_p): Declare.
+ * emit-rtl.c (valid_for_const_vec_duplicate_p): New function.
+ (gen_vec_duplicate): Use it instead of CONSTANT_P.
+ * optabs.c (expand_vector_broadcast): Likewise.
+
+2017-11-09 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * tree-ssa-loop-ivopts.c (get_address_cost): Try using a
+ scaled index even if the unscaled address was invalid.
+ Don't increase the complexity of using a scale in that case.
+
+2017-11-09 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * doc/rtl.texi: Rewrite the subreg rules so that they partition
+ the inner register into REGMODE_NATURAL_SIZE bytes rather than
+ UNITS_PER_WORD bytes.
+ * emit-rtl.c (validate_subreg): Divide subregs into blocks
+ based on REGMODE_NATURAL_SIZE of the inner mode.
+ (gen_lowpart_common): Split the SCALAR_FLOAT_MODE_P and
+ !SCALAR_FLOAT_MODE_P cases. Use REGMODE_NATURAL_SIZE for the latter.
+ * expmed.c (lowpart_bit_field_p): Divide the value up into
+ chunks of REGMODE_NATURAL_SIZE rather than UNITS_PER_WORD.
+ * expr.c (store_constructor): Use REGMODE_NATURAL_SIZE to test
+ whether something is likely to occupy more than one register.
+
+2017-11-09 Jan Hubicka <hubicka@ucw.cz>
+
+ PR ipa/82879
+ * ipa-inline-transform.c (update_noncloned_frequencies): Use
+ profile_count::adjust_for_ipa_scaling.
+ * tree-inline.c (copy_bb, copy_cfg_body): Likewise.
+ * profile-count.c (profile_count::adjust_for_ipa_scaling): New member
+ function.
+ * profile-count.h (profile_count::adjust_for_ipa_scaling): Declare.
+
+2017-11-09 Jakub Jelinek <jakub@redhat.com>
+
+ * gimple-ssa-store-merging.c (count_multiple_uses): New function.
+ (split_group): Add total_orig and total_new arguments, estimate the
+ number of statements related to the store group without store merging
+ and with store merging.
+ (imm_store_chain_info::output_merged_store): Adjust split_group
+ callers, punt if estimated number of statements with store merging
+ is not smaller than estimated number of statements without it.
+ Formatting fix.
+ (handled_load): Remove has_single_use checks.
+ (pass_store_merging::process_store): Likewise.
+
+2017-11-09 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/82902
+ * tree-ssa-phiprop.c (propagate_with_phi): Test proper type.
+
+2017-11-09 Martin Liska <mliska@suse.cz>
+
+ PR target/82863
+ * emit-rtl.c (init_emit_regs): Initialize split_branch_probability to
+ uninitialized.
+
+2017-11-09 Martin Liska <mliska@suse.cz>
+
+ PR tree-optimization/82669
+ * sbitmap.h (bmp_iter_set_init): Remove non needed check.
+
+2017-11-09 Martin Liska <mliska@suse.cz>
+
+ PR gcov-profile/48463
+ * coverage.c (coverage_begin_function): Output also end locus
+ of a function and information whether the function is
+ artificial.
+ * gcov-dump.c (tag_function): Parse and print the information.
+ * gcov.c (INCLUDE_MAP): Add include.
+ (INCLUDE_SET): Likewise.
+ (struct line_info): Move earlier in the source file because
+ of vector<line_info> in function_info structure.
+ (line_info::line_info): Likewise.
+ (line_info::has_block): Likewise.
+ (struct source_info): Add new member index.
+ (source_info::get_functions_at_location): New function.
+ (function_info::group_line_p): New function.
+ (output_intermediate_line): New function.
+ (output_intermediate_file): Use the mentioned function.
+ (struct function_start): New.
+ (struct function_start_pair_hash): Likewise.
+ (process_file): Add code that identifies group functions.
+ Assign lines either to global or function scope.
+ (generate_results): Skip artificial functions.
+ (find_source): Assign index for each source file.
+ (read_graph_file): Read new flag artificial and end_line.
+ (add_line_counts): Assign it either to global of function scope.
+ (accumulate_line_counts): Isolate core of the function to
+ accumulate_line_info and call it for both function and global
+ scope lines.
+ (accumulate_line_info): New function.
+ (output_line_beginning): Fix GNU coding style.
+ (print_source_line): New function.
+ (output_line_details): Likewise.
+ (output_function_details): Likewise.
+ (output_lines): Iterate both source (global) scope and function
+ scope.
+ (struct function_line_start_cmp): New class.
+ * doc/gcov.texi: Reflect changes in documentation.
+
+2017-11-09 Jakub Jelinek <jakub@redhat.com>
+
+ PR debug/82837
+ * dwarf2out.c (const_ok_for_output_1): Reject NEG in addition to NOT.
+ (mem_loc_descriptor): Handle (const (neg (...))) as (neg (const (...)))
+ and similarly for not instead of neg.
+
+2017-11-08 Andi Kleen <ak@linux.intel.com>
+
+ * config/i386/i386.opt: Add -mforce-indirect-call.
+ * config/i386/predicates.md: Check for flag_force_indirect_call.
+ * doc/invoke.texi: Document -mforce-indirect-call
+
+2017-11-08 Kito Cheng <kito.cheng@gmail.com>
+
+ * config/riscv/riscv-protos.h (riscv_slow_unaligned_access_p):
+ New extern.
+ (MOVE_RATIO): Use riscv_slow_unaligned_access_p.
+ config/riscv/riscv.c (predict.h): New include.
+ (riscv_slow_unaligned_access_p): No longer static.
+ (riscv_block_move_straight): Add require.
+ config/riscv/riscv-protos.h (riscv_hard_regno_nregs): Delete.
+
+2017-11-08 Jakub Jelinek <jakub@redhat.com>
+
+ PR target/82855
+ * config/i386/sse.md (<avx512>_eq<mode>3<mask_scalar_merge_name>,
+ <avx512>_eq<mode>3<mask_scalar_merge_name>_1): Use
+ nonimmediate_operand predicate for operand 1 instead of
+ register_operand.
+
+2017-11-08 Kyrylo Tkachov <kyrylo.tkachov@arm.com>
+
+ * config/aarch64/aarch64-simd.md (store_pair_lanes<mode>):
+ New pattern.
+ * config/aarch64/constraints.md (Uml): New constraint.
+ * config/aarch64/predicates.md (aarch64_mem_pair_lanes_operand): New
+ predicate.
+
+2017-11-08 Kyrylo Tkachov <kyrylo.tkachov@arm.com>
+
+ * simplify-rtx.c (simplify_ternary_operation): Simplify vec_merge
+ of two vec_duplicates into a vec_concat.
+
+2017-11-08 Kyrylo Tkachov <kyrylo.tkachov@arm.com>
+
+ * simplify-rtx.c (simplify_ternary_operation, VEC_MERGE):
+ Simplify vec_merge of vec_duplicate and vec_concat.
+ * config/aarch64/constraints.md (Utq): New constraint.
+ * config/aarch64/aarch64-simd.md (load_pair_lanes<mode>): New
+ define_insn.
+
+2017-11-08 Kyrylo Tkachov <kyrylo.tkachov@arm.com>
+
+ * simplify-rtx.c (simplify_ternary_operation, VEC_MERGE):
+ Simplify vec_merge of vec_duplicate and const_vector.
+ * config/aarch64/predicates.md (aarch64_simd_or_scalar_imm_zero):
+ New predicate.
+ * config/aarch64/aarch64-simd.md (*aarch64_combinez<mode>): Use VDC
+ mode iterator. Update predicate on operand 1 to
+ handle non-const_vec constants. Delete constraints.
+ (*aarch64_combinez_be<mode>): Likewise for operand 2.
+
+2017-11-08 Jakub Jelinek <jakub@redhat.com>
+
+ PR tree-optimization/78821
+ * gimple-ssa-store-merging.c (struct store_operand_info): Add bit_not_p
+ data member.
+ (store_operand_info::store_operand_info): Initialize it to false.
+ (pass_store_merging::terminate_all_aliasing_chains): Rewritten to use
+ ref_maybe_used_by_stmt_p and stmt_may_clobber_ref_p on lhs of each
+ store in the group, and if chain_info is non-NULL, to ignore altogether
+ that chain.
+ (compatible_load_p): Fail if bit_not_p does not match.
+ (imm_store_chain_info::output_merged_store): Handle bit_not_p loads.
+ (handled_load): Fill in bit_not_p. Handle BIT_NOT_EXPR.
+ (pass_store_merging::process_store): Adjust
+ terminate_all_aliasing_chains calls to pass NULL in all current spots,
+ call terminate_all_aliasing_chains newly when adding a store into
+ a chain with non-NULL chain_info.
+
+2017-11-08 Wilco Dijkstra <wdijkstr@arm.com>
+
+ * config/aarch64/aarch64.c (aarch64_can_eliminate): Simplify logic.
+
+2017-11-08 Wilco Dijkstra <wdijkstr@arm.com>
+
+ * config/aarch64/aarch64.c (aarch64_frame_pointer_required)
+ Remove.
+ (aarch64_layout_frame): Initialise emit_frame_chain.
+ (aarch64_can_eliminate): Remove omit leaf frame pointer code.
+ (TARGET_FRAME_POINTER_REQUIRED): Remove define.
+
+2017-11-08 Martin Liska <mliska@suse.cz>
+
+ * gimplify.c (expand_FALLTHROUGH_r): Simplify usage
+ of gimple_call_internal_p.
+
+2017-11-07 Tom de Vries <tom@codesourcery.com>
+
+ * config/mips/mips.h (ASM_OUTPUT_LABELREF): Wrap in "do {} while (0)".
+
+2017-11-07 Tom de Vries <tom@codesourcery.com>
+
+ * config/mips/mips.h (ASM_OUTPUT_CASE_END): Remove semicolon after
+ "do {} while (0)".
+
+2017-11-08 Martin Liska <mliska@suse.cz>
+
+ PR sanitizer/82792
+ * gimplify.c (expand_FALLTHROUGH_r): Skip IFN_ASAN_MARK.
+
+2017-11-07 Eric Botcazou <ebotcazou@adacore.com>
+
+ * gimple-pretty-print.c (dump_profile): Return "" instead of NULL.
+
+2017-11-07 Jakub Jelinek <jakub@redhat.com>
+
+ PR target/82855
+ * config/i386/i386.md (SWI1248_AVX512BWDQ2_64): New mode iterator.
+ (*cmp<mode>_ccz_1): New insn with $k alternative.
+
+ PR target/82855
+ * config/i386/i386.c (ix86_swap_binary_operands_p): Treat
+ RTX_COMM_COMPARE as commutative as well.
+ (ix86_binary_operator_ok): Formatting fix.
+ * config/i386/sse.md (*mul<mode>3<mask_name><round_name>,
+ *<code><mode>3<mask_name><round_saeonly_name>,
+ *<code><mode>3<mask_name>, *<code>tf3, *mul<mode>3<mask_name>,
+ *<s>mul<mode>3_highpart<mask_name>,
+ *vec_widen_umult_even_v16si<mask_name>,
+ *vec_widen_umult_even_v8si<mask_name>,
+ *vec_widen_umult_even_v4si<mask_name>,
+ *vec_widen_smult_even_v16si<mask_name>,
+ *vec_widen_smult_even_v8si<mask_name>, *sse4_1_mulv2siv2di3<mask_name>,
+ *avx2_pmaddwd, *sse2_pmaddwd, *<sse4_1_avx2>_mul<mode>3<mask_name>,
+ *avx2_<code><mode>3, *avx512f_<code><mode>3<mask_name>,
+ *sse4_1_<code><mode>3<mask_name>, *<code>v8hi3,
+ *sse4_1_<code><mode>3<mask_name>, *<code>v16qi3, *avx2_eq<mode>3,
+ <avx512>_eq<mode>3<mask_scalar_merge_name>_1, *sse4_1_eqv2di3,
+ *sse2_eq<mode>3, <mask_codefor><code><mode>3<mask_name>,
+ *<code><mode>3, *<sse2_avx2>_uavg<mode>3<mask_name>,
+ *<ssse3_avx2>_pmulhrsw<mode>3<mask_name>, *ssse3_pmulhrswv4hi3): Use
+ !(MEM_P (operands[1]) && MEM_P (operands[2])) condition instead of
+ ix86_binary_operator_ok. Formatting fixes.
+ (*<plusminus_insn><mode>3<mask_name><round_name>,
+ *<plusminus_insn><mode>3, *<plusminus_insn><mode>3_m): Formatting
+ fixes.
+
+2017-11-07 Segher Boessenkool <segher@kernel.crashing.org>
+
+ * config/rs6000/rs6000.md (GPR2): New mode_iterator.
+ ("cstore<mode>4"): Don't always expand with rs6000_emit_int_cmove for
+ eq and ne if TARGET_ISEL.
+ (cmp): New code_iterator.
+ (UNS, UNSU_, UNSIK): New code_attrs.
+ (<code><GPR:mode><GPR2:mode>2_isel): New define_insn_and_split.
+ ("eq<mode>3"): New define_expand, rename the define_insn_and_split
+ to...
+ ("eq<mode>3"): ... this.
+ ("ne<mode>3"): New define_expand, rename the define_insn_and_split
+ to...
+ ("ne<mode>3"): ... this.
+
+2017-11-07 Julia Koval <julia.koval@intel.com>
+
+ PR target/82812
+ * common/config/i386/i386-common.c
+ (OPTION_MASK_ISA_GENERAL_REGS_ONLY_UNSET): Remove MPX from flag.
+ (ix86_handle_option): Move MPX to isa_flags2 and GFNI to isa_flags.
+ * config/i386/i386-c.c (ix86_target_macros_internal): Ditto.
+ * config/i386/i386.opt: Ditto.
+ * config/i386/i386.c (ix86_target_string): Ditto.
+ (ix86_option_override_internal): Ditto.
+ (ix86_init_mpx_builtins): Move MPX to args2.
+ (ix86_expand_builtin): Special handling for OPTION_MASK_ISA_GFNI.
+ * config/i386/i386-builtin.def (__builtin_ia32_vgf2p8affineinvqb_v64qi,
+ __builtin_ia32_vgf2p8affineinvqb_v64qi_mask,
+ __builtin_ia32_vgf2p8affineinvqb_v32qi,
+ __builtin_ia32_vgf2p8affineinvqb_v32qi_mask,
+ __builtin_ia32_vgf2p8affineinvqb_v16qi,
+ __builtin_ia32_vgf2p8affineinvqb_v16qi_mask): Move to ARGS array.
+
+2017-11-07 Uros Bizjak <ubizjak@gmail.com>
+
+ PR target/80425
+ * config/i386.i386.md (*zero_extendsidi2): Change (?r,*Yj), (?*Yi,r)
+ and (*x,m) to ($r,Yj), ($Yi,r) and ($x,m).
+ (zero-extendsidi peephole2): Remove peephole.
+
+2017-11-07 Eric Botcazou <ebotcazou@adacore.com>
+
+ PR c/53037
+ * stor-layout.c: Include attribs.h.
+ (handle_warn_if_not_align): Replace test on TYPE_USER_ALIGN with
+ explicit lookup of "aligned" attribute.
+
+2017-11-07 Andrew Waterman <andrew@sifive.com>
+
+ * config/riscv/riscv-protos.h (riscv_hard_regno_nregs): New prototype.
+ (riscv_expand_block_move): Likewise.
+ * config/riscv/riscv.h (MOVE_RATIO): Tune cost to movmemsi
+ implementation.
+ (RISCV_MAX_MOVE_BYTES_PER_LOOP_ITER): New define.
+ (RISCV_MAX_MOVE_BYTES_STRAIGHT): New define.
+ * config/riscv/riscv.c (riscv_block_move_straight): New function.
+ (riscv_adjust_block_mem): Likewise.
+ (riscv_block_move_loop): Likewise.
+ (riscv_expand_block_move): Likewise.
+ gcc/config/riscv/riscv.md (movmemsi): New pattern.
+
+2017-11-07 Michael Clark <michaeljclark@mac.com>
+
+ * config/riscv/linux.h (MUSL_ABI_SUFFIX): New define.
+ (MUSL_DYNAMIC_LINKER): Likewise.
+
+2017-11-07 Richard Sandiford <richard.sandiford@linaro.org>
+
+ * config/aarch64/aarch64.md (ashl<mode>3, ashr<mode>3, lshr<mode>3)
+ (rotr<mode>3, rotl<mode>3): Use aarch64_reg_or_imm instead of
+ nonmmory_operand.
+
+2017-11-07 Richard Biener <rguenther@suse.de>
+
+ * match.pd: Fix build.
+
+2017-11-07 Wilco Dijkstra <wdijkstr@arm.com>
+ Jackson Woodruff <jackson.woodruff@arm.com>
+
+ PR tree-optimization/71026
+ * match.pd: Canonicalize negate in division.
+
+2017-11-07 Sudakshina Das <sudi.das@arm.com>
+
+ PR middle-end/80131
+ * match.pd: Simplify 1 << (C - x) where C = precision (x) - 1.
+
+2017-11-07 Marc Glisse <marc.glisse@inria.fr>
+
+ * match.pd ((a&~b)|(a^b),(a&~b)^~a,(a|b)&~(a^b),a|~(a^b),
+ (a|b)|(a&^b),(a&b)|~(a^b),~(~a&b),~X^Y): New transformations.
+
+2017-11-07 Marc Glisse <marc.glisse@inria.fr>
+
+ * fold-const.c (negate_expr_p) [PLUS_EXPR, MINUS_EXPR]: Handle
+ non-scalar integral types.
+ * match.pd (negate_expr_p): Handle MINUS_EXPR.
+ (-(A-B), -(~A)): New transformations.
+
+2017-11-07 Tom de Vries <tom@codesourcery.com>
+
+ * config/powerpcspe/aix43.h (SUBTARGET_OVERRIDE_OPTIONS): Remove
+ semicolon after "do {} while (0)".
+ * config/powerpcspe/aix51.h (SUBTARGET_OVERRIDE_OPTIONS): Same.
+ * config/powerpcspe/aix52.h (SUBTARGET_OVERRIDE_OPTIONS): Same.
+ * config/powerpcspe/aix53.h (SUBTARGET_OVERRIDE_OPTIONS): Same.
+ * config/powerpcspe/aix61.h (SUBTARGET_OVERRIDE_OPTIONS): Same.
+ * config/powerpcspe/aix71.h (SUBTARGET_OVERRIDE_OPTIONS): Same.
+
+2017-11-07 Tom de Vries <tom@codesourcery.com>
+
+ * config/rs6000/aix43.h (SUBTARGET_OVERRIDE_OPTIONS): Remove semicolon
+ after "do {} while (0)".
+ * config/rs6000/aix51.h (SUBTARGET_OVERRIDE_OPTIONS): Same.
+ * config/rs6000/aix52.h (SUBTARGET_OVERRIDE_OPTIONS): Same.
+ * config/rs6000/aix53.h (SUBTARGET_OVERRIDE_OPTIONS): Same.
+ * config/rs6000/aix61.h (SUBTARGET_OVERRIDE_OPTIONS): Same.
+ * config/rs6000/aix71.h (SUBTARGET_OVERRIDE_OPTIONS): Same.
+
+2017-11-07 Tom de Vries <tom@codesourcery.com>
+
+ PR other/82784
+ * config/arm/arm.c (HANDLE_NARROW_SHIFT_ARITH): Remove semicolon after
+ "while {} do (0)".
+ (arm_rtx_costs_internal): Add missing semicolon after
+ HANDLE_NARROW_SHIFT_ARITH call.
+
+2017-11-06 Segher Boessenkool <segher@kernel.crashing.org>
+
+ * config/rs6000/rs6000.c (rs6000_option_override_internal): Don't
+ disable isel if it was not set explicitly.
+
+2017-11-06 James Bowman <james.bowman@ftdichip.com>
+
+ * gcc/dwarf2out.c (modified_type_die): Retain ADDR_SPACE
+ qualifiers.
+ (add_type_attribute) likewise.
+
+2017-11-06 H.J. Lu <hongjiu.lu@intel.com>
+
+ * config/i386/i386.c (ix86_can_use_return_insn_p): Use reference
+ of struct ix86_frame.
+ (ix86_initial_elimination_offset): Likewise.
+ (ix86_expand_split_stack_prologue): Likewise.
+
+2017-11-06 Marc Glisse <marc.glisse@inria.fr>
+
+ * tree-vrp.h (enum value_range_type): Update stale comment.
+
+2017-11-06 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * config/aarch64/aarch64-protos.h (aarch64_expand_vec_perm)
+ (aarch64_expand_vec_perm_const): Take the number of units too.
+ * config/aarch64/aarch64.c (aarch64_expand_vec_perm)
+ (aarch64_expand_vec_perm_const): Likewise.
+ * config/aarch64/aarch64-simd.md (vec_perm_const<mode>)
+ (vec_perm<mode>): Update accordingly.
+
+2017-11-06 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * config/aarch64/aarch64-protos.h (aarch64_simd_vect_par_cnst_half):
+ Take the number of units too.
+ * config/aarch64/aarch64.c (aarch64_simd_vect_par_cnst_half): Likewise.
+ (aarch64_simd_check_vect_par_cnst_half): Update call accordingly,
+ but check for a vector mode before rather than after the call.
+ * config/aarch64/aarch64-simd.md (aarch64_split_simd_mov<mode>)
+ (move_hi_quad_<mode>, vec_unpack<su>_hi_<mode>)
+ (vec_unpack<su>_lo_<mode, vec_widen_<su>mult_lo_<mode>)
+ (vec_widen_<su>mult_hi_<mode>, vec_unpacks_lo_<mode>)
+ (vec_unpacks_hi_<mode>, aarch64_saddl2<mode>, aarch64_uaddl2<mode>)
+ (aarch64_ssubl2<mode>, aarch64_usubl2<mode>, widen_ssum<mode>3)
+ (widen_usum<mode>3, aarch64_saddw2<mode>, aarch64_uaddw2<mode>)
+ (aarch64_ssubw2<mode>, aarch64_usubw2<mode>, aarch64_sqdmlal2<mode>)
+ (aarch64_sqdmlsl2<mode>, aarch64_sqdmlal2_lane<mode>)
+ (aarch64_sqdmlal2_laneq<mode>, aarch64_sqdmlsl2_lane<mode>)
+ (aarch64_sqdmlsl2_laneq<mode>, aarch64_sqdmlal2_n<mode>)
+ (aarch64_sqdmlsl2_n<mode>, aarch64_sqdmull2<mode>)
+ (aarch64_sqdmull2_lane<mode>, aarch64_sqdmull2_laneq<mode>)
+ (aarch64_sqdmull2_n<mode>): Update accordingly.
+
+2017-11-06 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * config/aarch64/aarch64-protos.h (aarch64_reverse_mask): Take
+ the number of units too.
+ * config/aarch64/aarch64.c (aarch64_reverse_mask): Likewise.
+ * config/aarch64/aarch64-simd.md (vec_load_lanesoi<mode>)
+ (vec_store_lanesoi<mode>, vec_load_lanesci<mode>)
+ (vec_store_lanesci<mode>, vec_load_lanesxi<mode>)
+ (vec_store_lanesxi<mode>): Update accordingly.
+
+2017-11-06 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * config/aarch64/aarch64-protos.h (aarch64_endian_lane_rtx): Declare.
+ * config/aarch64/aarch64.c (aarch64_endian_lane_rtx): New function.
+ * config/aarch64/aarch64.h (ENDIAN_LANE_N): Take the number
+ of units rather than the mode.
+ * config/aarch64/iterators.md (nunits): New mode attribute.
+ * config/aarch64/aarch64-builtins.c (aarch64_simd_expand_args):
+ Use aarch64_endian_lane_rtx instead of GEN_INT (ENDIAN_LANE_N ...).
+ * config/aarch64/aarch64-simd.md (aarch64_dup_lane<mode>)
+ (aarch64_dup_lane_<vswap_width_name><mode>, *aarch64_mul3_elt<mode>)
+ (*aarch64_mul3_elt_<vswap_width_name><mode>): Likewise.
+ (*aarch64_mul3_elt_to_64v2df, *aarch64_mla_elt<mode>): Likewise.
+ (*aarch64_mla_elt_<vswap_width_name><mode>, *aarch64_mls_elt<mode>)
+ (*aarch64_mls_elt_<vswap_width_name><mode>, *aarch64_fma4_elt<mode>)
+ (*aarch64_fma4_elt_<vswap_width_name><mode>):: Likewise.
+ (*aarch64_fma4_elt_to_64v2df, *aarch64_fnma4_elt<mode>): Likewise.
+ (*aarch64_fnma4_elt_<vswap_width_name><mode>): Likewise.
+ (*aarch64_fnma4_elt_to_64v2df, reduc_plus_scal_<mode>): Likewise.
+ (reduc_plus_scal_v4sf, reduc_<maxmin_uns>_scal_<mode>): Likewise.
+ (reduc_<maxmin_uns>_scal_<mode>): Likewise.
+ (*aarch64_get_lane_extend<GPI:mode><VDQQH:mode>): Likewise.
+ (*aarch64_get_lane_zero_extendsi<mode>): Likewise.
+ (aarch64_get_lane<mode>, *aarch64_mulx_elt_<vswap_width_name><mode>)
+ (*aarch64_mulx_elt<mode>, *aarch64_vgetfmulx<mode>): Likewise.
+ (aarch64_sq<r>dmulh_lane<mode>, aarch64_sq<r>dmulh_laneq<mode>)
+ (aarch64_sqrdml<SQRDMLH_AS:rdma_as>h_lane<mode>): Likewise.
+ (aarch64_sqrdml<SQRDMLH_AS:rdma_as>h_laneq<mode>): Likewise.
+ (aarch64_sqdml<SBINQOPS:as>l_lane<mode>): Likewise.
+ (aarch64_sqdml<SBINQOPS:as>l_laneq<mode>): Likewise.
+ (aarch64_sqdml<SBINQOPS:as>l2_lane<mode>_internal): Likewise.
+ (aarch64_sqdml<SBINQOPS:as>l2_laneq<mode>_internal): Likewise.
+ (aarch64_sqdmull_lane<mode>, aarch64_sqdmull_laneq<mode>): Likewise.
+ (aarch64_sqdmull2_lane<mode>_internal): Likewise.
+ (aarch64_sqdmull2_laneq<mode>_internal): Likewise.
+ (aarch64_vec_load_lanesoi_lane<mode>): Likewise.
+ (aarch64_vec_store_lanesoi_lane<mode>): Likewise.
+ (aarch64_vec_load_lanesci_lane<mode>): Likewise.
+ (aarch64_vec_store_lanesci_lane<mode>): Likewise.
+ (aarch64_vec_load_lanesxi_lane<mode>): Likewise.
+ (aarch64_vec_store_lanesxi_lane<mode>): Likewise.
+ (aarch64_simd_vec_set<mode>): Update use of ENDIAN_LANE_N.
+ (aarch64_simd_vec_setv2di): Likewise.
+
+2017-11-06 Carl Love <cel@us.ibm.com>
+
+ * config/rs6000/rs6000-c.c (P8V_BUILTIN_VEC_REVB): Add power 8
+ definitions.
+ (P9V_BUILTIN_VEC_REVB): Remove the power 9 instance definitions.
+ * config/rs6000/altivec.h (vec_revb): Change the #define from power 9
+ to power 8.
+ * config/rs6000/r6000-protos.h (swap_endian_selector_for_mode): Add new
+ extern declaration.
+ * config/rs6000/rs6000.c (swap_endian_selector_for_mode): Add function.
+ * config/rs6000/rs6000-builtin.def (BU_P8V_VSX_1, BU_P8V_OVERLOAD_1):
+ Add power 8 macro expansions.
+ (BU_P9V_OVERLOAD_1): Remove power 9 overload expansion.
+ * config/rs6000/vsx.md (revb_<mode>): Add define_expand to generate
+ power 8 instructions. (VSX_XXBR): Add iterator.
+
+2017-11-06 Wilco Dijkstra <wdijkstr@arm.com>
+
+ * config/arm/arm.md (predicable_short_it): Change default to "no",
+ improve documentation, remove uses that are identical to the default.
+ (enabled_for_depr_it): Rename to enabled_for_short_it.
+ * gcc/config/arm/arm-fixed.md (predicable_short_it):
+ Remove default uses.
+ * gcc/config/arm/ldmstm.md (predicable_short_it): Likewise.
+ * gcc/config/arm/sync.md (predicable_short_it): Likewise.
+ * gcc/config/arm/thumb2.md (predicable_short_it): Likewise.
+ * gcc/config/arm/vfp.md (predicable_short_it): Likewise.
+
+2017-11-06 Michael Meissner <meissner@linux.vnet.ibm.com>
+
+ PR target/82748
+ * config/rs6000/rs6000-builtin.def (BU_FLOAT128_1): Delete
+ float128 helper macros, which are no longer used after deleting
+ the old 'q' built-in functions, and moving the round to odd
+ built-in functions to being special built-in functions.
+ (BU_FLOAT128_2): Likewise.
+ (BU_FLOAT128_1_HW): Likewise.
+ (BU_FLOAT128_2_HW): Likewise.
+ (BU_FLOAT128_3_HW): Likewise.
+ (FABSQ): Delete old 'q' built-in functions.
+ (COPYSIGNQ): Likewise.
+ (SQRTF128_ODD): Move round to odd built-in functions to be
+ special built-in functions, so that we can handle
+ -mabi=ieeelongdouble.
+ (TRUNCF128_ODD): Likewise.
+ (ADDF128_ODD): Likewise.
+ (SUBF128_ODD): Likewise.
+ (MULF128_ODD): Likewise.
+ (DIVF128_ODD): Likewise.
+ (FMAF128_ODD): Likewise.
+ * config/rs6000/rs6000-c.c (rs6000_cpu_cpp_builtins): Map old 'q'
+ built-in names to 'f128'.
+ * config/rs6000/rs6000.c (rs6000_fold_builtin): Remove folding the
+ old 'q' built-in functions, as the machine independent code for
+ 'f128' built-in functions handles this.
+ (rs6000_expand_builtin): Add expansion for float128 round to odd
+ functions, keying off on -mabi=ieeelongdouble of whether to use
+ the KFmode or TFmode variant.
+ (rs6000_init_builtins): Initialize the _Float128 round to odd
+ built-in functions.
+ * doc/extend.texi (PowerPC Built-in Functions): Document the old
+ _Float128 'q' built-in functions are now mapped into the new
+ 'f128' built-in functions.
+
+2017-11-06 David Edelsohn <dje.gcc@gmail.com>
+
+ * collect2.c (add_lto_object): Compile for OBJECT_COFF.
+ (scan_prog_file): Don't skip PASS_LTOINFO. Scan for LTO objects.
+
+2017-11-06 David Malcolm <dmalcolm@redhat.com>
+
+ PR jit/82826
+ * ipa-fnsummary.c (ipa_fnsummary_c_finalize): New function.
+ * ipa-fnsummary.h (ipa_fnsummary_c_finalize): New decl.
+ * toplev.c: Include "ipa-fnsummary.h".
+ (toplev::finalize): Call ipa_fnsummary_c_finalize.
+
+2017-11-06 Jakub Jelinek <jakub@redhat.com>
+
+ PR tree-optimization/82838
+ * gimple-ssa-store-merging.c
+ (imm_store_chain_info::output_merged_store): Call force_gimple_operand_1
+ on a separate gimple_seq which is then appended to seq.
+
+2017-11-06 Jeff Law <law@redhat.com>
+
+ PR target/82788
+ * config/i386/i386.c (PROBE_INTERVAL): Remove.
+ (get_probe_interval): New functions.
+ (ix86_adjust_stack_and_probe_stack_clash): Use get_probe_interval.
+ (ix86_adjust_stack_and_probe): Likewise.
+ (output_adjust_stack_and_probe): Likewise.
+ (ix86_emit_probe_stack_range): Likewise.
+ (ix86_expand_prologue): Likewise.
+
+2017-11-06 Richard Sandiford <richard.sandiford@linaro.org>
+
+ PR tree-optimization/82816
+ * tree-ssa-math-opts.c (convert_mult_to_widen): Return false
+ if the modes of the two types are the same.
+ (convert_plusminus_to_widen): Likewise.
+
+2017-11-06 Bill Schmidt <wschmidt@linux.vnet.ibm.com>
+
+ * config/rs6000/altivec.md (*p9_vadu<mode>3) Rename to
+ p9_vadu<mode>3.
+ (usadv16qi): New define_expand.
+ (usadv8hi): New define_expand.
+
+2017-11-06 Jan Hubicka <hubicka@ucw.cz>
+
+ PR bootstrap/82832
+ * ipa-inline-transform.c (update_noncloned_frequencies): Always
+ scale.
+ (inline_transform): Likewise.
+ * predict.c (counts_to_freqs): Remove useless conditional.
+ * profile-count.h (profile_count::apply_scale): Move sanity check.
+ * tree-inline.c (copy_bb): Always scale.
+ (copy_cfg_body): Likewise.
+
+2017-11-06 Christophe Lyon <christophe.lyon@linaro.org>
+
+ PR target/67591
+ * config/arm/arm.md (*sub_shiftsi): Add predicable_short_it
+ attribute.
+ (*cmp_ite0): Add enabled_for_depr_it attribute.
+ (*cmp_ite1): Likewise.
+
+2017-11-06 Segher Boessenkool <segher@kernel.crashing.org>
+
+ * config/rs6000/rs6000.c (rs6000_insn_cost): Handle TYPE_MFCR and
+ TYPE_MFCRF.
+
+2017-11-06 Richard Sandiford <richard.sandiford@linaro.org>
+
+ * tree-vrp.c (vrp_int_const_binop): Return true on success and
+ return the value by pointer.
+ (extract_range_from_multiplicative_op_1): Update accordingly.
+ Return as soon as an operation fails.
+
+2017-11-05 Tom de Vries <tom@codesourcery.com>
+
+ PR other/82784
+ * asan.c (DEF_SANITIZER_BUILTIN_1): Factor out of ...
+ (DEF_SANITIZER_BUILTIN): ... here.
+ (initialize_sanitizer_builtins): Use DEF_SANITIZER_BUILTIN_1 instead of
+ DEF_SANITIZER_BUILTIN in if stmt. Add missing semicolon.
+
+2017-11-05 Tom de Vries <tom@codesourcery.com>
+
+ PR other/82784
+ * config/elfos.h (ASM_OUTPUT_BEFORE_CASE_LABEL): Remove semicolon after
+ macro body.
+ (ASM_OUTPUT_CASE_LABEL): Add semicolon after
+ ASM_OUTPUT_BEFORE_CASE_LABEL call.
+ * config/arc/arc.h (ASM_OUTPUT_BEFORE_CASE_LABEL): Remove semicolon
+ after macro body.
+ * config/m68k/m68kelf.h (ASM_OUTPUT_BEFORE_CASE_LABEL): Same.
+ * config/mips/mips.h (ASM_OUTPUT_BEFORE_CASE_LABEL): Same.
+ * config/v850/v850.h (ASM_OUTPUT_BEFORE_CASE_LABEL): Same.
+
+2017-11-05 Tom de Vries <tom@codesourcery.com>
+
+ PR other/82784
+ * graphite-scop-detection.c (DEBUG_PRINT): Remove semicolon after
+ "do {} while (0)".
+
+2017-11-04 Michael Clark <michaeljclark@mac.com>
+
+ * config/riscv/riscv.c (riscv_print_operand): Add a 'i' format.
+ config/riscv/riscv.md (addsi3): Use 'i' for immediates.
+ (adddi3): Likewise.
+ (*addsi3_extended): Likewise.
+ (*addsi3_extended2): Likewise.
+ (<optab>si3): Likewise.
+ (<optab>di3): Likewise.
+ (<optab><mode>3): Likewise.
+ (<*optabe>si3_internal): Likewise.
+ (zero_extendqi<SUPERQI:mode>2): Likewise.
+ (*add<mode>hi3): Likewise.
+ (*xor<mode>hi3): Likewise.
+ (<optab>di3): Likewise.
+ (*<optab>si3_extend): Likewise.
+ (*sge<u>_<X:mode><GPR:mode>): Likewise.
+ (*slt<u>_<X:mode><GPR:mode>): Likewise.
+ (*sle<u>_<X:mode><GPR:mode>): Likewise.
+
+2017-11-04 Andrew Waterman <andrew@sifive.com>
+
+ * config/riscv/riscv.c (riscv_option_override): Conditionally set
+ TARGET_STRICT_ALIGN based upon -mtune argument.
+
+2017-11-04 Andrew Waterman <andrew@sifive.com>
+
+ * config/riscv/riscv.h (SLOW_BYTE_ACCESS): Change to 1.
+
+2017-11-04 Daniel Santos <daniel.santos@pobox.com>
+
+ config/i386/i386.c (choose_basereg): Use optional scratch
+ register and add assertion.
+ (x86_emit_outlined_ms2sysv_save): Use scratch register when
+ needed, and don't allocate stack.
+ (ix86_expand_prologue): Rearrange where SSE saves/stub call is
+ emitted, correct wrong allocation with -mcall-ms2sysv-xlogues.
+ (ix86_emit_outlined_ms2sysv_restore): Fix non-immediate offsets.
+
2017-11-03 Jeff Law <law@redhat.com>
* config/i386/i386.c (ix86_emit_restore_reg_using_pop): Prototype.
@@ -255,7 +2117,7 @@
(find_trace): Use to_frequency.
(tail_duplicate): Use to_frequency.
* trans-mem.c (expand_transaction): Do not update frequency.
- * tree-call-cdce.c: Do not update frequency.
+ * tree-call-cdce.c: Do not update frequency.
* tree-cfg.c (gimple_find_sub_bbs): Likewise.
(gimple_merge_blocks): Likewise.
(gimple_split_edge): Likewise.
diff --git a/gcc/DATESTAMP b/gcc/DATESTAMP
index 2c700d42332..49bbb3c7bd9 100644
--- a/gcc/DATESTAMP
+++ b/gcc/DATESTAMP
@@ -1 +1 @@
-20171104
+20171116
diff --git a/gcc/Makefile.in b/gcc/Makefile.in
index 7e23a230793..926ceb0b34b 100644
--- a/gcc/Makefile.in
+++ b/gcc/Makefile.in
@@ -1302,6 +1302,7 @@ OBJS = \
gimple-low.o \
gimple-pretty-print.o \
gimple-ssa-backprop.o \
+ gimple-ssa-evrp.o \
gimple-ssa-isolate-paths.o \
gimple-ssa-nonnull-compare.o \
gimple-ssa-split-paths.o \
@@ -1355,6 +1356,7 @@ OBJS = \
ipa-predicate.o \
ipa-profile.o \
ipa-prop.o \
+ ipa-param-manipulation.o \
ipa-pure-const.o \
ipa-icf.o \
ipa-icf-gimple.o \
@@ -1576,6 +1578,7 @@ OBJS = \
varasm.o \
varpool.o \
vmsdbgout.o \
+ vr-values.o \
vtable-verify.o \
web.o \
wide-int.o \
@@ -3468,7 +3471,7 @@ PLUGIN_HEADERS = $(TREE_H) $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) \
$(C_COMMON_H) c-family/c-objc.h $(C_PRETTY_PRINT_H) \
tree-iterator.h $(PLUGIN_H) $(TREE_SSA_H) langhooks.h incpath.h debug.h \
$(EXCEPT_H) tree-ssa-sccvn.h real.h output.h $(IPA_UTILS_H) \
- $(C_PRAGMA_H) $(CPPLIB_H) $(FUNCTION_H) \
+ ipa-param-manipulation.h $(C_PRAGMA_H) $(CPPLIB_H) $(FUNCTION_H) \
cppdefault.h flags.h $(MD5_H) params.def params.h params-enum.h \
prefix.h tree-inline.h $(GIMPLE_PRETTY_PRINT_H) realmpfr.h \
$(IPA_PROP_H) $(TARGET_H) $(RTL_H) $(TM_P_H) $(CFGLOOP_H) $(EMIT_RTL_H) \
diff --git a/gcc/ada/ChangeLog b/gcc/ada/ChangeLog
index 6e2a7ffd099..edf87c37cdf 100644
--- a/gcc/ada/ChangeLog
+++ b/gcc/ada/ChangeLog
@@ -1,3 +1,791 @@
+2017-11-10 Martin Sebor <msebor@redhat.com>
+
+ PR c/81117
+ * adadecode.c (__gnat_decode): Use memcpy instead of strncpy.
+ * argv.c (__gnat_fill_arg, __gnat_fill_env): Likewise.
+
+2017-11-10 Eric Botcazou <ebotcazou@adacore.com>
+
+ * gcc-interface/utils.c (convert) <RECORD_TYPE>: Add comment and do
+ not fall through to the next case.
+ <ARRAY_TYPE>: Deal specially with a dereference from another array
+ type with the same element type.
+
+2017-11-09 Gary Dismukes <dismukes@adacore.com>
+
+ * exp_util.adb, freeze.adb: Minor reformatting.
+
+2017-11-09 Jerome Lambourg <lambourg@adacore.com>
+
+ * gcc-interface/Makefile.in: Add rules to build aarch64-qnx runtimes.
+
+2017-11-09 Hristian Kirtchev <kirtchev@adacore.com>
+
+ * gcc-interface/trans.c (gnat_to_gnu): Add processing for
+ N_Variable_Reference_Marker nodes.
+
+2017-11-09 Ed Schonberg <schonberg@adacore.com>
+
+ * sem_ch12.adb (Analyze_Generic_Package_Declaration): Handle properly
+ the pragma Compile_Time_Error when it appears in a generic package
+ declaration and uses an expanded name to denote the current unit.
+
+2017-11-09 Jerome Lambourg <lambourg@adacore.com>
+
+ * libgnarl/s-taprop__qnx.adb: Fix incorrect casing for pthread_self.
+ * tracebak.c: Add support for tracebacks in QNX.
+
+2017-11-09 Eric Botcazou <ebotcazou@adacore.com>
+
+ * exp_aggr.adb (Aggr_Size_OK): Bump base limit from 50000 to 500000.
+
+2017-11-09 Yannick Moy <moy@adacore.com>
+
+ * erroutc.adb, set_targ.adb: Remove pragma Annotate for CodePeer
+ justification.
+
+2017-11-09 Joel Brobecker <brobecker@adacore.com>
+
+ * doc/gnat_ugn/platform_specific_information.rst: Document packages
+ needed on GNU/Linux by GNAT.
+ * gnat_ugn.texi: Regenerate.
+
+2017-11-09 Hristian Kirtchev <kirtchev@adacore.com>
+
+ * contracts.adb (Analyze_Contracts): Remove the three parameter
+ version. This routine now only analyzes contracts and does not perform
+ any freezing actions.
+ (Analyze_Previous_Contracts): Removed.
+ (Freeze_Previous_Contracts): New routine.
+ * contracts.ads (Analyze_Previous_Contracts): Removed.
+ (Freeze_Previous_Contracts): New routine.
+ * sem_ch3.adb (Analyze_Declarations): Analyze the contract of an
+ enclosing package spec regardless of whether the list denotes the
+ visible or private declarations. Fix the removal of partial state
+ refinements when the context is a package spec.
+ * sem_ch6.adb (Analyze_Subprogram_Body_Helper): Freeze previous
+ contracts.
+ * sem_ch7.adb (Analyze_Package_Body_Helper): Freeze previous contracts.
+ * sem_ch9.adb (Analyze_Entry_Body): Freeze previous contracts.
+ (Analyze_Protected_Body): Freeze previous contracts.
+ (Analyze_Task_Body): Freeze previous contracts.
+ * sem_prag.adb: Comment reformatting.
+
+2017-11-09 Bob Duff <duff@adacore.com>
+
+ * libgnarl/g-thread.ads, libgnarl/g-thread.adb: (Make_Independent):
+ Export this so users can use it without importing
+ System.Tasking.Utilities.
+ * libgnarl/s-tassta.adb (Vulnerable_Complete_Task): Relax assertion
+ that fails when Make_Independent is called on a user task.
+ * libgnarl/s-taskin.ads (Master_Of_Task): Avoid unusual
+ capitalization style ((style) bad casing of "Master_of_Task").
+
+2017-11-09 Ed Schonberg <schonberg@adacore.com>
+
+ * sem_ch12.adb (Analyze_Subprogram_Instantiation): Correct use of
+ uninitialized variable uncovered by Codepeer.
+
+2017-11-09 Arnaud Charlet <charlet@adacore.com>
+
+ * namet.adb: Replace pragma Assume by pragma Assert to fix bootstrap.
+
+2017-11-09 Javier Miranda <miranda@adacore.com>
+
+ * doc/gnat_rm/standard_and_implementation_defined_restrictions.rst:
+ (Static_Dispatch_Tables): Minor rewording.
+ * gnat_rm.texi: Regenerate.
+
+2017-11-09 Justin Squirek <squirek@adacore.com>
+
+ * sem_ch8.adb (Analyze_Use_Package): Remove forced installation of
+ use_clauses within instances.
+ (Use_One_Package): Add condition to check for "hidden" open scopes to
+ avoid skipping over packages that have not been properly installed even
+ though they are visible.
+
+2017-11-09 Ed Schonberg <schonberg@adacore.com>
+
+ * sem_ch4.adb (Analyze_Selected_Component): Reject properly a call to a
+ private operation of a protected type, when the type has no visible
+ operations.
+
+2017-11-09 Javier Miranda <miranda@adacore.com>
+
+ * rtsfind.ads (RE_Id, RE_Unit_Table): Add RE_HT_Link.
+ * exp_disp.adb (Make_DT): Initialize the HT_Link field of the TSD only
+ if available.
+
+2017-11-09 Bob Duff <duff@adacore.com>
+
+ * exp_ch4.adb, exp_ch9.adb, exp_prag.adb, par-ch3.adb, sem_aggr.adb,
+ sem_ch12.adb, sem_ch13.adb, sem_ch4.adb, sem_disp.adb, sem_prag.adb,
+ sem_res.adb, sem_util.adb: Get rid of warnings about uninitialized
+ variables.
+
+2017-11-09 Yannick Moy <moy@adacore.com>
+
+ * exp_disp.adb (Make_DT): Default initialize Ifaces_List and
+ Ifaces_Comp_List.
+
+2017-11-09 Pascal Obry <obry@adacore.com>
+
+ * libgnarl/s-taprop__mingw.adb: On Windows, initialize the thead handle
+ only for foreign threads. We initialize the thread handle only if not
+ yet initialized. This happens in Enter_Task for foreign threads only.
+ But for native threads (Ada tasking) we do want to keep the real
+ handle (from Create_Task) to be able to free the corresponding
+ resources in Finalize_TCB (CloseHandle).
+
+2017-11-09 Yannick Moy <moy@adacore.com>
+
+ * sem_attr.adb (Analyze_Attribute): Default initialize P_Type,
+ P_Base_Type.
+ (Error_Attr_P): Fix name in pragma No_Return.
+ (Unexpected_Argument): Add pragma No_Return.
+ (Placement_Error): Add pragma No_Return.
+
+2017-11-09 Javier Miranda <miranda@adacore.com>
+
+ * exp_disp.adb (Elab_Flag_Needed): Elaboration flag not needed when the
+ dispatch table is statically built.
+ (Make_DT): Declare constant the Interface_Table object associated with
+ an statically built dispatch table. For this purpose the Offset_To_Top
+ value of each interface is computed using the dummy object.
+ * exp_ch3.adb (Build_Init_Procedure): Do not generate code initializing
+ the Offset_To_Top field of secondary dispatch tables when the dispatch
+ table is statically built.
+ (Initialize_Tag): Do not generate calls to Register_Interface_Offset
+ when the dispatch table is statically built.
+ * doc/gnat_rm/standard_and_implementation_defined_restrictions.rst:
+ Document the new GNAT restriction Static_Dispatch_Tables.
+ * gnat_rm.texi: Regenerate.
+
+2017-11-09 Hristian Kirtchev <kirtchev@adacore.com>
+
+ * sem_aggr.adb (Resolve_Delta_Record_Aggregate): Reorder declarations
+ to avoid a dormant bug.
+
+2017-11-09 Jerome Lambourg <lambourg@adacore.com>
+
+ * init.c: Define missing __gnat_alternate_stack for QNX. Set it to 0,
+ as such capability is not available on the OS.
+ * link.c: Make sure linker options for QNX are correct.
+ * libgnarl/s-osinte__qnx.ads: Add some missing bindings to pthread.
+ * libgnarl/s-taprop__qnx.adb: New, derived from s-taprop__posix.adb. This brings
+ in particular a workaround with locks priority ceiling where a higher
+ priority task is allowed to lock a lower ceiling priority lock. This
+ also fixes the scheduling of FIFO tasks when the priority of a task is
+ lowered.
+ * libgnat/system-qnx-aarch64.ads: Fix priority ranges.
+
+2017-11-09 Yannick Moy <moy@adacore.com>
+
+ * erroutc.adb (Output_Error_Msgs): Justify CodePeer false positive
+ message.
+ * gnatbind.adb (Scan_Bind_Arg): Simplify test to remove always true
+ condition.
+ * namet.adb (Copy_One_Character): Add assumption for static analysis,
+ as knowledge that Hex(2) is in the range 0..255 is too complex for
+ CodePeer.
+ (Finalize): Add assumption for static analysis, as the fact that there
+ are symbols in the table depends on a global invariant at this point in
+ the program.
+ * set_targ.adb (Check_Spaces): Justify CodePeer false positive message.
+ * stylesw.adb (Save_Style_Check_Options): Rewrite to avoid test always
+ true.
+
+2017-11-09 Javier Miranda <miranda@adacore.com>
+
+ * libgnat/s-rident.ads (Static_Dispatch_Tables): New restriction name.
+ * exp_disp.adb (Building_Static_DT): Check restriction.
+ (Building_Static_Secondary_DT): Check restriction.
+ (Make_DT): Initialize the HT_Link to No_Tag.
+ * opt.ads (Static_Dispatch_Tables): Rename flag...
+ (Building_Static_Dispatch_Tables): ... into this. This will avoid
+ conflict with the restriction name.
+ * gnat1drv.adb: Update.
+ * exp_aggr.adb (Is_Static_Dispatch_Table_Aggregate): Update.
+ * exp_ch3.adb (Expand_N_Object_Declaration): Update.
+
+2017-11-09 Pascal Obry <obry@adacore.com>
+
+ * libgnarl/s-taprop__mingw.adb: Minor code clean-up. Better using a
+ named number.
+
+2017-11-09 Yannick Moy <moy@adacore.com>
+
+ * binde.adb (Diagnose_Elaboration_Problem): Mark procedure No_Return.
+ * checks.adb (Apply_Scalar_Range_Check): Rescope variable OK closer to
+ use. Default initialize Hi, Lo.
+ (Selected_Range_Checks): Retype Num_Checks more precisely.
+ (Determine_Range, Determine_Range_R): Default initialize Hi_Right,
+ Lo_Right.
+ * contracts.adb (Process_Contract_Cases): Mark parameter Stmts as
+ Unmodified.
+ (Process_Postconditions): Mark parameter Stmts as Unmodified.
+ * exp_attr.adb (Expand_Loop_Entry_Attribute): Default initialize Blk.
+ * exp_ch4.adb (Expand_N_Allocator): Default initialize Typ.
+ (Expand_Concatenate): Default initialize High_Bound.
+ (Optimize_Length_Comparison): Default initialize Ent, Index.
+ * exp_ch5.adb (Expand_Predicated_Loop): Default initialize L_Hi and
+ L_Lo.
+ * exp_ch6.adb (Expand_N_Extended_Return_Statement): Default initialize
+ Return_Stmt.
+ * exp_ch9.adb (Expand_Entry_Barrier): Default initialize Func_Body and
+ remove pragma Warnings(Off).
+ * exp_imgv.adb (Expand_Image_Attribute): Default initialize Tent.
+ * exp_util.adb (Find_Interface_Tag): Default initialize AI_Tag.
+ * freeze.adb (Check_Component_Storage_Order): Default initialize
+ Comp_Byte_Aligned rather than silencing messages with pragma
+ Warnings(Off), which does not work for CodePeer initialization
+ messages, and given that here the possible read of an unitialized value
+ depends on a proper use of parameters by the caller.
+ * inline.adb (Expand_Inlined_Call): Default initialize Lab_Decl, Targ.
+ * sem_ch12.adb (Build_Operator_Wrapper): Default initialize Expr.
+ * sem_ch3.adb (Build_Derived_Array_Type): Default initialize
+ Implicit_Base.
+ * sem_ch4.adb (List_Operand_Interps): Default initialize Nam and remove
+ pragma Warnings(Off).
+ (Analyze_Case_Expression): Rescope checking block within branch where
+ Others_Present is set by the call to Check_Choices.
+ * sem_ch5.adb (Analyze_Assignment): Default initialize
+ Save_Full_Analysis.
+ * sem_ch6.adb (Analyze_Function_Return): Default initialize Obj_Decl,
+ and restructure code to defend against previous errors, so that, in
+ that case, control does not flow to the elsif condition which read an
+ uninitialized Obj_Decl.
+ * sem_ch9.adb (Analyze_Requeue): Default initialize Synch_Type.
+ (Check_Interfaces): Default initialize Full_T_Ifaces and Priv_T_Ifaces,
+ which seem to be left uninitialized and possibly read in some cases.
+ * sem_dim.adb (Analyze_Aspect_Dimension_System): Retype Position more
+ precisely. This requires to exchange the test for exiting in case of
+ too many positions and the increment to Position, inside the loop.
+ * sem_eval.adb (Eval_Concatenation): Default initialize Folded_Val,
+ which cannot be read uninitialized, but the reasons for that are quite
+ subtle.
+ * sem_intr.adb (Check_Intrinsic_Call): Default initialize Rtyp.
+ * sem_prag.adb (Collect_Subprogram_Inputs_Outputs): Default initialize
+ Spec_Id.
+ * sem_res.adb (Make_Call_Into_Operator): Default initialize Opnd_Type,
+ and test for presence of non-null Opnd_Type before testing its scope,
+ in a test which would read its value uninitialized, and is very rarely
+ exercized (it depends on the presence of an extension of System).
+ * sem_spark.ads: Update comment to fix name of main analysis procedure.
+ * sem_warn.adb (Warn_On_Known_Condition): Default initialize
+ Test_Result.
+ * set_targ.adb (FailN): Mark procedure with No_Return.
+ * stylesw.adb (Save_Style_Check_Options): Delete useless code to
+ initialize all array Options to white space, as there is already code
+ doing the same for the remaining positions in Options at the end of the
+ procedure.
+
+2017-11-09 Eric Botcazou <ebotcazou@adacore.com>
+
+ * exp_ch11.adb (Possible_Local_Raise): Do not issue the warning for
+ generic instantiations either.
+
+2017-11-09 Piotr Trojanek <trojanek@adacore.com>
+
+ * sem_prag.adb (Analyze_Part_Of): Reword error message.
+ (Get_SPARK_Mode_Type): Do not raise Program_Error in case pragma
+ SPARK_Mode appears with an illegal mode, treat this as a non-existent
+ mode.
+
+2017-11-09 Ed Schonberg <schonberg@adacore.com>
+
+ * sem_ch4.adb (Analyze_Call): Reject a call to a function that returns
+ a limited view of a type T declared in unit U1, when the function is
+ declared in another unit U2 and the call appears in a procedure within
+ another unit.
+
+2017-11-09 Justin Squirek <squirek@adacore.com>
+
+ * sem_ch8.adb (Analyze_Use_Package): Force installation of use_clauses
+ when processing generic instances.
+
+2017-11-09 Bob Duff <duff@adacore.com>
+
+ * namet.ads, namet.adb (Valid_Name_Id): New subtype that excludes
+ Error_Name and No_Name. Use this (versus Name_Id) to indicate which
+ objects can have those special values. Valid_Name_Id could usefully be
+ used all over the compiler front end, but that's too much trouble for
+ now. If we did that, we might want to rename:
+ Name_Id --> Optional_Name_Id, Valid_Name_Id --> Name_Id.
+ For parameters of type Valid_Name_Id, remove some redundant tests,
+ including the ones found by CodePeer. Use Is_Valid_Name instead of
+ membership test when appropriate.
+ (Error_Name_Or_No_Name): Delete this; it's no longer needed.
+ * sem_ch2.adb (Analyze_Identifier): Use "not Is_Valid_Name" instead of
+ "in Error_Name_Or_No_Name".
+ (Check_Parameterless_Call): Use "not Is_Valid_Name" instead of "in
+ Error_Name_Or_No_Name".
+
+2017-11-09 Arnaud Charlet <charlet@adacore.com>
+
+ * gnat1drv.adb (Adjust_Global_Switches): Suppress warnings in codepeer
+ mode here unless -gnateC is specified.
+ * switch-c.adb (Scan_Front_End_Switches): Do not suppress warnings with
+ -gnatC here.
+
+2017-11-09 Piotr Trojanek <trojanek@adacore.com>
+
+ * lib-writ.adb (Write_ALI): Remove processing of the frontend xrefs as
+ part of the ALI writing; they are now processed directly from memory
+ when requested by the backend.
+ * lib-xref.ads (Collect_SPARK_Xrefs): Remove.
+ (Iterate_SPARK_Xrefs): New routine for iterating over frontend xrefs.
+ * lib-xref-spark_specific.adb (Traverse_Compilation_Unit): Remove.
+ (Add_SPARK_File): Remove.
+ (Add_SPARK_Xref): Refactored from removed code; filters xref entries
+ that are trivially uninteresting to the SPARK backend.
+ * spark_xrefs.ads: Remove code that is no longer needed.
+ * spark_xrefs.adb (dspark): Adapt to use Iterate_SPARK_Xrefs.
+
+2017-11-09 Hristian Kirtchev <kirtchev@adacore.com>
+
+ * sem_elab.adb: Update the documentation on adding a new elaboration
+ schenario. Add new hash table Recorded_Top_Level_Scenarios.
+ (Is_Check_Emitting_Scenario): Removed.
+ (Is_Recorded_Top_Level_Scenario): New routine.
+ (Kill_Elaboration_Scenario): Reimplemented.
+ (Record_Elaboration_Scenario): Mark the scenario as recorded.
+ (Set_Is_Recorded_Top_Level_Scenario): New routine.
+ (Update_Elaboration_Scenario): Reimplemented.
+ * sinfo.adb (Is_Recorded_Scenario): Removed.
+ (Set_Is_Recorded_Scenario): Removed.
+ * sinfo.ads: Remove attribute Is_Recorded_Scenario along with
+ occurrences in nodes.
+ (Is_Recorded_Scenario): Removed along with pragma Inline.
+ (Set_Is_Recorded_Scenario): Removed along with pragma Inline.
+
+2017-11-09 Piotr Trojanek <trojanek@adacore.com>
+
+ * sem_prag.adb (Analyze_Part_Of): Change "designate" to "denote" in
+ error message.
+
+2017-11-09 Justin Squirek <squirek@adacore.com>
+
+ * sem_res.adb (Resolve_Allocator): Add warning messages corresponding
+ to the allocation of an anonymous access-to-controlled object.
+
+2017-11-09 Jerome Lambourg <lambourg@adacore.com>
+
+ * sigtramp-qnx.c: Fix obvious typo.
+
+2017-11-09 Doug Rupp <rupp@adacore.com>
+
+ * libgnarl/s-taprop__linux.adb (Monotonic_Clock): Minor reformatting.
+
+2017-11-09 Ed Schonberg <schonberg@adacore.com>
+
+ * sem_res.adb (Resolve): If expression is an entity whose type has
+ implicit dereference, generate reference to it, because no reference is
+ generated for an overloaded entity during analysis, given that its
+ identity may not be known.
+
+2017-11-09 Javier Miranda <miranda@adacore.com>
+
+ * exp_disp.adb (Expand_Interface_Thunk): Replace substraction of
+ offset-to-top field by addition.
+ (Make_Secondary_DT): Initialize the offset-to-top field with a negative
+ offset.
+ * exp_ch3.adb (Build_Offset_To_Top_Function): Build functions that
+ return a negative offset-to-top value.
+ (Initialize_Tag): Invoke runtime services Set_Dynamic_Offset_To_Top and
+ Set_Static_Offset_To_Top passing a negative offet-to-top value;
+ initialize also the offset-to-top field with a negative offset.
+ * libgnat/a-tags.adb (Base_Address): Displace the pointer by means of
+ an addition since the offset-to-top field is now a negative value.
+ (Displace): Displace the pointer to the object means of a substraction
+ since it is now a negative value.
+ (Set_Dynamic_Offset_to_top): Displace the pointer to the object by
+ means of a substraction since it is now a negative value.
+
+2017-11-09 Eric Botcazou <ebotcazou@adacore.com>
+
+ * gnat1drv.adb (Gnat1drv): Call Errout.Finalize (Last_Call => True)
+ before Errout.Output_Messages also in the case of compilation errors.
+
+2017-11-09 Javier Miranda <miranda@adacore.com>
+
+ * doc/gnat_ugn/the_gnat_compilation_model.rst (Interfacing with C++ at
+ the Class Level): Fix error interfacing with C strings.
+ * gnat_ugn.texi: Regenerate.
+
+2017-11-09 Jerome Lambourg <lambourg@adacore.com>
+
+ * system-qnx-aarch64.ads: Fix the priority constants.
+ * s-osinte__qnx.ads: Fix constants for handling the locking protocols
+ and scheduling.
+ * s-osinte__qnx.adb: New file , prevents the use of priority 0 that
+ corresponds to an idle priority on QNX.
+
+2017-11-09 Piotr Trojanek <trojanek@adacore.com>
+
+ * sem_prag.adb, sem_util.adb, sem_elab.adb: Fix minor typos in
+ comments.
+
+2017-11-09 Piotr Trojanek <trojanek@adacore.com>
+
+ * lib-xref-spark_specific.adb (Add_SPARK_Xrefs): Ignore loop parameters
+ in expression funtions that are expanded into variables.
+
+2017-11-09 Piotr Trojanek <trojanek@adacore.com>
+
+ * sem_util.adb: Minor whitespace cleanup.
+
+2017-11-09 Jerome Lambourg <lambourg@adacore.com>
+
+ * libgnarl/s-taprop__qnx.adb: Refine aarch64-qnx. Use the POSIX
+ s-taprop version rather than a custom one.
+ * sigtramp-qnx.c (aarch64-qnx): Implement the signal trampoline.
+
+2017-11-08 Piotr Trojanek <trojanek@adacore.com>
+
+ * lib-xref.ads, lib-xref-spark_specific.adb
+ (Traverse_Compilation_Unit): Move declaration to package body.
+
+2017-11-08 Hristian Kirtchev <kirtchev@adacore.com>
+
+ * exp_spark.adb (Expand_SPARK_N_Object_Renaming_Declaration): Obtain
+ the type of the renaming from its defining entity, rather then the
+ subtype mark as there may not be a subtype mark.
+
+2017-11-08 Jerome Lambourg <lambourg@adacore.com>
+
+ * adaint.c, s-oscons-tmplt.c, init.c, libgnat/system-qnx-aarch64.ads,
+ libgnarl/a-intnam__qnx.ads, libgnarl/s-intman__qnx.adb,
+ libgnarl/s-osinte__qnx.ads, libgnarl/s-qnx.ads,
+ libgnarl/s-taprop__qnx.adb, s-oscons-tmplt.c, sigtramp-qnx.c,
+ terminals.c: Initial port of GNAT for aarch64-qnx
+
+2017-11-08 Elisa Barboni <barboni@adacore.com>
+
+ * exp_util.adb (Find_DIC_Type): Move...
+ * sem_util.ads, sem_util.adb (Find_DIC_Type): ... here.
+
+2017-11-08 Justin Squirek <squirek@adacore.com>
+
+ * sem_res.adb (Resolve_Allocator): Add info messages corresponding to
+ the owner and corresponding coextension.
+
+2017-11-08 Ed Schonberg <schonberg@adacore.com>
+
+ * sem_aggr.adb (Resolve_Delta_Aggregate): Divide into the
+ following separate procedures.
+ (Resolve_Delta_Array_Aggregate): Previous code form
+ Resolve_Delta_Aggregate.
+ (Resolve_Delta_Record_Aggregate): Extend previous code to cover latest
+ ARG decisions on the legality rules for delta aggregates for records:
+ in the case of a variant record, components from different variants
+ cannot be specified in the delta aggregate, and this must be checked
+ statically.
+
+2017-11-08 Piotr Trojanek <trojanek@adacore.com>
+
+ * spark_xrefs.ads (SPARK_Scope_Record): Remove File_Num component.
+ * lib-xref-spark_specific.adb (Add_SPARK_Scope): Skip initialization of
+ removed component.
+
+2017-11-08 Gary Dismukes <dismukes@adacore.com>
+
+ * sem_ch4.adb: Minor typo fix.
+
+2017-11-08 Piotr Trojanek <trojanek@adacore.com>
+
+ * spark_xrefs.ads (SPARK_Scope_Record): Remove Spec_File_Num and
+ Spec_Scope_Num components.
+ * spark_xrefs.adb (dspark): Skip pretty-printing to removed components.
+ * lib-xref-spark_specific.adb (Add_SPARK_Scope): Skip initialization of
+ removed components.
+ (Collect_SPARK_Xrefs): Skip setting proper values of removed
+ components.
+
+2017-11-08 Gary Dismukes <dismukes@adacore.com>
+
+ * exp_ch4.adb (Expand_N_Type_Conversion): Add test that the selector
+ name is a discriminant in check for unconditional accessibility
+ violation within instances.
+
+2017-11-08 Piotr Trojanek <trojanek@adacore.com>
+
+ * lib-xref-spark_specific.adb (Add_SPARK_Xrefs): Remove special-case
+ for constants (with variable input).
+ (Is_Constant_Object_Without_Variable_Input): Remove.
+
+2017-11-08 Hristian Kirtchev <kirtchev@adacore.com>
+
+ * exp_ch9.adb, sem_disp.adb, sem_util.adb: Minor reformatting.
+
+2017-11-08 Piotr Trojanek <trojanek@adacore.com>
+
+ * spark_xrefs.ads (Rtype): Remove special-casing of constants for SPARK
+ cross-references.
+ (dspark): Remove hardcoded table bound.
+
+2017-11-08 Ed Schonberg <schonberg@adacore.com>
+
+ * sem_ch4.adb (Analyze_Aggregate): For Ada2020 delta aggregates, use
+ the type of the base of the construct to determine the type (or
+ candidate interpretations) of the delta aggregate. This allows the
+ construct to appear in a context that expects a private extension.
+ * sem_res.adb (Resolve): Handle properly a delta aggregate with an
+ overloaded base.
+
+2017-11-08 Piotr Trojanek <trojanek@adacore.com>
+
+ * spark_xrefs.ads (SPARK_Xref_Record): Replace file and scope indices
+ with Entity_Id of the reference.
+ * spark_xrefs.adb (dspark): Adapt pretty-printing routine.
+ * lib-xref-spark_specific.adb (Add_SPARK_Xrefs): Store Entity_Id of the
+ reference, not the file and scope indices.
+
+2017-11-08 Arnaud Charlet <charlet@adacore.com>
+
+ * errout.ads (Current_Node): New.
+ * errout.adb (Error_Msg): Use Current_Node.
+ * par-ch6.adb, par-ch7.adb, par-ch9.adb, par-util.adb: Set Current_Node
+ when relevant.
+ * style.adb: Call Error_Msg_N when possible.
+
+2017-11-08 Piotr Trojanek <trojanek@adacore.com>
+
+ * spark_xrefs.ads (SPARK_Scope_Record): Rename Scope_Id component to
+ Entity.
+ * lib-xref-spark_specific.adb, spark_xrefs.adb: Propagate renaming of
+ the Scope_Id record component.
+
+2017-11-08 Piotr Trojanek <trojanek@adacore.com>
+
+ * spark_xrefs.ads (SPARK_File_Record): Remove string components.
+ * spark_xrefs.adb (dspark): Remove pretty-printing of removed
+ SPARK_File_Record components.
+ * lib-xref-spark_specific.adb (Add_SPARK_File): Do not store string
+ representation of files/units.
+
+2017-11-08 Piotr Trojanek <trojanek@adacore.com>
+
+ * lib-xref.ads, lib-xref-spark_specific.adb (Traverse_Declarations):
+ Remove Inside_Stubs parameter.
+
+2017-11-08 Piotr Trojanek <trojanek@adacore.com>
+
+ * spark_xrefs.ads (SPARK_Xref_Record): Referenced object is now
+ represented by Entity_Id.
+ (SPARK_Scope_Record): Referenced scope (e.g. subprogram) is now
+ represented by Entity_Id; this information is not repeated as
+ Scope_Entity.
+ (Heap): Moved from lib-xref-spark_specific.adb, to reside next to
+ Name_Of_Heap_Variable.
+ * spark_xrefs.adb (dspark): Adapt debug routine to above changes in
+ data types.
+ * lib-xref-spark_specific.adb: Adapt routines for populating SPARK
+ scope and xrefs tables to above changes in data types.
+
+2017-11-08 Justin Squirek <squirek@adacore.com>
+
+ * sem_ch8.adb (Mark_Use_Clauses): Add condition to always mark the
+ primitives of generic actuals.
+ (Mark_Use_Type): Add recursive call to properly mark class-wide type's
+ base type clauses as per ARM 8.4 (8.2/3).
+
+2017-11-08 Ed Schonberg <schonberg@adacore.com>
+
+ * sem_ch6.adb (Analyze_Generic_Subprobram_Body): Validate
+ categorization dependency of the body, as is done for non-generic
+ units.
+ (New_Overloaded_Entity, Visible_Part_Type): Remove linear search
+ through declarations (Simple optimization, no behavior change).
+
+2017-11-08 Piotr Trojanek <trojanek@adacore.com>
+
+ * spark_xrefs.ads (SPARK_Xref_Record): Remove inessential components.
+ (SPARK_Scope_Record): Remove inessential components.
+ * spark_xrefs.adb (dspark): Remove pretty-printing of removed record
+ components.
+ * lib-xref-spark_specific.adb (Add_SPARK_Scope): Remove setting of
+ removed record components.
+ (Add_SPARK_Xrefs): Remove setting of removed record components.
+
+2017-11-08 Piotr Trojanek <trojanek@adacore.com>
+
+ * lib-xref-spark_specific.adb (Add_SPARK_Xrefs): Remove dead check for
+ empty entities.
+
+2017-11-08 Javier Miranda <miranda@adacore.com>
+
+ * sem_disp.adb (Is_Inherited_Public_Operation): Extend the
+ functionality of this routine to handle multiple levels of derivations.
+
+2017-11-08 Hristian Kirtchev <kirtchev@adacore.com>
+
+ * einfo.adb: Elist36 is now used as Nested_Scenarios.
+ (Nested_Scenarios): New routine.
+ (Set_Nested_Scenarios): New routine.
+ (Write_Field36_Name): New routine.
+ * einfo.ads: Add new attribute Nested_Scenarios along with occurrences
+ in entities.
+ (Nested_Scenarios): New routine along with pragma Inline.
+ (Set_Nested_Scenarios): New routine along with pragma Inline.
+ * sem_elab.adb (Find_And_Process_Nested_Scenarios): New routine.
+ (Process_Nested_Scenarios): New routine.
+ (Traverse_Body): When a subprogram body is traversed for the first
+ time, find, save, and process all suitable scenarios found within.
+ Subsequent traversals of the same subprogram body utilize the saved
+ scenarios.
+
+2017-11-08 Piotr Trojanek <trojanek@adacore.com>
+
+ * lib-xref-spark_specific.adb (Add_SPARK_Scope): Remove detection of
+ protected operations.
+ (Add_SPARK_Xrefs): Simplify detection of empty entities.
+ * get_spark_xrefs.ads, get_spark_xrefs.adb, put_spark_xrefs.ads,
+ put_spark_xrefs.adb, spark_xrefs_test.adb: Remove code for writing,
+ reading and testing SPARK cross-references stored in the ALI files.
+ * lib-xref.ads (Output_SPARK_Xrefs): Remove.
+ * lib-writ.adb (Write_ALI): Do not write SPARK cross-references to the
+ ALI file.
+ * spark_xrefs.ads, spark_xrefs.adb (pspark): Remove, together
+ with description of the SPARK xrefs ALI format.
+ * gcc-interface/Make-lang.in (GNAT_ADA_OBJS): Remove get_spark_refs.o
+ and put_spark_refs.o.
+
+2017-11-08 Hristian Kirtchev <kirtchev@adacore.com>
+
+ * exp_ch4.adb (Apply_Accessibility_Check): Do not finalize the object
+ when the associated access type is subject to pragma
+ No_Heap_Finalization.
+ * exp_intr.adb (Expand_Unc_Deallocation): Use the available view of the
+ designated type in case it comes from a limited withed unit.
+
+2017-11-08 Hristian Kirtchev <kirtchev@adacore.com>
+
+ * exp_ch3.adb (Expand_N_Object_Declaration): Save and restore relevant
+ SPARK-related flags. Add ??? comment.
+ * exp_util.adb (Insert_Actions): Add an entry for node
+ N_Variable_Reference_Marker.
+ * sem.adb (Analyze): Add an entry for node N_Variable_Reference_Marker.
+ * sem_ch8.adb (Find_Direct_Name): Add constant Is_Assignment_LHS. Build
+ and record a variable reference marker for the current name.
+ (Find_Expanded_Name): Add constant Is_Assignment_LHS. Build and record
+ a variable reference marker for the current name.
+ * sem_elab.adb (Build_Variable_Reference_Marker): New routine.
+ (Extract_Variable_Reference_Attributes): Reimplemented.
+ (Info_Scenario): Add output for variable references and remove output
+ for variable reads.
+ (Info_Variable_Read): Removed.
+ (Info_Variable_Reference): New routine.
+ (Is_Suitable_Scenario): Variable references are now suitable scenarios
+ while variable reads are not.
+ (Output_Active_Scenarios): Add output for variable references and
+ remove output for variable reads.
+ (Output_Variable_Read): Removed.
+ (Output_Variable_Reference): New routine.
+ (Process_Variable_Read): Removed.
+ (Process_Variable_Reference): New routine.
+ (Process_Variable_Reference_Read): New routine.
+ * sem_elab.ads (Build_Variable_Reference_Marker): New routine.
+ * sem_res.adb (Resolve_Actuals): Build and record a variable reference
+ marker for the current actual.
+ * sem_spark.adb (Check_Node): Add an entry for node
+ N_Variable_Reference_Marker.
+ * sem_util.adb (Within_Subprogram_Call): Moved to the library level.
+ * sem_util.ads (Within_Subprogram_Call): Moved to the library level.
+ * sinfo.adb (Is_Read): New routine.
+ (Is_Write): New routine.
+ (Target): Updated to handle variable reference markers.
+ (Set_Is_Read): New routine.
+ (Set_Is_Write): New routine.
+ (Set_Target): Updated to handle variable reference markers.
+ * sinfo.ads: Add new attributes Is_Read and Is_Write along with
+ occurrences in nodes. Update attribute Target. Add new node
+ kind N_Variable_Reference_Marker.
+ (Is_Read): New routine along with pragma Inline.
+ (Is_Write): New routine along with pragma Inline.
+ (Set_Is_Read): New routine along with pragma Inline.
+ (Set_Is_Write): New routine along with pragma Inline.
+ * sprint.adb (Sprint_Node_Actual): Add an entry for node
+ N_Variable_Reference_Marker.
+
+2017-11-08 Arnaud Charlet <charlet@adacore.com>
+
+ * sem_util.adb (Subprogram_Name): Append suffix for overloaded
+ subprograms.
+
+2017-11-08 Yannick Moy <moy@adacore.com>
+
+ * sem_ch8.adb (Use_One_Type, Update_Use_Clause_Chain): Do not report
+ about unused use-type or use-package clauses inside inlined bodies.
+
+2017-11-08 Hristian Kirtchev <kirtchev@adacore.com>
+
+ * sem_elab.adb (Ensure_Prior_Elaboration): Add new parameter
+ In_Partial_Fin along with a comment on its usage. Do not guarantee the
+ prior elaboration of a unit when the need came from a partial
+ finalization context.
+ (In_Initialization_Context): Relocated to Process_Call.
+ (Is_Partial_Finalization_Proc): New routine.
+ (Process_Access): Add new parameter In_Partial_Fin along with a comment
+ on its usage.
+ (Process_Activation_Call): Add new parameter In_Partial_Fin along with
+ a comment on its usage.
+ (Process_Activation_Conditional_ABE_Impl): Add new parameter
+ In_Partial_Fin along with a comment on its usage. Do not emit any ABE
+ diagnostics when the activation occurs in a partial finalization
+ context.
+ (Process_Activation_Guaranteed_ABE_Impl): Add new parameter
+ In_Partial_Fin along with a comment on its usage.
+ (Process_Call): Add new parameter In_Partial_Fin along with a comment
+ on its usage. A call is within a partial finalization context when it
+ targets a finalizer or primitive [Deep_]Finalize, and the call appears
+ in initialization actions. Pass this information down to the recursive
+ steps of the Processing phase.
+ (Process_Call_Ada): Add new parameter In_Partial_Fin along with a
+ comment on its usage. Remove the guard which suppresses the generation
+ of implicit Elaborate[_All] pragmas. This is now done in
+ Ensure_Prior_Elaboration.
+ (Process_Call_Conditional_ABE): Add new parameter In_Partial_Fin along
+ with a comment on its usage. Do not emit any ABE diagnostics when the
+ call occurs in a partial finalization context.
+ (Process_Call_SPARK): Add new parameter In_Partial_Fin along with a
+ comment on its usage.
+ (Process_Instantiation): Add new parameter In_Partial_Fin along with a
+ comment on its usage.
+ (Process_Instantiation_Ada): Add new parameter In_Partial_Fin along
+ with a comment on its usage.
+ (Process_Instantiation_Conditional_ABE): Add new parameter
+ In_Partial_Fin along with a comment on its usage. Do not emit any ABE
+ diagnostics when the instantiation occurs in a partial finalization
+ context.
+ (Process_Instantiation_SPARK): Add new parameter In_Partial_Fin along
+ with a comment on its usage.
+ (Process_Scenario): Add new parameter In_Partial_Fin along with a
+ comment on its usage.
+ (Process_Single_Activation): Add new parameter In_Partial_Fin along
+ with a comment on its usage.
+ (Traverse_Body): Add new parameter In_Partial_Fin along with a comment
+ on its usage.
+
+2017-11-08 Arnaud Charlet <charlet@adacore.com>
+
+ * sem_ch13.adb: Add optional parameter to Error_Msg.
+
+2017-11-08 Jerome Lambourg <lambourg@adacore.com>
+
+ * fname.adb (Is_Internal_File_Name): Do not check the 8+3 naming schema
+ for the Interfaces.* hierarchy as longer unit names are now allowed.
+
+2017-11-08 Arnaud Charlet <charlet@adacore.com>
+
+ * sem_util.adb (Subprogram_Name): Emit sloc for the enclosing
+ subprogram as well. Support more cases of entities.
+ (Append_Entity_Name): Add some defensive code.
+
+2017-11-06 Eric Botcazou <ebotcazou@adacore.com>
+
+ * gcc-interface/misc.c (gnat_post_options): Clear warn_return_type.
+
2017-10-31 Eric Botcazou <ebotcazou@adacore.com>
PR ada/82785
diff --git a/gcc/ada/adadecode.c b/gcc/ada/adadecode.c
index 8c9c7ab7a88..0cbef8123f9 100644
--- a/gcc/ada/adadecode.c
+++ b/gcc/ada/adadecode.c
@@ -330,7 +330,7 @@ __gnat_decode (const char *coded_name, char *ada_name, int verbose)
}
/* Write symbol in the space. */
- strncpy (optoken, trans_table[k][1], oplen);
+ memcpy (optoken, trans_table[k][1], oplen);
}
else
k++;
diff --git a/gcc/ada/adaint.c b/gcc/ada/adaint.c
index 10325b0f1d0..cb0f4bb93b0 100644
--- a/gcc/ada/adaint.c
+++ b/gcc/ada/adaint.c
@@ -1012,7 +1012,7 @@ __gnat_open_new_temp (char *path, int fmode)
#if (defined (__FreeBSD__) || defined (__NetBSD__) || defined (__OpenBSD__) \
|| defined (__linux__) || defined (__GLIBC__) || defined (__ANDROID__) \
- || defined (__DragonFly__)) && !defined (__vxworks)
+ || defined (__DragonFly__) || defined (__QNX__)) && !defined (__vxworks)
return mkstemp (path);
#elif defined (__Lynx__)
mktemp (path);
@@ -1185,7 +1185,7 @@ __gnat_tmp_name (char *tmp_filename)
#elif defined (__linux__) || defined (__FreeBSD__) || defined (__NetBSD__) \
|| defined (__OpenBSD__) || defined (__GLIBC__) || defined (__ANDROID__) \
- || defined (__DragonFly__)
+ || defined (__DragonFly__) || defined (__QNX__)
#define MAX_SAFE_PATH 1000
char *tmpdir = getenv ("TMPDIR");
diff --git a/gcc/ada/argv.c b/gcc/ada/argv.c
index 430404e3aa4..aee0f886443 100644
--- a/gcc/ada/argv.c
+++ b/gcc/ada/argv.c
@@ -92,7 +92,7 @@ void
__gnat_fill_arg (char *a, int i)
{
if (gnat_argv != NULL)
- strncpy (a, gnat_argv[i], strlen(gnat_argv[i]));
+ memcpy (a, gnat_argv[i], strlen (gnat_argv[i]));
}
int
@@ -118,7 +118,7 @@ void
__gnat_fill_env (char *a, int i)
{
if (gnat_envp != NULL)
- strncpy (a, gnat_envp[i], strlen (gnat_envp[i]));
+ memcpy (a, gnat_envp[i], strlen (gnat_envp[i]));
}
#ifdef __cplusplus
diff --git a/gcc/ada/binde.adb b/gcc/ada/binde.adb
index dd076be3acf..5a78bc82499 100644
--- a/gcc/ada/binde.adb
+++ b/gcc/ada/binde.adb
@@ -353,6 +353,7 @@ package body Binde is
procedure Diagnose_Elaboration_Problem
(Elab_Order : in out Unit_Id_Table);
+ pragma No_Return (Diagnose_Elaboration_Problem);
-- Called when no elaboration order can be found. Outputs an appropriate
-- diagnosis of the problem, and then abandons the bind.
diff --git a/gcc/ada/checks.adb b/gcc/ada/checks.adb
index b2c26ca4981..c4b37e788ab 100644
--- a/gcc/ada/checks.adb
+++ b/gcc/ada/checks.adb
@@ -2765,7 +2765,6 @@ package body Checks is
S_Typ : Entity_Id;
Arr : Node_Id := Empty; -- initialize to prevent warning
Arr_Typ : Entity_Id := Empty; -- initialize to prevent warning
- OK : Boolean := False; -- initialize to prevent warning
Is_Subscr_Ref : Boolean;
-- Set true if Expr is a subscript
@@ -2995,10 +2994,11 @@ package body Checks is
and then Compile_Time_Known_Value (Thi)
then
declare
+ OK : Boolean := False; -- initialize to prevent warning
Hiv : constant Uint := Expr_Value (Thi);
Lov : constant Uint := Expr_Value (Tlo);
- Hi : Uint;
- Lo : Uint;
+ Hi : Uint := No_Uint;
+ Lo : Uint := No_Uint;
begin
-- If range is null, we for sure have a constraint error (we
@@ -4370,8 +4370,8 @@ package body Checks is
Hi_Left : Uint;
-- Lo and Hi bounds of left operand
- Lo_Right : Uint;
- Hi_Right : Uint;
+ Lo_Right : Uint := No_Uint;
+ Hi_Right : Uint := No_Uint;
-- Lo and Hi bounds of right (or only) operand
Bound : Node_Id;
@@ -4909,8 +4909,8 @@ package body Checks is
Hi_Left : Ureal;
-- Lo and Hi bounds of left operand
- Lo_Right : Ureal;
- Hi_Right : Ureal;
+ Lo_Right : Ureal := No_Ureal;
+ Hi_Right : Ureal := No_Ureal;
-- Lo and Hi bounds of right (or only) operand
Bound : Node_Id;
@@ -9814,7 +9814,7 @@ package body Checks is
Do_Access : Boolean := False;
Wnode : Node_Id := Warn_Node;
Ret_Result : Check_Result := (Empty, Empty);
- Num_Checks : Integer := 0;
+ Num_Checks : Natural := 0;
procedure Add_Check (N : Node_Id);
-- Adds the action given to Ret_Result if N is non-Empty
diff --git a/gcc/ada/contracts.adb b/gcc/ada/contracts.adb
index 8a35b82f55e..1bd13bd91d3 100644
--- a/gcc/ada/contracts.adb
+++ b/gcc/ada/contracts.adb
@@ -53,16 +53,6 @@ with Tbuild; use Tbuild;
package body Contracts is
- procedure Analyze_Contracts
- (L : List_Id;
- Freeze_Nod : Node_Id;
- Freeze_Id : Entity_Id);
- -- Subsidiary to the one parameter version of Analyze_Contracts and routine
- -- Analyze_Previous_Constracts. Analyze the contracts of all constructs in
- -- the list L. If Freeze_Nod is set, then the analysis stops when the node
- -- is reached. Freeze_Id is the entity of some related context which caused
- -- freezing up to node Freeze_Nod.
-
procedure Build_And_Analyze_Contract_Only_Subprograms (L : List_Id);
-- (CodePeer): Subsidiary procedure to Analyze_Contracts which builds the
-- contract-only subprogram body of eligible subprograms found in L, adds
@@ -351,32 +341,16 @@ package body Contracts is
-----------------------
procedure Analyze_Contracts (L : List_Id) is
+ Decl : Node_Id;
+
begin
if CodePeer_Mode and then Debug_Flag_Dot_KK then
Build_And_Analyze_Contract_Only_Subprograms (L);
end if;
- Analyze_Contracts (L, Freeze_Nod => Empty, Freeze_Id => Empty);
- end Analyze_Contracts;
-
- procedure Analyze_Contracts
- (L : List_Id;
- Freeze_Nod : Node_Id;
- Freeze_Id : Entity_Id)
- is
- Decl : Node_Id;
-
- begin
Decl := First (L);
while Present (Decl) loop
- -- The caller requests that the traversal stops at a particular node
- -- that causes contract "freezing".
-
- if Present (Freeze_Nod) and then Decl = Freeze_Nod then
- exit;
- end if;
-
-- Entry or subprogram declarations
if Nkind_In (Decl, N_Abstract_Subprogram_Declaration,
@@ -388,7 +362,7 @@ package body Contracts is
Subp_Id : constant Entity_Id := Defining_Entity (Decl);
begin
- Analyze_Entry_Or_Subprogram_Contract (Subp_Id, Freeze_Id);
+ Analyze_Entry_Or_Subprogram_Contract (Subp_Id);
-- If analysis of a class-wide pre/postcondition indicates
-- that a class-wide clone is needed, analyze its declaration
@@ -410,9 +384,7 @@ package body Contracts is
-- Objects
elsif Nkind (Decl) = N_Object_Declaration then
- Analyze_Object_Contract
- (Obj_Id => Defining_Entity (Decl),
- Freeze_Id => Freeze_Id);
+ Analyze_Object_Contract (Defining_Entity (Decl));
-- Protected units
@@ -433,8 +405,9 @@ package body Contracts is
then
Analyze_Task_Contract (Defining_Entity (Decl));
- -- For type declarations, we need to do the pre-analysis of
- -- Iterable aspect specifications.
+ -- For type declarations, we need to do the pre-analysis of Iterable
+ -- aspect specifications.
+
-- Other type aspects need to be resolved here???
elsif Nkind (Decl) = N_Private_Type_Declaration
@@ -443,6 +416,7 @@ package body Contracts is
declare
E : constant Entity_Id := Defining_Identifier (Decl);
It : constant Node_Id := Find_Aspect (E, Aspect_Iterable);
+
begin
if Present (It) then
Validate_Iterable_Aspect (E, It);
@@ -1127,76 +1101,6 @@ package body Contracts is
end Analyze_Package_Contract;
--------------------------------
- -- Analyze_Previous_Contracts --
- --------------------------------
-
- procedure Analyze_Previous_Contracts (Body_Decl : Node_Id) is
- Body_Id : constant Entity_Id := Defining_Entity (Body_Decl);
- Orig_Decl : constant Node_Id := Original_Node (Body_Decl);
-
- Par : Node_Id;
-
- begin
- -- A body that is in the process of being inlined appears from source,
- -- but carries name _parent. Such a body does not cause "freezing" of
- -- contracts.
-
- if Chars (Body_Id) = Name_uParent then
- return;
- end if;
-
- -- Climb the parent chain looking for an enclosing package body. Do not
- -- use the scope stack, as a body uses the entity of its corresponding
- -- spec.
-
- Par := Parent (Body_Decl);
- while Present (Par) loop
- if Nkind (Par) = N_Package_Body then
- Analyze_Package_Body_Contract
- (Body_Id => Defining_Entity (Par),
- Freeze_Id => Defining_Entity (Body_Decl));
-
- exit;
-
- -- Do not look for an enclosing package body when the construct which
- -- causes freezing is a body generated for an expression function and
- -- it appears within a package spec. This ensures that the traversal
- -- will not reach too far up the parent chain and attempt to freeze a
- -- package body which should not be frozen.
-
- -- package body Enclosing_Body
- -- with Refined_State => (State => Var)
- -- is
- -- package Nested is
- -- type Some_Type is ...;
- -- function Cause_Freezing return ...;
- -- private
- -- function Cause_Freezing is (...);
- -- end Nested;
- --
- -- Var : Nested.Some_Type;
-
- elsif Nkind (Par) = N_Package_Declaration
- and then Nkind (Orig_Decl) = N_Expression_Function
- then
- exit;
- end if;
-
- Par := Parent (Par);
- end loop;
-
- -- Analyze the contracts of all eligible construct up to the body which
- -- caused the "freezing".
-
- if Is_List_Member (Body_Decl) then
- Analyze_Contracts
- (L => List_Containing (Body_Decl),
- Freeze_Nod => Body_Decl,
- Freeze_Id => Body_Id);
- end if;
- end Analyze_Previous_Contracts;
-
- --------------------------------
-- Analyze_Protected_Contract --
--------------------------------
@@ -2393,6 +2297,11 @@ package body Contracts is
end if;
end Process_Contract_Cases_For;
+ pragma Unmodified (Stmts);
+ -- Stmts is passed as IN OUT to signal that the list can be updated,
+ -- even if the corresponding integer value representing the list does
+ -- not change.
+
-- Start of processing for Process_Contract_Cases
begin
@@ -2535,6 +2444,11 @@ package body Contracts is
end loop;
end Process_Spec_Postconditions;
+ pragma Unmodified (Stmts);
+ -- Stmts is passed as IN OUT to signal that the list can be updated,
+ -- even if the corresponding integer value representing the list does
+ -- not change.
+
-- Start of processing for Process_Postconditions
begin
@@ -3087,6 +3001,187 @@ package body Contracts is
end if;
end Expand_Subprogram_Contract;
+ -------------------------------
+ -- Freeze_Previous_Contracts --
+ -------------------------------
+
+ procedure Freeze_Previous_Contracts (Body_Decl : Node_Id) is
+ function Causes_Contract_Freezing (N : Node_Id) return Boolean;
+ pragma Inline (Causes_Contract_Freezing);
+ -- Determine whether arbitrary node N causes contract freezing
+
+ procedure Freeze_Contracts;
+ pragma Inline (Freeze_Contracts);
+ -- Freeze the contracts of all eligible constructs which precede body
+ -- Body_Decl.
+
+ procedure Freeze_Enclosing_Package_Body;
+ pragma Inline (Freeze_Enclosing_Package_Body);
+ -- Freeze the contract of the nearest package body (if any) which
+ -- encloses body Body_Decl.
+
+ ------------------------------
+ -- Causes_Contract_Freezing --
+ ------------------------------
+
+ function Causes_Contract_Freezing (N : Node_Id) return Boolean is
+ begin
+ return Nkind_In (N, N_Entry_Body,
+ N_Package_Body,
+ N_Protected_Body,
+ N_Subprogram_Body,
+ N_Subprogram_Body_Stub,
+ N_Task_Body);
+ end Causes_Contract_Freezing;
+
+ ----------------------
+ -- Freeze_Contracts --
+ ----------------------
+
+ procedure Freeze_Contracts is
+ Body_Id : constant Entity_Id := Defining_Entity (Body_Decl);
+ Decl : Node_Id;
+
+ begin
+ -- Nothing to do when the body which causes freezing does not appear
+ -- in a declarative list because there cannot possibly be constructs
+ -- with contracts.
+
+ if not Is_List_Member (Body_Decl) then
+ return;
+ end if;
+
+ -- Inspect the declarations preceding the body, and freeze individual
+ -- contracts of eligible constructs.
+
+ Decl := Prev (Body_Decl);
+ while Present (Decl) loop
+
+ -- Stop the traversal when a preceding construct that causes
+ -- freezing is encountered as there is no point in refreezing
+ -- the already frozen constructs.
+
+ if Causes_Contract_Freezing (Decl) then
+ exit;
+
+ -- Entry or subprogram declarations
+
+ elsif Nkind_In (Decl, N_Abstract_Subprogram_Declaration,
+ N_Entry_Declaration,
+ N_Generic_Subprogram_Declaration,
+ N_Subprogram_Declaration)
+ then
+ Analyze_Entry_Or_Subprogram_Contract
+ (Subp_Id => Defining_Entity (Decl),
+ Freeze_Id => Body_Id);
+
+ -- Objects
+
+ elsif Nkind (Decl) = N_Object_Declaration then
+ Analyze_Object_Contract
+ (Obj_Id => Defining_Entity (Decl),
+ Freeze_Id => Body_Id);
+
+ -- Protected units
+
+ elsif Nkind_In (Decl, N_Protected_Type_Declaration,
+ N_Single_Protected_Declaration)
+ then
+ Analyze_Protected_Contract (Defining_Entity (Decl));
+
+ -- Subprogram body stubs
+
+ elsif Nkind (Decl) = N_Subprogram_Body_Stub then
+ Analyze_Subprogram_Body_Stub_Contract (Defining_Entity (Decl));
+
+ -- Task units
+
+ elsif Nkind_In (Decl, N_Single_Task_Declaration,
+ N_Task_Type_Declaration)
+ then
+ Analyze_Task_Contract (Defining_Entity (Decl));
+ end if;
+
+ Prev (Decl);
+ end loop;
+ end Freeze_Contracts;
+
+ -----------------------------------
+ -- Freeze_Enclosing_Package_Body --
+ -----------------------------------
+
+ procedure Freeze_Enclosing_Package_Body is
+ Orig_Decl : constant Node_Id := Original_Node (Body_Decl);
+ Par : Node_Id;
+
+ begin
+ -- Climb the parent chain looking for an enclosing package body. Do
+ -- not use the scope stack, because a body utilizes the entity of its
+ -- corresponding spec.
+
+ Par := Parent (Body_Decl);
+ while Present (Par) loop
+ if Nkind (Par) = N_Package_Body then
+ Analyze_Package_Body_Contract
+ (Body_Id => Defining_Entity (Par),
+ Freeze_Id => Defining_Entity (Body_Decl));
+
+ exit;
+
+ -- Do not look for an enclosing package body when the construct
+ -- which causes freezing is a body generated for an expression
+ -- function and it appears within a package spec. This ensures
+ -- that the traversal will not reach too far up the parent chain
+ -- and attempt to freeze a package body which must not be frozen.
+
+ -- package body Enclosing_Body
+ -- with Refined_State => (State => Var)
+ -- is
+ -- package Nested is
+ -- type Some_Type is ...;
+ -- function Cause_Freezing return ...;
+ -- private
+ -- function Cause_Freezing is (...);
+ -- end Nested;
+ --
+ -- Var : Nested.Some_Type;
+
+ elsif Nkind (Par) = N_Package_Declaration
+ and then Nkind (Orig_Decl) = N_Expression_Function
+ then
+ exit;
+
+ -- Prevent the search from going too far
+
+ elsif Is_Body_Or_Package_Declaration (Par) then
+ exit;
+ end if;
+
+ Par := Parent (Par);
+ end loop;
+ end Freeze_Enclosing_Package_Body;
+
+ -- Local variables
+
+ Body_Id : constant Entity_Id := Defining_Entity (Body_Decl);
+
+ -- Start of processing for Freeze_Previous_Contracts
+
+ begin
+ pragma Assert (Causes_Contract_Freezing (Body_Decl));
+
+ -- A body that is in the process of being inlined appears from source,
+ -- but carries name _parent. Such a body does not cause freezing of
+ -- contracts.
+
+ if Chars (Body_Id) = Name_uParent then
+ return;
+ end if;
+
+ Freeze_Enclosing_Package_Body;
+ Freeze_Contracts;
+ end Freeze_Previous_Contracts;
+
---------------------------------
-- Inherit_Subprogram_Contract --
---------------------------------
diff --git a/gcc/ada/contracts.ads b/gcc/ada/contracts.ads
index d40200e183d..3d700cc9dd3 100644
--- a/gcc/ada/contracts.ads
+++ b/gcc/ada/contracts.ads
@@ -6,7 +6,7 @@
-- --
-- S p e c --
-- --
--- Copyright (C) 2015-2016, Free Software Foundation, Inc. --
+-- Copyright (C) 2015-2017, Free Software Foundation, Inc. --
-- --
-- GNAT is free software; you can redistribute it and/or modify it under --
-- terms of the GNU General Public License as published by the Free Soft- --
@@ -128,12 +128,6 @@ package Contracts is
-- Initializes
-- Part_Of
- procedure Analyze_Previous_Contracts (Body_Decl : Node_Id);
- -- Analyze the contracts of all source constructs found in the declarative
- -- list which contains entry, package, protected, subprogram, or task body
- -- denoted by Body_Decl. The analysis stops once Body_Decl is reached. In
- -- addition, analyze the contract of the nearest enclosing package body.
-
procedure Analyze_Protected_Contract (Prot_Id : Entity_Id);
-- Analyze all delayed pragmas chained on the contract of protected unit
-- Prot_Id if they appeared at the end of a declarative region. Currently
@@ -165,6 +159,12 @@ package Contracts is
-- generic body denoted by Unit by collecting all source contract-related
-- pragmas in the contract of the unit.
+ procedure Freeze_Previous_Contracts (Body_Decl : Node_Id);
+ -- Freeze the contracts of all source constructs found in the declarative
+ -- list which contains entry, package, protected, subprogram, or task body
+ -- denoted by Body_Decl. In addition, freeze the contract of the nearest
+ -- enclosing package body.
+
procedure Inherit_Subprogram_Contract
(Subp : Entity_Id;
From_Subp : Entity_Id);
diff --git a/gcc/ada/doc/gnat_rm/standard_and_implementation_defined_restrictions.rst b/gcc/ada/doc/gnat_rm/standard_and_implementation_defined_restrictions.rst
index 7b647682314..1f56403f81a 100644
--- a/gcc/ada/doc/gnat_rm/standard_and_implementation_defined_restrictions.rst
+++ b/gcc/ada/doc/gnat_rm/standard_and_implementation_defined_restrictions.rst
@@ -988,6 +988,13 @@ appear, and that no wide or wide wide string or character literals
appear in the program (that is literals representing characters not in
type ``Character``).
+Static_Dispatch_Tables
+----------------------
+.. index:: Static_Dispatch_Tables
+
+[GNAT] This restriction checks at compile time that all the artifacts
+associated with dispatch tables can be placed in read-only memory.
+
SPARK_05
--------
.. index:: SPARK_05
diff --git a/gcc/ada/doc/gnat_ugn/platform_specific_information.rst b/gcc/ada/doc/gnat_ugn/platform_specific_information.rst
index 9b54803aeec..bbf790124cc 100644
--- a/gcc/ada/doc/gnat_ugn/platform_specific_information.rst
+++ b/gcc/ada/doc/gnat_ugn/platform_specific_information.rst
@@ -219,6 +219,36 @@ this in a library package body in your application:
It gets the effective user id, and if it's not 0 (i.e. root), it raises
Program_Error.
+.. index:: Linux
+.. index:: GNU/Linux
+
+.. _GNU_Linux_Topics:
+
+GNU/Linux Topics
+================
+
+This section describes topics that are specific to GNU/Linux platforms.
+
+.. _Required_packages_on_GNU_Linux:
+
+Required Packages on GNU/Linux:
+-------------------------------
+
+GNAT requires the C library developer's package to be installed.
+The name of of that package depends on your GNU/Linux distribution:
+
+* RedHat, SUSE: ``glibc-devel``;
+* Debian, Ubuntu: ``libc6-dev`` (normally installed by default).
+
+If using the 32-bit version of GNAT on a 64-bit version of GNU/Linux,
+you'll need the 32-bit version of that package instead:
+
+* RedHat, SUSE: ``glibc-devel.i686``;
+* Debian, Ubuntu: ``libc6-dev:i386``.
+
+Other GNU/Linux distributions might be choosing a different name
+for that package.
+
.. index:: Windows
.. _Microsoft_Windows_Topics:
diff --git a/gcc/ada/doc/gnat_ugn/the_gnat_compilation_model.rst b/gcc/ada/doc/gnat_ugn/the_gnat_compilation_model.rst
index 248bf8ef97f..48fedfea3a4 100644
--- a/gcc/ada/doc/gnat_ugn/the_gnat_compilation_model.rst
+++ b/gcc/ada/doc/gnat_ugn/the_gnat_compilation_model.rst
@@ -4356,7 +4356,7 @@ how to import these C++ declarations from the Ada side:
type Dog is new Animal and Carnivore and Domestic with record
Tooth_Count : Natural;
- Owner : String (1 .. 30);
+ Owner : Chars_Ptr;
end record;
pragma Import (C_Plus_Plus, Dog);
diff --git a/gcc/ada/einfo.adb b/gcc/ada/einfo.adb
index 01d64f3aff5..94e326184eb 100644
--- a/gcc/ada/einfo.adb
+++ b/gcc/ada/einfo.adb
@@ -273,6 +273,7 @@ package body Einfo is
-- Entry_Max_Queue_Lengths_Array Node35
-- Import_Pragma Node35
+ -- Nested_Scenarios Elist36
-- Validated_Object Node36
-- Class_Wide_Clone Node38
@@ -2867,6 +2868,14 @@ package body Einfo is
return Flag22 (Id);
end Needs_No_Actuals;
+ function Nested_Scenarios (Id : E) return L is
+ begin
+ pragma Assert (Ekind_In (Id, E_Function,
+ E_Procedure,
+ E_Subprogram_Body));
+ return Elist36 (Id);
+ end Nested_Scenarios;
+
function Never_Set_In_Source (Id : E) return B is
begin
return Flag115 (Id);
@@ -6071,6 +6080,14 @@ package body Einfo is
Set_Flag22 (Id, V);
end Set_Needs_No_Actuals;
+ procedure Set_Nested_Scenarios (Id : E; V : L) is
+ begin
+ pragma Assert (Ekind_In (Id, E_Function,
+ E_Procedure,
+ E_Subprogram_Body));
+ Set_Elist36 (Id, V);
+ end Set_Nested_Scenarios;
+
procedure Set_Never_Set_In_Source (Id : E; V : B := True) is
begin
Set_Flag115 (Id, V);
@@ -11118,6 +11135,12 @@ package body Einfo is
procedure Write_Field36_Name (Id : Entity_Id) is
begin
case Ekind (Id) is
+ when E_Function
+ | E_Procedure
+ | E_Subprogram_Body
+ =>
+ Write_Str ("Nested_Scenarios");
+
when E_Variable =>
Write_Str ("Validated_Object");
diff --git a/gcc/ada/einfo.ads b/gcc/ada/einfo.ads
index bfe14fcae7c..7bcf3f9298d 100644
--- a/gcc/ada/einfo.ads
+++ b/gcc/ada/einfo.ads
@@ -3531,6 +3531,14 @@ package Einfo is
-- interpreted as an indexing of the result of the call. It is also
-- used to resolve various cases of entry calls.
+-- Nested_Scenarios (Elist36)
+-- Present in [stand alone] subprogram bodies. The list contains all
+-- nested scenarios (see the terminology in Sem_Elab) which appear within
+-- the declarations, statements, and exception handlers of the subprogram
+-- body. The list improves the performance of the ABE Processing phase by
+-- avoiding a full tree traversal when the same subprogram body is part
+-- of several distinct paths in the elaboration graph.
+
-- Never_Set_In_Source (Flag115)
-- Defined in all entities, but can be set only for variables and
-- parameters. This flag is set if the object is never assigned a value
@@ -6076,6 +6084,7 @@ package Einfo is
-- Linker_Section_Pragma (Node33)
-- Contract (Node34)
-- Import_Pragma (Node35) (non-generic case only)
+ -- Nested_Scenarios (Elist36)
-- Class_Wide_Clone (Node38)
-- Protected_Subprogram (Node39) (non-generic case only)
-- SPARK_Pragma (Node40)
@@ -6398,6 +6407,7 @@ package Einfo is
-- Linker_Section_Pragma (Node33)
-- Contract (Node34)
-- Import_Pragma (Node35) (non-generic case only)
+ -- Nested_Scenarios (Elist36)
-- Class_Wide_Clone (Node38)
-- Protected_Subprogram (Node39) (non-generic case only)
-- SPARK_Pragma (Node40)
@@ -6592,6 +6602,7 @@ package Einfo is
-- Extra_Formals (Node28)
-- Anonymous_Masters (Elist29)
-- Contract (Node34)
+ -- Nested_Scenarios (Elist36)
-- SPARK_Pragma (Node40)
-- Contains_Ignored_Ghost_Code (Flag279)
-- SPARK_Pragma_Inherited (Flag265)
@@ -7308,6 +7319,7 @@ package Einfo is
function Must_Have_Preelab_Init (Id : E) return B;
function Needs_Debug_Info (Id : E) return B;
function Needs_No_Actuals (Id : E) return B;
+ function Nested_Scenarios (Id : E) return L;
function Never_Set_In_Source (Id : E) return B;
function Next_Inlined_Subprogram (Id : E) return E;
function No_Dynamic_Predicate_On_Actual (Id : E) return B;
@@ -8005,6 +8017,7 @@ package Einfo is
procedure Set_Must_Have_Preelab_Init (Id : E; V : B := True);
procedure Set_Needs_Debug_Info (Id : E; V : B := True);
procedure Set_Needs_No_Actuals (Id : E; V : B := True);
+ procedure Set_Nested_Scenarios (Id : E; V : L);
procedure Set_Never_Set_In_Source (Id : E; V : B := True);
procedure Set_Next_Inlined_Subprogram (Id : E; V : E);
procedure Set_No_Dynamic_Predicate_On_Actual (Id : E; V : B := True);
@@ -8857,6 +8870,7 @@ package Einfo is
pragma Inline (Must_Have_Preelab_Init);
pragma Inline (Needs_Debug_Info);
pragma Inline (Needs_No_Actuals);
+ pragma Inline (Nested_Scenarios);
pragma Inline (Never_Set_In_Source);
pragma Inline (Next_Index);
pragma Inline (Next_Inlined_Subprogram);
@@ -9343,6 +9357,7 @@ package Einfo is
pragma Inline (Set_Must_Have_Preelab_Init);
pragma Inline (Set_Needs_Debug_Info);
pragma Inline (Set_Needs_No_Actuals);
+ pragma Inline (Set_Nested_Scenarios);
pragma Inline (Set_Never_Set_In_Source);
pragma Inline (Set_Next_Inlined_Subprogram);
pragma Inline (Set_No_Dynamic_Predicate_On_Actual);
diff --git a/gcc/ada/errout.adb b/gcc/ada/errout.adb
index a402c684101..2b9664daac3 100644
--- a/gcc/ada/errout.adb
+++ b/gcc/ada/errout.adb
@@ -307,7 +307,7 @@ package body Errout is
procedure Error_Msg (Msg : String; Flag_Location : Source_Ptr) is
begin
- Error_Msg (Msg, Flag_Location, Empty);
+ Error_Msg (Msg, Flag_Location, Current_Node);
end Error_Msg;
procedure Error_Msg
diff --git a/gcc/ada/errout.ads b/gcc/ada/errout.ads
index e9c4eb47f7f..d3de0ad9ff3 100644
--- a/gcc/ada/errout.ads
+++ b/gcc/ada/errout.ads
@@ -68,6 +68,10 @@ package Errout is
-- error message tag. The -gnatw.d switch sets this flag True, -gnatw.D
-- sets this flag False.
+ Current_Node : Node_Id := Empty;
+ -- Used by Error_Msg as a default Node_Id.
+ -- Relevant only when Opt.Include_Subprogram_In_Messages is set.
+
-----------------------------------
-- Suppression of Error Messages --
-----------------------------------
diff --git a/gcc/ada/exp_aggr.adb b/gcc/ada/exp_aggr.adb
index 86621a4a06a..919f46fde00 100644
--- a/gcc/ada/exp_aggr.adb
+++ b/gcc/ada/exp_aggr.adb
@@ -428,7 +428,7 @@ package body Exp_Aggr is
-- Start of processing for Aggr_Size_OK
begin
- -- The normal aggregate limit is 50000, but we increase this limit to
+ -- The normal aggregate limit is 500000, but we increase this limit to
-- 2**24 (about 16 million) if Restrictions (No_Elaboration_Code) or
-- Restrictions (No_Implicit_Loops) is specified, since in either case
-- we are at risk of declaring the program illegal because of this
@@ -448,7 +448,7 @@ package body Exp_Aggr is
-- Finally, we use a small limit in CodePeer mode where we favor loops
-- instead of thousands of single assignments (from large aggregates).
- Max_Aggr_Size := 50000;
+ Max_Aggr_Size := 500000;
if CodePeer_Mode then
Max_Aggr_Size := 100;
@@ -7533,7 +7533,7 @@ package body Exp_Aggr is
Typ : constant Entity_Id := Base_Type (Etype (N));
begin
- return Static_Dispatch_Tables
+ return Building_Static_Dispatch_Tables
and then Tagged_Type_Expansion
and then RTU_Loaded (Ada_Tags)
diff --git a/gcc/ada/exp_attr.adb b/gcc/ada/exp_attr.adb
index 70d39b7a916..79c6524769b 100644
--- a/gcc/ada/exp_attr.adb
+++ b/gcc/ada/exp_attr.adb
@@ -1054,7 +1054,7 @@ package body Exp_Attr is
Base_Typ : constant Entity_Id := Base_Type (Etype (Pref));
Exprs : constant List_Id := Expressions (N);
Aux_Decl : Node_Id;
- Blk : Node_Id;
+ Blk : Node_Id := Empty;
Decls : List_Id;
Installed : Boolean;
Loc : Source_Ptr;
diff --git a/gcc/ada/exp_ch11.adb b/gcc/ada/exp_ch11.adb
index 7941cbd2ca6..c4bf096cab7 100644
--- a/gcc/ada/exp_ch11.adb
+++ b/gcc/ada/exp_ch11.adb
@@ -1855,11 +1855,13 @@ package body Exp_Ch11 is
-- and the warning is enabled, generate the appropriate warnings.
-- ??? Do not do it for the Call_Marker nodes inserted by the ABE
- -- mechanism because this generates too many false positives.
+ -- mechanism because this generates too many false positives, or
+ -- for generic instantiations for the same reason.
elsif Warn_On_Non_Local_Exception
and then Restriction_Active (No_Exception_Propagation)
and then Nkind (N) /= N_Call_Marker
+ and then Nkind (N) not in N_Generic_Instantiation
then
Warn_No_Exception_Propagation_Active (N);
diff --git a/gcc/ada/exp_ch3.adb b/gcc/ada/exp_ch3.adb
index 043a02c64ba..f21806923da 100644
--- a/gcc/ada/exp_ch3.adb
+++ b/gcc/ada/exp_ch3.adb
@@ -2176,7 +2176,7 @@ package body Exp_Ch3 is
-- Generate
-- function Fxx (O : in Rec_Typ) return Storage_Offset is
-- begin
- -- return O.Iface_Comp'Position;
+ -- return -O.Iface_Comp'Position;
-- end Fxx;
Body_Node := New_Node (N_Subprogram_Body, Loc);
@@ -2199,15 +2199,16 @@ package body Exp_Ch3 is
Statements => New_List (
Make_Simple_Return_Statement (Loc,
Expression =>
- Make_Attribute_Reference (Loc,
- Prefix =>
- Make_Selected_Component (Loc,
- Prefix =>
- Unchecked_Convert_To (Acc_Type,
- Make_Identifier (Loc, Name_uO)),
- Selector_Name =>
- New_Occurrence_Of (Iface_Comp, Loc)),
- Attribute_Name => Name_Position)))));
+ Make_Op_Minus (Loc,
+ Make_Attribute_Reference (Loc,
+ Prefix =>
+ Make_Selected_Component (Loc,
+ Prefix =>
+ Unchecked_Convert_To (Acc_Type,
+ Make_Identifier (Loc, Name_uO)),
+ Selector_Name =>
+ New_Occurrence_Of (Iface_Comp, Loc)),
+ Attribute_Name => Name_Position))))));
Set_Ekind (Func_Id, E_Function);
Set_Mechanism (Func_Id, Default_Mechanism);
@@ -2544,6 +2545,7 @@ package body Exp_Ch3 is
then
declare
Elab_Sec_DT_Stmts_List : constant List_Id := New_List;
+ Elab_List : List_Id := New_List;
begin
Init_Secondary_Tags
@@ -2554,24 +2556,30 @@ package body Exp_Ch3 is
Fixed_Comps => True,
Variable_Comps => False);
- Append_To (Elab_Sec_DT_Stmts_List,
- Make_Assignment_Statement (Loc,
- Name =>
- New_Occurrence_Of
- (Access_Disp_Table_Elab_Flag (Rec_Type), Loc),
- Expression =>
- New_Occurrence_Of (Standard_False, Loc)));
-
- Prepend_List_To (Body_Stmts, New_List (
+ Elab_List := New_List (
Make_If_Statement (Loc,
Condition => New_Occurrence_Of (Set_Tag, Loc),
- Then_Statements => Init_Tags_List),
+ Then_Statements => Init_Tags_List));
+
+ if Elab_Flag_Needed (Rec_Type) then
+ Append_To (Elab_Sec_DT_Stmts_List,
+ Make_Assignment_Statement (Loc,
+ Name =>
+ New_Occurrence_Of
+ (Access_Disp_Table_Elab_Flag (Rec_Type),
+ Loc),
+ Expression =>
+ New_Occurrence_Of (Standard_False, Loc)));
+
+ Append_To (Elab_List,
+ Make_If_Statement (Loc,
+ Condition =>
+ New_Occurrence_Of
+ (Access_Disp_Table_Elab_Flag (Rec_Type), Loc),
+ Then_Statements => Elab_Sec_DT_Stmts_List));
+ end if;
- Make_If_Statement (Loc,
- Condition =>
- New_Occurrence_Of
- (Access_Disp_Table_Elab_Flag (Rec_Type), Loc),
- Then_Statements => Elab_Sec_DT_Stmts_List)));
+ Prepend_List_To (Body_Stmts, Elab_List);
end;
else
Prepend_To (Body_Stmts,
@@ -6279,7 +6287,7 @@ package body Exp_Ch3 is
-- Force construction of dispatch tables of library level tagged types
if Tagged_Type_Expansion
- and then Static_Dispatch_Tables
+ and then Building_Static_Dispatch_Tables
and then Is_Library_Level_Entity (Def_Id)
and then Is_Library_Level_Tagged_Type (Base_Typ)
and then Ekind_In (Base_Typ, E_Record_Type,
@@ -6727,8 +6735,11 @@ package body Exp_Ch3 is
declare
New_Id : constant Entity_Id := Defining_Identifier (N);
Next_Temp : constant Entity_Id := Next_Entity (New_Id);
- S_Flag : constant Boolean :=
+ Save_CFS : constant Boolean :=
Comes_From_Source (Def_Id);
+ Save_SP : constant Node_Id := SPARK_Pragma (Def_Id);
+ Save_SPI : constant Boolean :=
+ SPARK_Pragma_Inherited (Def_Id);
begin
Set_Next_Entity (New_Id, Next_Entity (Def_Id));
@@ -6740,8 +6751,20 @@ package body Exp_Ch3 is
Set_Sloc (Defining_Identifier (N), Sloc (Def_Id));
Set_Comes_From_Source (Def_Id, False);
+
+ -- ??? This is extremely dangerous!!! Exchanging entities
+ -- is very low level, and as a result it resets flags and
+ -- fields which belong to the original Def_Id. Several of
+ -- these attributes are saved and restored, but there may
+ -- be many more that need to be preserverd.
+
Exchange_Entities (Defining_Identifier (N), Def_Id);
- Set_Comes_From_Source (Def_Id, S_Flag);
+
+ -- Restore clobbered attributes
+
+ Set_Comes_From_Source (Def_Id, Save_CFS);
+ Set_SPARK_Pragma (Def_Id, Save_SP);
+ Set_SPARK_Pragma_Inherited (Def_Id, Save_SPI);
end;
end;
end if;
@@ -8501,13 +8524,14 @@ package body Exp_Ch3 is
Unchecked_Convert_To
(RTE (RE_Storage_Offset),
- Make_Attribute_Reference (Loc,
- Prefix =>
- Make_Selected_Component (Loc,
- Prefix => New_Copy_Tree (Target),
- Selector_Name =>
- New_Occurrence_Of (Tag_Comp, Loc)),
- Attribute_Name => Name_Position)),
+ Make_Op_Minus (Loc,
+ Make_Attribute_Reference (Loc,
+ Prefix =>
+ Make_Selected_Component (Loc,
+ Prefix => New_Copy_Tree (Target),
+ Selector_Name =>
+ New_Occurrence_Of (Tag_Comp, Loc)),
+ Attribute_Name => Name_Position))),
Unchecked_Convert_To (RTE (RE_Offset_To_Top_Function_Ptr),
Make_Attribute_Reference (Loc,
@@ -8530,12 +8554,13 @@ package body Exp_Ch3 is
New_Occurrence_Of (Offset_To_Top_Comp, Loc)),
Expression =>
- Make_Attribute_Reference (Loc,
- Prefix =>
- Make_Selected_Component (Loc,
- Prefix => New_Copy_Tree (Target),
- Selector_Name => New_Occurrence_Of (Tag_Comp, Loc)),
- Attribute_Name => Name_Position)));
+ Make_Op_Minus (Loc,
+ Make_Attribute_Reference (Loc,
+ Prefix =>
+ Make_Selected_Component (Loc,
+ Prefix => New_Copy_Tree (Target),
+ Selector_Name => New_Occurrence_Of (Tag_Comp, Loc)),
+ Attribute_Name => Name_Position))));
-- Normal case: No discriminants in the parent type
@@ -8552,13 +8577,14 @@ package body Exp_Ch3 is
Iface_Tag => New_Occurrence_Of (Iface_Tag, Loc),
Offset_Value =>
Unchecked_Convert_To (RTE (RE_Storage_Offset),
- Make_Attribute_Reference (Loc,
- Prefix =>
- Make_Selected_Component (Loc,
- Prefix => New_Copy_Tree (Target),
- Selector_Name =>
- New_Occurrence_Of (Tag_Comp, Loc)),
- Attribute_Name => Name_Position))));
+ Make_Op_Minus (Loc,
+ Make_Attribute_Reference (Loc,
+ Prefix =>
+ Make_Selected_Component (Loc,
+ Prefix => New_Copy_Tree (Target),
+ Selector_Name =>
+ New_Occurrence_Of (Tag_Comp, Loc)),
+ Attribute_Name => Name_Position)))));
end if;
-- Generate:
@@ -8569,7 +8595,9 @@ package body Exp_Ch3 is
-- Offset_Value => n,
-- Offset_Func => null);
- if RTE_Available (RE_Register_Interface_Offset) then
+ if not Building_Static_Secondary_DT (Typ)
+ and then RTE_Available (RE_Register_Interface_Offset)
+ then
Append_To (Stmts_List,
Make_Procedure_Call_Statement (Loc,
Name =>
@@ -8587,13 +8615,14 @@ package body Exp_Ch3 is
New_Occurrence_Of (Standard_True, Loc),
Unchecked_Convert_To (RTE (RE_Storage_Offset),
- Make_Attribute_Reference (Loc,
- Prefix =>
- Make_Selected_Component (Loc,
- Prefix => New_Copy_Tree (Target),
- Selector_Name =>
- New_Occurrence_Of (Tag_Comp, Loc)),
- Attribute_Name => Name_Position)),
+ Make_Op_Minus (Loc,
+ Make_Attribute_Reference (Loc,
+ Prefix =>
+ Make_Selected_Component (Loc,
+ Prefix => New_Copy_Tree (Target),
+ Selector_Name =>
+ New_Occurrence_Of (Tag_Comp, Loc)),
+ Attribute_Name => Name_Position))),
Make_Null (Loc))));
end if;
@@ -8697,15 +8726,11 @@ package body Exp_Ch3 is
-- Initialize secondary tags
else
- Append_To (Init_Tags_List,
- Make_Assignment_Statement (Loc,
- Name =>
- Make_Selected_Component (Loc,
- Prefix => New_Copy_Tree (Target),
- Selector_Name =>
- New_Occurrence_Of (Node (Iface_Comp_Elmt), Loc)),
- Expression =>
- New_Occurrence_Of (Node (Iface_Tag_Elmt), Loc)));
+ Initialize_Tag
+ (Typ => Full_Typ,
+ Iface => Node (Iface_Elmt),
+ Tag_Comp => Tag_Comp,
+ Iface_Tag => Node (Iface_Tag_Elmt));
end if;
-- Otherwise generate code to initialize the tag
@@ -8714,10 +8739,11 @@ package body Exp_Ch3 is
if (In_Variable_Pos and then Variable_Comps)
or else (not In_Variable_Pos and then Fixed_Comps)
then
- Initialize_Tag (Full_Typ,
- Iface => Node (Iface_Elmt),
- Tag_Comp => Tag_Comp,
- Iface_Tag => Node (Iface_Tag_Elmt));
+ Initialize_Tag
+ (Typ => Full_Typ,
+ Iface => Node (Iface_Elmt),
+ Tag_Comp => Tag_Comp,
+ Iface_Tag => Node (Iface_Tag_Elmt));
end if;
end if;
diff --git a/gcc/ada/exp_ch4.adb b/gcc/ada/exp_ch4.adb
index abf6d635451..88303c66861 100644
--- a/gcc/ada/exp_ch4.adb
+++ b/gcc/ada/exp_ch4.adb
@@ -630,7 +630,9 @@ package body Exp_Ch4 is
-- [Deep_]Finalize (Obj_Ref.all);
- if Needs_Finalization (DesigT) then
+ if Needs_Finalization (DesigT)
+ and then not No_Heap_Finalization (PtrT)
+ then
Fin_Call :=
Make_Final_Call
(Obj_Ref =>
@@ -2764,7 +2766,7 @@ package body Exp_Ch4 is
-- special case of setting the right high bound for a null result.
-- This is of type Ityp.
- High_Bound : Node_Id;
+ High_Bound : Node_Id := Empty;
-- A tree node representing the high bound of the result (of type Ityp)
Result : Node_Id;
@@ -4798,7 +4800,7 @@ package body Exp_Ch4 is
declare
Dis : Boolean := False;
- Typ : Entity_Id;
+ Typ : Entity_Id := Empty;
begin
if Has_Discriminants (T) then
@@ -10747,6 +10749,8 @@ package body Exp_Ch4 is
if Present (Stored) then
Elmt := First_Elmt (Stored);
+ else
+ Elmt := No_Elmt; -- init to avoid warning
end if;
Cons := New_List;
@@ -11277,6 +11281,7 @@ package body Exp_Ch4 is
elsif In_Instance_Body
and then Ekind (Operand_Type) = E_Anonymous_Access_Type
and then Nkind (Operand) = N_Selected_Component
+ and then Ekind (Entity (Selector_Name (Operand))) = E_Discriminant
and then Object_Access_Level (Operand) >
Type_Access_Level (Target_Type)
then
@@ -13109,10 +13114,10 @@ package body Exp_Ch4 is
Comp : Node_Id;
-- Comparison operand, set only if Is_Zero is false
- Ent : Entity_Id;
+ Ent : Entity_Id := Empty;
-- Entity whose length is being compared
- Index : Node_Id;
+ Index : Node_Id := Empty;
-- Integer_Literal node for length attribute expression, or Empty
-- if there is no such expression present.
diff --git a/gcc/ada/exp_ch5.adb b/gcc/ada/exp_ch5.adb
index 9d2f652f119..d98e725d85f 100644
--- a/gcc/ada/exp_ch5.adb
+++ b/gcc/ada/exp_ch5.adb
@@ -4769,8 +4769,8 @@ package body Exp_Ch5 is
-- If the domain is an itype, note the bounds of its range.
- L_Hi : Node_Id;
- L_Lo : Node_Id;
+ L_Hi : Node_Id := Empty;
+ L_Lo : Node_Id := Empty;
function Lo_Val (N : Node_Id) return Node_Id;
-- Given static expression or static range, returns an identifier
diff --git a/gcc/ada/exp_ch6.adb b/gcc/ada/exp_ch6.adb
index bca7e5deae4..357979e663e 100644
--- a/gcc/ada/exp_ch6.adb
+++ b/gcc/ada/exp_ch6.adb
@@ -4721,9 +4721,11 @@ package body Exp_Ch6 is
Exp : Node_Id;
HSS : Node_Id;
Result : Node_Id;
- Return_Stmt : Node_Id;
Stmts : List_Id;
+ Return_Stmt : Node_Id := Empty;
+ -- Force initialization to facilitate static analysis
+
-- Start of processing for Expand_N_Extended_Return_Statement
begin
diff --git a/gcc/ada/exp_ch9.adb b/gcc/ada/exp_ch9.adb
index 063b812f9bc..d94a72ffeb8 100644
--- a/gcc/ada/exp_ch9.adb
+++ b/gcc/ada/exp_ch9.adb
@@ -6189,8 +6189,7 @@ package body Exp_Ch9 is
Cond_Id : Entity_Id;
Entry_Body : Node_Id;
- Func_Body : Node_Id;
- pragma Warnings (Off, Func_Body);
+ Func_Body : Node_Id := Empty;
-- Start of processing for Expand_Entry_Barrier
@@ -12356,7 +12355,7 @@ package body Exp_Ch9 is
Call : Node_Id;
Call_Ent : Entity_Id;
Conc_Typ_Stmts : List_Id;
- Concval : Node_Id;
+ Concval : Node_Id := Empty; -- init to avoid warning
D_Alt : constant Node_Id := Delay_Alternative (N);
D_Conv : Node_Id;
D_Disc : Node_Id;
@@ -12909,8 +12908,8 @@ package body Exp_Ch9 is
end if;
-- If the type of the dispatching object is an access type then return
- -- an explicit dereference of a copy of the object, and note that
- -- this is the controlling actual of the call.
+ -- an explicit dereference of a copy of the object, and note that this
+ -- is the controlling actual of the call.
if Is_Access_Type (Etype (Object)) then
Object :=
@@ -14590,9 +14589,9 @@ package body Exp_Ch9 is
-- Jnn'unchecked_access
- -- and add it to aggegate for access to formals. Note that
- -- the actual may be by-copy but still be a controlling actual
- -- if it is an access to class-wide interface.
+ -- and add it to aggegate for access to formals. Note that the
+ -- actual may be by-copy but still be a controlling actual if it
+ -- is an access to class-wide interface.
if not Is_Controlling_Actual (Actual) then
Append_To (Params,
diff --git a/gcc/ada/exp_disp.adb b/gcc/ada/exp_disp.adb
index f3728f655d4..926df631ac9 100644
--- a/gcc/ada/exp_disp.adb
+++ b/gcc/ada/exp_disp.adb
@@ -281,7 +281,8 @@ package body Exp_Disp is
------------------------
function Building_Static_DT (Typ : Entity_Id) return Boolean is
- Root_Typ : Entity_Id := Root_Type (Typ);
+ Root_Typ : Entity_Id := Root_Type (Typ);
+ Static_DT : Boolean;
begin
-- Handle private types
@@ -290,14 +291,21 @@ package body Exp_Disp is
Root_Typ := Full_View (Root_Typ);
end if;
- return Static_Dispatch_Tables
- and then Is_Library_Level_Tagged_Type (Typ)
+ Static_DT :=
+ Building_Static_Dispatch_Tables
+ and then Is_Library_Level_Tagged_Type (Typ)
- -- If the type is derived from a CPP class we cannot statically
- -- build the dispatch tables because we must inherit primitives
- -- from the CPP side.
+ -- If the type is derived from a CPP class we cannot statically
+ -- build the dispatch tables because we must inherit primitives
+ -- from the CPP side.
- and then not Is_CPP_Class (Root_Typ);
+ and then not Is_CPP_Class (Root_Typ);
+
+ if not Static_DT then
+ Check_Restriction (Static_Dispatch_Tables, Typ);
+ end if;
+
+ return Static_DT;
end Building_Static_DT;
----------------------------------
@@ -305,8 +313,9 @@ package body Exp_Disp is
----------------------------------
function Building_Static_Secondary_DT (Typ : Entity_Id) return Boolean is
- Full_Typ : Entity_Id := Typ;
- Root_Typ : Entity_Id := Root_Type (Typ);
+ Full_Typ : Entity_Id := Typ;
+ Root_Typ : Entity_Id := Root_Type (Typ);
+ Static_DT : Boolean;
begin
-- Handle private types
@@ -319,11 +328,21 @@ package body Exp_Disp is
Root_Typ := Full_View (Root_Typ);
end if;
- return Building_Static_DT (Full_Typ)
+ Static_DT :=
+ Building_Static_DT (Full_Typ)
+ and then not Is_Interface (Full_Typ)
+ and then Has_Interfaces (Full_Typ)
+ and then (Full_Typ = Root_Typ
+ or else not Is_Variable_Size_Record (Etype (Full_Typ)));
+
+ if not Static_DT
and then not Is_Interface (Full_Typ)
and then Has_Interfaces (Full_Typ)
- and then (Full_Typ = Root_Typ
- or else not Is_Variable_Size_Record (Etype (Full_Typ)));
+ then
+ Check_Restriction (Static_Dispatch_Tables, Typ);
+ end if;
+
+ return Static_DT;
end Building_Static_Secondary_DT;
----------------------------------
@@ -660,7 +679,8 @@ package body Exp_Disp is
begin
return Ada_Version >= Ada_2005
and then not Is_Interface (Typ)
- and then Has_Interfaces (Typ);
+ and then Has_Interfaces (Typ)
+ and then not Building_Static_DT (Typ);
end Elab_Flag_Needed;
-----------------------------
@@ -1884,7 +1904,7 @@ package body Exp_Disp is
-- Generate:
-- type T is access all <<type of the target formal>>
-- S : Storage_Offset := Storage_Offset!(Formal)
- -- - Offset_To_Top (address!(Formal))
+ -- + Offset_To_Top (address!(Formal))
Decl_2 :=
Make_Full_Type_Declaration (Loc,
@@ -1918,7 +1938,7 @@ package body Exp_Disp is
Object_Definition =>
New_Occurrence_Of (RTE (RE_Storage_Offset), Loc),
Expression =>
- Make_Op_Subtract (Loc,
+ Make_Op_Add (Loc,
Left_Opnd =>
Unchecked_Convert_To
(RTE (RE_Storage_Offset),
@@ -1942,7 +1962,7 @@ package body Exp_Disp is
-- Generate:
-- S1 : Storage_Offset := Storage_Offset!(Formal'Address)
- -- - Offset_To_Top (Formal'Address)
+ -- + Offset_To_Top (Formal'Address)
-- S2 : Addr_Ptr := Addr_Ptr!(S1)
New_Arg :=
@@ -1969,7 +1989,7 @@ package body Exp_Disp is
Object_Definition =>
New_Occurrence_Of (RTE (RE_Storage_Offset), Loc),
Expression =>
- Make_Op_Subtract (Loc,
+ Make_Op_Add (Loc,
Left_Opnd =>
Unchecked_Convert_To
(RTE (RE_Storage_Offset),
@@ -4234,14 +4254,15 @@ package body Exp_Disp is
else
Append_To (DT_Aggr_List,
- Make_Attribute_Reference (Loc,
- Prefix =>
- Make_Selected_Component (Loc,
- Prefix =>
- New_Occurrence_Of (Dummy_Object, Loc),
- Selector_Name =>
- New_Occurrence_Of (Iface_Comp, Loc)),
- Attribute_Name => Name_Position));
+ Make_Op_Minus (Loc,
+ Make_Attribute_Reference (Loc,
+ Prefix =>
+ Make_Selected_Component (Loc,
+ Prefix =>
+ New_Occurrence_Of (Dummy_Object, Loc),
+ Selector_Name =>
+ New_Occurrence_Of (Iface_Comp, Loc)),
+ Attribute_Name => Name_Position)));
end if;
-- Generate the Object Specific Data table required to dispatch calls
@@ -5102,7 +5123,8 @@ package body Exp_Disp is
Append_To (Result,
Make_Object_Declaration (Loc,
Defining_Identifier => HT_Link,
- Object_Definition => New_Occurrence_Of (RTE (RE_Tag), Loc)));
+ Object_Definition => New_Occurrence_Of (RTE (RE_Tag), Loc),
+ Expression => New_Occurrence_Of (RTE (RE_No_Tag), Loc)));
end if;
-- Generate code to create the storage for the type specific data object
@@ -5370,7 +5392,8 @@ package body Exp_Disp is
Make_Attribute_Reference (Loc,
Prefix => New_Occurrence_Of (HT_Link, Loc),
Attribute_Name => Name_Address)));
- else
+
+ elsif RTE_Record_Component_Available (RE_HT_Link) then
Append_To (TSD_Aggr_List,
Unchecked_Convert_To (RTE (RE_Tag_Ptr),
New_Occurrence_Of (RTE (RE_Null_Address), Loc)));
@@ -5494,16 +5517,28 @@ package body Exp_Disp is
else
declare
- TSD_Ifaces_List : constant List_Id := New_List;
- Elmt : Elmt_Id;
- Sec_DT_Tag : Node_Id;
+ TSD_Ifaces_List : constant List_Id := New_List;
+ Elmt : Elmt_Id;
+ Ifaces_List : Elist_Id := No_Elist;
+ Ifaces_Comp_List : Elist_Id := No_Elist;
+ Ifaces_Tag_List : Elist_Id;
+ Offset_To_Top : Node_Id;
+ Sec_DT_Tag : Node_Id;
begin
+ -- Collect interfaces information if we need to compute the
+ -- offset to the top using the dummy object.
+
+ if Present (Dummy_Object) then
+ Collect_Interfaces_Info (Typ,
+ Ifaces_List, Ifaces_Comp_List, Ifaces_Tag_List);
+ end if;
+
AI := First_Elmt (Typ_Ifaces);
while Present (AI) loop
if Is_Ancestor (Node (AI), Typ, Use_Full_View => True) then
- Sec_DT_Tag :=
- New_Occurrence_Of (DT_Ptr, Loc);
+ Sec_DT_Tag := New_Occurrence_Of (DT_Ptr, Loc);
+
else
Elmt :=
Next_Elmt
@@ -5511,9 +5546,9 @@ package body Exp_Disp is
pragma Assert (Has_Thunks (Node (Elmt)));
while Is_Tag (Node (Elmt))
- and then not
- Is_Ancestor (Node (AI), Related_Type (Node (Elmt)),
- Use_Full_View => True)
+ and then not
+ Is_Ancestor (Node (AI), Related_Type (Node (Elmt)),
+ Use_Full_View => True)
loop
pragma Assert (Has_Thunks (Node (Elmt)));
Next_Elmt (Elmt);
@@ -5528,14 +5563,56 @@ package body Exp_Disp is
pragma Assert (Ekind (Node (Elmt)) = E_Constant
and then not
Has_Thunks (Node (Next_Elmt (Next_Elmt (Elmt)))));
+
Sec_DT_Tag :=
- New_Occurrence_Of (Node (Next_Elmt (Next_Elmt (Elmt))),
- Loc);
+ New_Occurrence_Of
+ (Node (Next_Elmt (Next_Elmt (Elmt))), Loc);
+ end if;
+
+ -- For static dispatch tables compute Offset_To_Top using
+ -- the dummy object.
+
+ if Present (Dummy_Object) then
+ declare
+ Iface : constant Node_Id := Node (AI);
+ Iface_Comp : Node_Id := Empty;
+ Iface_Comp_Elmt : Elmt_Id;
+ Iface_Elmt : Elmt_Id;
+
+ begin
+ Iface_Elmt := First_Elmt (Ifaces_List);
+ Iface_Comp_Elmt := First_Elmt (Ifaces_Comp_List);
+
+ while Present (Iface_Elmt) loop
+ if Node (Iface_Elmt) = Iface then
+ Iface_Comp := Node (Iface_Comp_Elmt);
+ exit;
+ end if;
+
+ Next_Elmt (Iface_Elmt);
+ Next_Elmt (Iface_Comp_Elmt);
+ end loop;
+
+ pragma Assert (Present (Iface_Comp));
+
+ Offset_To_Top :=
+ Make_Op_Minus (Loc,
+ Make_Attribute_Reference (Loc,
+ Prefix =>
+ Make_Selected_Component (Loc,
+ Prefix =>
+ New_Occurrence_Of (Dummy_Object, Loc),
+ Selector_Name =>
+ New_Occurrence_Of (Iface_Comp, Loc)),
+ Attribute_Name => Name_Position));
+ end;
+ else
+ Offset_To_Top := Make_Integer_Literal (Loc, 0);
end if;
Append_To (TSD_Ifaces_List,
- Make_Aggregate (Loc,
- Expressions => New_List (
+ Make_Aggregate (Loc,
+ Expressions => New_List (
-- Iface_Tag
@@ -5550,7 +5627,7 @@ package body Exp_Disp is
-- Offset_To_Top_Value
- Make_Integer_Literal (Loc, 0),
+ Offset_To_Top,
-- Offset_To_Top_Func
@@ -5558,9 +5635,7 @@ package body Exp_Disp is
-- Secondary_DT
- Unchecked_Convert_To (RTE (RE_Tag), Sec_DT_Tag)
-
- )));
+ Unchecked_Convert_To (RTE (RE_Tag), Sec_DT_Tag))));
Next_Elmt (AI);
end loop;
@@ -5570,17 +5645,15 @@ package body Exp_Disp is
Set_Is_Statically_Allocated (ITable,
Is_Library_Level_Tagged_Type (Typ));
- -- The table of interfaces is not constant; its slots are
- -- filled at run time by the IP routine using attribute
- -- 'Position to know the location of the tag components
- -- (and this attribute cannot be safely used before the
- -- object is initialized).
+ -- The table of interfaces is constant if we are building a
+ -- static dispatch table; otherwise is not constant because
+ -- its slots are filled at run time by the IP routine.
Append_To (Result,
Make_Object_Declaration (Loc,
Defining_Identifier => ITable,
Aliased_Present => True,
- Constant_Present => False,
+ Constant_Present => Present (Dummy_Object),
Object_Definition =>
Make_Subtype_Indication (Loc,
Subtype_Mark =>
@@ -5590,10 +5663,11 @@ package body Exp_Disp is
Constraints => New_List (
Make_Integer_Literal (Loc, Num_Ifaces)))),
- Expression => Make_Aggregate (Loc,
- Expressions => New_List (
- Make_Integer_Literal (Loc, Num_Ifaces),
- Make_Aggregate (Loc, TSD_Ifaces_List)))));
+ Expression =>
+ Make_Aggregate (Loc,
+ Expressions => New_List (
+ Make_Integer_Literal (Loc, Num_Ifaces),
+ Make_Aggregate (Loc, TSD_Ifaces_List)))));
Append_To (Result,
Make_Attribute_Definition_Clause (Loc,
diff --git a/gcc/ada/exp_imgv.adb b/gcc/ada/exp_imgv.adb
index 78777075d8b..6f6b008f99f 100644
--- a/gcc/ada/exp_imgv.adb
+++ b/gcc/ada/exp_imgv.adb
@@ -436,7 +436,7 @@ package body Exp_Imgv is
Imid : RE_Id;
Ptyp : Entity_Id;
Rtyp : Entity_Id;
- Tent : Entity_Id;
+ Tent : Entity_Id := Empty;
Ttyp : Entity_Id;
Proc_Ent : Entity_Id;
Enum_Case : Boolean;
diff --git a/gcc/ada/exp_intr.adb b/gcc/ada/exp_intr.adb
index 6de8952ae85..bca7301449f 100644
--- a/gcc/ada/exp_intr.adb
+++ b/gcc/ada/exp_intr.adb
@@ -924,7 +924,8 @@ package body Exp_Intr is
Arg : constant Node_Id := First_Actual (N);
Loc : constant Source_Ptr := Sloc (N);
Typ : constant Entity_Id := Etype (Arg);
- Desig_Typ : constant Entity_Id := Designated_Type (Typ);
+ Desig_Typ : constant Entity_Id :=
+ Available_View (Designated_Type (Typ));
Needs_Fin : constant Boolean := Needs_Finalization (Desig_Typ);
Root_Typ : constant Entity_Id := Underlying_Type (Root_Type (Typ));
Pool : constant Entity_Id := Associated_Storage_Pool (Root_Typ);
diff --git a/gcc/ada/exp_prag.adb b/gcc/ada/exp_prag.adb
index dfed6af66a7..a92db56b525 100644
--- a/gcc/ada/exp_prag.adb
+++ b/gcc/ada/exp_prag.adb
@@ -1090,7 +1090,7 @@ package body Exp_Prag is
Conseq_Checks : Node_Id := Empty;
Count : Entity_Id;
Count_Decl : Node_Id;
- Error_Decls : List_Id;
+ Error_Decls : List_Id := No_List; -- init to avoid warning
Flag : Entity_Id;
Flag_Decl : Node_Id;
If_Stmt : Node_Id;
diff --git a/gcc/ada/exp_spark.adb b/gcc/ada/exp_spark.adb
index 5386fa6578b..43ca12f7940 100644
--- a/gcc/ada/exp_spark.adb
+++ b/gcc/ada/exp_spark.adb
@@ -349,7 +349,7 @@ package body Exp_SPARK is
Loc : constant Source_Ptr := Sloc (N);
Obj_Id : constant Entity_Id := Defining_Entity (N);
Nam : constant Node_Id := Name (N);
- Typ : constant Entity_Id := Etype (Subtype_Mark (N));
+ Typ : constant Entity_Id := Etype (Obj_Id);
begin
-- Transform a renaming of the form
diff --git a/gcc/ada/exp_util.adb b/gcc/ada/exp_util.adb
index 8fdd8aa8200..6ebcc4c9794 100644
--- a/gcc/ada/exp_util.adb
+++ b/gcc/ada/exp_util.adb
@@ -165,11 +165,6 @@ package body Exp_Util is
-- Force evaluation of bounds of a slice, which may be given by a range
-- or by a subtype indication with or without a constraint.
- function Find_DIC_Type (Typ : Entity_Id) return Entity_Id;
- -- Subsidiary to all Build_DIC_Procedure_xxx routines. Find the type which
- -- defines the Default_Initial_Condition pragma of type Typ. This is either
- -- Typ itself or a parent type when the pragma is inherited.
-
function Make_CW_Equivalent_Type
(T : Entity_Id;
E : Node_Id) return Entity_Id;
@@ -4996,7 +4991,7 @@ package body Exp_Util is
-- is transformed into
- -- Val : Constrained_Subtype_of_T := Maybe_Modified_Expr;
+ -- Val : Constrained_Subtype_Of_T := Maybe_Modified_Expr;
--
-- Here are the main cases :
--
@@ -5389,66 +5384,6 @@ package body Exp_Util is
return TSS (Utyp, TSS_Finalize_Address);
end Finalize_Address;
- -------------------
- -- Find_DIC_Type --
- -------------------
-
- function Find_DIC_Type (Typ : Entity_Id) return Entity_Id is
- Curr_Typ : Entity_Id;
- -- The current type being examined in the parent hierarchy traversal
-
- DIC_Typ : Entity_Id;
- -- The type which carries the DIC pragma. This variable denotes the
- -- partial view when private types are involved.
-
- Par_Typ : Entity_Id;
- -- The parent type of the current type. This variable denotes the full
- -- view when private types are involved.
-
- begin
- -- The input type defines its own DIC pragma, therefore it is the owner
-
- if Has_Own_DIC (Typ) then
- DIC_Typ := Typ;
-
- -- Otherwise the DIC pragma is inherited from a parent type
-
- else
- pragma Assert (Has_Inherited_DIC (Typ));
-
- -- Climb the parent chain
-
- Curr_Typ := Typ;
- loop
- -- Inspect the parent type. Do not consider subtypes as they
- -- inherit the DIC attributes from their base types.
-
- DIC_Typ := Base_Type (Etype (Curr_Typ));
-
- -- Look at the full view of a private type because the type may
- -- have a hidden parent introduced in the full view.
-
- Par_Typ := DIC_Typ;
-
- if Is_Private_Type (Par_Typ)
- and then Present (Full_View (Par_Typ))
- then
- Par_Typ := Full_View (Par_Typ);
- end if;
-
- -- Stop the climb once the nearest parent type which defines a DIC
- -- pragma of its own is encountered or when the root of the parent
- -- chain is reached.
-
- exit when Has_Own_DIC (DIC_Typ) or else Curr_Typ = Par_Typ;
-
- Curr_Typ := Par_Typ;
- end loop;
- end if;
-
- return DIC_Typ;
- end Find_DIC_Type;
-
------------------------
-- Find_Interface_ADT --
------------------------
@@ -5512,7 +5447,7 @@ package body Exp_Util is
(T : Entity_Id;
Iface : Entity_Id) return Entity_Id
is
- AI_Tag : Entity_Id;
+ AI_Tag : Entity_Id := Empty;
Found : Boolean := False;
Typ : Entity_Id := T;
@@ -7255,9 +7190,11 @@ package body Exp_Util is
null;
end if;
- -- Special case: a call marker
+ -- Special case: a marker
- when N_Call_Marker =>
+ when N_Call_Marker
+ | N_Variable_Reference_Marker
+ =>
if Is_List_Member (P) then
Insert_List_Before_And_Analyze (P, Ins_Actions);
return;
@@ -11074,11 +11011,11 @@ package body Exp_Util is
Scope_Suppress.Suppress := (others => True);
- -- If this is an elementary or a small not by-reference record type, and
+ -- If this is an elementary or a small not-by-reference record type, and
-- we need to capture the value, just make a constant; this is cheap and
-- objects of both kinds of types can be bit aligned, so it might not be
-- possible to generate a reference to them. Likewise if this is not a
- -- name reference, except for a type conversion because we would enter
+ -- name reference, except for a type conversion, because we would enter
-- an infinite recursion with Checks.Apply_Predicate_Check if the target
-- type has predicates (and type conversions need a specific treatment
-- anyway, see below). Also do it if we have a volatile reference and
diff --git a/gcc/ada/fname.adb b/gcc/ada/fname.adb
index 2bdfbf685d9..96d813adbad 100644
--- a/gcc/ada/fname.adb
+++ b/gcc/ada/fname.adb
@@ -167,8 +167,11 @@ package body Fname is
is
begin
-- Definitely false if longer than 12 characters (8.3)
+ -- except for the Interfaces packages
- if Fname'Length > 12 then
+ if Fname'Length > 12
+ and then Fname (Fname'First .. Fname'First + 1) /= "i-"
+ then
return False;
end if;
diff --git a/gcc/ada/freeze.adb b/gcc/ada/freeze.adb
index a106d68ae86..bc7694cd170 100644
--- a/gcc/ada/freeze.adb
+++ b/gcc/ada/freeze.adb
@@ -1173,8 +1173,7 @@ package body Freeze is
Component_Aliased : Boolean;
- Comp_Byte_Aligned : Boolean;
- pragma Warnings (Off, Comp_Byte_Aligned);
+ Comp_Byte_Aligned : Boolean := False;
-- Set for the record case, True if Comp is aligned on byte boundaries
-- (in which case it is allowed to have different storage order).
@@ -2788,7 +2787,6 @@ package body Freeze is
elsif Csiz mod System_Storage_Unit = 0
and then Is_Composite_Type (Ctyp)
then
-
Set_Is_Packed (Base_Type (Arr), True);
Set_Has_Non_Standard_Rep (Base_Type (Arr), True);
Set_Is_Bit_Packed_Array (Base_Type (Arr), False);
diff --git a/gcc/ada/gcc-interface/Make-lang.in b/gcc/ada/gcc-interface/Make-lang.in
index 9c7b6e1496f..d51d3973b4d 100644
--- a/gcc/ada/gcc-interface/Make-lang.in
+++ b/gcc/ada/gcc-interface/Make-lang.in
@@ -322,7 +322,6 @@ GNAT_ADA_OBJS = \
ada/libgnat/g-spchge.o \
ada/libgnat/g-speche.o \
ada/libgnat/g-u3spch.o \
- ada/get_spark_xrefs.o \
ada/get_targ.o \
ada/ghost.o \
ada/libgnat/gnat.o \
@@ -352,7 +351,6 @@ GNAT_ADA_OBJS = \
ada/par_sco.o \
ada/prep.o \
ada/prepcomp.o \
- ada/put_spark_xrefs.o \
ada/put_scos.o \
ada/repinfo.o \
ada/restrict.o \
diff --git a/gcc/ada/gcc-interface/Makefile.in b/gcc/ada/gcc-interface/Makefile.in
index b1621d11b11..749dbbfec47 100644
--- a/gcc/ada/gcc-interface/Makefile.in
+++ b/gcc/ada/gcc-interface/Makefile.in
@@ -1,5 +1,5 @@
# Makefile for GNU Ada Compiler (GNAT).
-# Copyright (C) 1994-2016 Free Software Foundation, Inc.
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
#This file is part of GCC.
@@ -887,6 +887,37 @@ ifeq ($(strip $(filter-out arm% linux-androideabi,$(target_cpu) $(target_os))),)
LIBRARY_VERSION := $(LIB_VERSION)
endif
+# AARCH64 QNX
+ifeq ($(strip $(filter-out aarch64 %qnx,$(target_cpu) $(target_os))),)
+ LIBGNAT_TARGET_PAIRS = \
+ a-intnam.ads<libgnarl/a-intnam__qnx.ads \
+ s-inmaop.adb<libgnarl/s-inmaop__posix.adb \
+ s-intman.adb<libgnarl/s-intman__qnx.adb \
+ s-osinte.adb<libgnarl/s-osinte__qnx.adb \
+ s-osinte.ads<libgnarl/s-osinte__qnx.ads \
+ s-osprim.adb<libgnat/s-osprim__posix.adb \
+ s-qnx.ads<libgnarl/s-qnx.ads \
+ s-taprop.adb<libgnarl/s-taprop__qnx.adb \
+ s-taspri.ads<libgnarl/s-taspri__posix.ads \
+ s-tpopsp.adb<libgnarl/s-tpopsp__posix-foreign.adb \
+ $(ATOMICS_TARGET_PAIRS) \
+ $(ATOMICS_BUILTINS_TARGET_PAIRS) \
+ system.ads<libgnat/system-qnx-aarch64.ads
+
+ TOOLS_TARGET_PAIRS = indepsw.adb<indepsw-gnu.adb
+
+ EXTRA_GNATRTL_TASKING_OBJS=s-qnx.o
+ EXTRA_LIBGNAT_OBJS+=sigtramp-qnx.o
+ EXTRA_LIBGNAT_SRCS+=sigtramp.h
+ EH_MECHANISM=-gcc
+
+ SO_OPTS= -shared-libgcc -Wl,-soname,
+ MISCLIB= - lsocket
+ THREADSLIB =
+ GNATLIB_SHARED = gnatlib-shared-dual
+ LIBRARY_VERSION := $(LIB_VERSION)
+endif
+
# Sparc Solaris
ifeq ($(strip $(filter-out sparc% sun solaris%,$(target_cpu) $(target_vendor) $(target_os))),)
LIBGNAT_TARGET_PAIRS = \
diff --git a/gcc/ada/gcc-interface/misc.c b/gcc/ada/gcc-interface/misc.c
index 4d7f432bff2..2cf5e51e91d 100644
--- a/gcc/ada/gcc-interface/misc.c
+++ b/gcc/ada/gcc-interface/misc.c
@@ -262,6 +262,9 @@ gnat_post_options (const char **pfilename ATTRIBUTE_UNUSED)
/* No psABI change warnings for Ada. */
warn_psabi = 0;
+ /* No return type warnings for Ada. */
+ warn_return_type = 0;
+
/* No caret by default for Ada. */
if (!global_options_set.x_flag_diagnostics_show_caret)
global_dc->show_caret = false;
diff --git a/gcc/ada/gcc-interface/trans.c b/gcc/ada/gcc-interface/trans.c
index d22d82ad610..238b841139b 100644
--- a/gcc/ada/gcc-interface/trans.c
+++ b/gcc/ada/gcc-interface/trans.c
@@ -7695,12 +7695,12 @@ gnat_to_gnu (Node_Id gnat_node)
/* Added Nodes */
/****************/
- /* Call markers are created by the ABE mechanism to capture the target of
- a call along with other elaboration-related attributes which are either
- unavailable of expensive to recompute. Call markers do not have static
- and runtime semantics, and should be ignored. */
+ /* Markers are created by the ABE mechanism to capture information which
+ is either unavailable of expensive to recompute. Markers do not have
+ and runtime semantics, and should be ignored. */
case N_Call_Marker:
+ case N_Variable_Reference_Marker:
gnu_result = alloc_stmt_list ();
break;
diff --git a/gcc/ada/gcc-interface/utils.c b/gcc/ada/gcc-interface/utils.c
index bad5aeade13..d7f9f3464ae 100644
--- a/gcc/ada/gcc-interface/utils.c
+++ b/gcc/ada/gcc-interface/utils.c
@@ -4706,6 +4706,7 @@ convert (tree type, tree expr)
return fold (convert_to_real (type, expr));
case RECORD_TYPE:
+ /* Do a normal conversion between scalar and justified modular type. */
if (TYPE_JUSTIFIED_MODULAR_P (type) && !AGGREGATE_TYPE_P (etype))
{
vec<constructor_elt, va_gc> *v;
@@ -4717,9 +4718,27 @@ convert (tree type, tree expr)
return gnat_build_constructor (type, v);
}
- /* ... fall through ... */
+ /* In these cases, assume the front-end has validated the conversion.
+ If the conversion is valid, it will be a bit-wise conversion, so
+ it can be viewed as an unchecked conversion. */
+ return unchecked_convert (type, expr, false);
case ARRAY_TYPE:
+ /* Do a normal conversion between unconstrained and constrained array
+ type, assuming the latter is a constrained version of the former. */
+ if (TREE_CODE (expr) == INDIRECT_REF
+ && ecode == ARRAY_TYPE
+ && TREE_TYPE (etype) == TREE_TYPE (type))
+ {
+ tree ptr_type = build_pointer_type (type);
+ tree t = build_unary_op (INDIRECT_REF, NULL_TREE,
+ fold_convert (ptr_type,
+ TREE_OPERAND (expr, 0)));
+ TREE_READONLY (t) = TREE_READONLY (expr);
+ TREE_THIS_NOTRAP (t) = TREE_THIS_NOTRAP (expr);
+ return t;
+ }
+
/* In these cases, assume the front-end has validated the conversion.
If the conversion is valid, it will be a bit-wise conversion, so
it can be viewed as an unchecked conversion. */
diff --git a/gcc/ada/get_spark_xrefs.adb b/gcc/ada/get_spark_xrefs.adb
deleted file mode 100644
index 9b82d5bfdd1..00000000000
--- a/gcc/ada/get_spark_xrefs.adb
+++ /dev/null
@@ -1,493 +0,0 @@
-------------------------------------------------------------------------------
--- --
--- GNAT COMPILER COMPONENTS --
--- --
--- G E T _ S P A R K _ X R E F S --
--- --
--- B o d y --
--- --
--- Copyright (C) 2011-2016, Free Software Foundation, Inc. --
--- --
--- GNAT is free software; you can redistribute it and/or modify it under --
--- terms of the GNU General Public License as published by the Free Soft- --
--- ware Foundation; either version 3, or (at your option) any later ver- --
--- sion. GNAT is distributed in the hope that it will be useful, but WITH- --
--- OUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY --
--- or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License --
--- for more details. You should have received a copy of the GNU General --
--- Public License distributed with GNAT; see file COPYING3. If not, go to --
--- http://www.gnu.org/licenses for a complete copy of the license. --
--- --
--- GNAT was originally developed by the GNAT team at New York University. --
--- Extensive contributions were provided by Ada Core Technologies Inc. --
--- --
-------------------------------------------------------------------------------
-
-with SPARK_Xrefs; use SPARK_Xrefs;
-with Types; use Types;
-
-with Ada.IO_Exceptions; use Ada.IO_Exceptions;
-
-procedure Get_SPARK_Xrefs is
- C : Character;
-
- use ASCII;
- -- For CR/LF
-
- Cur_File : Nat;
- -- Dependency number for the current file
-
- Cur_Scope : Nat;
- -- Scope number for the current scope entity
-
- Cur_File_Idx : File_Index;
- -- Index in SPARK_File_Table of the current file
-
- Cur_Scope_Idx : Scope_Index;
- -- Index in SPARK_Scope_Table of the current scope
-
- Name_Str : String (1 .. 32768);
- Name_Len : Natural := 0;
- -- Local string used to store name of File/entity scanned as
- -- Name_Str (1 .. Name_Len).
-
- File_Name : String_Ptr;
- Unit_File_Name : String_Ptr;
-
- -----------------------
- -- Local Subprograms --
- -----------------------
-
- function At_EOL return Boolean;
- -- Skips any spaces, then checks if at the end of a line. If so, returns
- -- True (but does not skip the EOL sequence). If not, then returns False.
-
- procedure Check (C : Character);
- -- Checks that file is positioned at given character, and if so skips past
- -- it, If not, raises Data_Error.
-
- function Get_Nat return Nat;
- -- On entry the file is positioned to a digit. On return, the file is
- -- positioned past the last digit, and the returned result is the decimal
- -- value read. Data_Error is raised for overflow (value greater than
- -- Int'Last), or if the initial character is not a digit.
-
- procedure Get_Name;
- -- On entry the file is positioned to a name. On return, the file is
- -- positioned past the last character, and the name scanned is returned
- -- in Name_Str (1 .. Name_Len).
-
- procedure Skip_EOL;
- -- Called with the current character about to be read being LF or CR. Skips
- -- past CR/LF characters until either a non-CR/LF character is found, or
- -- the end of file is encountered.
-
- procedure Skip_Spaces;
- -- Skips zero or more spaces at the current position, leaving the file
- -- positioned at the first non-blank character (or Types.EOF).
-
- ------------
- -- At_EOL --
- ------------
-
- function At_EOL return Boolean is
- begin
- Skip_Spaces;
- return Nextc = CR or else Nextc = LF;
- end At_EOL;
-
- -----------
- -- Check --
- -----------
-
- procedure Check (C : Character) is
- begin
- if Nextc = C then
- Skipc;
- else
- raise Data_Error;
- end if;
- end Check;
-
- -------------
- -- Get_Nat --
- -------------
-
- function Get_Nat return Nat is
- C : Character := Nextc;
- Val : Nat := 0;
-
- begin
- if C not in '0' .. '9' then
- raise Data_Error;
- end if;
-
- -- Loop to read digits of integer value
-
- loop
- declare
- pragma Unsuppress (Overflow_Check);
- begin
- Val := Val * 10 + (Character'Pos (C) - Character'Pos ('0'));
- end;
-
- Skipc;
- C := Nextc;
-
- exit when C not in '0' .. '9';
- end loop;
-
- return Val;
-
- exception
- when Constraint_Error =>
- raise Data_Error;
- end Get_Nat;
-
- --------------
- -- Get_Name --
- --------------
-
- procedure Get_Name is
- N : Natural := 0;
-
- begin
- while Nextc > ' ' loop
- N := N + 1;
- Name_Str (N) := Getc;
- end loop;
-
- Name_Len := N;
- end Get_Name;
-
- --------------
- -- Skip_EOL --
- --------------
-
- procedure Skip_EOL is
- C : Character;
-
- begin
- loop
- Skipc;
- C := Nextc;
- exit when C /= LF and then C /= CR;
-
- if C = ' ' then
- Skip_Spaces;
- C := Nextc;
- exit when C /= LF and then C /= CR;
- end if;
- end loop;
- end Skip_EOL;
-
- -----------------
- -- Skip_Spaces --
- -----------------
-
- procedure Skip_Spaces is
- begin
- while Nextc = ' ' loop
- Skipc;
- end loop;
- end Skip_Spaces;
-
--- Start of processing for Get_SPARK_Xrefs
-
-begin
- Initialize_SPARK_Tables;
-
- Cur_File := 0;
- Cur_Scope := 0;
- Cur_File_Idx := 1;
- Cur_Scope_Idx := 0;
-
- -- Loop through lines of SPARK cross-reference information
-
- while Nextc = 'F' loop
- Skipc;
-
- C := Getc;
-
- -- Make sure first line is a File line
-
- if SPARK_File_Table.Last = 0 and then C /= 'D' then
- raise Data_Error;
- end if;
-
- -- Otherwise dispatch on type of line
-
- case C is
-
- -- Header entry for scope section
-
- when 'D' =>
-
- -- Complete previous entry if any
-
- if SPARK_File_Table.Last /= 0 then
- SPARK_File_Table.Table (SPARK_File_Table.Last).To_Scope :=
- SPARK_Scope_Table.Last;
- end if;
-
- -- Scan out dependency number and file name
-
- Skip_Spaces;
- Cur_File := Get_Nat;
- Skip_Spaces;
-
- Get_Name;
- File_Name := new String'(Name_Str (1 .. Name_Len));
- Skip_Spaces;
-
- -- Scan out unit file name when present (for subunits)
-
- if Nextc = '-' then
- Skipc;
- Check ('>');
- Skip_Spaces;
- Get_Name;
- Unit_File_Name := new String'(Name_Str (1 .. Name_Len));
-
- else
- Unit_File_Name := null;
- end if;
-
- -- Make new File table entry (will fill in To_Scope later)
-
- SPARK_File_Table.Append (
- (File_Name => File_Name,
- Unit_File_Name => Unit_File_Name,
- File_Num => Cur_File,
- From_Scope => SPARK_Scope_Table.Last + 1,
- To_Scope => 0));
-
- -- Initialize counter for scopes
-
- Cur_Scope := 1;
-
- -- Scope entry
-
- when 'S' =>
- declare
- Spec_File : Nat;
- Spec_Scope : Nat;
- Scope : Nat;
- Line : Nat;
- Col : Nat;
- Typ : Character;
-
- begin
- -- Scan out location
-
- Skip_Spaces;
- Check ('.');
- Scope := Get_Nat;
- Check (' ');
- Line := Get_Nat;
- Typ := Getc;
- Col := Get_Nat;
-
- pragma Assert (Scope = Cur_Scope);
-
- -- Scan out scope entity name
-
- Skip_Spaces;
- Get_Name;
- Skip_Spaces;
-
- if Nextc = '-' then
- Skipc;
- Check ('>');
- Skip_Spaces;
- Spec_File := Get_Nat;
- Check ('.');
- Spec_Scope := Get_Nat;
-
- else
- Spec_File := 0;
- Spec_Scope := 0;
- end if;
-
- -- Make new scope table entry (will fill in From_Xref and
- -- To_Xref later). Initial range (From_Xref .. To_Xref) is
- -- empty for scopes without entities.
-
- SPARK_Scope_Table.Append (
- (Scope_Entity => Empty,
- Scope_Name => new String'(Name_Str (1 .. Name_Len)),
- File_Num => Cur_File,
- Scope_Num => Cur_Scope,
- Spec_File_Num => Spec_File,
- Spec_Scope_Num => Spec_Scope,
- Line => Line,
- Stype => Typ,
- Col => Col,
- From_Xref => 1,
- To_Xref => 0));
- end;
-
- -- Update counter for scopes
-
- Cur_Scope := Cur_Scope + 1;
-
- -- Header entry for cross-ref section
-
- when 'X' =>
-
- -- Scan out dependency number and file name (ignored)
-
- Skip_Spaces;
- Cur_File := Get_Nat;
- Skip_Spaces;
- Get_Name;
-
- -- Update component From_Xref of current file if first reference
- -- in this file.
-
- while SPARK_File_Table.Table (Cur_File_Idx).File_Num /= Cur_File
- loop
- Cur_File_Idx := Cur_File_Idx + 1;
- end loop;
-
- -- Scan out scope entity number and entity name (ignored)
-
- Skip_Spaces;
- Check ('.');
- Cur_Scope := Get_Nat;
- Skip_Spaces;
- Get_Name;
-
- -- Update component To_Xref of previous scope
-
- if Cur_Scope_Idx /= 0 then
- SPARK_Scope_Table.Table (Cur_Scope_Idx).To_Xref :=
- SPARK_Xref_Table.Last;
- end if;
-
- -- Update component From_Xref of current scope
-
- Cur_Scope_Idx := SPARK_File_Table.Table (Cur_File_Idx).From_Scope;
-
- while SPARK_Scope_Table.Table (Cur_Scope_Idx).Scope_Num /=
- Cur_Scope
- loop
- Cur_Scope_Idx := Cur_Scope_Idx + 1;
- end loop;
-
- SPARK_Scope_Table.Table (Cur_Scope_Idx).From_Xref :=
- SPARK_Xref_Table.Last + 1;
-
- -- Cross reference entry
-
- when ' ' =>
- declare
- XR_Entity : String_Ptr;
- XR_Entity_Line : Nat;
- XR_Entity_Col : Nat;
- XR_Entity_Typ : Character;
-
- XR_File : Nat;
- -- Keeps track of the current file (changed by nn|)
-
- XR_Scope : Nat;
- -- Keeps track of the current scope (changed by nn:)
-
- begin
- XR_File := Cur_File;
- XR_Scope := Cur_Scope;
-
- XR_Entity_Line := Get_Nat;
- XR_Entity_Typ := Getc;
- XR_Entity_Col := Get_Nat;
-
- Skip_Spaces;
- Get_Name;
- XR_Entity := new String'(Name_Str (1 .. Name_Len));
-
- -- Initialize to scan items on one line
-
- Skip_Spaces;
-
- -- Loop through cross-references for this entity
-
- loop
- declare
- Line : Nat;
- Col : Nat;
- N : Nat;
- Rtype : Character;
-
- begin
- Skip_Spaces;
-
- if At_EOL then
- Skip_EOL;
- exit when Nextc /= '.';
- Skipc;
- Skip_Spaces;
- end if;
-
- if Nextc = '.' then
- Skipc;
- XR_Scope := Get_Nat;
- Check (':');
-
- else
- N := Get_Nat;
-
- if Nextc = '|' then
- XR_File := N;
- Skipc;
-
- else
- Line := N;
- Rtype := Getc;
- Col := Get_Nat;
-
- pragma Assert
- (Rtype = 'r' or else
- Rtype = 'c' or else
- Rtype = 'm' or else
- Rtype = 's');
-
- SPARK_Xref_Table.Append (
- (Entity_Name => XR_Entity,
- Entity_Line => XR_Entity_Line,
- Etype => XR_Entity_Typ,
- Entity_Col => XR_Entity_Col,
- File_Num => XR_File,
- Scope_Num => XR_Scope,
- Line => Line,
- Rtype => Rtype,
- Col => Col));
- end if;
- end if;
- end;
- end loop;
- end;
-
- -- No other SPARK lines are possible
-
- when others =>
- raise Data_Error;
- end case;
-
- -- For cross reference lines, the EOL character has been skipped already
-
- if C /= ' ' then
- Skip_EOL;
- end if;
- end loop;
-
- -- Here with all Xrefs stored, complete last entries in File/Scope tables
-
- if SPARK_File_Table.Last /= 0 then
- SPARK_File_Table.Table (SPARK_File_Table.Last).To_Scope :=
- SPARK_Scope_Table.Last;
- end if;
-
- if Cur_Scope_Idx /= 0 then
- SPARK_Scope_Table.Table (Cur_Scope_Idx).To_Xref := SPARK_Xref_Table.Last;
- end if;
-end Get_SPARK_Xrefs;
diff --git a/gcc/ada/get_spark_xrefs.ads b/gcc/ada/get_spark_xrefs.ads
deleted file mode 100644
index 22af7edccc2..00000000000
--- a/gcc/ada/get_spark_xrefs.ads
+++ /dev/null
@@ -1,60 +0,0 @@
-------------------------------------------------------------------------------
--- --
--- GNAT COMPILER COMPONENTS --
--- --
--- G E T _ S P A R K _ X R E F S --
--- --
--- S p e c --
--- --
--- Copyright (C) 2011-2013, Free Software Foundation, Inc. --
--- --
--- GNAT is free software; you can redistribute it and/or modify it under --
--- terms of the GNU General Public License as published by the Free Soft- --
--- ware Foundation; either version 3, or (at your option) any later ver- --
--- sion. GNAT is distributed in the hope that it will be useful, but WITH- --
--- OUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY --
--- or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License --
--- for more details. You should have received a copy of the GNU General --
--- Public License distributed with GNAT; see file COPYING3. If not, go to --
--- http://www.gnu.org/licenses for a complete copy of the license. --
--- --
--- GNAT was originally developed by the GNAT team at New York University. --
--- Extensive contributions were provided by Ada Core Technologies Inc. --
--- --
-------------------------------------------------------------------------------
-
--- This package contains the function used to read SPARK cross-reference
--- information from an ALI file and populate the tables defined in package
--- SPARK_Xrefs with the result.
-
-generic
- -- These subprograms provide access to the ALI file. Locating, opening and
- -- providing access to the ALI file is the callers' responsibility.
-
- with function Getc return Character is <>;
- -- Get next character, positioning the ALI file ready to read the following
- -- character (equivalent to calling Nextc, then Skipc). If the end of file
- -- is encountered, the value Types.EOF is returned.
-
- with function Nextc return Character is <>;
- -- Look at the next character, and return it, leaving the position of the
- -- file unchanged, so that a subsequent call to Getc or Nextc will return
- -- this same character. If the file is positioned at the end of file, then
- -- Types.EOF is returned.
-
- with procedure Skipc is <>;
- -- Skip past the current character (which typically was read with Nextc),
- -- and position to the next character, which will be returned by the next
- -- call to Getc or Nextc.
-
-procedure Get_SPARK_Xrefs;
--- Load SPARK cross-reference information from ALI file text format into
--- internal SPARK tables (SPARK_Xrefs.SPARK_Xref_Table,
--- SPARK_Xrefs.SPARK_Scope_Table and SPARK_Xrefs.SPARK_File_Table). On entry
--- the input file is positioned to the initial 'F' of the first SPARK specific
--- line in the ALI file. On return, the file is positioned either to the end
--- of file, or to the first character of the line following the SPARK specific
--- information (which will never start with an 'F').
---
--- If a format error is detected in the input, then an exception is raised
--- (Ada.IO_Exceptions.Data_Error), with the file positioned to the error.
diff --git a/gcc/ada/gnat1drv.adb b/gcc/ada/gnat1drv.adb
index 4bf910bca3e..3e4234bcbd5 100644
--- a/gcc/ada/gnat1drv.adb
+++ b/gcc/ada/gnat1drv.adb
@@ -383,6 +383,15 @@ procedure Gnat1drv is
Relaxed_RM_Semantics := True;
+ if not Generate_CodePeer_Messages then
+
+ -- Suppress compiler warnings by default when generating SCIL for
+ -- CodePeer, except when combined with -gnateC where we do want to
+ -- emit GNAT warnings.
+
+ Warning_Mode := Suppress;
+ end if;
+
-- Disable all simple value propagation. This is an optimization
-- which is valuable for code optimization, and also for generation
-- of compiler warnings, but these are being turned off by default,
@@ -581,7 +590,7 @@ procedure Gnat1drv is
-- problems with subtypes of type Ada.Tags.Dispatch_Table_Wrapper. ???
if Debug_Flag_Dot_T then
- Static_Dispatch_Tables := False;
+ Building_Static_Dispatch_Tables := False;
end if;
-- Flip endian mode if -gnatd8 set
@@ -1180,6 +1189,7 @@ begin
if Compilation_Errors then
Treepr.Tree_Dump;
Post_Compilation_Validation_Checks;
+ Errout.Finalize (Last_Call => True);
Errout.Output_Messages;
Namet.Finalize;
@@ -1190,7 +1200,6 @@ begin
Tree_Gen;
end if;
- Errout.Finalize (Last_Call => True);
Exit_Program (E_Errors);
end if;
diff --git a/gcc/ada/gnat_rm.texi b/gcc/ada/gnat_rm.texi
index b042e2be3e1..0a2b151dffa 100644
--- a/gcc/ada/gnat_rm.texi
+++ b/gcc/ada/gnat_rm.texi
@@ -21,7 +21,7 @@
@copying
@quotation
-GNAT Reference Manual , Oct 14, 2017
+GNAT Reference Manual , Nov 09, 2017
AdaCore
@@ -535,6 +535,7 @@ Program Unit Level Restrictions
* No_Implicit_Loops::
* No_Obsolescent_Features::
* No_Wide_Characters::
+* Static_Dispatch_Tables::
* SPARK_05::
Implementation Advice
@@ -12917,6 +12918,7 @@ other compilation units in the partition.
* No_Implicit_Loops::
* No_Obsolescent_Features::
* No_Wide_Characters::
+* Static_Dispatch_Tables::
* SPARK_05::
@end menu
@@ -13118,7 +13120,7 @@ is set in the spec of a package, it will not apply to its body.
[RM 13.12.1] This restriction checks at compile time that no obsolescent
features are used, as defined in Annex J of the Ada Reference Manual.
-@node No_Wide_Characters,SPARK_05,No_Obsolescent_Features,Program Unit Level Restrictions
+@node No_Wide_Characters,Static_Dispatch_Tables,No_Obsolescent_Features,Program Unit Level Restrictions
@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-wide-characters}@anchor{209}
@subsection No_Wide_Characters
@@ -13132,8 +13134,18 @@ appear, and that no wide or wide wide string or character literals
appear in the program (that is literals representing characters not in
type @code{Character}).
-@node SPARK_05,,No_Wide_Characters,Program Unit Level Restrictions
-@anchor{gnat_rm/standard_and_implementation_defined_restrictions spark-05}@anchor{20a}
+@node Static_Dispatch_Tables,SPARK_05,No_Wide_Characters,Program Unit Level Restrictions
+@anchor{gnat_rm/standard_and_implementation_defined_restrictions static-dispatch-tables}@anchor{20a}
+@subsection Static_Dispatch_Tables
+
+
+@geindex Static_Dispatch_Tables
+
+[GNAT] This restriction checks at compile time that all the artifacts
+associated with dispatch tables can be placed in read-only memory.
+
+@node SPARK_05,,Static_Dispatch_Tables,Program Unit Level Restrictions
+@anchor{gnat_rm/standard_and_implementation_defined_restrictions spark-05}@anchor{20b}
@subsection SPARK_05
@@ -13492,7 +13504,7 @@ violations will be reported for constructs forbidden in SPARK 95,
instead of SPARK 2005.
@node Implementation Advice,Implementation Defined Characteristics,Standard and Implementation Defined Restrictions,Top
-@anchor{gnat_rm/implementation_advice doc}@anchor{20b}@anchor{gnat_rm/implementation_advice implementation-advice}@anchor{a}@anchor{gnat_rm/implementation_advice id1}@anchor{20c}
+@anchor{gnat_rm/implementation_advice doc}@anchor{20c}@anchor{gnat_rm/implementation_advice implementation-advice}@anchor{a}@anchor{gnat_rm/implementation_advice id1}@anchor{20d}
@chapter Implementation Advice
@@ -13589,7 +13601,7 @@ case the text describes what GNAT does and why.
@end menu
@node RM 1 1 3 20 Error Detection,RM 1 1 3 31 Child Units,,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-1-1-3-20-error-detection}@anchor{20d}
+@anchor{gnat_rm/implementation_advice rm-1-1-3-20-error-detection}@anchor{20e}
@section RM 1.1.3(20): Error Detection
@@ -13606,7 +13618,7 @@ or diagnosed at compile time.
@geindex Child Units
@node RM 1 1 3 31 Child Units,RM 1 1 5 12 Bounded Errors,RM 1 1 3 20 Error Detection,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-1-1-3-31-child-units}@anchor{20e}
+@anchor{gnat_rm/implementation_advice rm-1-1-3-31-child-units}@anchor{20f}
@section RM 1.1.3(31): Child Units
@@ -13622,7 +13634,7 @@ Followed.
@geindex Bounded errors
@node RM 1 1 5 12 Bounded Errors,RM 2 8 16 Pragmas,RM 1 1 3 31 Child Units,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-1-1-5-12-bounded-errors}@anchor{20f}
+@anchor{gnat_rm/implementation_advice rm-1-1-5-12-bounded-errors}@anchor{210}
@section RM 1.1.5(12): Bounded Errors
@@ -13639,7 +13651,7 @@ runtime.
@geindex Pragmas
@node RM 2 8 16 Pragmas,RM 2 8 17-19 Pragmas,RM 1 1 5 12 Bounded Errors,Implementation Advice
-@anchor{gnat_rm/implementation_advice id2}@anchor{210}@anchor{gnat_rm/implementation_advice rm-2-8-16-pragmas}@anchor{211}
+@anchor{gnat_rm/implementation_advice id2}@anchor{211}@anchor{gnat_rm/implementation_advice rm-2-8-16-pragmas}@anchor{212}
@section RM 2.8(16): Pragmas
@@ -13752,7 +13764,7 @@ that this advice not be followed. For details see
@ref{7,,Implementation Defined Pragmas}.
@node RM 2 8 17-19 Pragmas,RM 3 5 2 5 Alternative Character Sets,RM 2 8 16 Pragmas,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-2-8-17-19-pragmas}@anchor{212}
+@anchor{gnat_rm/implementation_advice rm-2-8-17-19-pragmas}@anchor{213}
@section RM 2.8(17-19): Pragmas
@@ -13773,14 +13785,14 @@ replacing @code{library_items}."
@end itemize
@end quotation
-See @ref{211,,RM 2.8(16); Pragmas}.
+See @ref{212,,RM 2.8(16); Pragmas}.
@geindex Character Sets
@geindex Alternative Character Sets
@node RM 3 5 2 5 Alternative Character Sets,RM 3 5 4 28 Integer Types,RM 2 8 17-19 Pragmas,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-3-5-2-5-alternative-character-sets}@anchor{213}
+@anchor{gnat_rm/implementation_advice rm-3-5-2-5-alternative-character-sets}@anchor{214}
@section RM 3.5.2(5): Alternative Character Sets
@@ -13808,7 +13820,7 @@ there is no such restriction.
@geindex Integer types
@node RM 3 5 4 28 Integer Types,RM 3 5 4 29 Integer Types,RM 3 5 2 5 Alternative Character Sets,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-3-5-4-28-integer-types}@anchor{214}
+@anchor{gnat_rm/implementation_advice rm-3-5-4-28-integer-types}@anchor{215}
@section RM 3.5.4(28): Integer Types
@@ -13827,7 +13839,7 @@ are supported for convenient interface to C, and so that all hardware
types of the machine are easily available.
@node RM 3 5 4 29 Integer Types,RM 3 5 5 8 Enumeration Values,RM 3 5 4 28 Integer Types,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-3-5-4-29-integer-types}@anchor{215}
+@anchor{gnat_rm/implementation_advice rm-3-5-4-29-integer-types}@anchor{216}
@section RM 3.5.4(29): Integer Types
@@ -13843,7 +13855,7 @@ Followed.
@geindex Enumeration values
@node RM 3 5 5 8 Enumeration Values,RM 3 5 7 17 Float Types,RM 3 5 4 29 Integer Types,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-3-5-5-8-enumeration-values}@anchor{216}
+@anchor{gnat_rm/implementation_advice rm-3-5-5-8-enumeration-values}@anchor{217}
@section RM 3.5.5(8): Enumeration Values
@@ -13863,7 +13875,7 @@ Followed.
@geindex Float types
@node RM 3 5 7 17 Float Types,RM 3 6 2 11 Multidimensional Arrays,RM 3 5 5 8 Enumeration Values,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-3-5-7-17-float-types}@anchor{217}
+@anchor{gnat_rm/implementation_advice rm-3-5-7-17-float-types}@anchor{218}
@section RM 3.5.7(17): Float Types
@@ -13893,7 +13905,7 @@ since this is a software rather than a hardware format.
@geindex multidimensional
@node RM 3 6 2 11 Multidimensional Arrays,RM 9 6 30-31 Duration'Small,RM 3 5 7 17 Float Types,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-3-6-2-11-multidimensional-arrays}@anchor{218}
+@anchor{gnat_rm/implementation_advice rm-3-6-2-11-multidimensional-arrays}@anchor{219}
@section RM 3.6.2(11): Multidimensional Arrays
@@ -13911,7 +13923,7 @@ Followed.
@geindex Duration'Small
@node RM 9 6 30-31 Duration'Small,RM 10 2 1 12 Consistent Representation,RM 3 6 2 11 Multidimensional Arrays,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-9-6-30-31-duration-small}@anchor{219}
+@anchor{gnat_rm/implementation_advice rm-9-6-30-31-duration-small}@anchor{21a}
@section RM 9.6(30-31): Duration'Small
@@ -13932,7 +13944,7 @@ it need not be the same time base as used for @code{Calendar.Clock}."
Followed.
@node RM 10 2 1 12 Consistent Representation,RM 11 4 1 19 Exception Information,RM 9 6 30-31 Duration'Small,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-10-2-1-12-consistent-representation}@anchor{21a}
+@anchor{gnat_rm/implementation_advice rm-10-2-1-12-consistent-representation}@anchor{21b}
@section RM 10.2.1(12): Consistent Representation
@@ -13954,7 +13966,7 @@ advice without severely impacting efficiency of execution.
@geindex Exception information
@node RM 11 4 1 19 Exception Information,RM 11 5 28 Suppression of Checks,RM 10 2 1 12 Consistent Representation,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-11-4-1-19-exception-information}@anchor{21b}
+@anchor{gnat_rm/implementation_advice rm-11-4-1-19-exception-information}@anchor{21c}
@section RM 11.4.1(19): Exception Information
@@ -13985,7 +13997,7 @@ Pragma @code{Discard_Names}.
@geindex suppression of
@node RM 11 5 28 Suppression of Checks,RM 13 1 21-24 Representation Clauses,RM 11 4 1 19 Exception Information,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-11-5-28-suppression-of-checks}@anchor{21c}
+@anchor{gnat_rm/implementation_advice rm-11-5-28-suppression-of-checks}@anchor{21d}
@section RM 11.5(28): Suppression of Checks
@@ -14000,7 +14012,7 @@ Followed.
@geindex Representation clauses
@node RM 13 1 21-24 Representation Clauses,RM 13 2 6-8 Packed Types,RM 11 5 28 Suppression of Checks,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-13-1-21-24-representation-clauses}@anchor{21d}
+@anchor{gnat_rm/implementation_advice rm-13-1-21-24-representation-clauses}@anchor{21e}
@section RM 13.1 (21-24): Representation Clauses
@@ -14049,7 +14061,7 @@ Followed.
@geindex Packed types
@node RM 13 2 6-8 Packed Types,RM 13 3 14-19 Address Clauses,RM 13 1 21-24 Representation Clauses,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-13-2-6-8-packed-types}@anchor{21e}
+@anchor{gnat_rm/implementation_advice rm-13-2-6-8-packed-types}@anchor{21f}
@section RM 13.2(6-8): Packed Types
@@ -14088,7 +14100,7 @@ Followed.
@geindex Address clauses
@node RM 13 3 14-19 Address Clauses,RM 13 3 29-35 Alignment Clauses,RM 13 2 6-8 Packed Types,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-13-3-14-19-address-clauses}@anchor{21f}
+@anchor{gnat_rm/implementation_advice rm-13-3-14-19-address-clauses}@anchor{220}
@section RM 13.3(14-19): Address Clauses
@@ -14141,7 +14153,7 @@ Followed.
@geindex Alignment clauses
@node RM 13 3 29-35 Alignment Clauses,RM 13 3 42-43 Size Clauses,RM 13 3 14-19 Address Clauses,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-13-3-29-35-alignment-clauses}@anchor{220}
+@anchor{gnat_rm/implementation_advice rm-13-3-29-35-alignment-clauses}@anchor{221}
@section RM 13.3(29-35): Alignment Clauses
@@ -14198,7 +14210,7 @@ Followed.
@geindex Size clauses
@node RM 13 3 42-43 Size Clauses,RM 13 3 50-56 Size Clauses,RM 13 3 29-35 Alignment Clauses,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-13-3-42-43-size-clauses}@anchor{221}
+@anchor{gnat_rm/implementation_advice rm-13-3-42-43-size-clauses}@anchor{222}
@section RM 13.3(42-43): Size Clauses
@@ -14216,7 +14228,7 @@ object's @code{Alignment} (if the @code{Alignment} is nonzero)."
Followed.
@node RM 13 3 50-56 Size Clauses,RM 13 3 71-73 Component Size Clauses,RM 13 3 42-43 Size Clauses,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-13-3-50-56-size-clauses}@anchor{222}
+@anchor{gnat_rm/implementation_advice rm-13-3-50-56-size-clauses}@anchor{223}
@section RM 13.3(50-56): Size Clauses
@@ -14267,7 +14279,7 @@ Followed.
@geindex Component_Size clauses
@node RM 13 3 71-73 Component Size Clauses,RM 13 4 9-10 Enumeration Representation Clauses,RM 13 3 50-56 Size Clauses,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-13-3-71-73-component-size-clauses}@anchor{223}
+@anchor{gnat_rm/implementation_advice rm-13-3-71-73-component-size-clauses}@anchor{224}
@section RM 13.3(71-73): Component Size Clauses
@@ -14301,7 +14313,7 @@ Followed.
@geindex enumeration
@node RM 13 4 9-10 Enumeration Representation Clauses,RM 13 5 1 17-22 Record Representation Clauses,RM 13 3 71-73 Component Size Clauses,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-13-4-9-10-enumeration-representation-clauses}@anchor{224}
+@anchor{gnat_rm/implementation_advice rm-13-4-9-10-enumeration-representation-clauses}@anchor{225}
@section RM 13.4(9-10): Enumeration Representation Clauses
@@ -14323,7 +14335,7 @@ Followed.
@geindex records
@node RM 13 5 1 17-22 Record Representation Clauses,RM 13 5 2 5 Storage Place Attributes,RM 13 4 9-10 Enumeration Representation Clauses,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-13-5-1-17-22-record-representation-clauses}@anchor{225}
+@anchor{gnat_rm/implementation_advice rm-13-5-1-17-22-record-representation-clauses}@anchor{226}
@section RM 13.5.1(17-22): Record Representation Clauses
@@ -14383,7 +14395,7 @@ and all mentioned features are implemented.
@geindex Storage place attributes
@node RM 13 5 2 5 Storage Place Attributes,RM 13 5 3 7-8 Bit Ordering,RM 13 5 1 17-22 Record Representation Clauses,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-13-5-2-5-storage-place-attributes}@anchor{226}
+@anchor{gnat_rm/implementation_advice rm-13-5-2-5-storage-place-attributes}@anchor{227}
@section RM 13.5.2(5): Storage Place Attributes
@@ -14403,7 +14415,7 @@ Followed. There are no such components in GNAT.
@geindex Bit ordering
@node RM 13 5 3 7-8 Bit Ordering,RM 13 7 37 Address as Private,RM 13 5 2 5 Storage Place Attributes,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-13-5-3-7-8-bit-ordering}@anchor{227}
+@anchor{gnat_rm/implementation_advice rm-13-5-3-7-8-bit-ordering}@anchor{228}
@section RM 13.5.3(7-8): Bit Ordering
@@ -14423,7 +14435,7 @@ Thus non-default bit ordering is not supported.
@geindex as private type
@node RM 13 7 37 Address as Private,RM 13 7 1 16 Address Operations,RM 13 5 3 7-8 Bit Ordering,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-13-7-37-address-as-private}@anchor{228}
+@anchor{gnat_rm/implementation_advice rm-13-7-37-address-as-private}@anchor{229}
@section RM 13.7(37): Address as Private
@@ -14441,7 +14453,7 @@ Followed.
@geindex operations of
@node RM 13 7 1 16 Address Operations,RM 13 9 14-17 Unchecked Conversion,RM 13 7 37 Address as Private,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-13-7-1-16-address-operations}@anchor{229}
+@anchor{gnat_rm/implementation_advice rm-13-7-1-16-address-operations}@anchor{22a}
@section RM 13.7.1(16): Address Operations
@@ -14459,7 +14471,7 @@ operation raises @code{Program_Error}, since all operations make sense.
@geindex Unchecked conversion
@node RM 13 9 14-17 Unchecked Conversion,RM 13 11 23-25 Implicit Heap Usage,RM 13 7 1 16 Address Operations,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-13-9-14-17-unchecked-conversion}@anchor{22a}
+@anchor{gnat_rm/implementation_advice rm-13-9-14-17-unchecked-conversion}@anchor{22b}
@section RM 13.9(14-17): Unchecked Conversion
@@ -14503,7 +14515,7 @@ Followed.
@geindex implicit
@node RM 13 11 23-25 Implicit Heap Usage,RM 13 11 2 17 Unchecked Deallocation,RM 13 9 14-17 Unchecked Conversion,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-13-11-23-25-implicit-heap-usage}@anchor{22b}
+@anchor{gnat_rm/implementation_advice rm-13-11-23-25-implicit-heap-usage}@anchor{22c}
@section RM 13.11(23-25): Implicit Heap Usage
@@ -14554,7 +14566,7 @@ Followed.
@geindex Unchecked deallocation
@node RM 13 11 2 17 Unchecked Deallocation,RM 13 13 2 17 Stream Oriented Attributes,RM 13 11 23-25 Implicit Heap Usage,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-13-11-2-17-unchecked-deallocation}@anchor{22c}
+@anchor{gnat_rm/implementation_advice rm-13-11-2-17-unchecked-deallocation}@anchor{22d}
@section RM 13.11.2(17): Unchecked Deallocation
@@ -14569,7 +14581,7 @@ Followed.
@geindex Stream oriented attributes
@node RM 13 13 2 17 Stream Oriented Attributes,RM A 1 52 Names of Predefined Numeric Types,RM 13 11 2 17 Unchecked Deallocation,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-13-13-2-17-stream-oriented-attributes}@anchor{22d}
+@anchor{gnat_rm/implementation_advice rm-13-13-2-17-stream-oriented-attributes}@anchor{22e}
@section RM 13.13.2(17): Stream Oriented Attributes
@@ -14624,7 +14636,7 @@ the @emph{GNAT and Libraries} section of the @cite{GNAT User's Guide}.
@end itemize
@node RM A 1 52 Names of Predefined Numeric Types,RM A 3 2 49 Ada Characters Handling,RM 13 13 2 17 Stream Oriented Attributes,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-a-1-52-names-of-predefined-numeric-types}@anchor{22e}
+@anchor{gnat_rm/implementation_advice rm-a-1-52-names-of-predefined-numeric-types}@anchor{22f}
@section RM A.1(52): Names of Predefined Numeric Types
@@ -14642,7 +14654,7 @@ Followed.
@geindex Ada.Characters.Handling
@node RM A 3 2 49 Ada Characters Handling,RM A 4 4 106 Bounded-Length String Handling,RM A 1 52 Names of Predefined Numeric Types,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-a-3-2-49-ada-characters-handling}@anchor{22f}
+@anchor{gnat_rm/implementation_advice rm-a-3-2-49-ada-characters-handling}@anchor{230}
@section RM A.3.2(49): @code{Ada.Characters.Handling}
@@ -14659,7 +14671,7 @@ Followed. GNAT provides no such localized definitions.
@geindex Bounded-length strings
@node RM A 4 4 106 Bounded-Length String Handling,RM A 5 2 46-47 Random Number Generation,RM A 3 2 49 Ada Characters Handling,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-a-4-4-106-bounded-length-string-handling}@anchor{230}
+@anchor{gnat_rm/implementation_advice rm-a-4-4-106-bounded-length-string-handling}@anchor{231}
@section RM A.4.4(106): Bounded-Length String Handling
@@ -14674,7 +14686,7 @@ Followed. No implicit pointers or dynamic allocation are used.
@geindex Random number generation
@node RM A 5 2 46-47 Random Number Generation,RM A 10 7 23 Get_Immediate,RM A 4 4 106 Bounded-Length String Handling,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-a-5-2-46-47-random-number-generation}@anchor{231}
+@anchor{gnat_rm/implementation_advice rm-a-5-2-46-47-random-number-generation}@anchor{232}
@section RM A.5.2(46-47): Random Number Generation
@@ -14703,7 +14715,7 @@ condition here to hold true.
@geindex Get_Immediate
@node RM A 10 7 23 Get_Immediate,RM B 1 39-41 Pragma Export,RM A 5 2 46-47 Random Number Generation,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-a-10-7-23-get-immediate}@anchor{232}
+@anchor{gnat_rm/implementation_advice rm-a-10-7-23-get-immediate}@anchor{233}
@section RM A.10.7(23): @code{Get_Immediate}
@@ -14727,7 +14739,7 @@ this functionality.
@geindex Export
@node RM B 1 39-41 Pragma Export,RM B 2 12-13 Package Interfaces,RM A 10 7 23 Get_Immediate,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-b-1-39-41-pragma-export}@anchor{233}
+@anchor{gnat_rm/implementation_advice rm-b-1-39-41-pragma-export}@anchor{234}
@section RM B.1(39-41): Pragma @code{Export}
@@ -14775,7 +14787,7 @@ Followed.
@geindex Interfaces
@node RM B 2 12-13 Package Interfaces,RM B 3 63-71 Interfacing with C,RM B 1 39-41 Pragma Export,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-b-2-12-13-package-interfaces}@anchor{234}
+@anchor{gnat_rm/implementation_advice rm-b-2-12-13-package-interfaces}@anchor{235}
@section RM B.2(12-13): Package @code{Interfaces}
@@ -14805,7 +14817,7 @@ Followed. GNAT provides all the packages described in this section.
@geindex interfacing with
@node RM B 3 63-71 Interfacing with C,RM B 4 95-98 Interfacing with COBOL,RM B 2 12-13 Package Interfaces,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-b-3-63-71-interfacing-with-c}@anchor{235}
+@anchor{gnat_rm/implementation_advice rm-b-3-63-71-interfacing-with-c}@anchor{236}
@section RM B.3(63-71): Interfacing with C
@@ -14893,7 +14905,7 @@ Followed.
@geindex interfacing with
@node RM B 4 95-98 Interfacing with COBOL,RM B 5 22-26 Interfacing with Fortran,RM B 3 63-71 Interfacing with C,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-b-4-95-98-interfacing-with-cobol}@anchor{236}
+@anchor{gnat_rm/implementation_advice rm-b-4-95-98-interfacing-with-cobol}@anchor{237}
@section RM B.4(95-98): Interfacing with COBOL
@@ -14934,7 +14946,7 @@ Followed.
@geindex interfacing with
@node RM B 5 22-26 Interfacing with Fortran,RM C 1 3-5 Access to Machine Operations,RM B 4 95-98 Interfacing with COBOL,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-b-5-22-26-interfacing-with-fortran}@anchor{237}
+@anchor{gnat_rm/implementation_advice rm-b-5-22-26-interfacing-with-fortran}@anchor{238}
@section RM B.5(22-26): Interfacing with Fortran
@@ -14985,7 +14997,7 @@ Followed.
@geindex Machine operations
@node RM C 1 3-5 Access to Machine Operations,RM C 1 10-16 Access to Machine Operations,RM B 5 22-26 Interfacing with Fortran,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-c-1-3-5-access-to-machine-operations}@anchor{238}
+@anchor{gnat_rm/implementation_advice rm-c-1-3-5-access-to-machine-operations}@anchor{239}
@section RM C.1(3-5): Access to Machine Operations
@@ -15020,7 +15032,7 @@ object that is specified as exported."
Followed.
@node RM C 1 10-16 Access to Machine Operations,RM C 3 28 Interrupt Support,RM C 1 3-5 Access to Machine Operations,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-c-1-10-16-access-to-machine-operations}@anchor{239}
+@anchor{gnat_rm/implementation_advice rm-c-1-10-16-access-to-machine-operations}@anchor{23a}
@section RM C.1(10-16): Access to Machine Operations
@@ -15081,7 +15093,7 @@ Followed on any target supporting such operations.
@geindex Interrupt support
@node RM C 3 28 Interrupt Support,RM C 3 1 20-21 Protected Procedure Handlers,RM C 1 10-16 Access to Machine Operations,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-c-3-28-interrupt-support}@anchor{23a}
+@anchor{gnat_rm/implementation_advice rm-c-3-28-interrupt-support}@anchor{23b}
@section RM C.3(28): Interrupt Support
@@ -15099,7 +15111,7 @@ of interrupt blocking.
@geindex Protected procedure handlers
@node RM C 3 1 20-21 Protected Procedure Handlers,RM C 3 2 25 Package Interrupts,RM C 3 28 Interrupt Support,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-c-3-1-20-21-protected-procedure-handlers}@anchor{23b}
+@anchor{gnat_rm/implementation_advice rm-c-3-1-20-21-protected-procedure-handlers}@anchor{23c}
@section RM C.3.1(20-21): Protected Procedure Handlers
@@ -15125,7 +15137,7 @@ Followed. Compile time warnings are given when possible.
@geindex Interrupts
@node RM C 3 2 25 Package Interrupts,RM C 4 14 Pre-elaboration Requirements,RM C 3 1 20-21 Protected Procedure Handlers,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-c-3-2-25-package-interrupts}@anchor{23c}
+@anchor{gnat_rm/implementation_advice rm-c-3-2-25-package-interrupts}@anchor{23d}
@section RM C.3.2(25): Package @code{Interrupts}
@@ -15143,7 +15155,7 @@ Followed.
@geindex Pre-elaboration requirements
@node RM C 4 14 Pre-elaboration Requirements,RM C 5 8 Pragma Discard_Names,RM C 3 2 25 Package Interrupts,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-c-4-14-pre-elaboration-requirements}@anchor{23d}
+@anchor{gnat_rm/implementation_advice rm-c-4-14-pre-elaboration-requirements}@anchor{23e}
@section RM C.4(14): Pre-elaboration Requirements
@@ -15159,7 +15171,7 @@ Followed. Executable code is generated in some cases, e.g., loops
to initialize large arrays.
@node RM C 5 8 Pragma Discard_Names,RM C 7 2 30 The Package Task_Attributes,RM C 4 14 Pre-elaboration Requirements,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-c-5-8-pragma-discard-names}@anchor{23e}
+@anchor{gnat_rm/implementation_advice rm-c-5-8-pragma-discard-names}@anchor{23f}
@section RM C.5(8): Pragma @code{Discard_Names}
@@ -15177,7 +15189,7 @@ Followed.
@geindex Task_Attributes
@node RM C 7 2 30 The Package Task_Attributes,RM D 3 17 Locking Policies,RM C 5 8 Pragma Discard_Names,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-c-7-2-30-the-package-task-attributes}@anchor{23f}
+@anchor{gnat_rm/implementation_advice rm-c-7-2-30-the-package-task-attributes}@anchor{240}
@section RM C.7.2(30): The Package Task_Attributes
@@ -15198,7 +15210,7 @@ Not followed. This implementation is not targeted to such a domain.
@geindex Locking Policies
@node RM D 3 17 Locking Policies,RM D 4 16 Entry Queuing Policies,RM C 7 2 30 The Package Task_Attributes,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-d-3-17-locking-policies}@anchor{240}
+@anchor{gnat_rm/implementation_advice rm-d-3-17-locking-policies}@anchor{241}
@section RM D.3(17): Locking Policies
@@ -15215,7 +15227,7 @@ whose names (@code{Inheritance_Locking} and
@geindex Entry queuing policies
@node RM D 4 16 Entry Queuing Policies,RM D 6 9-10 Preemptive Abort,RM D 3 17 Locking Policies,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-d-4-16-entry-queuing-policies}@anchor{241}
+@anchor{gnat_rm/implementation_advice rm-d-4-16-entry-queuing-policies}@anchor{242}
@section RM D.4(16): Entry Queuing Policies
@@ -15230,7 +15242,7 @@ Followed. No such implementation-defined queuing policies exist.
@geindex Preemptive abort
@node RM D 6 9-10 Preemptive Abort,RM D 7 21 Tasking Restrictions,RM D 4 16 Entry Queuing Policies,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-d-6-9-10-preemptive-abort}@anchor{242}
+@anchor{gnat_rm/implementation_advice rm-d-6-9-10-preemptive-abort}@anchor{243}
@section RM D.6(9-10): Preemptive Abort
@@ -15256,7 +15268,7 @@ Followed.
@geindex Tasking restrictions
@node RM D 7 21 Tasking Restrictions,RM D 8 47-49 Monotonic Time,RM D 6 9-10 Preemptive Abort,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-d-7-21-tasking-restrictions}@anchor{243}
+@anchor{gnat_rm/implementation_advice rm-d-7-21-tasking-restrictions}@anchor{244}
@section RM D.7(21): Tasking Restrictions
@@ -15275,7 +15287,7 @@ pragma @code{Profile (Restricted)} for more details.
@geindex monotonic
@node RM D 8 47-49 Monotonic Time,RM E 5 28-29 Partition Communication Subsystem,RM D 7 21 Tasking Restrictions,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-d-8-47-49-monotonic-time}@anchor{244}
+@anchor{gnat_rm/implementation_advice rm-d-8-47-49-monotonic-time}@anchor{245}
@section RM D.8(47-49): Monotonic Time
@@ -15310,7 +15322,7 @@ Followed.
@geindex PCS
@node RM E 5 28-29 Partition Communication Subsystem,RM F 7 COBOL Support,RM D 8 47-49 Monotonic Time,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-e-5-28-29-partition-communication-subsystem}@anchor{245}
+@anchor{gnat_rm/implementation_advice rm-e-5-28-29-partition-communication-subsystem}@anchor{246}
@section RM E.5(28-29): Partition Communication Subsystem
@@ -15338,7 +15350,7 @@ GNAT.
@geindex COBOL support
@node RM F 7 COBOL Support,RM F 1 2 Decimal Radix Support,RM E 5 28-29 Partition Communication Subsystem,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-f-7-cobol-support}@anchor{246}
+@anchor{gnat_rm/implementation_advice rm-f-7-cobol-support}@anchor{247}
@section RM F(7): COBOL Support
@@ -15358,7 +15370,7 @@ Followed.
@geindex Decimal radix support
@node RM F 1 2 Decimal Radix Support,RM G Numerics,RM F 7 COBOL Support,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-f-1-2-decimal-radix-support}@anchor{247}
+@anchor{gnat_rm/implementation_advice rm-f-1-2-decimal-radix-support}@anchor{248}
@section RM F.1(2): Decimal Radix Support
@@ -15374,7 +15386,7 @@ representations.
@geindex Numerics
@node RM G Numerics,RM G 1 1 56-58 Complex Types,RM F 1 2 Decimal Radix Support,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-g-numerics}@anchor{248}
+@anchor{gnat_rm/implementation_advice rm-g-numerics}@anchor{249}
@section RM G: Numerics
@@ -15394,7 +15406,7 @@ Followed.
@geindex Complex types
@node RM G 1 1 56-58 Complex Types,RM G 1 2 49 Complex Elementary Functions,RM G Numerics,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-g-1-1-56-58-complex-types}@anchor{249}
+@anchor{gnat_rm/implementation_advice rm-g-1-1-56-58-complex-types}@anchor{24a}
@section RM G.1.1(56-58): Complex Types
@@ -15456,7 +15468,7 @@ Followed.
@geindex Complex elementary functions
@node RM G 1 2 49 Complex Elementary Functions,RM G 2 4 19 Accuracy Requirements,RM G 1 1 56-58 Complex Types,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-g-1-2-49-complex-elementary-functions}@anchor{24a}
+@anchor{gnat_rm/implementation_advice rm-g-1-2-49-complex-elementary-functions}@anchor{24b}
@section RM G.1.2(49): Complex Elementary Functions
@@ -15478,7 +15490,7 @@ Followed.
@geindex Accuracy requirements
@node RM G 2 4 19 Accuracy Requirements,RM G 2 6 15 Complex Arithmetic Accuracy,RM G 1 2 49 Complex Elementary Functions,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-g-2-4-19-accuracy-requirements}@anchor{24b}
+@anchor{gnat_rm/implementation_advice rm-g-2-4-19-accuracy-requirements}@anchor{24c}
@section RM G.2.4(19): Accuracy Requirements
@@ -15502,7 +15514,7 @@ Followed.
@geindex complex arithmetic
@node RM G 2 6 15 Complex Arithmetic Accuracy,RM H 6 15/2 Pragma Partition_Elaboration_Policy,RM G 2 4 19 Accuracy Requirements,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-g-2-6-15-complex-arithmetic-accuracy}@anchor{24c}
+@anchor{gnat_rm/implementation_advice rm-g-2-6-15-complex-arithmetic-accuracy}@anchor{24d}
@section RM G.2.6(15): Complex Arithmetic Accuracy
@@ -15520,7 +15532,7 @@ Followed.
@geindex Sequential elaboration policy
@node RM H 6 15/2 Pragma Partition_Elaboration_Policy,,RM G 2 6 15 Complex Arithmetic Accuracy,Implementation Advice
-@anchor{gnat_rm/implementation_advice rm-h-6-15-2-pragma-partition-elaboration-policy}@anchor{24d}
+@anchor{gnat_rm/implementation_advice rm-h-6-15-2-pragma-partition-elaboration-policy}@anchor{24e}
@section RM H.6(15/2): Pragma Partition_Elaboration_Policy
@@ -15535,7 +15547,7 @@ immediately terminated."
Not followed.
@node Implementation Defined Characteristics,Intrinsic Subprograms,Implementation Advice,Top
-@anchor{gnat_rm/implementation_defined_characteristics implementation-defined-characteristics}@anchor{b}@anchor{gnat_rm/implementation_defined_characteristics doc}@anchor{24e}@anchor{gnat_rm/implementation_defined_characteristics id1}@anchor{24f}
+@anchor{gnat_rm/implementation_defined_characteristics implementation-defined-characteristics}@anchor{b}@anchor{gnat_rm/implementation_defined_characteristics doc}@anchor{24f}@anchor{gnat_rm/implementation_defined_characteristics id1}@anchor{250}
@chapter Implementation Defined Characteristics
@@ -16731,7 +16743,7 @@ When the @code{Pattern} parameter is not the null string, it is interpreted
according to the syntax of regular expressions as defined in the
@code{GNAT.Regexp} package.
-See @ref{250,,GNAT.Regexp (g-regexp.ads)}.
+See @ref{251,,GNAT.Regexp (g-regexp.ads)}.
@itemize *
@@ -17775,7 +17787,7 @@ H.4(27)."
There are no restrictions on pragma @code{Restrictions}.
@node Intrinsic Subprograms,Representation Clauses and Pragmas,Implementation Defined Characteristics,Top
-@anchor{gnat_rm/intrinsic_subprograms doc}@anchor{251}@anchor{gnat_rm/intrinsic_subprograms intrinsic-subprograms}@anchor{c}@anchor{gnat_rm/intrinsic_subprograms id1}@anchor{252}
+@anchor{gnat_rm/intrinsic_subprograms doc}@anchor{252}@anchor{gnat_rm/intrinsic_subprograms intrinsic-subprograms}@anchor{c}@anchor{gnat_rm/intrinsic_subprograms id1}@anchor{253}
@chapter Intrinsic Subprograms
@@ -17813,7 +17825,7 @@ Ada standard does not require Ada compilers to implement this feature.
@end menu
@node Intrinsic Operators,Compilation_ISO_Date,,Intrinsic Subprograms
-@anchor{gnat_rm/intrinsic_subprograms id2}@anchor{253}@anchor{gnat_rm/intrinsic_subprograms intrinsic-operators}@anchor{254}
+@anchor{gnat_rm/intrinsic_subprograms id2}@anchor{254}@anchor{gnat_rm/intrinsic_subprograms intrinsic-operators}@anchor{255}
@section Intrinsic Operators
@@ -17844,7 +17856,7 @@ It is also possible to specify such operators for private types, if the
full views are appropriate arithmetic types.
@node Compilation_ISO_Date,Compilation_Date,Intrinsic Operators,Intrinsic Subprograms
-@anchor{gnat_rm/intrinsic_subprograms id3}@anchor{255}@anchor{gnat_rm/intrinsic_subprograms compilation-iso-date}@anchor{256}
+@anchor{gnat_rm/intrinsic_subprograms id3}@anchor{256}@anchor{gnat_rm/intrinsic_subprograms compilation-iso-date}@anchor{257}
@section Compilation_ISO_Date
@@ -17858,7 +17870,7 @@ application program should simply call the function
the current compilation (in local time format YYYY-MM-DD).
@node Compilation_Date,Compilation_Time,Compilation_ISO_Date,Intrinsic Subprograms
-@anchor{gnat_rm/intrinsic_subprograms compilation-date}@anchor{257}@anchor{gnat_rm/intrinsic_subprograms id4}@anchor{258}
+@anchor{gnat_rm/intrinsic_subprograms compilation-date}@anchor{258}@anchor{gnat_rm/intrinsic_subprograms id4}@anchor{259}
@section Compilation_Date
@@ -17868,7 +17880,7 @@ Same as Compilation_ISO_Date, except the string is in the form
MMM DD YYYY.
@node Compilation_Time,Enclosing_Entity,Compilation_Date,Intrinsic Subprograms
-@anchor{gnat_rm/intrinsic_subprograms compilation-time}@anchor{259}@anchor{gnat_rm/intrinsic_subprograms id5}@anchor{25a}
+@anchor{gnat_rm/intrinsic_subprograms compilation-time}@anchor{25a}@anchor{gnat_rm/intrinsic_subprograms id5}@anchor{25b}
@section Compilation_Time
@@ -17882,7 +17894,7 @@ application program should simply call the function
the current compilation (in local time format HH:MM:SS).
@node Enclosing_Entity,Exception_Information,Compilation_Time,Intrinsic Subprograms
-@anchor{gnat_rm/intrinsic_subprograms id6}@anchor{25b}@anchor{gnat_rm/intrinsic_subprograms enclosing-entity}@anchor{25c}
+@anchor{gnat_rm/intrinsic_subprograms id6}@anchor{25c}@anchor{gnat_rm/intrinsic_subprograms enclosing-entity}@anchor{25d}
@section Enclosing_Entity
@@ -17896,7 +17908,7 @@ application program should simply call the function
the current subprogram, package, task, entry, or protected subprogram.
@node Exception_Information,Exception_Message,Enclosing_Entity,Intrinsic Subprograms
-@anchor{gnat_rm/intrinsic_subprograms id7}@anchor{25d}@anchor{gnat_rm/intrinsic_subprograms exception-information}@anchor{25e}
+@anchor{gnat_rm/intrinsic_subprograms id7}@anchor{25e}@anchor{gnat_rm/intrinsic_subprograms exception-information}@anchor{25f}
@section Exception_Information
@@ -17910,7 +17922,7 @@ so an application program should simply call the function
the exception information associated with the current exception.
@node Exception_Message,Exception_Name,Exception_Information,Intrinsic Subprograms
-@anchor{gnat_rm/intrinsic_subprograms exception-message}@anchor{25f}@anchor{gnat_rm/intrinsic_subprograms id8}@anchor{260}
+@anchor{gnat_rm/intrinsic_subprograms exception-message}@anchor{260}@anchor{gnat_rm/intrinsic_subprograms id8}@anchor{261}
@section Exception_Message
@@ -17924,7 +17936,7 @@ so an application program should simply call the function
the message associated with the current exception.
@node Exception_Name,File,Exception_Message,Intrinsic Subprograms
-@anchor{gnat_rm/intrinsic_subprograms exception-name}@anchor{261}@anchor{gnat_rm/intrinsic_subprograms id9}@anchor{262}
+@anchor{gnat_rm/intrinsic_subprograms exception-name}@anchor{262}@anchor{gnat_rm/intrinsic_subprograms id9}@anchor{263}
@section Exception_Name
@@ -17938,7 +17950,7 @@ so an application program should simply call the function
the name of the current exception.
@node File,Line,Exception_Name,Intrinsic Subprograms
-@anchor{gnat_rm/intrinsic_subprograms id10}@anchor{263}@anchor{gnat_rm/intrinsic_subprograms file}@anchor{264}
+@anchor{gnat_rm/intrinsic_subprograms id10}@anchor{264}@anchor{gnat_rm/intrinsic_subprograms file}@anchor{265}
@section File
@@ -17952,7 +17964,7 @@ application program should simply call the function
file.
@node Line,Shifts and Rotates,File,Intrinsic Subprograms
-@anchor{gnat_rm/intrinsic_subprograms id11}@anchor{265}@anchor{gnat_rm/intrinsic_subprograms line}@anchor{266}
+@anchor{gnat_rm/intrinsic_subprograms id11}@anchor{266}@anchor{gnat_rm/intrinsic_subprograms line}@anchor{267}
@section Line
@@ -17966,7 +17978,7 @@ application program should simply call the function
source line.
@node Shifts and Rotates,Source_Location,Line,Intrinsic Subprograms
-@anchor{gnat_rm/intrinsic_subprograms shifts-and-rotates}@anchor{267}@anchor{gnat_rm/intrinsic_subprograms id12}@anchor{268}
+@anchor{gnat_rm/intrinsic_subprograms shifts-and-rotates}@anchor{268}@anchor{gnat_rm/intrinsic_subprograms id12}@anchor{269}
@section Shifts and Rotates
@@ -18005,7 +18017,7 @@ the Provide_Shift_Operators pragma, which provides the function declarations
and corresponding pragma Import's for all five shift functions.
@node Source_Location,,Shifts and Rotates,Intrinsic Subprograms
-@anchor{gnat_rm/intrinsic_subprograms source-location}@anchor{269}@anchor{gnat_rm/intrinsic_subprograms id13}@anchor{26a}
+@anchor{gnat_rm/intrinsic_subprograms source-location}@anchor{26a}@anchor{gnat_rm/intrinsic_subprograms id13}@anchor{26b}
@section Source_Location
@@ -18019,7 +18031,7 @@ application program should simply call the function
source file location.
@node Representation Clauses and Pragmas,Standard Library Routines,Intrinsic Subprograms,Top
-@anchor{gnat_rm/representation_clauses_and_pragmas representation-clauses-and-pragmas}@anchor{d}@anchor{gnat_rm/representation_clauses_and_pragmas doc}@anchor{26b}@anchor{gnat_rm/representation_clauses_and_pragmas id1}@anchor{26c}
+@anchor{gnat_rm/representation_clauses_and_pragmas representation-clauses-and-pragmas}@anchor{d}@anchor{gnat_rm/representation_clauses_and_pragmas doc}@anchor{26c}@anchor{gnat_rm/representation_clauses_and_pragmas id1}@anchor{26d}
@chapter Representation Clauses and Pragmas
@@ -18065,7 +18077,7 @@ and this section describes the additional capabilities provided.
@end menu
@node Alignment Clauses,Size Clauses,,Representation Clauses and Pragmas
-@anchor{gnat_rm/representation_clauses_and_pragmas id2}@anchor{26d}@anchor{gnat_rm/representation_clauses_and_pragmas alignment-clauses}@anchor{26e}
+@anchor{gnat_rm/representation_clauses_and_pragmas id2}@anchor{26e}@anchor{gnat_rm/representation_clauses_and_pragmas alignment-clauses}@anchor{26f}
@section Alignment Clauses
@@ -18194,7 +18206,7 @@ assumption is non-portable, and other compilers may choose different
alignments for the subtype @code{RS}.
@node Size Clauses,Storage_Size Clauses,Alignment Clauses,Representation Clauses and Pragmas
-@anchor{gnat_rm/representation_clauses_and_pragmas id3}@anchor{26f}@anchor{gnat_rm/representation_clauses_and_pragmas size-clauses}@anchor{270}
+@anchor{gnat_rm/representation_clauses_and_pragmas id3}@anchor{270}@anchor{gnat_rm/representation_clauses_and_pragmas size-clauses}@anchor{271}
@section Size Clauses
@@ -18271,7 +18283,7 @@ if it is known that a Size value can be accommodated in an object of
type Integer.
@node Storage_Size Clauses,Size of Variant Record Objects,Size Clauses,Representation Clauses and Pragmas
-@anchor{gnat_rm/representation_clauses_and_pragmas storage-size-clauses}@anchor{271}@anchor{gnat_rm/representation_clauses_and_pragmas id4}@anchor{272}
+@anchor{gnat_rm/representation_clauses_and_pragmas storage-size-clauses}@anchor{272}@anchor{gnat_rm/representation_clauses_and_pragmas id4}@anchor{273}
@section Storage_Size Clauses
@@ -18344,7 +18356,7 @@ Of course in practice, there will not be any explicit allocators in the
case of such an access declaration.
@node Size of Variant Record Objects,Biased Representation,Storage_Size Clauses,Representation Clauses and Pragmas
-@anchor{gnat_rm/representation_clauses_and_pragmas id5}@anchor{273}@anchor{gnat_rm/representation_clauses_and_pragmas size-of-variant-record-objects}@anchor{274}
+@anchor{gnat_rm/representation_clauses_and_pragmas id5}@anchor{274}@anchor{gnat_rm/representation_clauses_and_pragmas size-of-variant-record-objects}@anchor{275}
@section Size of Variant Record Objects
@@ -18454,7 +18466,7 @@ the maximum size, regardless of the current variant value, the
variant value.
@node Biased Representation,Value_Size and Object_Size Clauses,Size of Variant Record Objects,Representation Clauses and Pragmas
-@anchor{gnat_rm/representation_clauses_and_pragmas id6}@anchor{275}@anchor{gnat_rm/representation_clauses_and_pragmas biased-representation}@anchor{276}
+@anchor{gnat_rm/representation_clauses_and_pragmas id6}@anchor{276}@anchor{gnat_rm/representation_clauses_and_pragmas biased-representation}@anchor{277}
@section Biased Representation
@@ -18492,7 +18504,7 @@ biased representation can be used for all discrete types except for
enumeration types for which a representation clause is given.
@node Value_Size and Object_Size Clauses,Component_Size Clauses,Biased Representation,Representation Clauses and Pragmas
-@anchor{gnat_rm/representation_clauses_and_pragmas id7}@anchor{277}@anchor{gnat_rm/representation_clauses_and_pragmas value-size-and-object-size-clauses}@anchor{278}
+@anchor{gnat_rm/representation_clauses_and_pragmas id7}@anchor{278}@anchor{gnat_rm/representation_clauses_and_pragmas value-size-and-object-size-clauses}@anchor{279}
@section Value_Size and Object_Size Clauses
@@ -18799,7 +18811,7 @@ definition clause forces biased representation. This
warning can be turned off using @code{-gnatw.B}.
@node Component_Size Clauses,Bit_Order Clauses,Value_Size and Object_Size Clauses,Representation Clauses and Pragmas
-@anchor{gnat_rm/representation_clauses_and_pragmas id8}@anchor{279}@anchor{gnat_rm/representation_clauses_and_pragmas component-size-clauses}@anchor{27a}
+@anchor{gnat_rm/representation_clauses_and_pragmas id8}@anchor{27a}@anchor{gnat_rm/representation_clauses_and_pragmas component-size-clauses}@anchor{27b}
@section Component_Size Clauses
@@ -18846,7 +18858,7 @@ and a pragma Pack for the same array type. if such duplicate
clauses are given, the pragma Pack will be ignored.
@node Bit_Order Clauses,Effect of Bit_Order on Byte Ordering,Component_Size Clauses,Representation Clauses and Pragmas
-@anchor{gnat_rm/representation_clauses_and_pragmas bit-order-clauses}@anchor{27b}@anchor{gnat_rm/representation_clauses_and_pragmas id9}@anchor{27c}
+@anchor{gnat_rm/representation_clauses_and_pragmas bit-order-clauses}@anchor{27c}@anchor{gnat_rm/representation_clauses_and_pragmas id9}@anchor{27d}
@section Bit_Order Clauses
@@ -18952,7 +18964,7 @@ if desired. The following section contains additional
details regarding the issue of byte ordering.
@node Effect of Bit_Order on Byte Ordering,Pragma Pack for Arrays,Bit_Order Clauses,Representation Clauses and Pragmas
-@anchor{gnat_rm/representation_clauses_and_pragmas id10}@anchor{27d}@anchor{gnat_rm/representation_clauses_and_pragmas effect-of-bit-order-on-byte-ordering}@anchor{27e}
+@anchor{gnat_rm/representation_clauses_and_pragmas id10}@anchor{27e}@anchor{gnat_rm/representation_clauses_and_pragmas effect-of-bit-order-on-byte-ordering}@anchor{27f}
@section Effect of Bit_Order on Byte Ordering
@@ -19209,7 +19221,7 @@ to set the boolean constant @code{Master_Byte_First} in
an appropriate manner.
@node Pragma Pack for Arrays,Pragma Pack for Records,Effect of Bit_Order on Byte Ordering,Representation Clauses and Pragmas
-@anchor{gnat_rm/representation_clauses_and_pragmas pragma-pack-for-arrays}@anchor{27f}@anchor{gnat_rm/representation_clauses_and_pragmas id11}@anchor{280}
+@anchor{gnat_rm/representation_clauses_and_pragmas pragma-pack-for-arrays}@anchor{280}@anchor{gnat_rm/representation_clauses_and_pragmas id11}@anchor{281}
@section Pragma Pack for Arrays
@@ -19326,7 +19338,7 @@ Here 31-bit packing is achieved as required, and no warning is generated,
since in this case the programmer intention is clear.
@node Pragma Pack for Records,Record Representation Clauses,Pragma Pack for Arrays,Representation Clauses and Pragmas
-@anchor{gnat_rm/representation_clauses_and_pragmas pragma-pack-for-records}@anchor{281}@anchor{gnat_rm/representation_clauses_and_pragmas id12}@anchor{282}
+@anchor{gnat_rm/representation_clauses_and_pragmas pragma-pack-for-records}@anchor{282}@anchor{gnat_rm/representation_clauses_and_pragmas id12}@anchor{283}
@section Pragma Pack for Records
@@ -19411,7 +19423,7 @@ the @code{L6} field is aligned to the next byte boundary, and takes an
integral number of bytes, i.e., 72 bits.
@node Record Representation Clauses,Handling of Records with Holes,Pragma Pack for Records,Representation Clauses and Pragmas
-@anchor{gnat_rm/representation_clauses_and_pragmas id13}@anchor{283}@anchor{gnat_rm/representation_clauses_and_pragmas record-representation-clauses}@anchor{284}
+@anchor{gnat_rm/representation_clauses_and_pragmas id13}@anchor{284}@anchor{gnat_rm/representation_clauses_and_pragmas record-representation-clauses}@anchor{285}
@section Record Representation Clauses
@@ -19489,7 +19501,7 @@ end record;
@end example
@node Handling of Records with Holes,Enumeration Clauses,Record Representation Clauses,Representation Clauses and Pragmas
-@anchor{gnat_rm/representation_clauses_and_pragmas handling-of-records-with-holes}@anchor{285}@anchor{gnat_rm/representation_clauses_and_pragmas id14}@anchor{286}
+@anchor{gnat_rm/representation_clauses_and_pragmas handling-of-records-with-holes}@anchor{286}@anchor{gnat_rm/representation_clauses_and_pragmas id14}@anchor{287}
@section Handling of Records with Holes
@@ -19566,7 +19578,7 @@ for Hrec'Size use 64;
@end example
@node Enumeration Clauses,Address Clauses,Handling of Records with Holes,Representation Clauses and Pragmas
-@anchor{gnat_rm/representation_clauses_and_pragmas enumeration-clauses}@anchor{287}@anchor{gnat_rm/representation_clauses_and_pragmas id15}@anchor{288}
+@anchor{gnat_rm/representation_clauses_and_pragmas enumeration-clauses}@anchor{288}@anchor{gnat_rm/representation_clauses_and_pragmas id15}@anchor{289}
@section Enumeration Clauses
@@ -19609,7 +19621,7 @@ the overhead of converting representation values to the corresponding
positional values, (i.e., the value delivered by the @code{Pos} attribute).
@node Address Clauses,Use of Address Clauses for Memory-Mapped I/O,Enumeration Clauses,Representation Clauses and Pragmas
-@anchor{gnat_rm/representation_clauses_and_pragmas id16}@anchor{289}@anchor{gnat_rm/representation_clauses_and_pragmas address-clauses}@anchor{28a}
+@anchor{gnat_rm/representation_clauses_and_pragmas id16}@anchor{28a}@anchor{gnat_rm/representation_clauses_and_pragmas address-clauses}@anchor{28b}
@section Address Clauses
@@ -19938,7 +19950,7 @@ then the program compiles without the warning and when run will generate
the output @code{X was not clobbered}.
@node Use of Address Clauses for Memory-Mapped I/O,Effect of Convention on Representation,Address Clauses,Representation Clauses and Pragmas
-@anchor{gnat_rm/representation_clauses_and_pragmas id17}@anchor{28b}@anchor{gnat_rm/representation_clauses_and_pragmas use-of-address-clauses-for-memory-mapped-i-o}@anchor{28c}
+@anchor{gnat_rm/representation_clauses_and_pragmas id17}@anchor{28c}@anchor{gnat_rm/representation_clauses_and_pragmas use-of-address-clauses-for-memory-mapped-i-o}@anchor{28d}
@section Use of Address Clauses for Memory-Mapped I/O
@@ -19996,7 +20008,7 @@ provides the pragma @code{Volatile_Full_Access} which can be used in lieu of
pragma @code{Atomic} and will give the additional guarantee.
@node Effect of Convention on Representation,Conventions and Anonymous Access Types,Use of Address Clauses for Memory-Mapped I/O,Representation Clauses and Pragmas
-@anchor{gnat_rm/representation_clauses_and_pragmas id18}@anchor{28d}@anchor{gnat_rm/representation_clauses_and_pragmas effect-of-convention-on-representation}@anchor{28e}
+@anchor{gnat_rm/representation_clauses_and_pragmas id18}@anchor{28e}@anchor{gnat_rm/representation_clauses_and_pragmas effect-of-convention-on-representation}@anchor{28f}
@section Effect of Convention on Representation
@@ -20074,7 +20086,7 @@ when one of these values is read, any nonzero value is treated as True.
@end itemize
@node Conventions and Anonymous Access Types,Determining the Representations chosen by GNAT,Effect of Convention on Representation,Representation Clauses and Pragmas
-@anchor{gnat_rm/representation_clauses_and_pragmas conventions-and-anonymous-access-types}@anchor{28f}@anchor{gnat_rm/representation_clauses_and_pragmas id19}@anchor{290}
+@anchor{gnat_rm/representation_clauses_and_pragmas conventions-and-anonymous-access-types}@anchor{290}@anchor{gnat_rm/representation_clauses_and_pragmas id19}@anchor{291}
@section Conventions and Anonymous Access Types
@@ -20150,7 +20162,7 @@ package ConvComp is
@end example
@node Determining the Representations chosen by GNAT,,Conventions and Anonymous Access Types,Representation Clauses and Pragmas
-@anchor{gnat_rm/representation_clauses_and_pragmas id20}@anchor{291}@anchor{gnat_rm/representation_clauses_and_pragmas determining-the-representations-chosen-by-gnat}@anchor{292}
+@anchor{gnat_rm/representation_clauses_and_pragmas id20}@anchor{292}@anchor{gnat_rm/representation_clauses_and_pragmas determining-the-representations-chosen-by-gnat}@anchor{293}
@section Determining the Representations chosen by GNAT
@@ -20302,7 +20314,7 @@ generated by the compiler into the original source to fix and guarantee
the actual representation to be used.
@node Standard Library Routines,The Implementation of Standard I/O,Representation Clauses and Pragmas,Top
-@anchor{gnat_rm/standard_library_routines standard-library-routines}@anchor{e}@anchor{gnat_rm/standard_library_routines doc}@anchor{293}@anchor{gnat_rm/standard_library_routines id1}@anchor{294}
+@anchor{gnat_rm/standard_library_routines standard-library-routines}@anchor{e}@anchor{gnat_rm/standard_library_routines doc}@anchor{294}@anchor{gnat_rm/standard_library_routines id1}@anchor{295}
@chapter Standard Library Routines
@@ -21128,7 +21140,7 @@ For packages in Interfaces and System, all the RM defined packages are
available in GNAT, see the Ada 2012 RM for full details.
@node The Implementation of Standard I/O,The GNAT Library,Standard Library Routines,Top
-@anchor{gnat_rm/the_implementation_of_standard_i_o the-implementation-of-standard-i-o}@anchor{f}@anchor{gnat_rm/the_implementation_of_standard_i_o doc}@anchor{295}@anchor{gnat_rm/the_implementation_of_standard_i_o id1}@anchor{296}
+@anchor{gnat_rm/the_implementation_of_standard_i_o the-implementation-of-standard-i-o}@anchor{f}@anchor{gnat_rm/the_implementation_of_standard_i_o doc}@anchor{296}@anchor{gnat_rm/the_implementation_of_standard_i_o id1}@anchor{297}
@chapter The Implementation of Standard I/O
@@ -21180,7 +21192,7 @@ these additional facilities are also described in this chapter.
@end menu
@node Standard I/O Packages,FORM Strings,,The Implementation of Standard I/O
-@anchor{gnat_rm/the_implementation_of_standard_i_o standard-i-o-packages}@anchor{297}@anchor{gnat_rm/the_implementation_of_standard_i_o id2}@anchor{298}
+@anchor{gnat_rm/the_implementation_of_standard_i_o standard-i-o-packages}@anchor{298}@anchor{gnat_rm/the_implementation_of_standard_i_o id2}@anchor{299}
@section Standard I/O Packages
@@ -21251,7 +21263,7 @@ flush the common I/O streams and in particular Standard_Output before
elaborating the Ada code.
@node FORM Strings,Direct_IO,Standard I/O Packages,The Implementation of Standard I/O
-@anchor{gnat_rm/the_implementation_of_standard_i_o form-strings}@anchor{299}@anchor{gnat_rm/the_implementation_of_standard_i_o id3}@anchor{29a}
+@anchor{gnat_rm/the_implementation_of_standard_i_o form-strings}@anchor{29a}@anchor{gnat_rm/the_implementation_of_standard_i_o id3}@anchor{29b}
@section FORM Strings
@@ -21277,7 +21289,7 @@ unrecognized keyword appears in a form string, it is silently ignored
and not considered invalid.
@node Direct_IO,Sequential_IO,FORM Strings,The Implementation of Standard I/O
-@anchor{gnat_rm/the_implementation_of_standard_i_o direct-io}@anchor{29b}@anchor{gnat_rm/the_implementation_of_standard_i_o id4}@anchor{29c}
+@anchor{gnat_rm/the_implementation_of_standard_i_o direct-io}@anchor{29c}@anchor{gnat_rm/the_implementation_of_standard_i_o id4}@anchor{29d}
@section Direct_IO
@@ -21297,7 +21309,7 @@ There is no limit on the size of Direct_IO files, they are expanded as
necessary to accommodate whatever records are written to the file.
@node Sequential_IO,Text_IO,Direct_IO,The Implementation of Standard I/O
-@anchor{gnat_rm/the_implementation_of_standard_i_o sequential-io}@anchor{29d}@anchor{gnat_rm/the_implementation_of_standard_i_o id5}@anchor{29e}
+@anchor{gnat_rm/the_implementation_of_standard_i_o sequential-io}@anchor{29e}@anchor{gnat_rm/the_implementation_of_standard_i_o id5}@anchor{29f}
@section Sequential_IO
@@ -21344,7 +21356,7 @@ using Stream_IO, and this is the preferred mechanism. In particular, the
above program fragment rewritten to use Stream_IO will work correctly.
@node Text_IO,Wide_Text_IO,Sequential_IO,The Implementation of Standard I/O
-@anchor{gnat_rm/the_implementation_of_standard_i_o id6}@anchor{29f}@anchor{gnat_rm/the_implementation_of_standard_i_o text-io}@anchor{2a0}
+@anchor{gnat_rm/the_implementation_of_standard_i_o id6}@anchor{2a0}@anchor{gnat_rm/the_implementation_of_standard_i_o text-io}@anchor{2a1}
@section Text_IO
@@ -21427,7 +21439,7 @@ the file.
@end menu
@node Stream Pointer Positioning,Reading and Writing Non-Regular Files,,Text_IO
-@anchor{gnat_rm/the_implementation_of_standard_i_o id7}@anchor{2a1}@anchor{gnat_rm/the_implementation_of_standard_i_o stream-pointer-positioning}@anchor{2a2}
+@anchor{gnat_rm/the_implementation_of_standard_i_o id7}@anchor{2a2}@anchor{gnat_rm/the_implementation_of_standard_i_o stream-pointer-positioning}@anchor{2a3}
@subsection Stream Pointer Positioning
@@ -21463,7 +21475,7 @@ between two Ada files, then the difference may be observable in some
situations.
@node Reading and Writing Non-Regular Files,Get_Immediate,Stream Pointer Positioning,Text_IO
-@anchor{gnat_rm/the_implementation_of_standard_i_o reading-and-writing-non-regular-files}@anchor{2a3}@anchor{gnat_rm/the_implementation_of_standard_i_o id8}@anchor{2a4}
+@anchor{gnat_rm/the_implementation_of_standard_i_o reading-and-writing-non-regular-files}@anchor{2a4}@anchor{gnat_rm/the_implementation_of_standard_i_o id8}@anchor{2a5}
@subsection Reading and Writing Non-Regular Files
@@ -21514,7 +21526,7 @@ to read data past that end of
file indication, until another end of file indication is entered.
@node Get_Immediate,Treating Text_IO Files as Streams,Reading and Writing Non-Regular Files,Text_IO
-@anchor{gnat_rm/the_implementation_of_standard_i_o get-immediate}@anchor{2a5}@anchor{gnat_rm/the_implementation_of_standard_i_o id9}@anchor{2a6}
+@anchor{gnat_rm/the_implementation_of_standard_i_o get-immediate}@anchor{2a6}@anchor{gnat_rm/the_implementation_of_standard_i_o id9}@anchor{2a7}
@subsection Get_Immediate
@@ -21532,7 +21544,7 @@ possible), it is undefined whether the FF character will be treated as a
page mark.
@node Treating Text_IO Files as Streams,Text_IO Extensions,Get_Immediate,Text_IO
-@anchor{gnat_rm/the_implementation_of_standard_i_o id10}@anchor{2a7}@anchor{gnat_rm/the_implementation_of_standard_i_o treating-text-io-files-as-streams}@anchor{2a8}
+@anchor{gnat_rm/the_implementation_of_standard_i_o id10}@anchor{2a8}@anchor{gnat_rm/the_implementation_of_standard_i_o treating-text-io-files-as-streams}@anchor{2a9}
@subsection Treating Text_IO Files as Streams
@@ -21548,7 +21560,7 @@ skipped and the effect is similar to that described above for
@code{Get_Immediate}.
@node Text_IO Extensions,Text_IO Facilities for Unbounded Strings,Treating Text_IO Files as Streams,Text_IO
-@anchor{gnat_rm/the_implementation_of_standard_i_o id11}@anchor{2a9}@anchor{gnat_rm/the_implementation_of_standard_i_o text-io-extensions}@anchor{2aa}
+@anchor{gnat_rm/the_implementation_of_standard_i_o id11}@anchor{2aa}@anchor{gnat_rm/the_implementation_of_standard_i_o text-io-extensions}@anchor{2ab}
@subsection Text_IO Extensions
@@ -21576,7 +21588,7 @@ the string is to be read.
@end itemize
@node Text_IO Facilities for Unbounded Strings,,Text_IO Extensions,Text_IO
-@anchor{gnat_rm/the_implementation_of_standard_i_o text-io-facilities-for-unbounded-strings}@anchor{2ab}@anchor{gnat_rm/the_implementation_of_standard_i_o id12}@anchor{2ac}
+@anchor{gnat_rm/the_implementation_of_standard_i_o text-io-facilities-for-unbounded-strings}@anchor{2ac}@anchor{gnat_rm/the_implementation_of_standard_i_o id12}@anchor{2ad}
@subsection Text_IO Facilities for Unbounded Strings
@@ -21624,7 +21636,7 @@ files @code{a-szuzti.ads} and @code{a-szuzti.adb} provides similar extended
@code{Wide_Wide_Text_IO} functionality for unbounded wide wide strings.
@node Wide_Text_IO,Wide_Wide_Text_IO,Text_IO,The Implementation of Standard I/O
-@anchor{gnat_rm/the_implementation_of_standard_i_o wide-text-io}@anchor{2ad}@anchor{gnat_rm/the_implementation_of_standard_i_o id13}@anchor{2ae}
+@anchor{gnat_rm/the_implementation_of_standard_i_o wide-text-io}@anchor{2ae}@anchor{gnat_rm/the_implementation_of_standard_i_o id13}@anchor{2af}
@section Wide_Text_IO
@@ -21871,12 +21883,12 @@ input also causes Constraint_Error to be raised.
@end menu
@node Stream Pointer Positioning<2>,Reading and Writing Non-Regular Files<2>,,Wide_Text_IO
-@anchor{gnat_rm/the_implementation_of_standard_i_o stream-pointer-positioning-1}@anchor{2af}@anchor{gnat_rm/the_implementation_of_standard_i_o id14}@anchor{2b0}
+@anchor{gnat_rm/the_implementation_of_standard_i_o stream-pointer-positioning-1}@anchor{2b0}@anchor{gnat_rm/the_implementation_of_standard_i_o id14}@anchor{2b1}
@subsection Stream Pointer Positioning
@code{Ada.Wide_Text_IO} is similar to @code{Ada.Text_IO} in its handling
-of stream pointer positioning (@ref{2a0,,Text_IO}). There is one additional
+of stream pointer positioning (@ref{2a1,,Text_IO}). There is one additional
case:
If @code{Ada.Wide_Text_IO.Look_Ahead} reads a character outside the
@@ -21895,7 +21907,7 @@ to a normal program using @code{Wide_Text_IO}. However, this discrepancy
can be observed if the wide text file shares a stream with another file.
@node Reading and Writing Non-Regular Files<2>,,Stream Pointer Positioning<2>,Wide_Text_IO
-@anchor{gnat_rm/the_implementation_of_standard_i_o reading-and-writing-non-regular-files-1}@anchor{2b1}@anchor{gnat_rm/the_implementation_of_standard_i_o id15}@anchor{2b2}
+@anchor{gnat_rm/the_implementation_of_standard_i_o reading-and-writing-non-regular-files-1}@anchor{2b2}@anchor{gnat_rm/the_implementation_of_standard_i_o id15}@anchor{2b3}
@subsection Reading and Writing Non-Regular Files
@@ -21906,7 +21918,7 @@ treated as data characters), and @code{End_Of_Page} always returns
it is possible to read beyond an end of file.
@node Wide_Wide_Text_IO,Stream_IO,Wide_Text_IO,The Implementation of Standard I/O
-@anchor{gnat_rm/the_implementation_of_standard_i_o id16}@anchor{2b3}@anchor{gnat_rm/the_implementation_of_standard_i_o wide-wide-text-io}@anchor{2b4}
+@anchor{gnat_rm/the_implementation_of_standard_i_o id16}@anchor{2b4}@anchor{gnat_rm/the_implementation_of_standard_i_o wide-wide-text-io}@anchor{2b5}
@section Wide_Wide_Text_IO
@@ -22075,12 +22087,12 @@ input also causes Constraint_Error to be raised.
@end menu
@node Stream Pointer Positioning<3>,Reading and Writing Non-Regular Files<3>,,Wide_Wide_Text_IO
-@anchor{gnat_rm/the_implementation_of_standard_i_o stream-pointer-positioning-2}@anchor{2b5}@anchor{gnat_rm/the_implementation_of_standard_i_o id17}@anchor{2b6}
+@anchor{gnat_rm/the_implementation_of_standard_i_o stream-pointer-positioning-2}@anchor{2b6}@anchor{gnat_rm/the_implementation_of_standard_i_o id17}@anchor{2b7}
@subsection Stream Pointer Positioning
@code{Ada.Wide_Wide_Text_IO} is similar to @code{Ada.Text_IO} in its handling
-of stream pointer positioning (@ref{2a0,,Text_IO}). There is one additional
+of stream pointer positioning (@ref{2a1,,Text_IO}). There is one additional
case:
If @code{Ada.Wide_Wide_Text_IO.Look_Ahead} reads a character outside the
@@ -22099,7 +22111,7 @@ to a normal program using @code{Wide_Wide_Text_IO}. However, this discrepancy
can be observed if the wide text file shares a stream with another file.
@node Reading and Writing Non-Regular Files<3>,,Stream Pointer Positioning<3>,Wide_Wide_Text_IO
-@anchor{gnat_rm/the_implementation_of_standard_i_o id18}@anchor{2b7}@anchor{gnat_rm/the_implementation_of_standard_i_o reading-and-writing-non-regular-files-2}@anchor{2b8}
+@anchor{gnat_rm/the_implementation_of_standard_i_o id18}@anchor{2b8}@anchor{gnat_rm/the_implementation_of_standard_i_o reading-and-writing-non-regular-files-2}@anchor{2b9}
@subsection Reading and Writing Non-Regular Files
@@ -22110,7 +22122,7 @@ treated as data characters), and @code{End_Of_Page} always returns
it is possible to read beyond an end of file.
@node Stream_IO,Text Translation,Wide_Wide_Text_IO,The Implementation of Standard I/O
-@anchor{gnat_rm/the_implementation_of_standard_i_o id19}@anchor{2b9}@anchor{gnat_rm/the_implementation_of_standard_i_o stream-io}@anchor{2ba}
+@anchor{gnat_rm/the_implementation_of_standard_i_o id19}@anchor{2ba}@anchor{gnat_rm/the_implementation_of_standard_i_o stream-io}@anchor{2bb}
@section Stream_IO
@@ -22132,7 +22144,7 @@ manner described for stream attributes.
@end itemize
@node Text Translation,Shared Files,Stream_IO,The Implementation of Standard I/O
-@anchor{gnat_rm/the_implementation_of_standard_i_o id20}@anchor{2bb}@anchor{gnat_rm/the_implementation_of_standard_i_o text-translation}@anchor{2bc}
+@anchor{gnat_rm/the_implementation_of_standard_i_o id20}@anchor{2bc}@anchor{gnat_rm/the_implementation_of_standard_i_o text-translation}@anchor{2bd}
@section Text Translation
@@ -22166,7 +22178,7 @@ mode. (corresponds to_O_U16TEXT).
@end itemize
@node Shared Files,Filenames encoding,Text Translation,The Implementation of Standard I/O
-@anchor{gnat_rm/the_implementation_of_standard_i_o id21}@anchor{2bd}@anchor{gnat_rm/the_implementation_of_standard_i_o shared-files}@anchor{2be}
+@anchor{gnat_rm/the_implementation_of_standard_i_o id21}@anchor{2be}@anchor{gnat_rm/the_implementation_of_standard_i_o shared-files}@anchor{2bf}
@section Shared Files
@@ -22229,7 +22241,7 @@ heterogeneous input-output. Although this approach will work in GNAT if
for this purpose (using the stream attributes)
@node Filenames encoding,File content encoding,Shared Files,The Implementation of Standard I/O
-@anchor{gnat_rm/the_implementation_of_standard_i_o filenames-encoding}@anchor{2bf}@anchor{gnat_rm/the_implementation_of_standard_i_o id22}@anchor{2c0}
+@anchor{gnat_rm/the_implementation_of_standard_i_o filenames-encoding}@anchor{2c0}@anchor{gnat_rm/the_implementation_of_standard_i_o id22}@anchor{2c1}
@section Filenames encoding
@@ -22269,7 +22281,7 @@ platform. On the other Operating Systems the run-time is supporting
UTF-8 natively.
@node File content encoding,Open Modes,Filenames encoding,The Implementation of Standard I/O
-@anchor{gnat_rm/the_implementation_of_standard_i_o file-content-encoding}@anchor{2c1}@anchor{gnat_rm/the_implementation_of_standard_i_o id23}@anchor{2c2}
+@anchor{gnat_rm/the_implementation_of_standard_i_o file-content-encoding}@anchor{2c2}@anchor{gnat_rm/the_implementation_of_standard_i_o id23}@anchor{2c3}
@section File content encoding
@@ -22302,7 +22314,7 @@ Unicode 8-bit encoding
This encoding is only supported on the Windows platform.
@node Open Modes,Operations on C Streams,File content encoding,The Implementation of Standard I/O
-@anchor{gnat_rm/the_implementation_of_standard_i_o open-modes}@anchor{2c3}@anchor{gnat_rm/the_implementation_of_standard_i_o id24}@anchor{2c4}
+@anchor{gnat_rm/the_implementation_of_standard_i_o open-modes}@anchor{2c4}@anchor{gnat_rm/the_implementation_of_standard_i_o id24}@anchor{2c5}
@section Open Modes
@@ -22405,7 +22417,7 @@ subsequently requires switching from reading to writing or vice-versa,
then the file is reopened in @code{r+} mode to permit the required operation.
@node Operations on C Streams,Interfacing to C Streams,Open Modes,The Implementation of Standard I/O
-@anchor{gnat_rm/the_implementation_of_standard_i_o operations-on-c-streams}@anchor{2c5}@anchor{gnat_rm/the_implementation_of_standard_i_o id25}@anchor{2c6}
+@anchor{gnat_rm/the_implementation_of_standard_i_o operations-on-c-streams}@anchor{2c6}@anchor{gnat_rm/the_implementation_of_standard_i_o id25}@anchor{2c7}
@section Operations on C Streams
@@ -22565,7 +22577,7 @@ end Interfaces.C_Streams;
@end example
@node Interfacing to C Streams,,Operations on C Streams,The Implementation of Standard I/O
-@anchor{gnat_rm/the_implementation_of_standard_i_o interfacing-to-c-streams}@anchor{2c7}@anchor{gnat_rm/the_implementation_of_standard_i_o id26}@anchor{2c8}
+@anchor{gnat_rm/the_implementation_of_standard_i_o interfacing-to-c-streams}@anchor{2c8}@anchor{gnat_rm/the_implementation_of_standard_i_o id26}@anchor{2c9}
@section Interfacing to C Streams
@@ -22658,7 +22670,7 @@ imported from a C program, allowing an Ada file to operate on an
existing C file.
@node The GNAT Library,Interfacing to Other Languages,The Implementation of Standard I/O,Top
-@anchor{gnat_rm/the_gnat_library the-gnat-library}@anchor{10}@anchor{gnat_rm/the_gnat_library doc}@anchor{2c9}@anchor{gnat_rm/the_gnat_library id1}@anchor{2ca}
+@anchor{gnat_rm/the_gnat_library the-gnat-library}@anchor{10}@anchor{gnat_rm/the_gnat_library doc}@anchor{2ca}@anchor{gnat_rm/the_gnat_library id1}@anchor{2cb}
@chapter The GNAT Library
@@ -22850,7 +22862,7 @@ of GNAT, and will generate a warning message.
@end menu
@node Ada Characters Latin_9 a-chlat9 ads,Ada Characters Wide_Latin_1 a-cwila1 ads,,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id2}@anchor{2cb}@anchor{gnat_rm/the_gnat_library ada-characters-latin-9-a-chlat9-ads}@anchor{2cc}
+@anchor{gnat_rm/the_gnat_library id2}@anchor{2cc}@anchor{gnat_rm/the_gnat_library ada-characters-latin-9-a-chlat9-ads}@anchor{2cd}
@section @code{Ada.Characters.Latin_9} (@code{a-chlat9.ads})
@@ -22867,7 +22879,7 @@ is specifically authorized by the Ada Reference Manual
(RM A.3.3(27)).
@node Ada Characters Wide_Latin_1 a-cwila1 ads,Ada Characters Wide_Latin_9 a-cwila1 ads,Ada Characters Latin_9 a-chlat9 ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library ada-characters-wide-latin-1-a-cwila1-ads}@anchor{2cd}@anchor{gnat_rm/the_gnat_library id3}@anchor{2ce}
+@anchor{gnat_rm/the_gnat_library ada-characters-wide-latin-1-a-cwila1-ads}@anchor{2ce}@anchor{gnat_rm/the_gnat_library id3}@anchor{2cf}
@section @code{Ada.Characters.Wide_Latin_1} (@code{a-cwila1.ads})
@@ -22884,7 +22896,7 @@ is specifically authorized by the Ada Reference Manual
(RM A.3.3(27)).
@node Ada Characters Wide_Latin_9 a-cwila1 ads,Ada Characters Wide_Wide_Latin_1 a-chzla1 ads,Ada Characters Wide_Latin_1 a-cwila1 ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id4}@anchor{2cf}@anchor{gnat_rm/the_gnat_library ada-characters-wide-latin-9-a-cwila1-ads}@anchor{2d0}
+@anchor{gnat_rm/the_gnat_library id4}@anchor{2d0}@anchor{gnat_rm/the_gnat_library ada-characters-wide-latin-9-a-cwila1-ads}@anchor{2d1}
@section @code{Ada.Characters.Wide_Latin_9} (@code{a-cwila1.ads})
@@ -22901,7 +22913,7 @@ is specifically authorized by the Ada Reference Manual
(RM A.3.3(27)).
@node Ada Characters Wide_Wide_Latin_1 a-chzla1 ads,Ada Characters Wide_Wide_Latin_9 a-chzla9 ads,Ada Characters Wide_Latin_9 a-cwila1 ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library ada-characters-wide-wide-latin-1-a-chzla1-ads}@anchor{2d1}@anchor{gnat_rm/the_gnat_library id5}@anchor{2d2}
+@anchor{gnat_rm/the_gnat_library ada-characters-wide-wide-latin-1-a-chzla1-ads}@anchor{2d2}@anchor{gnat_rm/the_gnat_library id5}@anchor{2d3}
@section @code{Ada.Characters.Wide_Wide_Latin_1} (@code{a-chzla1.ads})
@@ -22918,7 +22930,7 @@ is specifically authorized by the Ada Reference Manual
(RM A.3.3(27)).
@node Ada Characters Wide_Wide_Latin_9 a-chzla9 ads,Ada Containers Formal_Doubly_Linked_Lists a-cfdlli ads,Ada Characters Wide_Wide_Latin_1 a-chzla1 ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library ada-characters-wide-wide-latin-9-a-chzla9-ads}@anchor{2d3}@anchor{gnat_rm/the_gnat_library id6}@anchor{2d4}
+@anchor{gnat_rm/the_gnat_library ada-characters-wide-wide-latin-9-a-chzla9-ads}@anchor{2d4}@anchor{gnat_rm/the_gnat_library id6}@anchor{2d5}
@section @code{Ada.Characters.Wide_Wide_Latin_9} (@code{a-chzla9.ads})
@@ -22935,7 +22947,7 @@ is specifically authorized by the Ada Reference Manual
(RM A.3.3(27)).
@node Ada Containers Formal_Doubly_Linked_Lists a-cfdlli ads,Ada Containers Formal_Hashed_Maps a-cfhama ads,Ada Characters Wide_Wide_Latin_9 a-chzla9 ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id7}@anchor{2d5}@anchor{gnat_rm/the_gnat_library ada-containers-formal-doubly-linked-lists-a-cfdlli-ads}@anchor{2d6}
+@anchor{gnat_rm/the_gnat_library id7}@anchor{2d6}@anchor{gnat_rm/the_gnat_library ada-containers-formal-doubly-linked-lists-a-cfdlli-ads}@anchor{2d7}
@section @code{Ada.Containers.Formal_Doubly_Linked_Lists} (@code{a-cfdlli.ads})
@@ -22954,7 +22966,7 @@ efficient version than the one defined in the standard. In particular it
does not have the complex overhead required to detect cursor tampering.
@node Ada Containers Formal_Hashed_Maps a-cfhama ads,Ada Containers Formal_Hashed_Sets a-cfhase ads,Ada Containers Formal_Doubly_Linked_Lists a-cfdlli ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id8}@anchor{2d7}@anchor{gnat_rm/the_gnat_library ada-containers-formal-hashed-maps-a-cfhama-ads}@anchor{2d8}
+@anchor{gnat_rm/the_gnat_library id8}@anchor{2d8}@anchor{gnat_rm/the_gnat_library ada-containers-formal-hashed-maps-a-cfhama-ads}@anchor{2d9}
@section @code{Ada.Containers.Formal_Hashed_Maps} (@code{a-cfhama.ads})
@@ -22973,7 +22985,7 @@ efficient version than the one defined in the standard. In particular it
does not have the complex overhead required to detect cursor tampering.
@node Ada Containers Formal_Hashed_Sets a-cfhase ads,Ada Containers Formal_Ordered_Maps a-cforma ads,Ada Containers Formal_Hashed_Maps a-cfhama ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id9}@anchor{2d9}@anchor{gnat_rm/the_gnat_library ada-containers-formal-hashed-sets-a-cfhase-ads}@anchor{2da}
+@anchor{gnat_rm/the_gnat_library id9}@anchor{2da}@anchor{gnat_rm/the_gnat_library ada-containers-formal-hashed-sets-a-cfhase-ads}@anchor{2db}
@section @code{Ada.Containers.Formal_Hashed_Sets} (@code{a-cfhase.ads})
@@ -22992,7 +23004,7 @@ efficient version than the one defined in the standard. In particular it
does not have the complex overhead required to detect cursor tampering.
@node Ada Containers Formal_Ordered_Maps a-cforma ads,Ada Containers Formal_Ordered_Sets a-cforse ads,Ada Containers Formal_Hashed_Sets a-cfhase ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id10}@anchor{2db}@anchor{gnat_rm/the_gnat_library ada-containers-formal-ordered-maps-a-cforma-ads}@anchor{2dc}
+@anchor{gnat_rm/the_gnat_library id10}@anchor{2dc}@anchor{gnat_rm/the_gnat_library ada-containers-formal-ordered-maps-a-cforma-ads}@anchor{2dd}
@section @code{Ada.Containers.Formal_Ordered_Maps} (@code{a-cforma.ads})
@@ -23011,7 +23023,7 @@ efficient version than the one defined in the standard. In particular it
does not have the complex overhead required to detect cursor tampering.
@node Ada Containers Formal_Ordered_Sets a-cforse ads,Ada Containers Formal_Vectors a-cofove ads,Ada Containers Formal_Ordered_Maps a-cforma ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library ada-containers-formal-ordered-sets-a-cforse-ads}@anchor{2dd}@anchor{gnat_rm/the_gnat_library id11}@anchor{2de}
+@anchor{gnat_rm/the_gnat_library ada-containers-formal-ordered-sets-a-cforse-ads}@anchor{2de}@anchor{gnat_rm/the_gnat_library id11}@anchor{2df}
@section @code{Ada.Containers.Formal_Ordered_Sets} (@code{a-cforse.ads})
@@ -23030,7 +23042,7 @@ efficient version than the one defined in the standard. In particular it
does not have the complex overhead required to detect cursor tampering.
@node Ada Containers Formal_Vectors a-cofove ads,Ada Containers Formal_Indefinite_Vectors a-cfinve ads,Ada Containers Formal_Ordered_Sets a-cforse ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id12}@anchor{2df}@anchor{gnat_rm/the_gnat_library ada-containers-formal-vectors-a-cofove-ads}@anchor{2e0}
+@anchor{gnat_rm/the_gnat_library id12}@anchor{2e0}@anchor{gnat_rm/the_gnat_library ada-containers-formal-vectors-a-cofove-ads}@anchor{2e1}
@section @code{Ada.Containers.Formal_Vectors} (@code{a-cofove.ads})
@@ -23049,7 +23061,7 @@ efficient version than the one defined in the standard. In particular it
does not have the complex overhead required to detect cursor tampering.
@node Ada Containers Formal_Indefinite_Vectors a-cfinve ads,Ada Containers Functional_Vectors a-cofuve ads,Ada Containers Formal_Vectors a-cofove ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id13}@anchor{2e1}@anchor{gnat_rm/the_gnat_library ada-containers-formal-indefinite-vectors-a-cfinve-ads}@anchor{2e2}
+@anchor{gnat_rm/the_gnat_library id13}@anchor{2e2}@anchor{gnat_rm/the_gnat_library ada-containers-formal-indefinite-vectors-a-cfinve-ads}@anchor{2e3}
@section @code{Ada.Containers.Formal_Indefinite_Vectors} (@code{a-cfinve.ads})
@@ -23068,7 +23080,7 @@ efficient version than the one defined in the standard. In particular it
does not have the complex overhead required to detect cursor tampering.
@node Ada Containers Functional_Vectors a-cofuve ads,Ada Containers Functional_Sets a-cofuse ads,Ada Containers Formal_Indefinite_Vectors a-cfinve ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id14}@anchor{2e3}@anchor{gnat_rm/the_gnat_library ada-containers-functional-vectors-a-cofuve-ads}@anchor{2e4}
+@anchor{gnat_rm/the_gnat_library id14}@anchor{2e4}@anchor{gnat_rm/the_gnat_library ada-containers-functional-vectors-a-cofuve-ads}@anchor{2e5}
@section @code{Ada.Containers.Functional_Vectors} (@code{a-cofuve.ads})
@@ -23090,7 +23102,7 @@ and annotations, so that they can be removed from the final executable. The
specification of this unit is compatible with SPARK 2014.
@node Ada Containers Functional_Sets a-cofuse ads,Ada Containers Functional_Maps a-cofuma ads,Ada Containers Functional_Vectors a-cofuve ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library ada-containers-functional-sets-a-cofuse-ads}@anchor{2e5}@anchor{gnat_rm/the_gnat_library id15}@anchor{2e6}
+@anchor{gnat_rm/the_gnat_library ada-containers-functional-sets-a-cofuse-ads}@anchor{2e6}@anchor{gnat_rm/the_gnat_library id15}@anchor{2e7}
@section @code{Ada.Containers.Functional_Sets} (@code{a-cofuse.ads})
@@ -23112,7 +23124,7 @@ and annotations, so that they can be removed from the final executable. The
specification of this unit is compatible with SPARK 2014.
@node Ada Containers Functional_Maps a-cofuma ads,Ada Containers Bounded_Holders a-coboho ads,Ada Containers Functional_Sets a-cofuse ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id16}@anchor{2e7}@anchor{gnat_rm/the_gnat_library ada-containers-functional-maps-a-cofuma-ads}@anchor{2e8}
+@anchor{gnat_rm/the_gnat_library id16}@anchor{2e8}@anchor{gnat_rm/the_gnat_library ada-containers-functional-maps-a-cofuma-ads}@anchor{2e9}
@section @code{Ada.Containers.Functional_Maps} (@code{a-cofuma.ads})
@@ -23134,7 +23146,7 @@ and annotations, so that they can be removed from the final executable. The
specification of this unit is compatible with SPARK 2014.
@node Ada Containers Bounded_Holders a-coboho ads,Ada Command_Line Environment a-colien ads,Ada Containers Functional_Maps a-cofuma ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library ada-containers-bounded-holders-a-coboho-ads}@anchor{2e9}@anchor{gnat_rm/the_gnat_library id17}@anchor{2ea}
+@anchor{gnat_rm/the_gnat_library ada-containers-bounded-holders-a-coboho-ads}@anchor{2ea}@anchor{gnat_rm/the_gnat_library id17}@anchor{2eb}
@section @code{Ada.Containers.Bounded_Holders} (@code{a-coboho.ads})
@@ -23146,7 +23158,7 @@ This child of @code{Ada.Containers} defines a modified version of
Indefinite_Holders that avoids heap allocation.
@node Ada Command_Line Environment a-colien ads,Ada Command_Line Remove a-colire ads,Ada Containers Bounded_Holders a-coboho ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library ada-command-line-environment-a-colien-ads}@anchor{2eb}@anchor{gnat_rm/the_gnat_library id18}@anchor{2ec}
+@anchor{gnat_rm/the_gnat_library ada-command-line-environment-a-colien-ads}@anchor{2ec}@anchor{gnat_rm/the_gnat_library id18}@anchor{2ed}
@section @code{Ada.Command_Line.Environment} (@code{a-colien.ads})
@@ -23159,7 +23171,7 @@ provides a mechanism for obtaining environment values on systems
where this concept makes sense.
@node Ada Command_Line Remove a-colire ads,Ada Command_Line Response_File a-clrefi ads,Ada Command_Line Environment a-colien ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id19}@anchor{2ed}@anchor{gnat_rm/the_gnat_library ada-command-line-remove-a-colire-ads}@anchor{2ee}
+@anchor{gnat_rm/the_gnat_library id19}@anchor{2ee}@anchor{gnat_rm/the_gnat_library ada-command-line-remove-a-colire-ads}@anchor{2ef}
@section @code{Ada.Command_Line.Remove} (@code{a-colire.ads})
@@ -23177,7 +23189,7 @@ to further calls on the subprograms in @code{Ada.Command_Line} will not
see the removed argument.
@node Ada Command_Line Response_File a-clrefi ads,Ada Direct_IO C_Streams a-diocst ads,Ada Command_Line Remove a-colire ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id20}@anchor{2ef}@anchor{gnat_rm/the_gnat_library ada-command-line-response-file-a-clrefi-ads}@anchor{2f0}
+@anchor{gnat_rm/the_gnat_library id20}@anchor{2f0}@anchor{gnat_rm/the_gnat_library ada-command-line-response-file-a-clrefi-ads}@anchor{2f1}
@section @code{Ada.Command_Line.Response_File} (@code{a-clrefi.ads})
@@ -23197,7 +23209,7 @@ Using a response file allow passing a set of arguments to an executable longer
than the maximum allowed by the system on the command line.
@node Ada Direct_IO C_Streams a-diocst ads,Ada Exceptions Is_Null_Occurrence a-einuoc ads,Ada Command_Line Response_File a-clrefi ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id21}@anchor{2f1}@anchor{gnat_rm/the_gnat_library ada-direct-io-c-streams-a-diocst-ads}@anchor{2f2}
+@anchor{gnat_rm/the_gnat_library id21}@anchor{2f2}@anchor{gnat_rm/the_gnat_library ada-direct-io-c-streams-a-diocst-ads}@anchor{2f3}
@section @code{Ada.Direct_IO.C_Streams} (@code{a-diocst.ads})
@@ -23212,7 +23224,7 @@ extracted from a file opened on the Ada side, and an Ada file
can be constructed from a stream opened on the C side.
@node Ada Exceptions Is_Null_Occurrence a-einuoc ads,Ada Exceptions Last_Chance_Handler a-elchha ads,Ada Direct_IO C_Streams a-diocst ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id22}@anchor{2f3}@anchor{gnat_rm/the_gnat_library ada-exceptions-is-null-occurrence-a-einuoc-ads}@anchor{2f4}
+@anchor{gnat_rm/the_gnat_library id22}@anchor{2f4}@anchor{gnat_rm/the_gnat_library ada-exceptions-is-null-occurrence-a-einuoc-ads}@anchor{2f5}
@section @code{Ada.Exceptions.Is_Null_Occurrence} (@code{a-einuoc.ads})
@@ -23226,7 +23238,7 @@ exception occurrence (@code{Null_Occurrence}) without raising
an exception.
@node Ada Exceptions Last_Chance_Handler a-elchha ads,Ada Exceptions Traceback a-exctra ads,Ada Exceptions Is_Null_Occurrence a-einuoc ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id23}@anchor{2f5}@anchor{gnat_rm/the_gnat_library ada-exceptions-last-chance-handler-a-elchha-ads}@anchor{2f6}
+@anchor{gnat_rm/the_gnat_library id23}@anchor{2f6}@anchor{gnat_rm/the_gnat_library ada-exceptions-last-chance-handler-a-elchha-ads}@anchor{2f7}
@section @code{Ada.Exceptions.Last_Chance_Handler} (@code{a-elchha.ads})
@@ -23240,7 +23252,7 @@ exceptions (hence the name last chance), and perform clean ups before
terminating the program. Note that this subprogram never returns.
@node Ada Exceptions Traceback a-exctra ads,Ada Sequential_IO C_Streams a-siocst ads,Ada Exceptions Last_Chance_Handler a-elchha ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library ada-exceptions-traceback-a-exctra-ads}@anchor{2f7}@anchor{gnat_rm/the_gnat_library id24}@anchor{2f8}
+@anchor{gnat_rm/the_gnat_library ada-exceptions-traceback-a-exctra-ads}@anchor{2f8}@anchor{gnat_rm/the_gnat_library id24}@anchor{2f9}
@section @code{Ada.Exceptions.Traceback} (@code{a-exctra.ads})
@@ -23253,7 +23265,7 @@ give a traceback array of addresses based on an exception
occurrence.
@node Ada Sequential_IO C_Streams a-siocst ads,Ada Streams Stream_IO C_Streams a-ssicst ads,Ada Exceptions Traceback a-exctra ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library ada-sequential-io-c-streams-a-siocst-ads}@anchor{2f9}@anchor{gnat_rm/the_gnat_library id25}@anchor{2fa}
+@anchor{gnat_rm/the_gnat_library ada-sequential-io-c-streams-a-siocst-ads}@anchor{2fa}@anchor{gnat_rm/the_gnat_library id25}@anchor{2fb}
@section @code{Ada.Sequential_IO.C_Streams} (@code{a-siocst.ads})
@@ -23268,7 +23280,7 @@ extracted from a file opened on the Ada side, and an Ada file
can be constructed from a stream opened on the C side.
@node Ada Streams Stream_IO C_Streams a-ssicst ads,Ada Strings Unbounded Text_IO a-suteio ads,Ada Sequential_IO C_Streams a-siocst ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id26}@anchor{2fb}@anchor{gnat_rm/the_gnat_library ada-streams-stream-io-c-streams-a-ssicst-ads}@anchor{2fc}
+@anchor{gnat_rm/the_gnat_library id26}@anchor{2fc}@anchor{gnat_rm/the_gnat_library ada-streams-stream-io-c-streams-a-ssicst-ads}@anchor{2fd}
@section @code{Ada.Streams.Stream_IO.C_Streams} (@code{a-ssicst.ads})
@@ -23283,7 +23295,7 @@ extracted from a file opened on the Ada side, and an Ada file
can be constructed from a stream opened on the C side.
@node Ada Strings Unbounded Text_IO a-suteio ads,Ada Strings Wide_Unbounded Wide_Text_IO a-swuwti ads,Ada Streams Stream_IO C_Streams a-ssicst ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library ada-strings-unbounded-text-io-a-suteio-ads}@anchor{2fd}@anchor{gnat_rm/the_gnat_library id27}@anchor{2fe}
+@anchor{gnat_rm/the_gnat_library ada-strings-unbounded-text-io-a-suteio-ads}@anchor{2fe}@anchor{gnat_rm/the_gnat_library id27}@anchor{2ff}
@section @code{Ada.Strings.Unbounded.Text_IO} (@code{a-suteio.ads})
@@ -23300,7 +23312,7 @@ strings, avoiding the necessity for an intermediate operation
with ordinary strings.
@node Ada Strings Wide_Unbounded Wide_Text_IO a-swuwti ads,Ada Strings Wide_Wide_Unbounded Wide_Wide_Text_IO a-szuzti ads,Ada Strings Unbounded Text_IO a-suteio ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id28}@anchor{2ff}@anchor{gnat_rm/the_gnat_library ada-strings-wide-unbounded-wide-text-io-a-swuwti-ads}@anchor{300}
+@anchor{gnat_rm/the_gnat_library id28}@anchor{300}@anchor{gnat_rm/the_gnat_library ada-strings-wide-unbounded-wide-text-io-a-swuwti-ads}@anchor{301}
@section @code{Ada.Strings.Wide_Unbounded.Wide_Text_IO} (@code{a-swuwti.ads})
@@ -23317,7 +23329,7 @@ wide strings, avoiding the necessity for an intermediate operation
with ordinary wide strings.
@node Ada Strings Wide_Wide_Unbounded Wide_Wide_Text_IO a-szuzti ads,Ada Text_IO C_Streams a-tiocst ads,Ada Strings Wide_Unbounded Wide_Text_IO a-swuwti ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id29}@anchor{301}@anchor{gnat_rm/the_gnat_library ada-strings-wide-wide-unbounded-wide-wide-text-io-a-szuzti-ads}@anchor{302}
+@anchor{gnat_rm/the_gnat_library id29}@anchor{302}@anchor{gnat_rm/the_gnat_library ada-strings-wide-wide-unbounded-wide-wide-text-io-a-szuzti-ads}@anchor{303}
@section @code{Ada.Strings.Wide_Wide_Unbounded.Wide_Wide_Text_IO} (@code{a-szuzti.ads})
@@ -23334,7 +23346,7 @@ wide wide strings, avoiding the necessity for an intermediate operation
with ordinary wide wide strings.
@node Ada Text_IO C_Streams a-tiocst ads,Ada Text_IO Reset_Standard_Files a-tirsfi ads,Ada Strings Wide_Wide_Unbounded Wide_Wide_Text_IO a-szuzti ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library ada-text-io-c-streams-a-tiocst-ads}@anchor{303}@anchor{gnat_rm/the_gnat_library id30}@anchor{304}
+@anchor{gnat_rm/the_gnat_library ada-text-io-c-streams-a-tiocst-ads}@anchor{304}@anchor{gnat_rm/the_gnat_library id30}@anchor{305}
@section @code{Ada.Text_IO.C_Streams} (@code{a-tiocst.ads})
@@ -23349,7 +23361,7 @@ extracted from a file opened on the Ada side, and an Ada file
can be constructed from a stream opened on the C side.
@node Ada Text_IO Reset_Standard_Files a-tirsfi ads,Ada Wide_Characters Unicode a-wichun ads,Ada Text_IO C_Streams a-tiocst ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library ada-text-io-reset-standard-files-a-tirsfi-ads}@anchor{305}@anchor{gnat_rm/the_gnat_library id31}@anchor{306}
+@anchor{gnat_rm/the_gnat_library ada-text-io-reset-standard-files-a-tirsfi-ads}@anchor{306}@anchor{gnat_rm/the_gnat_library id31}@anchor{307}
@section @code{Ada.Text_IO.Reset_Standard_Files} (@code{a-tirsfi.ads})
@@ -23364,7 +23376,7 @@ execution (for example a standard input file may be redefined to be
interactive).
@node Ada Wide_Characters Unicode a-wichun ads,Ada Wide_Text_IO C_Streams a-wtcstr ads,Ada Text_IO Reset_Standard_Files a-tirsfi ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id32}@anchor{307}@anchor{gnat_rm/the_gnat_library ada-wide-characters-unicode-a-wichun-ads}@anchor{308}
+@anchor{gnat_rm/the_gnat_library id32}@anchor{308}@anchor{gnat_rm/the_gnat_library ada-wide-characters-unicode-a-wichun-ads}@anchor{309}
@section @code{Ada.Wide_Characters.Unicode} (@code{a-wichun.ads})
@@ -23377,7 +23389,7 @@ This package provides subprograms that allow categorization of
Wide_Character values according to Unicode categories.
@node Ada Wide_Text_IO C_Streams a-wtcstr ads,Ada Wide_Text_IO Reset_Standard_Files a-wrstfi ads,Ada Wide_Characters Unicode a-wichun ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library ada-wide-text-io-c-streams-a-wtcstr-ads}@anchor{309}@anchor{gnat_rm/the_gnat_library id33}@anchor{30a}
+@anchor{gnat_rm/the_gnat_library ada-wide-text-io-c-streams-a-wtcstr-ads}@anchor{30a}@anchor{gnat_rm/the_gnat_library id33}@anchor{30b}
@section @code{Ada.Wide_Text_IO.C_Streams} (@code{a-wtcstr.ads})
@@ -23392,7 +23404,7 @@ extracted from a file opened on the Ada side, and an Ada file
can be constructed from a stream opened on the C side.
@node Ada Wide_Text_IO Reset_Standard_Files a-wrstfi ads,Ada Wide_Wide_Characters Unicode a-zchuni ads,Ada Wide_Text_IO C_Streams a-wtcstr ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library ada-wide-text-io-reset-standard-files-a-wrstfi-ads}@anchor{30b}@anchor{gnat_rm/the_gnat_library id34}@anchor{30c}
+@anchor{gnat_rm/the_gnat_library ada-wide-text-io-reset-standard-files-a-wrstfi-ads}@anchor{30c}@anchor{gnat_rm/the_gnat_library id34}@anchor{30d}
@section @code{Ada.Wide_Text_IO.Reset_Standard_Files} (@code{a-wrstfi.ads})
@@ -23407,7 +23419,7 @@ execution (for example a standard input file may be redefined to be
interactive).
@node Ada Wide_Wide_Characters Unicode a-zchuni ads,Ada Wide_Wide_Text_IO C_Streams a-ztcstr ads,Ada Wide_Text_IO Reset_Standard_Files a-wrstfi ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id35}@anchor{30d}@anchor{gnat_rm/the_gnat_library ada-wide-wide-characters-unicode-a-zchuni-ads}@anchor{30e}
+@anchor{gnat_rm/the_gnat_library id35}@anchor{30e}@anchor{gnat_rm/the_gnat_library ada-wide-wide-characters-unicode-a-zchuni-ads}@anchor{30f}
@section @code{Ada.Wide_Wide_Characters.Unicode} (@code{a-zchuni.ads})
@@ -23420,7 +23432,7 @@ This package provides subprograms that allow categorization of
Wide_Wide_Character values according to Unicode categories.
@node Ada Wide_Wide_Text_IO C_Streams a-ztcstr ads,Ada Wide_Wide_Text_IO Reset_Standard_Files a-zrstfi ads,Ada Wide_Wide_Characters Unicode a-zchuni ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id36}@anchor{30f}@anchor{gnat_rm/the_gnat_library ada-wide-wide-text-io-c-streams-a-ztcstr-ads}@anchor{310}
+@anchor{gnat_rm/the_gnat_library id36}@anchor{310}@anchor{gnat_rm/the_gnat_library ada-wide-wide-text-io-c-streams-a-ztcstr-ads}@anchor{311}
@section @code{Ada.Wide_Wide_Text_IO.C_Streams} (@code{a-ztcstr.ads})
@@ -23435,7 +23447,7 @@ extracted from a file opened on the Ada side, and an Ada file
can be constructed from a stream opened on the C side.
@node Ada Wide_Wide_Text_IO Reset_Standard_Files a-zrstfi ads,GNAT Altivec g-altive ads,Ada Wide_Wide_Text_IO C_Streams a-ztcstr ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id37}@anchor{311}@anchor{gnat_rm/the_gnat_library ada-wide-wide-text-io-reset-standard-files-a-zrstfi-ads}@anchor{312}
+@anchor{gnat_rm/the_gnat_library id37}@anchor{312}@anchor{gnat_rm/the_gnat_library ada-wide-wide-text-io-reset-standard-files-a-zrstfi-ads}@anchor{313}
@section @code{Ada.Wide_Wide_Text_IO.Reset_Standard_Files} (@code{a-zrstfi.ads})
@@ -23450,7 +23462,7 @@ change during execution (for example a standard input file may be
redefined to be interactive).
@node GNAT Altivec g-altive ads,GNAT Altivec Conversions g-altcon ads,Ada Wide_Wide_Text_IO Reset_Standard_Files a-zrstfi ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-altivec-g-altive-ads}@anchor{313}@anchor{gnat_rm/the_gnat_library id38}@anchor{314}
+@anchor{gnat_rm/the_gnat_library gnat-altivec-g-altive-ads}@anchor{314}@anchor{gnat_rm/the_gnat_library id38}@anchor{315}
@section @code{GNAT.Altivec} (@code{g-altive.ads})
@@ -23463,7 +23475,7 @@ definitions of constants and types common to all the versions of the
binding.
@node GNAT Altivec Conversions g-altcon ads,GNAT Altivec Vector_Operations g-alveop ads,GNAT Altivec g-altive ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-altivec-conversions-g-altcon-ads}@anchor{315}@anchor{gnat_rm/the_gnat_library id39}@anchor{316}
+@anchor{gnat_rm/the_gnat_library gnat-altivec-conversions-g-altcon-ads}@anchor{316}@anchor{gnat_rm/the_gnat_library id39}@anchor{317}
@section @code{GNAT.Altivec.Conversions} (@code{g-altcon.ads})
@@ -23474,7 +23486,7 @@ binding.
This package provides the Vector/View conversion routines.
@node GNAT Altivec Vector_Operations g-alveop ads,GNAT Altivec Vector_Types g-alvety ads,GNAT Altivec Conversions g-altcon ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-altivec-vector-operations-g-alveop-ads}@anchor{317}@anchor{gnat_rm/the_gnat_library id40}@anchor{318}
+@anchor{gnat_rm/the_gnat_library gnat-altivec-vector-operations-g-alveop-ads}@anchor{318}@anchor{gnat_rm/the_gnat_library id40}@anchor{319}
@section @code{GNAT.Altivec.Vector_Operations} (@code{g-alveop.ads})
@@ -23488,7 +23500,7 @@ library. The hard binding is provided as a separate package. This unit
is common to both bindings.
@node GNAT Altivec Vector_Types g-alvety ads,GNAT Altivec Vector_Views g-alvevi ads,GNAT Altivec Vector_Operations g-alveop ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-altivec-vector-types-g-alvety-ads}@anchor{319}@anchor{gnat_rm/the_gnat_library id41}@anchor{31a}
+@anchor{gnat_rm/the_gnat_library gnat-altivec-vector-types-g-alvety-ads}@anchor{31a}@anchor{gnat_rm/the_gnat_library id41}@anchor{31b}
@section @code{GNAT.Altivec.Vector_Types} (@code{g-alvety.ads})
@@ -23500,7 +23512,7 @@ This package exposes the various vector types part of the Ada binding
to AltiVec facilities.
@node GNAT Altivec Vector_Views g-alvevi ads,GNAT Array_Split g-arrspl ads,GNAT Altivec Vector_Types g-alvety ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-altivec-vector-views-g-alvevi-ads}@anchor{31b}@anchor{gnat_rm/the_gnat_library id42}@anchor{31c}
+@anchor{gnat_rm/the_gnat_library gnat-altivec-vector-views-g-alvevi-ads}@anchor{31c}@anchor{gnat_rm/the_gnat_library id42}@anchor{31d}
@section @code{GNAT.Altivec.Vector_Views} (@code{g-alvevi.ads})
@@ -23515,7 +23527,7 @@ vector elements and provides a simple way to initialize vector
objects.
@node GNAT Array_Split g-arrspl ads,GNAT AWK g-awk ads,GNAT Altivec Vector_Views g-alvevi ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-array-split-g-arrspl-ads}@anchor{31d}@anchor{gnat_rm/the_gnat_library id43}@anchor{31e}
+@anchor{gnat_rm/the_gnat_library gnat-array-split-g-arrspl-ads}@anchor{31e}@anchor{gnat_rm/the_gnat_library id43}@anchor{31f}
@section @code{GNAT.Array_Split} (@code{g-arrspl.ads})
@@ -23528,7 +23540,7 @@ an array wherever the separators appear, and provide direct access
to the resulting slices.
@node GNAT AWK g-awk ads,GNAT Bind_Environment g-binenv ads,GNAT Array_Split g-arrspl ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id44}@anchor{31f}@anchor{gnat_rm/the_gnat_library gnat-awk-g-awk-ads}@anchor{320}
+@anchor{gnat_rm/the_gnat_library id44}@anchor{320}@anchor{gnat_rm/the_gnat_library gnat-awk-g-awk-ads}@anchor{321}
@section @code{GNAT.AWK} (@code{g-awk.ads})
@@ -23543,7 +23555,7 @@ or more files containing formatted data. The file is viewed as a database
where each record is a line and a field is a data element in this line.
@node GNAT Bind_Environment g-binenv ads,GNAT Bounded_Buffers g-boubuf ads,GNAT AWK g-awk ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-bind-environment-g-binenv-ads}@anchor{321}@anchor{gnat_rm/the_gnat_library id45}@anchor{322}
+@anchor{gnat_rm/the_gnat_library gnat-bind-environment-g-binenv-ads}@anchor{322}@anchor{gnat_rm/the_gnat_library id45}@anchor{323}
@section @code{GNAT.Bind_Environment} (@code{g-binenv.ads})
@@ -23556,7 +23568,7 @@ These associations can be specified using the @code{-V} binder command
line switch.
@node GNAT Bounded_Buffers g-boubuf ads,GNAT Bounded_Mailboxes g-boumai ads,GNAT Bind_Environment g-binenv ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id46}@anchor{323}@anchor{gnat_rm/the_gnat_library gnat-bounded-buffers-g-boubuf-ads}@anchor{324}
+@anchor{gnat_rm/the_gnat_library id46}@anchor{324}@anchor{gnat_rm/the_gnat_library gnat-bounded-buffers-g-boubuf-ads}@anchor{325}
@section @code{GNAT.Bounded_Buffers} (@code{g-boubuf.ads})
@@ -23571,7 +23583,7 @@ useful directly or as parts of the implementations of other abstractions,
such as mailboxes.
@node GNAT Bounded_Mailboxes g-boumai ads,GNAT Bubble_Sort g-bubsor ads,GNAT Bounded_Buffers g-boubuf ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id47}@anchor{325}@anchor{gnat_rm/the_gnat_library gnat-bounded-mailboxes-g-boumai-ads}@anchor{326}
+@anchor{gnat_rm/the_gnat_library id47}@anchor{326}@anchor{gnat_rm/the_gnat_library gnat-bounded-mailboxes-g-boumai-ads}@anchor{327}
@section @code{GNAT.Bounded_Mailboxes} (@code{g-boumai.ads})
@@ -23584,7 +23596,7 @@ such as mailboxes.
Provides a thread-safe asynchronous intertask mailbox communication facility.
@node GNAT Bubble_Sort g-bubsor ads,GNAT Bubble_Sort_A g-busora ads,GNAT Bounded_Mailboxes g-boumai ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-bubble-sort-g-bubsor-ads}@anchor{327}@anchor{gnat_rm/the_gnat_library id48}@anchor{328}
+@anchor{gnat_rm/the_gnat_library gnat-bubble-sort-g-bubsor-ads}@anchor{328}@anchor{gnat_rm/the_gnat_library id48}@anchor{329}
@section @code{GNAT.Bubble_Sort} (@code{g-bubsor.ads})
@@ -23599,7 +23611,7 @@ data items. Exchange and comparison procedures are provided by passing
access-to-procedure values.
@node GNAT Bubble_Sort_A g-busora ads,GNAT Bubble_Sort_G g-busorg ads,GNAT Bubble_Sort g-bubsor ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id49}@anchor{329}@anchor{gnat_rm/the_gnat_library gnat-bubble-sort-a-g-busora-ads}@anchor{32a}
+@anchor{gnat_rm/the_gnat_library id49}@anchor{32a}@anchor{gnat_rm/the_gnat_library gnat-bubble-sort-a-g-busora-ads}@anchor{32b}
@section @code{GNAT.Bubble_Sort_A} (@code{g-busora.ads})
@@ -23615,7 +23627,7 @@ access-to-procedure values. This is an older version, retained for
compatibility. Usually @code{GNAT.Bubble_Sort} will be preferable.
@node GNAT Bubble_Sort_G g-busorg ads,GNAT Byte_Order_Mark g-byorma ads,GNAT Bubble_Sort_A g-busora ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-bubble-sort-g-g-busorg-ads}@anchor{32b}@anchor{gnat_rm/the_gnat_library id50}@anchor{32c}
+@anchor{gnat_rm/the_gnat_library gnat-bubble-sort-g-g-busorg-ads}@anchor{32c}@anchor{gnat_rm/the_gnat_library id50}@anchor{32d}
@section @code{GNAT.Bubble_Sort_G} (@code{g-busorg.ads})
@@ -23631,7 +23643,7 @@ if the procedures can be inlined, at the expense of duplicating code for
multiple instantiations.
@node GNAT Byte_Order_Mark g-byorma ads,GNAT Byte_Swapping g-bytswa ads,GNAT Bubble_Sort_G g-busorg ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-byte-order-mark-g-byorma-ads}@anchor{32d}@anchor{gnat_rm/the_gnat_library id51}@anchor{32e}
+@anchor{gnat_rm/the_gnat_library gnat-byte-order-mark-g-byorma-ads}@anchor{32e}@anchor{gnat_rm/the_gnat_library id51}@anchor{32f}
@section @code{GNAT.Byte_Order_Mark} (@code{g-byorma.ads})
@@ -23647,7 +23659,7 @@ the encoding of the string. The routine includes detection of special XML
sequences for various UCS input formats.
@node GNAT Byte_Swapping g-bytswa ads,GNAT Calendar g-calend ads,GNAT Byte_Order_Mark g-byorma ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-byte-swapping-g-bytswa-ads}@anchor{32f}@anchor{gnat_rm/the_gnat_library id52}@anchor{330}
+@anchor{gnat_rm/the_gnat_library gnat-byte-swapping-g-bytswa-ads}@anchor{330}@anchor{gnat_rm/the_gnat_library id52}@anchor{331}
@section @code{GNAT.Byte_Swapping} (@code{g-bytswa.ads})
@@ -23661,7 +23673,7 @@ General routines for swapping the bytes in 2-, 4-, and 8-byte quantities.
Machine-specific implementations are available in some cases.
@node GNAT Calendar g-calend ads,GNAT Calendar Time_IO g-catiio ads,GNAT Byte_Swapping g-bytswa ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-calendar-g-calend-ads}@anchor{331}@anchor{gnat_rm/the_gnat_library id53}@anchor{332}
+@anchor{gnat_rm/the_gnat_library gnat-calendar-g-calend-ads}@anchor{332}@anchor{gnat_rm/the_gnat_library id53}@anchor{333}
@section @code{GNAT.Calendar} (@code{g-calend.ads})
@@ -23675,7 +23687,7 @@ Also provides conversion of @code{Ada.Calendar.Time} values to and from the
C @code{timeval} format.
@node GNAT Calendar Time_IO g-catiio ads,GNAT CRC32 g-crc32 ads,GNAT Calendar g-calend ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id54}@anchor{333}@anchor{gnat_rm/the_gnat_library gnat-calendar-time-io-g-catiio-ads}@anchor{334}
+@anchor{gnat_rm/the_gnat_library id54}@anchor{334}@anchor{gnat_rm/the_gnat_library gnat-calendar-time-io-g-catiio-ads}@anchor{335}
@section @code{GNAT.Calendar.Time_IO} (@code{g-catiio.ads})
@@ -23686,7 +23698,7 @@ C @code{timeval} format.
@geindex GNAT.Calendar.Time_IO (g-catiio.ads)
@node GNAT CRC32 g-crc32 ads,GNAT Case_Util g-casuti ads,GNAT Calendar Time_IO g-catiio ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id55}@anchor{335}@anchor{gnat_rm/the_gnat_library gnat-crc32-g-crc32-ads}@anchor{336}
+@anchor{gnat_rm/the_gnat_library id55}@anchor{336}@anchor{gnat_rm/the_gnat_library gnat-crc32-g-crc32-ads}@anchor{337}
@section @code{GNAT.CRC32} (@code{g-crc32.ads})
@@ -23703,7 +23715,7 @@ of this algorithm see
Aug. 1988. Sarwate, D.V.
@node GNAT Case_Util g-casuti ads,GNAT CGI g-cgi ads,GNAT CRC32 g-crc32 ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id56}@anchor{337}@anchor{gnat_rm/the_gnat_library gnat-case-util-g-casuti-ads}@anchor{338}
+@anchor{gnat_rm/the_gnat_library id56}@anchor{338}@anchor{gnat_rm/the_gnat_library gnat-case-util-g-casuti-ads}@anchor{339}
@section @code{GNAT.Case_Util} (@code{g-casuti.ads})
@@ -23718,7 +23730,7 @@ without the overhead of the full casing tables
in @code{Ada.Characters.Handling}.
@node GNAT CGI g-cgi ads,GNAT CGI Cookie g-cgicoo ads,GNAT Case_Util g-casuti ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id57}@anchor{339}@anchor{gnat_rm/the_gnat_library gnat-cgi-g-cgi-ads}@anchor{33a}
+@anchor{gnat_rm/the_gnat_library id57}@anchor{33a}@anchor{gnat_rm/the_gnat_library gnat-cgi-g-cgi-ads}@anchor{33b}
@section @code{GNAT.CGI} (@code{g-cgi.ads})
@@ -23733,7 +23745,7 @@ builds a table whose index is the key and provides some services to deal
with this table.
@node GNAT CGI Cookie g-cgicoo ads,GNAT CGI Debug g-cgideb ads,GNAT CGI g-cgi ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-cgi-cookie-g-cgicoo-ads}@anchor{33b}@anchor{gnat_rm/the_gnat_library id58}@anchor{33c}
+@anchor{gnat_rm/the_gnat_library gnat-cgi-cookie-g-cgicoo-ads}@anchor{33c}@anchor{gnat_rm/the_gnat_library id58}@anchor{33d}
@section @code{GNAT.CGI.Cookie} (@code{g-cgicoo.ads})
@@ -23748,7 +23760,7 @@ Common Gateway Interface (CGI). It exports services to deal with Web
cookies (piece of information kept in the Web client software).
@node GNAT CGI Debug g-cgideb ads,GNAT Command_Line g-comlin ads,GNAT CGI Cookie g-cgicoo ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-cgi-debug-g-cgideb-ads}@anchor{33d}@anchor{gnat_rm/the_gnat_library id59}@anchor{33e}
+@anchor{gnat_rm/the_gnat_library gnat-cgi-debug-g-cgideb-ads}@anchor{33e}@anchor{gnat_rm/the_gnat_library id59}@anchor{33f}
@section @code{GNAT.CGI.Debug} (@code{g-cgideb.ads})
@@ -23760,7 +23772,7 @@ This is a package to help debugging CGI (Common Gateway Interface)
programs written in Ada.
@node GNAT Command_Line g-comlin ads,GNAT Compiler_Version g-comver ads,GNAT CGI Debug g-cgideb ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id60}@anchor{33f}@anchor{gnat_rm/the_gnat_library gnat-command-line-g-comlin-ads}@anchor{340}
+@anchor{gnat_rm/the_gnat_library id60}@anchor{340}@anchor{gnat_rm/the_gnat_library gnat-command-line-g-comlin-ads}@anchor{341}
@section @code{GNAT.Command_Line} (@code{g-comlin.ads})
@@ -23773,7 +23785,7 @@ including the ability to scan for named switches with optional parameters
and expand file names using wild card notations.
@node GNAT Compiler_Version g-comver ads,GNAT Ctrl_C g-ctrl_c ads,GNAT Command_Line g-comlin ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-compiler-version-g-comver-ads}@anchor{341}@anchor{gnat_rm/the_gnat_library id61}@anchor{342}
+@anchor{gnat_rm/the_gnat_library gnat-compiler-version-g-comver-ads}@anchor{342}@anchor{gnat_rm/the_gnat_library id61}@anchor{343}
@section @code{GNAT.Compiler_Version} (@code{g-comver.ads})
@@ -23791,7 +23803,7 @@ of the compiler if a consistent tool set is used to compile all units
of a partition).
@node GNAT Ctrl_C g-ctrl_c ads,GNAT Current_Exception g-curexc ads,GNAT Compiler_Version g-comver ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-ctrl-c-g-ctrl-c-ads}@anchor{343}@anchor{gnat_rm/the_gnat_library id62}@anchor{344}
+@anchor{gnat_rm/the_gnat_library gnat-ctrl-c-g-ctrl-c-ads}@anchor{344}@anchor{gnat_rm/the_gnat_library id62}@anchor{345}
@section @code{GNAT.Ctrl_C} (@code{g-ctrl_c.ads})
@@ -23802,7 +23814,7 @@ of a partition).
Provides a simple interface to handle Ctrl-C keyboard events.
@node GNAT Current_Exception g-curexc ads,GNAT Debug_Pools g-debpoo ads,GNAT Ctrl_C g-ctrl_c ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id63}@anchor{345}@anchor{gnat_rm/the_gnat_library gnat-current-exception-g-curexc-ads}@anchor{346}
+@anchor{gnat_rm/the_gnat_library id63}@anchor{346}@anchor{gnat_rm/the_gnat_library gnat-current-exception-g-curexc-ads}@anchor{347}
@section @code{GNAT.Current_Exception} (@code{g-curexc.ads})
@@ -23819,7 +23831,7 @@ This is particularly useful in simulating typical facilities for
obtaining information about exceptions provided by Ada 83 compilers.
@node GNAT Debug_Pools g-debpoo ads,GNAT Debug_Utilities g-debuti ads,GNAT Current_Exception g-curexc ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-debug-pools-g-debpoo-ads}@anchor{347}@anchor{gnat_rm/the_gnat_library id64}@anchor{348}
+@anchor{gnat_rm/the_gnat_library gnat-debug-pools-g-debpoo-ads}@anchor{348}@anchor{gnat_rm/the_gnat_library id64}@anchor{349}
@section @code{GNAT.Debug_Pools} (@code{g-debpoo.ads})
@@ -23836,7 +23848,7 @@ problems.
See @code{The GNAT Debug_Pool Facility} section in the @cite{GNAT User's Guide}.
@node GNAT Debug_Utilities g-debuti ads,GNAT Decode_String g-decstr ads,GNAT Debug_Pools g-debpoo ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id65}@anchor{349}@anchor{gnat_rm/the_gnat_library gnat-debug-utilities-g-debuti-ads}@anchor{34a}
+@anchor{gnat_rm/the_gnat_library id65}@anchor{34a}@anchor{gnat_rm/the_gnat_library gnat-debug-utilities-g-debuti-ads}@anchor{34b}
@section @code{GNAT.Debug_Utilities} (@code{g-debuti.ads})
@@ -23849,7 +23861,7 @@ to and from string images of address values. Supports both C and Ada formats
for hexadecimal literals.
@node GNAT Decode_String g-decstr ads,GNAT Decode_UTF8_String g-deutst ads,GNAT Debug_Utilities g-debuti ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-decode-string-g-decstr-ads}@anchor{34b}@anchor{gnat_rm/the_gnat_library id66}@anchor{34c}
+@anchor{gnat_rm/the_gnat_library gnat-decode-string-g-decstr-ads}@anchor{34c}@anchor{gnat_rm/the_gnat_library id66}@anchor{34d}
@section @code{GNAT.Decode_String} (@code{g-decstr.ads})
@@ -23873,7 +23885,7 @@ Useful in conjunction with Unicode character coding. Note there is a
preinstantiation for UTF-8. See next entry.
@node GNAT Decode_UTF8_String g-deutst ads,GNAT Directory_Operations g-dirope ads,GNAT Decode_String g-decstr ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-decode-utf8-string-g-deutst-ads}@anchor{34d}@anchor{gnat_rm/the_gnat_library id67}@anchor{34e}
+@anchor{gnat_rm/the_gnat_library gnat-decode-utf8-string-g-deutst-ads}@anchor{34e}@anchor{gnat_rm/the_gnat_library id67}@anchor{34f}
@section @code{GNAT.Decode_UTF8_String} (@code{g-deutst.ads})
@@ -23894,7 +23906,7 @@ preinstantiation for UTF-8. See next entry.
A preinstantiation of GNAT.Decode_Strings for UTF-8 encoding.
@node GNAT Directory_Operations g-dirope ads,GNAT Directory_Operations Iteration g-diopit ads,GNAT Decode_UTF8_String g-deutst ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-directory-operations-g-dirope-ads}@anchor{34f}@anchor{gnat_rm/the_gnat_library id68}@anchor{350}
+@anchor{gnat_rm/the_gnat_library gnat-directory-operations-g-dirope-ads}@anchor{350}@anchor{gnat_rm/the_gnat_library id68}@anchor{351}
@section @code{GNAT.Directory_Operations} (@code{g-dirope.ads})
@@ -23907,7 +23919,7 @@ the current directory, making new directories, and scanning the files in a
directory.
@node GNAT Directory_Operations Iteration g-diopit ads,GNAT Dynamic_HTables g-dynhta ads,GNAT Directory_Operations g-dirope ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id69}@anchor{351}@anchor{gnat_rm/the_gnat_library gnat-directory-operations-iteration-g-diopit-ads}@anchor{352}
+@anchor{gnat_rm/the_gnat_library id69}@anchor{352}@anchor{gnat_rm/the_gnat_library gnat-directory-operations-iteration-g-diopit-ads}@anchor{353}
@section @code{GNAT.Directory_Operations.Iteration} (@code{g-diopit.ads})
@@ -23919,7 +23931,7 @@ A child unit of GNAT.Directory_Operations providing additional operations
for iterating through directories.
@node GNAT Dynamic_HTables g-dynhta ads,GNAT Dynamic_Tables g-dyntab ads,GNAT Directory_Operations Iteration g-diopit ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id70}@anchor{353}@anchor{gnat_rm/the_gnat_library gnat-dynamic-htables-g-dynhta-ads}@anchor{354}
+@anchor{gnat_rm/the_gnat_library id70}@anchor{354}@anchor{gnat_rm/the_gnat_library gnat-dynamic-htables-g-dynhta-ads}@anchor{355}
@section @code{GNAT.Dynamic_HTables} (@code{g-dynhta.ads})
@@ -23937,7 +23949,7 @@ dynamic instances of the hash table, while an instantiation of
@code{GNAT.HTable} creates a single instance of the hash table.
@node GNAT Dynamic_Tables g-dyntab ads,GNAT Encode_String g-encstr ads,GNAT Dynamic_HTables g-dynhta ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-dynamic-tables-g-dyntab-ads}@anchor{355}@anchor{gnat_rm/the_gnat_library id71}@anchor{356}
+@anchor{gnat_rm/the_gnat_library gnat-dynamic-tables-g-dyntab-ads}@anchor{356}@anchor{gnat_rm/the_gnat_library id71}@anchor{357}
@section @code{GNAT.Dynamic_Tables} (@code{g-dyntab.ads})
@@ -23957,7 +23969,7 @@ dynamic instances of the table, while an instantiation of
@code{GNAT.Table} creates a single instance of the table type.
@node GNAT Encode_String g-encstr ads,GNAT Encode_UTF8_String g-enutst ads,GNAT Dynamic_Tables g-dyntab ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id72}@anchor{357}@anchor{gnat_rm/the_gnat_library gnat-encode-string-g-encstr-ads}@anchor{358}
+@anchor{gnat_rm/the_gnat_library id72}@anchor{358}@anchor{gnat_rm/the_gnat_library gnat-encode-string-g-encstr-ads}@anchor{359}
@section @code{GNAT.Encode_String} (@code{g-encstr.ads})
@@ -23979,7 +23991,7 @@ encoding method. Useful in conjunction with Unicode character coding.
Note there is a preinstantiation for UTF-8. See next entry.
@node GNAT Encode_UTF8_String g-enutst ads,GNAT Exception_Actions g-excact ads,GNAT Encode_String g-encstr ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-encode-utf8-string-g-enutst-ads}@anchor{359}@anchor{gnat_rm/the_gnat_library id73}@anchor{35a}
+@anchor{gnat_rm/the_gnat_library gnat-encode-utf8-string-g-enutst-ads}@anchor{35a}@anchor{gnat_rm/the_gnat_library id73}@anchor{35b}
@section @code{GNAT.Encode_UTF8_String} (@code{g-enutst.ads})
@@ -24000,7 +24012,7 @@ Note there is a preinstantiation for UTF-8. See next entry.
A preinstantiation of GNAT.Encode_Strings for UTF-8 encoding.
@node GNAT Exception_Actions g-excact ads,GNAT Exception_Traces g-exctra ads,GNAT Encode_UTF8_String g-enutst ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-exception-actions-g-excact-ads}@anchor{35b}@anchor{gnat_rm/the_gnat_library id74}@anchor{35c}
+@anchor{gnat_rm/the_gnat_library gnat-exception-actions-g-excact-ads}@anchor{35c}@anchor{gnat_rm/the_gnat_library id74}@anchor{35d}
@section @code{GNAT.Exception_Actions} (@code{g-excact.ads})
@@ -24013,7 +24025,7 @@ for specific exceptions, or when any exception is raised. This
can be used for instance to force a core dump to ease debugging.
@node GNAT Exception_Traces g-exctra ads,GNAT Exceptions g-expect ads,GNAT Exception_Actions g-excact ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-exception-traces-g-exctra-ads}@anchor{35d}@anchor{gnat_rm/the_gnat_library id75}@anchor{35e}
+@anchor{gnat_rm/the_gnat_library gnat-exception-traces-g-exctra-ads}@anchor{35e}@anchor{gnat_rm/the_gnat_library id75}@anchor{35f}
@section @code{GNAT.Exception_Traces} (@code{g-exctra.ads})
@@ -24027,7 +24039,7 @@ Provides an interface allowing to control automatic output upon exception
occurrences.
@node GNAT Exceptions g-expect ads,GNAT Expect g-expect ads,GNAT Exception_Traces g-exctra ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id76}@anchor{35f}@anchor{gnat_rm/the_gnat_library gnat-exceptions-g-expect-ads}@anchor{360}
+@anchor{gnat_rm/the_gnat_library id76}@anchor{360}@anchor{gnat_rm/the_gnat_library gnat-exceptions-g-expect-ads}@anchor{361}
@section @code{GNAT.Exceptions} (@code{g-expect.ads})
@@ -24048,7 +24060,7 @@ predefined exceptions, and for example allow raising
@code{Constraint_Error} with a message from a pure subprogram.
@node GNAT Expect g-expect ads,GNAT Expect TTY g-exptty ads,GNAT Exceptions g-expect ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-expect-g-expect-ads}@anchor{361}@anchor{gnat_rm/the_gnat_library id77}@anchor{362}
+@anchor{gnat_rm/the_gnat_library gnat-expect-g-expect-ads}@anchor{362}@anchor{gnat_rm/the_gnat_library id77}@anchor{363}
@section @code{GNAT.Expect} (@code{g-expect.ads})
@@ -24064,7 +24076,7 @@ It is not implemented for cross ports, and in particular is not
implemented for VxWorks or LynxOS.
@node GNAT Expect TTY g-exptty ads,GNAT Float_Control g-flocon ads,GNAT Expect g-expect ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id78}@anchor{363}@anchor{gnat_rm/the_gnat_library gnat-expect-tty-g-exptty-ads}@anchor{364}
+@anchor{gnat_rm/the_gnat_library id78}@anchor{364}@anchor{gnat_rm/the_gnat_library gnat-expect-tty-g-exptty-ads}@anchor{365}
@section @code{GNAT.Expect.TTY} (@code{g-exptty.ads})
@@ -24076,7 +24088,7 @@ ports. It is not implemented for cross ports, and
in particular is not implemented for VxWorks or LynxOS.
@node GNAT Float_Control g-flocon ads,GNAT Formatted_String g-forstr ads,GNAT Expect TTY g-exptty ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id79}@anchor{365}@anchor{gnat_rm/the_gnat_library gnat-float-control-g-flocon-ads}@anchor{366}
+@anchor{gnat_rm/the_gnat_library id79}@anchor{366}@anchor{gnat_rm/the_gnat_library gnat-float-control-g-flocon-ads}@anchor{367}
@section @code{GNAT.Float_Control} (@code{g-flocon.ads})
@@ -24090,7 +24102,7 @@ library calls may cause this mode to be modified, and the Reset procedure
in this package can be used to reestablish the required mode.
@node GNAT Formatted_String g-forstr ads,GNAT Heap_Sort g-heasor ads,GNAT Float_Control g-flocon ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id80}@anchor{367}@anchor{gnat_rm/the_gnat_library gnat-formatted-string-g-forstr-ads}@anchor{368}
+@anchor{gnat_rm/the_gnat_library id80}@anchor{368}@anchor{gnat_rm/the_gnat_library gnat-formatted-string-g-forstr-ads}@anchor{369}
@section @code{GNAT.Formatted_String} (@code{g-forstr.ads})
@@ -24105,7 +24117,7 @@ derived from Integer, Float or enumerations as values for the
formatted string.
@node GNAT Heap_Sort g-heasor ads,GNAT Heap_Sort_A g-hesora ads,GNAT Formatted_String g-forstr ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-heap-sort-g-heasor-ads}@anchor{369}@anchor{gnat_rm/the_gnat_library id81}@anchor{36a}
+@anchor{gnat_rm/the_gnat_library gnat-heap-sort-g-heasor-ads}@anchor{36a}@anchor{gnat_rm/the_gnat_library id81}@anchor{36b}
@section @code{GNAT.Heap_Sort} (@code{g-heasor.ads})
@@ -24119,7 +24131,7 @@ access-to-procedure values. The algorithm used is a modified heap sort
that performs approximately N*log(N) comparisons in the worst case.
@node GNAT Heap_Sort_A g-hesora ads,GNAT Heap_Sort_G g-hesorg ads,GNAT Heap_Sort g-heasor ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id82}@anchor{36b}@anchor{gnat_rm/the_gnat_library gnat-heap-sort-a-g-hesora-ads}@anchor{36c}
+@anchor{gnat_rm/the_gnat_library id82}@anchor{36c}@anchor{gnat_rm/the_gnat_library gnat-heap-sort-a-g-hesora-ads}@anchor{36d}
@section @code{GNAT.Heap_Sort_A} (@code{g-hesora.ads})
@@ -24135,7 +24147,7 @@ This differs from @code{GNAT.Heap_Sort} in having a less convenient
interface, but may be slightly more efficient.
@node GNAT Heap_Sort_G g-hesorg ads,GNAT HTable g-htable ads,GNAT Heap_Sort_A g-hesora ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id83}@anchor{36d}@anchor{gnat_rm/the_gnat_library gnat-heap-sort-g-g-hesorg-ads}@anchor{36e}
+@anchor{gnat_rm/the_gnat_library id83}@anchor{36e}@anchor{gnat_rm/the_gnat_library gnat-heap-sort-g-g-hesorg-ads}@anchor{36f}
@section @code{GNAT.Heap_Sort_G} (@code{g-hesorg.ads})
@@ -24149,7 +24161,7 @@ if the procedures can be inlined, at the expense of duplicating code for
multiple instantiations.
@node GNAT HTable g-htable ads,GNAT IO g-io ads,GNAT Heap_Sort_G g-hesorg ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id84}@anchor{36f}@anchor{gnat_rm/the_gnat_library gnat-htable-g-htable-ads}@anchor{370}
+@anchor{gnat_rm/the_gnat_library id84}@anchor{370}@anchor{gnat_rm/the_gnat_library gnat-htable-g-htable-ads}@anchor{371}
@section @code{GNAT.HTable} (@code{g-htable.ads})
@@ -24162,7 +24174,7 @@ data. Provides two approaches, one a simple static approach, and the other
allowing arbitrary dynamic hash tables.
@node GNAT IO g-io ads,GNAT IO_Aux g-io_aux ads,GNAT HTable g-htable ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id85}@anchor{371}@anchor{gnat_rm/the_gnat_library gnat-io-g-io-ads}@anchor{372}
+@anchor{gnat_rm/the_gnat_library id85}@anchor{372}@anchor{gnat_rm/the_gnat_library gnat-io-g-io-ads}@anchor{373}
@section @code{GNAT.IO} (@code{g-io.ads})
@@ -24178,7 +24190,7 @@ Standard_Input, and writing characters, strings and integers to either
Standard_Output or Standard_Error.
@node GNAT IO_Aux g-io_aux ads,GNAT Lock_Files g-locfil ads,GNAT IO g-io ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-io-aux-g-io-aux-ads}@anchor{373}@anchor{gnat_rm/the_gnat_library id86}@anchor{374}
+@anchor{gnat_rm/the_gnat_library gnat-io-aux-g-io-aux-ads}@anchor{374}@anchor{gnat_rm/the_gnat_library id86}@anchor{375}
@section @code{GNAT.IO_Aux} (@code{g-io_aux.ads})
@@ -24192,7 +24204,7 @@ Provides some auxiliary functions for use with Text_IO, including a test
for whether a file exists, and functions for reading a line of text.
@node GNAT Lock_Files g-locfil ads,GNAT MBBS_Discrete_Random g-mbdira ads,GNAT IO_Aux g-io_aux ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id87}@anchor{375}@anchor{gnat_rm/the_gnat_library gnat-lock-files-g-locfil-ads}@anchor{376}
+@anchor{gnat_rm/the_gnat_library id87}@anchor{376}@anchor{gnat_rm/the_gnat_library gnat-lock-files-g-locfil-ads}@anchor{377}
@section @code{GNAT.Lock_Files} (@code{g-locfil.ads})
@@ -24206,7 +24218,7 @@ Provides a general interface for using files as locks. Can be used for
providing program level synchronization.
@node GNAT MBBS_Discrete_Random g-mbdira ads,GNAT MBBS_Float_Random g-mbflra ads,GNAT Lock_Files g-locfil ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id88}@anchor{377}@anchor{gnat_rm/the_gnat_library gnat-mbbs-discrete-random-g-mbdira-ads}@anchor{378}
+@anchor{gnat_rm/the_gnat_library id88}@anchor{378}@anchor{gnat_rm/the_gnat_library gnat-mbbs-discrete-random-g-mbdira-ads}@anchor{379}
@section @code{GNAT.MBBS_Discrete_Random} (@code{g-mbdira.ads})
@@ -24218,7 +24230,7 @@ The original implementation of @code{Ada.Numerics.Discrete_Random}. Uses
a modified version of the Blum-Blum-Shub generator.
@node GNAT MBBS_Float_Random g-mbflra ads,GNAT MD5 g-md5 ads,GNAT MBBS_Discrete_Random g-mbdira ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id89}@anchor{379}@anchor{gnat_rm/the_gnat_library gnat-mbbs-float-random-g-mbflra-ads}@anchor{37a}
+@anchor{gnat_rm/the_gnat_library id89}@anchor{37a}@anchor{gnat_rm/the_gnat_library gnat-mbbs-float-random-g-mbflra-ads}@anchor{37b}
@section @code{GNAT.MBBS_Float_Random} (@code{g-mbflra.ads})
@@ -24230,7 +24242,7 @@ The original implementation of @code{Ada.Numerics.Float_Random}. Uses
a modified version of the Blum-Blum-Shub generator.
@node GNAT MD5 g-md5 ads,GNAT Memory_Dump g-memdum ads,GNAT MBBS_Float_Random g-mbflra ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id90}@anchor{37b}@anchor{gnat_rm/the_gnat_library gnat-md5-g-md5-ads}@anchor{37c}
+@anchor{gnat_rm/the_gnat_library id90}@anchor{37c}@anchor{gnat_rm/the_gnat_library gnat-md5-g-md5-ads}@anchor{37d}
@section @code{GNAT.MD5} (@code{g-md5.ads})
@@ -24243,7 +24255,7 @@ the HMAC-MD5 message authentication function as described in RFC 2104 and
FIPS PUB 198.
@node GNAT Memory_Dump g-memdum ads,GNAT Most_Recent_Exception g-moreex ads,GNAT MD5 g-md5 ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id91}@anchor{37d}@anchor{gnat_rm/the_gnat_library gnat-memory-dump-g-memdum-ads}@anchor{37e}
+@anchor{gnat_rm/the_gnat_library id91}@anchor{37e}@anchor{gnat_rm/the_gnat_library gnat-memory-dump-g-memdum-ads}@anchor{37f}
@section @code{GNAT.Memory_Dump} (@code{g-memdum.ads})
@@ -24256,7 +24268,7 @@ standard output or standard error files. Uses GNAT.IO for actual
output.
@node GNAT Most_Recent_Exception g-moreex ads,GNAT OS_Lib g-os_lib ads,GNAT Memory_Dump g-memdum ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id92}@anchor{37f}@anchor{gnat_rm/the_gnat_library gnat-most-recent-exception-g-moreex-ads}@anchor{380}
+@anchor{gnat_rm/the_gnat_library id92}@anchor{380}@anchor{gnat_rm/the_gnat_library gnat-most-recent-exception-g-moreex-ads}@anchor{381}
@section @code{GNAT.Most_Recent_Exception} (@code{g-moreex.ads})
@@ -24270,7 +24282,7 @@ various logging purposes, including duplicating functionality of some
Ada 83 implementation dependent extensions.
@node GNAT OS_Lib g-os_lib ads,GNAT Perfect_Hash_Generators g-pehage ads,GNAT Most_Recent_Exception g-moreex ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-os-lib-g-os-lib-ads}@anchor{381}@anchor{gnat_rm/the_gnat_library id93}@anchor{382}
+@anchor{gnat_rm/the_gnat_library gnat-os-lib-g-os-lib-ads}@anchor{382}@anchor{gnat_rm/the_gnat_library id93}@anchor{383}
@section @code{GNAT.OS_Lib} (@code{g-os_lib.ads})
@@ -24286,7 +24298,7 @@ including a portable spawn procedure, and access to environment variables
and error return codes.
@node GNAT Perfect_Hash_Generators g-pehage ads,GNAT Random_Numbers g-rannum ads,GNAT OS_Lib g-os_lib ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-perfect-hash-generators-g-pehage-ads}@anchor{383}@anchor{gnat_rm/the_gnat_library id94}@anchor{384}
+@anchor{gnat_rm/the_gnat_library gnat-perfect-hash-generators-g-pehage-ads}@anchor{384}@anchor{gnat_rm/the_gnat_library id94}@anchor{385}
@section @code{GNAT.Perfect_Hash_Generators} (@code{g-pehage.ads})
@@ -24304,7 +24316,7 @@ hashcode are in the same order. These hashing functions are very
convenient for use with realtime applications.
@node GNAT Random_Numbers g-rannum ads,GNAT Regexp g-regexp ads,GNAT Perfect_Hash_Generators g-pehage ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-random-numbers-g-rannum-ads}@anchor{385}@anchor{gnat_rm/the_gnat_library id95}@anchor{386}
+@anchor{gnat_rm/the_gnat_library gnat-random-numbers-g-rannum-ads}@anchor{386}@anchor{gnat_rm/the_gnat_library id95}@anchor{387}
@section @code{GNAT.Random_Numbers} (@code{g-rannum.ads})
@@ -24316,7 +24328,7 @@ Provides random number capabilities which extend those available in the
standard Ada library and are more convenient to use.
@node GNAT Regexp g-regexp ads,GNAT Registry g-regist ads,GNAT Random_Numbers g-rannum ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-regexp-g-regexp-ads}@anchor{250}@anchor{gnat_rm/the_gnat_library id96}@anchor{387}
+@anchor{gnat_rm/the_gnat_library gnat-regexp-g-regexp-ads}@anchor{251}@anchor{gnat_rm/the_gnat_library id96}@anchor{388}
@section @code{GNAT.Regexp} (@code{g-regexp.ads})
@@ -24332,7 +24344,7 @@ simplest of the three pattern matching packages provided, and is particularly
suitable for 'file globbing' applications.
@node GNAT Registry g-regist ads,GNAT Regpat g-regpat ads,GNAT Regexp g-regexp ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-registry-g-regist-ads}@anchor{388}@anchor{gnat_rm/the_gnat_library id97}@anchor{389}
+@anchor{gnat_rm/the_gnat_library gnat-registry-g-regist-ads}@anchor{389}@anchor{gnat_rm/the_gnat_library id97}@anchor{38a}
@section @code{GNAT.Registry} (@code{g-regist.ads})
@@ -24346,7 +24358,7 @@ registry API, but at a lower level of abstraction, refer to the Win32.Winreg
package provided with the Win32Ada binding
@node GNAT Regpat g-regpat ads,GNAT Rewrite_Data g-rewdat ads,GNAT Registry g-regist ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id98}@anchor{38a}@anchor{gnat_rm/the_gnat_library gnat-regpat-g-regpat-ads}@anchor{38b}
+@anchor{gnat_rm/the_gnat_library id98}@anchor{38b}@anchor{gnat_rm/the_gnat_library gnat-regpat-g-regpat-ads}@anchor{38c}
@section @code{GNAT.Regpat} (@code{g-regpat.ads})
@@ -24361,7 +24373,7 @@ from the original V7 style regular expression library written in C by
Henry Spencer (and binary compatible with this C library).
@node GNAT Rewrite_Data g-rewdat ads,GNAT Secondary_Stack_Info g-sestin ads,GNAT Regpat g-regpat ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id99}@anchor{38c}@anchor{gnat_rm/the_gnat_library gnat-rewrite-data-g-rewdat-ads}@anchor{38d}
+@anchor{gnat_rm/the_gnat_library id99}@anchor{38d}@anchor{gnat_rm/the_gnat_library gnat-rewrite-data-g-rewdat-ads}@anchor{38e}
@section @code{GNAT.Rewrite_Data} (@code{g-rewdat.ads})
@@ -24375,7 +24387,7 @@ full content to be processed is not loaded into memory all at once. This makes
this interface usable for large files or socket streams.
@node GNAT Secondary_Stack_Info g-sestin ads,GNAT Semaphores g-semaph ads,GNAT Rewrite_Data g-rewdat ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id100}@anchor{38e}@anchor{gnat_rm/the_gnat_library gnat-secondary-stack-info-g-sestin-ads}@anchor{38f}
+@anchor{gnat_rm/the_gnat_library id100}@anchor{38f}@anchor{gnat_rm/the_gnat_library gnat-secondary-stack-info-g-sestin-ads}@anchor{390}
@section @code{GNAT.Secondary_Stack_Info} (@code{g-sestin.ads})
@@ -24387,7 +24399,7 @@ Provide the capability to query the high water mark of the current task's
secondary stack.
@node GNAT Semaphores g-semaph ads,GNAT Serial_Communications g-sercom ads,GNAT Secondary_Stack_Info g-sestin ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id101}@anchor{390}@anchor{gnat_rm/the_gnat_library gnat-semaphores-g-semaph-ads}@anchor{391}
+@anchor{gnat_rm/the_gnat_library id101}@anchor{391}@anchor{gnat_rm/the_gnat_library gnat-semaphores-g-semaph-ads}@anchor{392}
@section @code{GNAT.Semaphores} (@code{g-semaph.ads})
@@ -24398,7 +24410,7 @@ secondary stack.
Provides classic counting and binary semaphores using protected types.
@node GNAT Serial_Communications g-sercom ads,GNAT SHA1 g-sha1 ads,GNAT Semaphores g-semaph ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-serial-communications-g-sercom-ads}@anchor{392}@anchor{gnat_rm/the_gnat_library id102}@anchor{393}
+@anchor{gnat_rm/the_gnat_library gnat-serial-communications-g-sercom-ads}@anchor{393}@anchor{gnat_rm/the_gnat_library id102}@anchor{394}
@section @code{GNAT.Serial_Communications} (@code{g-sercom.ads})
@@ -24410,7 +24422,7 @@ Provides a simple interface to send and receive data over a serial
port. This is only supported on GNU/Linux and Windows.
@node GNAT SHA1 g-sha1 ads,GNAT SHA224 g-sha224 ads,GNAT Serial_Communications g-sercom ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-sha1-g-sha1-ads}@anchor{394}@anchor{gnat_rm/the_gnat_library id103}@anchor{395}
+@anchor{gnat_rm/the_gnat_library gnat-sha1-g-sha1-ads}@anchor{395}@anchor{gnat_rm/the_gnat_library id103}@anchor{396}
@section @code{GNAT.SHA1} (@code{g-sha1.ads})
@@ -24423,7 +24435,7 @@ and RFC 3174, and the HMAC-SHA1 message authentication function as described
in RFC 2104 and FIPS PUB 198.
@node GNAT SHA224 g-sha224 ads,GNAT SHA256 g-sha256 ads,GNAT SHA1 g-sha1 ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-sha224-g-sha224-ads}@anchor{396}@anchor{gnat_rm/the_gnat_library id104}@anchor{397}
+@anchor{gnat_rm/the_gnat_library gnat-sha224-g-sha224-ads}@anchor{397}@anchor{gnat_rm/the_gnat_library id104}@anchor{398}
@section @code{GNAT.SHA224} (@code{g-sha224.ads})
@@ -24436,7 +24448,7 @@ and the HMAC-SHA224 message authentication function as described
in RFC 2104 and FIPS PUB 198.
@node GNAT SHA256 g-sha256 ads,GNAT SHA384 g-sha384 ads,GNAT SHA224 g-sha224 ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id105}@anchor{398}@anchor{gnat_rm/the_gnat_library gnat-sha256-g-sha256-ads}@anchor{399}
+@anchor{gnat_rm/the_gnat_library id105}@anchor{399}@anchor{gnat_rm/the_gnat_library gnat-sha256-g-sha256-ads}@anchor{39a}
@section @code{GNAT.SHA256} (@code{g-sha256.ads})
@@ -24449,7 +24461,7 @@ and the HMAC-SHA256 message authentication function as described
in RFC 2104 and FIPS PUB 198.
@node GNAT SHA384 g-sha384 ads,GNAT SHA512 g-sha512 ads,GNAT SHA256 g-sha256 ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-sha384-g-sha384-ads}@anchor{39a}@anchor{gnat_rm/the_gnat_library id106}@anchor{39b}
+@anchor{gnat_rm/the_gnat_library gnat-sha384-g-sha384-ads}@anchor{39b}@anchor{gnat_rm/the_gnat_library id106}@anchor{39c}
@section @code{GNAT.SHA384} (@code{g-sha384.ads})
@@ -24462,7 +24474,7 @@ and the HMAC-SHA384 message authentication function as described
in RFC 2104 and FIPS PUB 198.
@node GNAT SHA512 g-sha512 ads,GNAT Signals g-signal ads,GNAT SHA384 g-sha384 ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-sha512-g-sha512-ads}@anchor{39c}@anchor{gnat_rm/the_gnat_library id107}@anchor{39d}
+@anchor{gnat_rm/the_gnat_library gnat-sha512-g-sha512-ads}@anchor{39d}@anchor{gnat_rm/the_gnat_library id107}@anchor{39e}
@section @code{GNAT.SHA512} (@code{g-sha512.ads})
@@ -24475,7 +24487,7 @@ and the HMAC-SHA512 message authentication function as described
in RFC 2104 and FIPS PUB 198.
@node GNAT Signals g-signal ads,GNAT Sockets g-socket ads,GNAT SHA512 g-sha512 ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-signals-g-signal-ads}@anchor{39e}@anchor{gnat_rm/the_gnat_library id108}@anchor{39f}
+@anchor{gnat_rm/the_gnat_library gnat-signals-g-signal-ads}@anchor{39f}@anchor{gnat_rm/the_gnat_library id108}@anchor{3a0}
@section @code{GNAT.Signals} (@code{g-signal.ads})
@@ -24487,7 +24499,7 @@ Provides the ability to manipulate the blocked status of signals on supported
targets.
@node GNAT Sockets g-socket ads,GNAT Source_Info g-souinf ads,GNAT Signals g-signal ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id109}@anchor{3a0}@anchor{gnat_rm/the_gnat_library gnat-sockets-g-socket-ads}@anchor{3a1}
+@anchor{gnat_rm/the_gnat_library id109}@anchor{3a1}@anchor{gnat_rm/the_gnat_library gnat-sockets-g-socket-ads}@anchor{3a2}
@section @code{GNAT.Sockets} (@code{g-socket.ads})
@@ -24502,7 +24514,7 @@ on all native GNAT ports and on VxWorks cross prots. It is not implemented for
the LynxOS cross port.
@node GNAT Source_Info g-souinf ads,GNAT Spelling_Checker g-speche ads,GNAT Sockets g-socket ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-source-info-g-souinf-ads}@anchor{3a2}@anchor{gnat_rm/the_gnat_library id110}@anchor{3a3}
+@anchor{gnat_rm/the_gnat_library gnat-source-info-g-souinf-ads}@anchor{3a3}@anchor{gnat_rm/the_gnat_library id110}@anchor{3a4}
@section @code{GNAT.Source_Info} (@code{g-souinf.ads})
@@ -24516,7 +24528,7 @@ subprograms yielding the date and time of the current compilation (like the
C macros @code{__DATE__} and @code{__TIME__})
@node GNAT Spelling_Checker g-speche ads,GNAT Spelling_Checker_Generic g-spchge ads,GNAT Source_Info g-souinf ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-spelling-checker-g-speche-ads}@anchor{3a4}@anchor{gnat_rm/the_gnat_library id111}@anchor{3a5}
+@anchor{gnat_rm/the_gnat_library gnat-spelling-checker-g-speche-ads}@anchor{3a5}@anchor{gnat_rm/the_gnat_library id111}@anchor{3a6}
@section @code{GNAT.Spelling_Checker} (@code{g-speche.ads})
@@ -24528,7 +24540,7 @@ Provides a function for determining whether one string is a plausible
near misspelling of another string.
@node GNAT Spelling_Checker_Generic g-spchge ads,GNAT Spitbol Patterns g-spipat ads,GNAT Spelling_Checker g-speche ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id112}@anchor{3a6}@anchor{gnat_rm/the_gnat_library gnat-spelling-checker-generic-g-spchge-ads}@anchor{3a7}
+@anchor{gnat_rm/the_gnat_library id112}@anchor{3a7}@anchor{gnat_rm/the_gnat_library gnat-spelling-checker-generic-g-spchge-ads}@anchor{3a8}
@section @code{GNAT.Spelling_Checker_Generic} (@code{g-spchge.ads})
@@ -24541,7 +24553,7 @@ determining whether one string is a plausible near misspelling of another
string.
@node GNAT Spitbol Patterns g-spipat ads,GNAT Spitbol g-spitbo ads,GNAT Spelling_Checker_Generic g-spchge ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id113}@anchor{3a8}@anchor{gnat_rm/the_gnat_library gnat-spitbol-patterns-g-spipat-ads}@anchor{3a9}
+@anchor{gnat_rm/the_gnat_library id113}@anchor{3a9}@anchor{gnat_rm/the_gnat_library gnat-spitbol-patterns-g-spipat-ads}@anchor{3aa}
@section @code{GNAT.Spitbol.Patterns} (@code{g-spipat.ads})
@@ -24557,7 +24569,7 @@ the SNOBOL4 dynamic pattern construction and matching capabilities, using the
efficient algorithm developed by Robert Dewar for the SPITBOL system.
@node GNAT Spitbol g-spitbo ads,GNAT Spitbol Table_Boolean g-sptabo ads,GNAT Spitbol Patterns g-spipat ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-spitbol-g-spitbo-ads}@anchor{3aa}@anchor{gnat_rm/the_gnat_library id114}@anchor{3ab}
+@anchor{gnat_rm/the_gnat_library gnat-spitbol-g-spitbo-ads}@anchor{3ab}@anchor{gnat_rm/the_gnat_library id114}@anchor{3ac}
@section @code{GNAT.Spitbol} (@code{g-spitbo.ads})
@@ -24572,7 +24584,7 @@ useful for constructing arbitrary mappings from strings in the style of
the SNOBOL4 TABLE function.
@node GNAT Spitbol Table_Boolean g-sptabo ads,GNAT Spitbol Table_Integer g-sptain ads,GNAT Spitbol g-spitbo ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-spitbol-table-boolean-g-sptabo-ads}@anchor{3ac}@anchor{gnat_rm/the_gnat_library id115}@anchor{3ad}
+@anchor{gnat_rm/the_gnat_library gnat-spitbol-table-boolean-g-sptabo-ads}@anchor{3ad}@anchor{gnat_rm/the_gnat_library id115}@anchor{3ae}
@section @code{GNAT.Spitbol.Table_Boolean} (@code{g-sptabo.ads})
@@ -24587,7 +24599,7 @@ for type @code{Standard.Boolean}, giving an implementation of sets of
string values.
@node GNAT Spitbol Table_Integer g-sptain ads,GNAT Spitbol Table_VString g-sptavs ads,GNAT Spitbol Table_Boolean g-sptabo ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-spitbol-table-integer-g-sptain-ads}@anchor{3ae}@anchor{gnat_rm/the_gnat_library id116}@anchor{3af}
+@anchor{gnat_rm/the_gnat_library gnat-spitbol-table-integer-g-sptain-ads}@anchor{3af}@anchor{gnat_rm/the_gnat_library id116}@anchor{3b0}
@section @code{GNAT.Spitbol.Table_Integer} (@code{g-sptain.ads})
@@ -24604,7 +24616,7 @@ for type @code{Standard.Integer}, giving an implementation of maps
from string to integer values.
@node GNAT Spitbol Table_VString g-sptavs ads,GNAT SSE g-sse ads,GNAT Spitbol Table_Integer g-sptain ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id117}@anchor{3b0}@anchor{gnat_rm/the_gnat_library gnat-spitbol-table-vstring-g-sptavs-ads}@anchor{3b1}
+@anchor{gnat_rm/the_gnat_library id117}@anchor{3b1}@anchor{gnat_rm/the_gnat_library gnat-spitbol-table-vstring-g-sptavs-ads}@anchor{3b2}
@section @code{GNAT.Spitbol.Table_VString} (@code{g-sptavs.ads})
@@ -24621,7 +24633,7 @@ a variable length string type, giving an implementation of general
maps from strings to strings.
@node GNAT SSE g-sse ads,GNAT SSE Vector_Types g-ssvety ads,GNAT Spitbol Table_VString g-sptavs ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id118}@anchor{3b2}@anchor{gnat_rm/the_gnat_library gnat-sse-g-sse-ads}@anchor{3b3}
+@anchor{gnat_rm/the_gnat_library id118}@anchor{3b3}@anchor{gnat_rm/the_gnat_library gnat-sse-g-sse-ads}@anchor{3b4}
@section @code{GNAT.SSE} (@code{g-sse.ads})
@@ -24633,7 +24645,7 @@ targets. It exposes vector component types together with a general
introduction to the binding contents and use.
@node GNAT SSE Vector_Types g-ssvety ads,GNAT String_Hash g-strhas ads,GNAT SSE g-sse ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-sse-vector-types-g-ssvety-ads}@anchor{3b4}@anchor{gnat_rm/the_gnat_library id119}@anchor{3b5}
+@anchor{gnat_rm/the_gnat_library gnat-sse-vector-types-g-ssvety-ads}@anchor{3b5}@anchor{gnat_rm/the_gnat_library id119}@anchor{3b6}
@section @code{GNAT.SSE.Vector_Types} (@code{g-ssvety.ads})
@@ -24642,7 +24654,7 @@ introduction to the binding contents and use.
SSE vector types for use with SSE related intrinsics.
@node GNAT String_Hash g-strhas ads,GNAT Strings g-string ads,GNAT SSE Vector_Types g-ssvety ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-string-hash-g-strhas-ads}@anchor{3b6}@anchor{gnat_rm/the_gnat_library id120}@anchor{3b7}
+@anchor{gnat_rm/the_gnat_library gnat-string-hash-g-strhas-ads}@anchor{3b7}@anchor{gnat_rm/the_gnat_library id120}@anchor{3b8}
@section @code{GNAT.String_Hash} (@code{g-strhas.ads})
@@ -24654,7 +24666,7 @@ Provides a generic hash function working on arrays of scalars. Both the scalar
type and the hash result type are parameters.
@node GNAT Strings g-string ads,GNAT String_Split g-strspl ads,GNAT String_Hash g-strhas ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-strings-g-string-ads}@anchor{3b8}@anchor{gnat_rm/the_gnat_library id121}@anchor{3b9}
+@anchor{gnat_rm/the_gnat_library gnat-strings-g-string-ads}@anchor{3b9}@anchor{gnat_rm/the_gnat_library id121}@anchor{3ba}
@section @code{GNAT.Strings} (@code{g-string.ads})
@@ -24664,7 +24676,7 @@ Common String access types and related subprograms. Basically it
defines a string access and an array of string access types.
@node GNAT String_Split g-strspl ads,GNAT Table g-table ads,GNAT Strings g-string ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-string-split-g-strspl-ads}@anchor{3ba}@anchor{gnat_rm/the_gnat_library id122}@anchor{3bb}
+@anchor{gnat_rm/the_gnat_library gnat-string-split-g-strspl-ads}@anchor{3bb}@anchor{gnat_rm/the_gnat_library id122}@anchor{3bc}
@section @code{GNAT.String_Split} (@code{g-strspl.ads})
@@ -24678,7 +24690,7 @@ to the resulting slices. This package is instantiated from
@code{GNAT.Array_Split}.
@node GNAT Table g-table ads,GNAT Task_Lock g-tasloc ads,GNAT String_Split g-strspl ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-table-g-table-ads}@anchor{3bc}@anchor{gnat_rm/the_gnat_library id123}@anchor{3bd}
+@anchor{gnat_rm/the_gnat_library gnat-table-g-table-ads}@anchor{3bd}@anchor{gnat_rm/the_gnat_library id123}@anchor{3be}
@section @code{GNAT.Table} (@code{g-table.ads})
@@ -24698,7 +24710,7 @@ while an instantiation of @code{GNAT.Dynamic_Tables} creates a type that can be
used to define dynamic instances of the table.
@node GNAT Task_Lock g-tasloc ads,GNAT Time_Stamp g-timsta ads,GNAT Table g-table ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id124}@anchor{3be}@anchor{gnat_rm/the_gnat_library gnat-task-lock-g-tasloc-ads}@anchor{3bf}
+@anchor{gnat_rm/the_gnat_library id124}@anchor{3bf}@anchor{gnat_rm/the_gnat_library gnat-task-lock-g-tasloc-ads}@anchor{3c0}
@section @code{GNAT.Task_Lock} (@code{g-tasloc.ads})
@@ -24715,7 +24727,7 @@ single global task lock. Appropriate for use in situations where contention
between tasks is very rarely expected.
@node GNAT Time_Stamp g-timsta ads,GNAT Threads g-thread ads,GNAT Task_Lock g-tasloc ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id125}@anchor{3c0}@anchor{gnat_rm/the_gnat_library gnat-time-stamp-g-timsta-ads}@anchor{3c1}
+@anchor{gnat_rm/the_gnat_library id125}@anchor{3c1}@anchor{gnat_rm/the_gnat_library gnat-time-stamp-g-timsta-ads}@anchor{3c2}
@section @code{GNAT.Time_Stamp} (@code{g-timsta.ads})
@@ -24730,7 +24742,7 @@ represents the current date and time in ISO 8601 format. This is a very simple
routine with minimal code and there are no dependencies on any other unit.
@node GNAT Threads g-thread ads,GNAT Traceback g-traceb ads,GNAT Time_Stamp g-timsta ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-threads-g-thread-ads}@anchor{3c2}@anchor{gnat_rm/the_gnat_library id126}@anchor{3c3}
+@anchor{gnat_rm/the_gnat_library gnat-threads-g-thread-ads}@anchor{3c3}@anchor{gnat_rm/the_gnat_library id126}@anchor{3c4}
@section @code{GNAT.Threads} (@code{g-thread.ads})
@@ -24747,7 +24759,7 @@ further details if your program has threads that are created by a non-Ada
environment which then accesses Ada code.
@node GNAT Traceback g-traceb ads,GNAT Traceback Symbolic g-trasym ads,GNAT Threads g-thread ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id127}@anchor{3c4}@anchor{gnat_rm/the_gnat_library gnat-traceback-g-traceb-ads}@anchor{3c5}
+@anchor{gnat_rm/the_gnat_library id127}@anchor{3c5}@anchor{gnat_rm/the_gnat_library gnat-traceback-g-traceb-ads}@anchor{3c6}
@section @code{GNAT.Traceback} (@code{g-traceb.ads})
@@ -24759,7 +24771,7 @@ Provides a facility for obtaining non-symbolic traceback information, useful
in various debugging situations.
@node GNAT Traceback Symbolic g-trasym ads,GNAT UTF_32 g-table ads,GNAT Traceback g-traceb ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-traceback-symbolic-g-trasym-ads}@anchor{3c6}@anchor{gnat_rm/the_gnat_library id128}@anchor{3c7}
+@anchor{gnat_rm/the_gnat_library gnat-traceback-symbolic-g-trasym-ads}@anchor{3c7}@anchor{gnat_rm/the_gnat_library id128}@anchor{3c8}
@section @code{GNAT.Traceback.Symbolic} (@code{g-trasym.ads})
@@ -24768,7 +24780,7 @@ in various debugging situations.
@geindex Trace back facilities
@node GNAT UTF_32 g-table ads,GNAT Wide_Spelling_Checker g-u3spch ads,GNAT Traceback Symbolic g-trasym ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id129}@anchor{3c8}@anchor{gnat_rm/the_gnat_library gnat-utf-32-g-table-ads}@anchor{3c9}
+@anchor{gnat_rm/the_gnat_library id129}@anchor{3c9}@anchor{gnat_rm/the_gnat_library gnat-utf-32-g-table-ads}@anchor{3ca}
@section @code{GNAT.UTF_32} (@code{g-table.ads})
@@ -24787,7 +24799,7 @@ lower case to upper case fold routine corresponding to
the Ada 2005 rules for identifier equivalence.
@node GNAT Wide_Spelling_Checker g-u3spch ads,GNAT Wide_Spelling_Checker g-wispch ads,GNAT UTF_32 g-table ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-wide-spelling-checker-g-u3spch-ads}@anchor{3ca}@anchor{gnat_rm/the_gnat_library id130}@anchor{3cb}
+@anchor{gnat_rm/the_gnat_library gnat-wide-spelling-checker-g-u3spch-ads}@anchor{3cb}@anchor{gnat_rm/the_gnat_library id130}@anchor{3cc}
@section @code{GNAT.Wide_Spelling_Checker} (@code{g-u3spch.ads})
@@ -24800,7 +24812,7 @@ near misspelling of another wide wide string, where the strings are represented
using the UTF_32_String type defined in System.Wch_Cnv.
@node GNAT Wide_Spelling_Checker g-wispch ads,GNAT Wide_String_Split g-wistsp ads,GNAT Wide_Spelling_Checker g-u3spch ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-wide-spelling-checker-g-wispch-ads}@anchor{3cc}@anchor{gnat_rm/the_gnat_library id131}@anchor{3cd}
+@anchor{gnat_rm/the_gnat_library gnat-wide-spelling-checker-g-wispch-ads}@anchor{3cd}@anchor{gnat_rm/the_gnat_library id131}@anchor{3ce}
@section @code{GNAT.Wide_Spelling_Checker} (@code{g-wispch.ads})
@@ -24812,7 +24824,7 @@ Provides a function for determining whether one wide string is a plausible
near misspelling of another wide string.
@node GNAT Wide_String_Split g-wistsp ads,GNAT Wide_Wide_Spelling_Checker g-zspche ads,GNAT Wide_Spelling_Checker g-wispch ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id132}@anchor{3ce}@anchor{gnat_rm/the_gnat_library gnat-wide-string-split-g-wistsp-ads}@anchor{3cf}
+@anchor{gnat_rm/the_gnat_library id132}@anchor{3cf}@anchor{gnat_rm/the_gnat_library gnat-wide-string-split-g-wistsp-ads}@anchor{3d0}
@section @code{GNAT.Wide_String_Split} (@code{g-wistsp.ads})
@@ -24826,7 +24838,7 @@ to the resulting slices. This package is instantiated from
@code{GNAT.Array_Split}.
@node GNAT Wide_Wide_Spelling_Checker g-zspche ads,GNAT Wide_Wide_String_Split g-zistsp ads,GNAT Wide_String_Split g-wistsp ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-wide-wide-spelling-checker-g-zspche-ads}@anchor{3d0}@anchor{gnat_rm/the_gnat_library id133}@anchor{3d1}
+@anchor{gnat_rm/the_gnat_library gnat-wide-wide-spelling-checker-g-zspche-ads}@anchor{3d1}@anchor{gnat_rm/the_gnat_library id133}@anchor{3d2}
@section @code{GNAT.Wide_Wide_Spelling_Checker} (@code{g-zspche.ads})
@@ -24838,7 +24850,7 @@ Provides a function for determining whether one wide wide string is a plausible
near misspelling of another wide wide string.
@node GNAT Wide_Wide_String_Split g-zistsp ads,Interfaces C Extensions i-cexten ads,GNAT Wide_Wide_Spelling_Checker g-zspche ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library gnat-wide-wide-string-split-g-zistsp-ads}@anchor{3d2}@anchor{gnat_rm/the_gnat_library id134}@anchor{3d3}
+@anchor{gnat_rm/the_gnat_library gnat-wide-wide-string-split-g-zistsp-ads}@anchor{3d3}@anchor{gnat_rm/the_gnat_library id134}@anchor{3d4}
@section @code{GNAT.Wide_Wide_String_Split} (@code{g-zistsp.ads})
@@ -24852,7 +24864,7 @@ to the resulting slices. This package is instantiated from
@code{GNAT.Array_Split}.
@node Interfaces C Extensions i-cexten ads,Interfaces C Streams i-cstrea ads,GNAT Wide_Wide_String_Split g-zistsp ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library interfaces-c-extensions-i-cexten-ads}@anchor{3d4}@anchor{gnat_rm/the_gnat_library id135}@anchor{3d5}
+@anchor{gnat_rm/the_gnat_library interfaces-c-extensions-i-cexten-ads}@anchor{3d5}@anchor{gnat_rm/the_gnat_library id135}@anchor{3d6}
@section @code{Interfaces.C.Extensions} (@code{i-cexten.ads})
@@ -24863,7 +24875,7 @@ for use with either manually or automatically generated bindings
to C libraries.
@node Interfaces C Streams i-cstrea ads,Interfaces Packed_Decimal i-pacdec ads,Interfaces C Extensions i-cexten ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library interfaces-c-streams-i-cstrea-ads}@anchor{3d6}@anchor{gnat_rm/the_gnat_library id136}@anchor{3d7}
+@anchor{gnat_rm/the_gnat_library interfaces-c-streams-i-cstrea-ads}@anchor{3d7}@anchor{gnat_rm/the_gnat_library id136}@anchor{3d8}
@section @code{Interfaces.C.Streams} (@code{i-cstrea.ads})
@@ -24876,7 +24888,7 @@ This package is a binding for the most commonly used operations
on C streams.
@node Interfaces Packed_Decimal i-pacdec ads,Interfaces VxWorks i-vxwork ads,Interfaces C Streams i-cstrea ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library interfaces-packed-decimal-i-pacdec-ads}@anchor{3d8}@anchor{gnat_rm/the_gnat_library id137}@anchor{3d9}
+@anchor{gnat_rm/the_gnat_library interfaces-packed-decimal-i-pacdec-ads}@anchor{3d9}@anchor{gnat_rm/the_gnat_library id137}@anchor{3da}
@section @code{Interfaces.Packed_Decimal} (@code{i-pacdec.ads})
@@ -24891,7 +24903,7 @@ from a packed decimal format compatible with that used on IBM
mainframes.
@node Interfaces VxWorks i-vxwork ads,Interfaces VxWorks Int_Connection i-vxinco ads,Interfaces Packed_Decimal i-pacdec ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id138}@anchor{3da}@anchor{gnat_rm/the_gnat_library interfaces-vxworks-i-vxwork-ads}@anchor{3db}
+@anchor{gnat_rm/the_gnat_library id138}@anchor{3db}@anchor{gnat_rm/the_gnat_library interfaces-vxworks-i-vxwork-ads}@anchor{3dc}
@section @code{Interfaces.VxWorks} (@code{i-vxwork.ads})
@@ -24907,7 +24919,7 @@ In particular, it interfaces with the
VxWorks hardware interrupt facilities.
@node Interfaces VxWorks Int_Connection i-vxinco ads,Interfaces VxWorks IO i-vxwoio ads,Interfaces VxWorks i-vxwork ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library interfaces-vxworks-int-connection-i-vxinco-ads}@anchor{3dc}@anchor{gnat_rm/the_gnat_library id139}@anchor{3dd}
+@anchor{gnat_rm/the_gnat_library interfaces-vxworks-int-connection-i-vxinco-ads}@anchor{3dd}@anchor{gnat_rm/the_gnat_library id139}@anchor{3de}
@section @code{Interfaces.VxWorks.Int_Connection} (@code{i-vxinco.ads})
@@ -24923,7 +24935,7 @@ intConnect() with a custom routine for installing interrupt
handlers.
@node Interfaces VxWorks IO i-vxwoio ads,System Address_Image s-addima ads,Interfaces VxWorks Int_Connection i-vxinco ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library interfaces-vxworks-io-i-vxwoio-ads}@anchor{3de}@anchor{gnat_rm/the_gnat_library id140}@anchor{3df}
+@anchor{gnat_rm/the_gnat_library interfaces-vxworks-io-i-vxwoio-ads}@anchor{3df}@anchor{gnat_rm/the_gnat_library id140}@anchor{3e0}
@section @code{Interfaces.VxWorks.IO} (@code{i-vxwoio.ads})
@@ -24946,7 +24958,7 @@ function codes. A particular use of this package is
to enable the use of Get_Immediate under VxWorks.
@node System Address_Image s-addima ads,System Assertions s-assert ads,Interfaces VxWorks IO i-vxwoio ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id141}@anchor{3e0}@anchor{gnat_rm/the_gnat_library system-address-image-s-addima-ads}@anchor{3e1}
+@anchor{gnat_rm/the_gnat_library id141}@anchor{3e1}@anchor{gnat_rm/the_gnat_library system-address-image-s-addima-ads}@anchor{3e2}
@section @code{System.Address_Image} (@code{s-addima.ads})
@@ -24962,7 +24974,7 @@ function that gives an (implementation dependent)
string which identifies an address.
@node System Assertions s-assert ads,System Atomic_Counters s-atocou ads,System Address_Image s-addima ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library system-assertions-s-assert-ads}@anchor{3e2}@anchor{gnat_rm/the_gnat_library id142}@anchor{3e3}
+@anchor{gnat_rm/the_gnat_library system-assertions-s-assert-ads}@anchor{3e3}@anchor{gnat_rm/the_gnat_library id142}@anchor{3e4}
@section @code{System.Assertions} (@code{s-assert.ads})
@@ -24978,7 +24990,7 @@ by an run-time assertion failure, as well as the routine that
is used internally to raise this assertion.
@node System Atomic_Counters s-atocou ads,System Memory s-memory ads,System Assertions s-assert ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id143}@anchor{3e4}@anchor{gnat_rm/the_gnat_library system-atomic-counters-s-atocou-ads}@anchor{3e5}
+@anchor{gnat_rm/the_gnat_library id143}@anchor{3e5}@anchor{gnat_rm/the_gnat_library system-atomic-counters-s-atocou-ads}@anchor{3e6}
@section @code{System.Atomic_Counters} (@code{s-atocou.ads})
@@ -24992,7 +25004,7 @@ on most targets, including all Alpha, ia64, PowerPC, SPARC V9,
x86, and x86_64 platforms.
@node System Memory s-memory ads,System Multiprocessors s-multip ads,System Atomic_Counters s-atocou ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library system-memory-s-memory-ads}@anchor{3e6}@anchor{gnat_rm/the_gnat_library id144}@anchor{3e7}
+@anchor{gnat_rm/the_gnat_library system-memory-s-memory-ads}@anchor{3e7}@anchor{gnat_rm/the_gnat_library id144}@anchor{3e8}
@section @code{System.Memory} (@code{s-memory.ads})
@@ -25010,7 +25022,7 @@ calls to this unit may be made for low level allocation uses (for
example see the body of @code{GNAT.Tables}).
@node System Multiprocessors s-multip ads,System Multiprocessors Dispatching_Domains s-mudido ads,System Memory s-memory ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id145}@anchor{3e8}@anchor{gnat_rm/the_gnat_library system-multiprocessors-s-multip-ads}@anchor{3e9}
+@anchor{gnat_rm/the_gnat_library id145}@anchor{3e9}@anchor{gnat_rm/the_gnat_library system-multiprocessors-s-multip-ads}@anchor{3ea}
@section @code{System.Multiprocessors} (@code{s-multip.ads})
@@ -25023,7 +25035,7 @@ in GNAT we also make it available in Ada 95 and Ada 2005 (where it is
technically an implementation-defined addition).
@node System Multiprocessors Dispatching_Domains s-mudido ads,System Partition_Interface s-parint ads,System Multiprocessors s-multip ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library system-multiprocessors-dispatching-domains-s-mudido-ads}@anchor{3ea}@anchor{gnat_rm/the_gnat_library id146}@anchor{3eb}
+@anchor{gnat_rm/the_gnat_library system-multiprocessors-dispatching-domains-s-mudido-ads}@anchor{3eb}@anchor{gnat_rm/the_gnat_library id146}@anchor{3ec}
@section @code{System.Multiprocessors.Dispatching_Domains} (@code{s-mudido.ads})
@@ -25036,7 +25048,7 @@ in GNAT we also make it available in Ada 95 and Ada 2005 (where it is
technically an implementation-defined addition).
@node System Partition_Interface s-parint ads,System Pool_Global s-pooglo ads,System Multiprocessors Dispatching_Domains s-mudido ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id147}@anchor{3ec}@anchor{gnat_rm/the_gnat_library system-partition-interface-s-parint-ads}@anchor{3ed}
+@anchor{gnat_rm/the_gnat_library id147}@anchor{3ed}@anchor{gnat_rm/the_gnat_library system-partition-interface-s-parint-ads}@anchor{3ee}
@section @code{System.Partition_Interface} (@code{s-parint.ads})
@@ -25049,7 +25061,7 @@ is used primarily in a distribution context when using Annex E
with @code{GLADE}.
@node System Pool_Global s-pooglo ads,System Pool_Local s-pooloc ads,System Partition_Interface s-parint ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id148}@anchor{3ee}@anchor{gnat_rm/the_gnat_library system-pool-global-s-pooglo-ads}@anchor{3ef}
+@anchor{gnat_rm/the_gnat_library id148}@anchor{3ef}@anchor{gnat_rm/the_gnat_library system-pool-global-s-pooglo-ads}@anchor{3f0}
@section @code{System.Pool_Global} (@code{s-pooglo.ads})
@@ -25066,7 +25078,7 @@ declared. It uses malloc/free to allocate/free and does not attempt to
do any automatic reclamation.
@node System Pool_Local s-pooloc ads,System Restrictions s-restri ads,System Pool_Global s-pooglo ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library system-pool-local-s-pooloc-ads}@anchor{3f0}@anchor{gnat_rm/the_gnat_library id149}@anchor{3f1}
+@anchor{gnat_rm/the_gnat_library system-pool-local-s-pooloc-ads}@anchor{3f1}@anchor{gnat_rm/the_gnat_library id149}@anchor{3f2}
@section @code{System.Pool_Local} (@code{s-pooloc.ads})
@@ -25083,7 +25095,7 @@ a list of allocated blocks, so that all storage allocated for the pool can
be freed automatically when the pool is finalized.
@node System Restrictions s-restri ads,System Rident s-rident ads,System Pool_Local s-pooloc ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id150}@anchor{3f2}@anchor{gnat_rm/the_gnat_library system-restrictions-s-restri-ads}@anchor{3f3}
+@anchor{gnat_rm/the_gnat_library id150}@anchor{3f3}@anchor{gnat_rm/the_gnat_library system-restrictions-s-restri-ads}@anchor{3f4}
@section @code{System.Restrictions} (@code{s-restri.ads})
@@ -25099,7 +25111,7 @@ compiler determined information on which restrictions
are violated by one or more packages in the partition.
@node System Rident s-rident ads,System Strings Stream_Ops s-ststop ads,System Restrictions s-restri ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library system-rident-s-rident-ads}@anchor{3f4}@anchor{gnat_rm/the_gnat_library id151}@anchor{3f5}
+@anchor{gnat_rm/the_gnat_library system-rident-s-rident-ads}@anchor{3f5}@anchor{gnat_rm/the_gnat_library id151}@anchor{3f6}
@section @code{System.Rident} (@code{s-rident.ads})
@@ -25115,7 +25127,7 @@ since the necessary instantiation is included in
package System.Restrictions.
@node System Strings Stream_Ops s-ststop ads,System Unsigned_Types s-unstyp ads,System Rident s-rident ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library id152}@anchor{3f6}@anchor{gnat_rm/the_gnat_library system-strings-stream-ops-s-ststop-ads}@anchor{3f7}
+@anchor{gnat_rm/the_gnat_library id152}@anchor{3f7}@anchor{gnat_rm/the_gnat_library system-strings-stream-ops-s-ststop-ads}@anchor{3f8}
@section @code{System.Strings.Stream_Ops} (@code{s-ststop.ads})
@@ -25131,7 +25143,7 @@ stream attributes are applied to string types, but the subprograms in this
package can be used directly by application programs.
@node System Unsigned_Types s-unstyp ads,System Wch_Cnv s-wchcnv ads,System Strings Stream_Ops s-ststop ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library system-unsigned-types-s-unstyp-ads}@anchor{3f8}@anchor{gnat_rm/the_gnat_library id153}@anchor{3f9}
+@anchor{gnat_rm/the_gnat_library system-unsigned-types-s-unstyp-ads}@anchor{3f9}@anchor{gnat_rm/the_gnat_library id153}@anchor{3fa}
@section @code{System.Unsigned_Types} (@code{s-unstyp.ads})
@@ -25144,7 +25156,7 @@ also contains some related definitions for other specialized types
used by the compiler in connection with packed array types.
@node System Wch_Cnv s-wchcnv ads,System Wch_Con s-wchcon ads,System Unsigned_Types s-unstyp ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library system-wch-cnv-s-wchcnv-ads}@anchor{3fa}@anchor{gnat_rm/the_gnat_library id154}@anchor{3fb}
+@anchor{gnat_rm/the_gnat_library system-wch-cnv-s-wchcnv-ads}@anchor{3fb}@anchor{gnat_rm/the_gnat_library id154}@anchor{3fc}
@section @code{System.Wch_Cnv} (@code{s-wchcnv.ads})
@@ -25165,7 +25177,7 @@ encoding method. It uses definitions in
package @code{System.Wch_Con}.
@node System Wch_Con s-wchcon ads,,System Wch_Cnv s-wchcnv ads,The GNAT Library
-@anchor{gnat_rm/the_gnat_library system-wch-con-s-wchcon-ads}@anchor{3fc}@anchor{gnat_rm/the_gnat_library id155}@anchor{3fd}
+@anchor{gnat_rm/the_gnat_library system-wch-con-s-wchcon-ads}@anchor{3fd}@anchor{gnat_rm/the_gnat_library id155}@anchor{3fe}
@section @code{System.Wch_Con} (@code{s-wchcon.ads})
@@ -25177,7 +25189,7 @@ in ordinary strings. These definitions are used by
the package @code{System.Wch_Cnv}.
@node Interfacing to Other Languages,Specialized Needs Annexes,The GNAT Library,Top
-@anchor{gnat_rm/interfacing_to_other_languages interfacing-to-other-languages}@anchor{11}@anchor{gnat_rm/interfacing_to_other_languages doc}@anchor{3fe}@anchor{gnat_rm/interfacing_to_other_languages id1}@anchor{3ff}
+@anchor{gnat_rm/interfacing_to_other_languages interfacing-to-other-languages}@anchor{11}@anchor{gnat_rm/interfacing_to_other_languages doc}@anchor{3ff}@anchor{gnat_rm/interfacing_to_other_languages id1}@anchor{400}
@chapter Interfacing to Other Languages
@@ -25195,7 +25207,7 @@ provided.
@end menu
@node Interfacing to C,Interfacing to C++,,Interfacing to Other Languages
-@anchor{gnat_rm/interfacing_to_other_languages interfacing-to-c}@anchor{400}@anchor{gnat_rm/interfacing_to_other_languages id2}@anchor{401}
+@anchor{gnat_rm/interfacing_to_other_languages interfacing-to-c}@anchor{401}@anchor{gnat_rm/interfacing_to_other_languages id2}@anchor{402}
@section Interfacing to C
@@ -25333,7 +25345,7 @@ of the length corresponding to the @code{type'Size} value in Ada.
@end itemize
@node Interfacing to C++,Interfacing to COBOL,Interfacing to C,Interfacing to Other Languages
-@anchor{gnat_rm/interfacing_to_other_languages id4}@anchor{402}@anchor{gnat_rm/interfacing_to_other_languages id3}@anchor{45}
+@anchor{gnat_rm/interfacing_to_other_languages id4}@anchor{403}@anchor{gnat_rm/interfacing_to_other_languages id3}@anchor{45}
@section Interfacing to C++
@@ -25390,7 +25402,7 @@ The @code{External_Name} is the name of the C++ RTTI symbol. You can then
cover a specific C++ exception in an exception handler.
@node Interfacing to COBOL,Interfacing to Fortran,Interfacing to C++,Interfacing to Other Languages
-@anchor{gnat_rm/interfacing_to_other_languages id5}@anchor{403}@anchor{gnat_rm/interfacing_to_other_languages interfacing-to-cobol}@anchor{404}
+@anchor{gnat_rm/interfacing_to_other_languages id5}@anchor{404}@anchor{gnat_rm/interfacing_to_other_languages interfacing-to-cobol}@anchor{405}
@section Interfacing to COBOL
@@ -25398,7 +25410,7 @@ Interfacing to COBOL is achieved as described in section B.4 of
the Ada Reference Manual.
@node Interfacing to Fortran,Interfacing to non-GNAT Ada code,Interfacing to COBOL,Interfacing to Other Languages
-@anchor{gnat_rm/interfacing_to_other_languages id6}@anchor{405}@anchor{gnat_rm/interfacing_to_other_languages interfacing-to-fortran}@anchor{406}
+@anchor{gnat_rm/interfacing_to_other_languages id6}@anchor{406}@anchor{gnat_rm/interfacing_to_other_languages interfacing-to-fortran}@anchor{407}
@section Interfacing to Fortran
@@ -25408,7 +25420,7 @@ multi-dimensional array causes the array to be stored in column-major
order as required for convenient interface to Fortran.
@node Interfacing to non-GNAT Ada code,,Interfacing to Fortran,Interfacing to Other Languages
-@anchor{gnat_rm/interfacing_to_other_languages interfacing-to-non-gnat-ada-code}@anchor{407}@anchor{gnat_rm/interfacing_to_other_languages id7}@anchor{408}
+@anchor{gnat_rm/interfacing_to_other_languages interfacing-to-non-gnat-ada-code}@anchor{408}@anchor{gnat_rm/interfacing_to_other_languages id7}@anchor{409}
@section Interfacing to non-GNAT Ada code
@@ -25432,7 +25444,7 @@ values or simple record types without variants, or simple array
types with fixed bounds.
@node Specialized Needs Annexes,Implementation of Specific Ada Features,Interfacing to Other Languages,Top
-@anchor{gnat_rm/specialized_needs_annexes specialized-needs-annexes}@anchor{12}@anchor{gnat_rm/specialized_needs_annexes doc}@anchor{409}@anchor{gnat_rm/specialized_needs_annexes id1}@anchor{40a}
+@anchor{gnat_rm/specialized_needs_annexes specialized-needs-annexes}@anchor{12}@anchor{gnat_rm/specialized_needs_annexes doc}@anchor{40a}@anchor{gnat_rm/specialized_needs_annexes id1}@anchor{40b}
@chapter Specialized Needs Annexes
@@ -25473,7 +25485,7 @@ in Ada 2005) is fully implemented.
@end table
@node Implementation of Specific Ada Features,Implementation of Ada 2012 Features,Specialized Needs Annexes,Top
-@anchor{gnat_rm/implementation_of_specific_ada_features implementation-of-specific-ada-features}@anchor{13}@anchor{gnat_rm/implementation_of_specific_ada_features doc}@anchor{40b}@anchor{gnat_rm/implementation_of_specific_ada_features id1}@anchor{40c}
+@anchor{gnat_rm/implementation_of_specific_ada_features implementation-of-specific-ada-features}@anchor{13}@anchor{gnat_rm/implementation_of_specific_ada_features doc}@anchor{40c}@anchor{gnat_rm/implementation_of_specific_ada_features id1}@anchor{40d}
@chapter Implementation of Specific Ada Features
@@ -25491,7 +25503,7 @@ facilities.
@end menu
@node Machine Code Insertions,GNAT Implementation of Tasking,,Implementation of Specific Ada Features
-@anchor{gnat_rm/implementation_of_specific_ada_features machine-code-insertions}@anchor{164}@anchor{gnat_rm/implementation_of_specific_ada_features id2}@anchor{40d}
+@anchor{gnat_rm/implementation_of_specific_ada_features machine-code-insertions}@anchor{164}@anchor{gnat_rm/implementation_of_specific_ada_features id2}@anchor{40e}
@section Machine Code Insertions
@@ -25659,7 +25671,7 @@ according to normal visibility rules. In particular if there is no
qualification is required.
@node GNAT Implementation of Tasking,GNAT Implementation of Shared Passive Packages,Machine Code Insertions,Implementation of Specific Ada Features
-@anchor{gnat_rm/implementation_of_specific_ada_features id3}@anchor{40e}@anchor{gnat_rm/implementation_of_specific_ada_features gnat-implementation-of-tasking}@anchor{40f}
+@anchor{gnat_rm/implementation_of_specific_ada_features id3}@anchor{40f}@anchor{gnat_rm/implementation_of_specific_ada_features gnat-implementation-of-tasking}@anchor{410}
@section GNAT Implementation of Tasking
@@ -25675,7 +25687,7 @@ to compliance with the Real-Time Systems Annex.
@end menu
@node Mapping Ada Tasks onto the Underlying Kernel Threads,Ensuring Compliance with the Real-Time Annex,,GNAT Implementation of Tasking
-@anchor{gnat_rm/implementation_of_specific_ada_features mapping-ada-tasks-onto-the-underlying-kernel-threads}@anchor{410}@anchor{gnat_rm/implementation_of_specific_ada_features id4}@anchor{411}
+@anchor{gnat_rm/implementation_of_specific_ada_features mapping-ada-tasks-onto-the-underlying-kernel-threads}@anchor{411}@anchor{gnat_rm/implementation_of_specific_ada_features id4}@anchor{412}
@subsection Mapping Ada Tasks onto the Underlying Kernel Threads
@@ -25744,7 +25756,7 @@ support this functionality when the parent contains more than one task.
@geindex Forking a new process
@node Ensuring Compliance with the Real-Time Annex,Support for Locking Policies,Mapping Ada Tasks onto the Underlying Kernel Threads,GNAT Implementation of Tasking
-@anchor{gnat_rm/implementation_of_specific_ada_features id5}@anchor{412}@anchor{gnat_rm/implementation_of_specific_ada_features ensuring-compliance-with-the-real-time-annex}@anchor{413}
+@anchor{gnat_rm/implementation_of_specific_ada_features id5}@anchor{413}@anchor{gnat_rm/implementation_of_specific_ada_features ensuring-compliance-with-the-real-time-annex}@anchor{414}
@subsection Ensuring Compliance with the Real-Time Annex
@@ -25795,7 +25807,7 @@ placed at the end.
@c Support_for_Locking_Policies
@node Support for Locking Policies,,Ensuring Compliance with the Real-Time Annex,GNAT Implementation of Tasking
-@anchor{gnat_rm/implementation_of_specific_ada_features support-for-locking-policies}@anchor{414}
+@anchor{gnat_rm/implementation_of_specific_ada_features support-for-locking-policies}@anchor{415}
@subsection Support for Locking Policies
@@ -25829,7 +25841,7 @@ then ceiling locking is used.
Otherwise, the @code{Ceiling_Locking} policy is ignored.
@node GNAT Implementation of Shared Passive Packages,Code Generation for Array Aggregates,GNAT Implementation of Tasking,Implementation of Specific Ada Features
-@anchor{gnat_rm/implementation_of_specific_ada_features id6}@anchor{415}@anchor{gnat_rm/implementation_of_specific_ada_features gnat-implementation-of-shared-passive-packages}@anchor{416}
+@anchor{gnat_rm/implementation_of_specific_ada_features id6}@anchor{416}@anchor{gnat_rm/implementation_of_specific_ada_features gnat-implementation-of-shared-passive-packages}@anchor{417}
@section GNAT Implementation of Shared Passive Packages
@@ -25930,7 +25942,7 @@ GNAT supports shared passive packages on all platforms
except for OpenVMS.
@node Code Generation for Array Aggregates,The Size of Discriminated Records with Default Discriminants,GNAT Implementation of Shared Passive Packages,Implementation of Specific Ada Features
-@anchor{gnat_rm/implementation_of_specific_ada_features code-generation-for-array-aggregates}@anchor{417}@anchor{gnat_rm/implementation_of_specific_ada_features id7}@anchor{418}
+@anchor{gnat_rm/implementation_of_specific_ada_features code-generation-for-array-aggregates}@anchor{418}@anchor{gnat_rm/implementation_of_specific_ada_features id7}@anchor{419}
@section Code Generation for Array Aggregates
@@ -25961,7 +25973,7 @@ component values and static subtypes also lead to simpler code.
@end menu
@node Static constant aggregates with static bounds,Constant aggregates with unconstrained nominal types,,Code Generation for Array Aggregates
-@anchor{gnat_rm/implementation_of_specific_ada_features static-constant-aggregates-with-static-bounds}@anchor{419}@anchor{gnat_rm/implementation_of_specific_ada_features id8}@anchor{41a}
+@anchor{gnat_rm/implementation_of_specific_ada_features static-constant-aggregates-with-static-bounds}@anchor{41a}@anchor{gnat_rm/implementation_of_specific_ada_features id8}@anchor{41b}
@subsection Static constant aggregates with static bounds
@@ -26008,7 +26020,7 @@ Zero2: constant two_dim := (others => (others => 0));
@end example
@node Constant aggregates with unconstrained nominal types,Aggregates with static bounds,Static constant aggregates with static bounds,Code Generation for Array Aggregates
-@anchor{gnat_rm/implementation_of_specific_ada_features constant-aggregates-with-unconstrained-nominal-types}@anchor{41b}@anchor{gnat_rm/implementation_of_specific_ada_features id9}@anchor{41c}
+@anchor{gnat_rm/implementation_of_specific_ada_features constant-aggregates-with-unconstrained-nominal-types}@anchor{41c}@anchor{gnat_rm/implementation_of_specific_ada_features id9}@anchor{41d}
@subsection Constant aggregates with unconstrained nominal types
@@ -26023,7 +26035,7 @@ Cr_Unc : constant One_Unc := (12,24,36);
@end example
@node Aggregates with static bounds,Aggregates with nonstatic bounds,Constant aggregates with unconstrained nominal types,Code Generation for Array Aggregates
-@anchor{gnat_rm/implementation_of_specific_ada_features id10}@anchor{41d}@anchor{gnat_rm/implementation_of_specific_ada_features aggregates-with-static-bounds}@anchor{41e}
+@anchor{gnat_rm/implementation_of_specific_ada_features id10}@anchor{41e}@anchor{gnat_rm/implementation_of_specific_ada_features aggregates-with-static-bounds}@anchor{41f}
@subsection Aggregates with static bounds
@@ -26051,7 +26063,7 @@ end loop;
@end example
@node Aggregates with nonstatic bounds,Aggregates in assignment statements,Aggregates with static bounds,Code Generation for Array Aggregates
-@anchor{gnat_rm/implementation_of_specific_ada_features id11}@anchor{41f}@anchor{gnat_rm/implementation_of_specific_ada_features aggregates-with-nonstatic-bounds}@anchor{420}
+@anchor{gnat_rm/implementation_of_specific_ada_features id11}@anchor{420}@anchor{gnat_rm/implementation_of_specific_ada_features aggregates-with-nonstatic-bounds}@anchor{421}
@subsection Aggregates with nonstatic bounds
@@ -26062,7 +26074,7 @@ have to be applied to sub-arrays individually, if they do not have statically
compatible subtypes.
@node Aggregates in assignment statements,,Aggregates with nonstatic bounds,Code Generation for Array Aggregates
-@anchor{gnat_rm/implementation_of_specific_ada_features id12}@anchor{421}@anchor{gnat_rm/implementation_of_specific_ada_features aggregates-in-assignment-statements}@anchor{422}
+@anchor{gnat_rm/implementation_of_specific_ada_features id12}@anchor{422}@anchor{gnat_rm/implementation_of_specific_ada_features aggregates-in-assignment-statements}@anchor{423}
@subsection Aggregates in assignment statements
@@ -26104,7 +26116,7 @@ a temporary (created either by the front-end or the code generator) and then
that temporary will be copied onto the target.
@node The Size of Discriminated Records with Default Discriminants,Strict Conformance to the Ada Reference Manual,Code Generation for Array Aggregates,Implementation of Specific Ada Features
-@anchor{gnat_rm/implementation_of_specific_ada_features id13}@anchor{423}@anchor{gnat_rm/implementation_of_specific_ada_features the-size-of-discriminated-records-with-default-discriminants}@anchor{424}
+@anchor{gnat_rm/implementation_of_specific_ada_features id13}@anchor{424}@anchor{gnat_rm/implementation_of_specific_ada_features the-size-of-discriminated-records-with-default-discriminants}@anchor{425}
@section The Size of Discriminated Records with Default Discriminants
@@ -26184,7 +26196,7 @@ say) must be consistent, so it is imperative that the object, once created,
remain invariant.
@node Strict Conformance to the Ada Reference Manual,,The Size of Discriminated Records with Default Discriminants,Implementation of Specific Ada Features
-@anchor{gnat_rm/implementation_of_specific_ada_features strict-conformance-to-the-ada-reference-manual}@anchor{425}@anchor{gnat_rm/implementation_of_specific_ada_features id14}@anchor{426}
+@anchor{gnat_rm/implementation_of_specific_ada_features strict-conformance-to-the-ada-reference-manual}@anchor{426}@anchor{gnat_rm/implementation_of_specific_ada_features id14}@anchor{427}
@section Strict Conformance to the Ada Reference Manual
@@ -26211,7 +26223,7 @@ behavior (although at the cost of a significant performance penalty), so
infinite and NaN values are properly generated.
@node Implementation of Ada 2012 Features,Obsolescent Features,Implementation of Specific Ada Features,Top
-@anchor{gnat_rm/implementation_of_ada_2012_features doc}@anchor{427}@anchor{gnat_rm/implementation_of_ada_2012_features implementation-of-ada-2012-features}@anchor{14}@anchor{gnat_rm/implementation_of_ada_2012_features id1}@anchor{428}
+@anchor{gnat_rm/implementation_of_ada_2012_features doc}@anchor{428}@anchor{gnat_rm/implementation_of_ada_2012_features implementation-of-ada-2012-features}@anchor{14}@anchor{gnat_rm/implementation_of_ada_2012_features id1}@anchor{429}
@chapter Implementation of Ada 2012 Features
@@ -28377,7 +28389,7 @@ RM References: H.04 (8/1)
@end itemize
@node Obsolescent Features,Compatibility and Porting Guide,Implementation of Ada 2012 Features,Top
-@anchor{gnat_rm/obsolescent_features id1}@anchor{429}@anchor{gnat_rm/obsolescent_features doc}@anchor{42a}@anchor{gnat_rm/obsolescent_features obsolescent-features}@anchor{15}
+@anchor{gnat_rm/obsolescent_features id1}@anchor{42a}@anchor{gnat_rm/obsolescent_features doc}@anchor{42b}@anchor{gnat_rm/obsolescent_features obsolescent-features}@anchor{15}
@chapter Obsolescent Features
@@ -28396,7 +28408,7 @@ compatibility purposes.
@end menu
@node pragma No_Run_Time,pragma Ravenscar,,Obsolescent Features
-@anchor{gnat_rm/obsolescent_features id2}@anchor{42b}@anchor{gnat_rm/obsolescent_features pragma-no-run-time}@anchor{42c}
+@anchor{gnat_rm/obsolescent_features id2}@anchor{42c}@anchor{gnat_rm/obsolescent_features pragma-no-run-time}@anchor{42d}
@section pragma No_Run_Time
@@ -28409,7 +28421,7 @@ preferred usage is to use an appropriately configured run-time that
includes just those features that are to be made accessible.
@node pragma Ravenscar,pragma Restricted_Run_Time,pragma No_Run_Time,Obsolescent Features
-@anchor{gnat_rm/obsolescent_features id3}@anchor{42d}@anchor{gnat_rm/obsolescent_features pragma-ravenscar}@anchor{42e}
+@anchor{gnat_rm/obsolescent_features id3}@anchor{42e}@anchor{gnat_rm/obsolescent_features pragma-ravenscar}@anchor{42f}
@section pragma Ravenscar
@@ -28418,7 +28430,7 @@ The pragma @code{Ravenscar} has exactly the same effect as pragma
is part of the new Ada 2005 standard.
@node pragma Restricted_Run_Time,pragma Task_Info,pragma Ravenscar,Obsolescent Features
-@anchor{gnat_rm/obsolescent_features pragma-restricted-run-time}@anchor{42f}@anchor{gnat_rm/obsolescent_features id4}@anchor{430}
+@anchor{gnat_rm/obsolescent_features pragma-restricted-run-time}@anchor{430}@anchor{gnat_rm/obsolescent_features id4}@anchor{431}
@section pragma Restricted_Run_Time
@@ -28428,7 +28440,7 @@ preferred since the Ada 2005 pragma @code{Profile} is intended for
this kind of implementation dependent addition.
@node pragma Task_Info,package System Task_Info s-tasinf ads,pragma Restricted_Run_Time,Obsolescent Features
-@anchor{gnat_rm/obsolescent_features pragma-task-info}@anchor{431}@anchor{gnat_rm/obsolescent_features id5}@anchor{432}
+@anchor{gnat_rm/obsolescent_features pragma-task-info}@anchor{432}@anchor{gnat_rm/obsolescent_features id5}@anchor{433}
@section pragma Task_Info
@@ -28454,7 +28466,7 @@ in the spec of package System.Task_Info in the runtime
library.
@node package System Task_Info s-tasinf ads,,pragma Task_Info,Obsolescent Features
-@anchor{gnat_rm/obsolescent_features package-system-task-info}@anchor{433}@anchor{gnat_rm/obsolescent_features package-system-task-info-s-tasinf-ads}@anchor{434}
+@anchor{gnat_rm/obsolescent_features package-system-task-info}@anchor{434}@anchor{gnat_rm/obsolescent_features package-system-task-info-s-tasinf-ads}@anchor{435}
@section package System.Task_Info (@code{s-tasinf.ads})
@@ -28464,7 +28476,7 @@ to support the @code{Task_Info} pragma. The predefined Ada package
standard replacement for GNAT's @code{Task_Info} functionality.
@node Compatibility and Porting Guide,GNU Free Documentation License,Obsolescent Features,Top
-@anchor{gnat_rm/compatibility_and_porting_guide compatibility-and-porting-guide}@anchor{16}@anchor{gnat_rm/compatibility_and_porting_guide doc}@anchor{435}@anchor{gnat_rm/compatibility_and_porting_guide id1}@anchor{436}
+@anchor{gnat_rm/compatibility_and_porting_guide compatibility-and-porting-guide}@anchor{16}@anchor{gnat_rm/compatibility_and_porting_guide doc}@anchor{436}@anchor{gnat_rm/compatibility_and_porting_guide id1}@anchor{437}
@chapter Compatibility and Porting Guide
@@ -28486,7 +28498,7 @@ applications developed in other Ada environments.
@end menu
@node Writing Portable Fixed-Point Declarations,Compatibility with Ada 83,,Compatibility and Porting Guide
-@anchor{gnat_rm/compatibility_and_porting_guide id2}@anchor{437}@anchor{gnat_rm/compatibility_and_porting_guide writing-portable-fixed-point-declarations}@anchor{438}
+@anchor{gnat_rm/compatibility_and_porting_guide id2}@anchor{438}@anchor{gnat_rm/compatibility_and_porting_guide writing-portable-fixed-point-declarations}@anchor{439}
@section Writing Portable Fixed-Point Declarations
@@ -28608,7 +28620,7 @@ If you follow this scheme you will be guaranteed that your fixed-point
types will be portable.
@node Compatibility with Ada 83,Compatibility between Ada 95 and Ada 2005,Writing Portable Fixed-Point Declarations,Compatibility and Porting Guide
-@anchor{gnat_rm/compatibility_and_porting_guide compatibility-with-ada-83}@anchor{439}@anchor{gnat_rm/compatibility_and_porting_guide id3}@anchor{43a}
+@anchor{gnat_rm/compatibility_and_porting_guide compatibility-with-ada-83}@anchor{43a}@anchor{gnat_rm/compatibility_and_porting_guide id3}@anchor{43b}
@section Compatibility with Ada 83
@@ -28636,7 +28648,7 @@ following subsections treat the most likely issues to be encountered.
@end menu
@node Legal Ada 83 programs that are illegal in Ada 95,More deterministic semantics,,Compatibility with Ada 83
-@anchor{gnat_rm/compatibility_and_porting_guide id4}@anchor{43b}@anchor{gnat_rm/compatibility_and_porting_guide legal-ada-83-programs-that-are-illegal-in-ada-95}@anchor{43c}
+@anchor{gnat_rm/compatibility_and_porting_guide id4}@anchor{43c}@anchor{gnat_rm/compatibility_and_porting_guide legal-ada-83-programs-that-are-illegal-in-ada-95}@anchor{43d}
@subsection Legal Ada 83 programs that are illegal in Ada 95
@@ -28736,7 +28748,7 @@ the fix is usually simply to add the @code{(<>)} to the generic declaration.
@end itemize
@node More deterministic semantics,Changed semantics,Legal Ada 83 programs that are illegal in Ada 95,Compatibility with Ada 83
-@anchor{gnat_rm/compatibility_and_porting_guide more-deterministic-semantics}@anchor{43d}@anchor{gnat_rm/compatibility_and_porting_guide id5}@anchor{43e}
+@anchor{gnat_rm/compatibility_and_porting_guide more-deterministic-semantics}@anchor{43e}@anchor{gnat_rm/compatibility_and_porting_guide id5}@anchor{43f}
@subsection More deterministic semantics
@@ -28764,7 +28776,7 @@ which open select branches are executed.
@end itemize
@node Changed semantics,Other language compatibility issues,More deterministic semantics,Compatibility with Ada 83
-@anchor{gnat_rm/compatibility_and_porting_guide id6}@anchor{43f}@anchor{gnat_rm/compatibility_and_porting_guide changed-semantics}@anchor{440}
+@anchor{gnat_rm/compatibility_and_porting_guide id6}@anchor{440}@anchor{gnat_rm/compatibility_and_porting_guide changed-semantics}@anchor{441}
@subsection Changed semantics
@@ -28806,7 +28818,7 @@ covers only the restricted range.
@end itemize
@node Other language compatibility issues,,Changed semantics,Compatibility with Ada 83
-@anchor{gnat_rm/compatibility_and_porting_guide other-language-compatibility-issues}@anchor{441}@anchor{gnat_rm/compatibility_and_porting_guide id7}@anchor{442}
+@anchor{gnat_rm/compatibility_and_porting_guide other-language-compatibility-issues}@anchor{442}@anchor{gnat_rm/compatibility_and_porting_guide id7}@anchor{443}
@subsection Other language compatibility issues
@@ -28839,7 +28851,7 @@ include @code{pragma Interface} and the floating point type attributes
@end itemize
@node Compatibility between Ada 95 and Ada 2005,Implementation-dependent characteristics,Compatibility with Ada 83,Compatibility and Porting Guide
-@anchor{gnat_rm/compatibility_and_porting_guide compatibility-between-ada-95-and-ada-2005}@anchor{443}@anchor{gnat_rm/compatibility_and_porting_guide id8}@anchor{444}
+@anchor{gnat_rm/compatibility_and_porting_guide compatibility-between-ada-95-and-ada-2005}@anchor{444}@anchor{gnat_rm/compatibility_and_porting_guide id8}@anchor{445}
@section Compatibility between Ada 95 and Ada 2005
@@ -28911,7 +28923,7 @@ can declare a function returning a value from an anonymous access type.
@end itemize
@node Implementation-dependent characteristics,Compatibility with Other Ada Systems,Compatibility between Ada 95 and Ada 2005,Compatibility and Porting Guide
-@anchor{gnat_rm/compatibility_and_porting_guide implementation-dependent-characteristics}@anchor{445}@anchor{gnat_rm/compatibility_and_porting_guide id9}@anchor{446}
+@anchor{gnat_rm/compatibility_and_porting_guide implementation-dependent-characteristics}@anchor{446}@anchor{gnat_rm/compatibility_and_porting_guide id9}@anchor{447}
@section Implementation-dependent characteristics
@@ -28934,7 +28946,7 @@ transition from certain Ada 83 compilers.
@end menu
@node Implementation-defined pragmas,Implementation-defined attributes,,Implementation-dependent characteristics
-@anchor{gnat_rm/compatibility_and_porting_guide implementation-defined-pragmas}@anchor{447}@anchor{gnat_rm/compatibility_and_porting_guide id10}@anchor{448}
+@anchor{gnat_rm/compatibility_and_porting_guide implementation-defined-pragmas}@anchor{448}@anchor{gnat_rm/compatibility_and_porting_guide id10}@anchor{449}
@subsection Implementation-defined pragmas
@@ -28956,7 +28968,7 @@ avoiding compiler rejection of units that contain such pragmas; they are not
relevant in a GNAT context and hence are not otherwise implemented.
@node Implementation-defined attributes,Libraries,Implementation-defined pragmas,Implementation-dependent characteristics
-@anchor{gnat_rm/compatibility_and_porting_guide id11}@anchor{449}@anchor{gnat_rm/compatibility_and_porting_guide implementation-defined-attributes}@anchor{44a}
+@anchor{gnat_rm/compatibility_and_porting_guide id11}@anchor{44a}@anchor{gnat_rm/compatibility_and_porting_guide implementation-defined-attributes}@anchor{44b}
@subsection Implementation-defined attributes
@@ -28970,7 +28982,7 @@ Ada 83, GNAT supplies the attributes @code{Bit}, @code{Machine_Size} and
@code{Type_Class}.
@node Libraries,Elaboration order,Implementation-defined attributes,Implementation-dependent characteristics
-@anchor{gnat_rm/compatibility_and_porting_guide libraries}@anchor{44b}@anchor{gnat_rm/compatibility_and_porting_guide id12}@anchor{44c}
+@anchor{gnat_rm/compatibility_and_porting_guide libraries}@anchor{44c}@anchor{gnat_rm/compatibility_and_porting_guide id12}@anchor{44d}
@subsection Libraries
@@ -28999,7 +29011,7 @@ be preferable to retrofit the application using modular types.
@end itemize
@node Elaboration order,Target-specific aspects,Libraries,Implementation-dependent characteristics
-@anchor{gnat_rm/compatibility_and_porting_guide elaboration-order}@anchor{44d}@anchor{gnat_rm/compatibility_and_porting_guide id13}@anchor{44e}
+@anchor{gnat_rm/compatibility_and_porting_guide elaboration-order}@anchor{44e}@anchor{gnat_rm/compatibility_and_porting_guide id13}@anchor{44f}
@subsection Elaboration order
@@ -29035,7 +29047,7 @@ pragmas either globally (as an effect of the @emph{-gnatE} switch) or locally
@end itemize
@node Target-specific aspects,,Elaboration order,Implementation-dependent characteristics
-@anchor{gnat_rm/compatibility_and_porting_guide target-specific-aspects}@anchor{44f}@anchor{gnat_rm/compatibility_and_porting_guide id14}@anchor{450}
+@anchor{gnat_rm/compatibility_and_porting_guide target-specific-aspects}@anchor{450}@anchor{gnat_rm/compatibility_and_porting_guide id14}@anchor{451}
@subsection Target-specific aspects
@@ -29048,10 +29060,10 @@ on the robustness of the original design. Moreover, Ada 95 (and thus
Ada 2005 and Ada 2012) are sometimes
incompatible with typical Ada 83 compiler practices regarding implicit
packing, the meaning of the Size attribute, and the size of access values.
-GNAT's approach to these issues is described in @ref{451,,Representation Clauses}.
+GNAT's approach to these issues is described in @ref{452,,Representation Clauses}.
@node Compatibility with Other Ada Systems,Representation Clauses,Implementation-dependent characteristics,Compatibility and Porting Guide
-@anchor{gnat_rm/compatibility_and_porting_guide id15}@anchor{452}@anchor{gnat_rm/compatibility_and_porting_guide compatibility-with-other-ada-systems}@anchor{453}
+@anchor{gnat_rm/compatibility_and_porting_guide id15}@anchor{453}@anchor{gnat_rm/compatibility_and_porting_guide compatibility-with-other-ada-systems}@anchor{454}
@section Compatibility with Other Ada Systems
@@ -29094,7 +29106,7 @@ far beyond this minimal set, as described in the next section.
@end itemize
@node Representation Clauses,Compatibility with HP Ada 83,Compatibility with Other Ada Systems,Compatibility and Porting Guide
-@anchor{gnat_rm/compatibility_and_porting_guide representation-clauses}@anchor{451}@anchor{gnat_rm/compatibility_and_porting_guide id16}@anchor{454}
+@anchor{gnat_rm/compatibility_and_porting_guide representation-clauses}@anchor{452}@anchor{gnat_rm/compatibility_and_porting_guide id16}@anchor{455}
@section Representation Clauses
@@ -29187,7 +29199,7 @@ with thin pointers.
@end itemize
@node Compatibility with HP Ada 83,,Representation Clauses,Compatibility and Porting Guide
-@anchor{gnat_rm/compatibility_and_porting_guide compatibility-with-hp-ada-83}@anchor{455}@anchor{gnat_rm/compatibility_and_porting_guide id17}@anchor{456}
+@anchor{gnat_rm/compatibility_and_porting_guide compatibility-with-hp-ada-83}@anchor{456}@anchor{gnat_rm/compatibility_and_porting_guide id17}@anchor{457}
@section Compatibility with HP Ada 83
@@ -29217,7 +29229,7 @@ extension of package System.
@end itemize
@node GNU Free Documentation License,Index,Compatibility and Porting Guide,Top
-@anchor{share/gnu_free_documentation_license gnu-fdl}@anchor{1}@anchor{share/gnu_free_documentation_license doc}@anchor{457}@anchor{share/gnu_free_documentation_license gnu-free-documentation-license}@anchor{458}
+@anchor{share/gnu_free_documentation_license gnu-fdl}@anchor{1}@anchor{share/gnu_free_documentation_license doc}@anchor{458}@anchor{share/gnu_free_documentation_license gnu-free-documentation-license}@anchor{459}
@chapter GNU Free Documentation License
diff --git a/gcc/ada/gnat_ugn.texi b/gcc/ada/gnat_ugn.texi
index 947506799a5..05fdf4c84d0 100644
--- a/gcc/ada/gnat_ugn.texi
+++ b/gcc/ada/gnat_ugn.texi
@@ -21,7 +21,7 @@
@copying
@quotation
-GNAT User's Guide for Native Platforms , Oct 20, 2017
+GNAT User's Guide for Native Platforms , Nov 09, 2017
AdaCore
@@ -446,6 +446,7 @@ Platform-Specific Information
* Run-Time Libraries::
* Specifying a Run-Time Library::
+* GNU/Linux Topics::
* Microsoft Windows Topics::
* Mac OS Topics::
@@ -457,6 +458,10 @@ Specifying a Run-Time Library
* Choosing the Scheduling Policy::
+GNU/Linux Topics
+
+* Required Packages on GNU/Linux;: Required Packages on GNU/Linux.
+
Microsoft Windows Topics
* Using GNAT on Windows::
@@ -6456,7 +6461,7 @@ package Animals is
type Dog is new Animal and Carnivore and Domestic with record
Tooth_Count : Natural;
- Owner : String (1 .. 30);
+ Owner : Chars_Ptr;
end record;
pragma Import (C_Plus_Plus, Dog);
@@ -23449,6 +23454,7 @@ topics related to the GNAT implementation on Windows and Mac OS.
@menu
* Run-Time Libraries::
* Specifying a Run-Time Library::
+* GNU/Linux Topics::
* Microsoft Windows Topics::
* Mac OS Topics::
@@ -23615,7 +23621,7 @@ ZCX
@end multitable
-@node Specifying a Run-Time Library,Microsoft Windows Topics,Run-Time Libraries,Platform-Specific Information
+@node Specifying a Run-Time Library,GNU/Linux Topics,Run-Time Libraries,Platform-Specific Information
@anchor{gnat_ugn/platform_specific_information specifying-a-run-time-library}@anchor{1d6}@anchor{gnat_ugn/platform_specific_information id4}@anchor{1d7}
@section Specifying a Run-Time Library
@@ -23770,10 +23776,60 @@ Ignore : constant Boolean :=
It gets the effective user id, and if it's not 0 (i.e. root), it raises
Program_Error.
+@geindex Linux
+
+@geindex GNU/Linux
+
+@node GNU/Linux Topics,Microsoft Windows Topics,Specifying a Run-Time Library,Platform-Specific Information
+@anchor{gnat_ugn/platform_specific_information id6}@anchor{1da}@anchor{gnat_ugn/platform_specific_information gnu-linux-topics}@anchor{1db}
+@section GNU/Linux Topics
+
+
+This section describes topics that are specific to GNU/Linux platforms.
+
+@menu
+* Required Packages on GNU/Linux;: Required Packages on GNU/Linux.
+
+@end menu
+
+@node Required Packages on GNU/Linux,,,GNU/Linux Topics
+@anchor{gnat_ugn/platform_specific_information id7}@anchor{1dc}@anchor{gnat_ugn/platform_specific_information required-packages-on-gnu-linux}@anchor{1dd}
+@subsection Required Packages on GNU/Linux:
+
+
+GNAT requires the C library developer's package to be installed.
+The name of of that package depends on your GNU/Linux distribution:
+
+
+@itemize *
+
+@item
+RedHat, SUSE: @code{glibc-devel};
+
+@item
+Debian, Ubuntu: @code{libc6-dev} (normally installed by default).
+@end itemize
+
+If using the 32-bit version of GNAT on a 64-bit version of GNU/Linux,
+you'll need the 32-bit version of that package instead:
+
+
+@itemize *
+
+@item
+RedHat, SUSE: @code{glibc-devel.i686};
+
+@item
+Debian, Ubuntu: @code{libc6-dev:i386}.
+@end itemize
+
+Other GNU/Linux distributions might be choosing a different name
+for that package.
+
@geindex Windows
-@node Microsoft Windows Topics,Mac OS Topics,Specifying a Run-Time Library,Platform-Specific Information
-@anchor{gnat_ugn/platform_specific_information id6}@anchor{1da}@anchor{gnat_ugn/platform_specific_information microsoft-windows-topics}@anchor{2c}
+@node Microsoft Windows Topics,Mac OS Topics,GNU/Linux Topics,Platform-Specific Information
+@anchor{gnat_ugn/platform_specific_information microsoft-windows-topics}@anchor{2c}@anchor{gnat_ugn/platform_specific_information id8}@anchor{1de}
@section Microsoft Windows Topics
@@ -23796,7 +23852,7 @@ platforms.
@end menu
@node Using GNAT on Windows,Using a network installation of GNAT,,Microsoft Windows Topics
-@anchor{gnat_ugn/platform_specific_information using-gnat-on-windows}@anchor{1db}@anchor{gnat_ugn/platform_specific_information id7}@anchor{1dc}
+@anchor{gnat_ugn/platform_specific_information using-gnat-on-windows}@anchor{1df}@anchor{gnat_ugn/platform_specific_information id9}@anchor{1e0}
@subsection Using GNAT on Windows
@@ -23873,7 +23929,7 @@ uninstall or integrate different GNAT products.
@end itemize
@node Using a network installation of GNAT,CONSOLE and WINDOWS subsystems,Using GNAT on Windows,Microsoft Windows Topics
-@anchor{gnat_ugn/platform_specific_information id8}@anchor{1dd}@anchor{gnat_ugn/platform_specific_information using-a-network-installation-of-gnat}@anchor{1de}
+@anchor{gnat_ugn/platform_specific_information id10}@anchor{1e1}@anchor{gnat_ugn/platform_specific_information using-a-network-installation-of-gnat}@anchor{1e2}
@subsection Using a network installation of GNAT
@@ -23900,7 +23956,7 @@ transfer of large amounts of data across the network and will likely cause
serious performance penalty.
@node CONSOLE and WINDOWS subsystems,Temporary Files,Using a network installation of GNAT,Microsoft Windows Topics
-@anchor{gnat_ugn/platform_specific_information console-and-windows-subsystems}@anchor{1df}@anchor{gnat_ugn/platform_specific_information id9}@anchor{1e0}
+@anchor{gnat_ugn/platform_specific_information id11}@anchor{1e3}@anchor{gnat_ugn/platform_specific_information console-and-windows-subsystems}@anchor{1e4}
@subsection CONSOLE and WINDOWS subsystems
@@ -23925,7 +23981,7 @@ $ gnatmake winprog -largs -mwindows
@end quotation
@node Temporary Files,Disabling Command Line Argument Expansion,CONSOLE and WINDOWS subsystems,Microsoft Windows Topics
-@anchor{gnat_ugn/platform_specific_information id10}@anchor{1e1}@anchor{gnat_ugn/platform_specific_information temporary-files}@anchor{1e2}
+@anchor{gnat_ugn/platform_specific_information id12}@anchor{1e5}@anchor{gnat_ugn/platform_specific_information temporary-files}@anchor{1e6}
@subsection Temporary Files
@@ -23964,7 +24020,7 @@ environments where you may not have write access to some
directories.
@node Disabling Command Line Argument Expansion,Mixed-Language Programming on Windows,Temporary Files,Microsoft Windows Topics
-@anchor{gnat_ugn/platform_specific_information disabling-command-line-argument-expansion}@anchor{1e3}
+@anchor{gnat_ugn/platform_specific_information disabling-command-line-argument-expansion}@anchor{1e7}
@subsection Disabling Command Line Argument Expansion
@@ -24035,7 +24091,7 @@ Ada.Command_Line.Argument (1) -> "'*.txt'"
@end example
@node Mixed-Language Programming on Windows,Windows Specific Add-Ons,Disabling Command Line Argument Expansion,Microsoft Windows Topics
-@anchor{gnat_ugn/platform_specific_information id11}@anchor{1e4}@anchor{gnat_ugn/platform_specific_information mixed-language-programming-on-windows}@anchor{1e5}
+@anchor{gnat_ugn/platform_specific_information id13}@anchor{1e8}@anchor{gnat_ugn/platform_specific_information mixed-language-programming-on-windows}@anchor{1e9}
@subsection Mixed-Language Programming on Windows
@@ -24057,12 +24113,12 @@ to use the Microsoft tools for your C++ code, you have two choices:
Encapsulate your C++ code in a DLL to be linked with your Ada
application. In this case, use the Microsoft or whatever environment to
build the DLL and use GNAT to build your executable
-(@ref{1e6,,Using DLLs with GNAT}).
+(@ref{1ea,,Using DLLs with GNAT}).
@item
Or you can encapsulate your Ada code in a DLL to be linked with the
other part of your application. In this case, use GNAT to build the DLL
-(@ref{1e7,,Building DLLs with GNAT Project files}) and use the Microsoft
+(@ref{1eb,,Building DLLs with GNAT Project files}) and use the Microsoft
or whatever environment to build your executable.
@end itemize
@@ -24119,7 +24175,7 @@ native SEH support is used.
@end menu
@node Windows Calling Conventions,Introduction to Dynamic Link Libraries DLLs,,Mixed-Language Programming on Windows
-@anchor{gnat_ugn/platform_specific_information windows-calling-conventions}@anchor{1e8}@anchor{gnat_ugn/platform_specific_information id12}@anchor{1e9}
+@anchor{gnat_ugn/platform_specific_information windows-calling-conventions}@anchor{1ec}@anchor{gnat_ugn/platform_specific_information id14}@anchor{1ed}
@subsubsection Windows Calling Conventions
@@ -24164,7 +24220,7 @@ are available for Windows:
@end menu
@node C Calling Convention,Stdcall Calling Convention,,Windows Calling Conventions
-@anchor{gnat_ugn/platform_specific_information c-calling-convention}@anchor{1ea}@anchor{gnat_ugn/platform_specific_information id13}@anchor{1eb}
+@anchor{gnat_ugn/platform_specific_information c-calling-convention}@anchor{1ee}@anchor{gnat_ugn/platform_specific_information id15}@anchor{1ef}
@subsubsection @code{C} Calling Convention
@@ -24206,10 +24262,10 @@ is missing, as in the above example, this parameter is set to be the
When importing a variable defined in C, you should always use the @code{C}
calling convention unless the object containing the variable is part of a
DLL (in which case you should use the @code{Stdcall} calling
-convention, @ref{1ec,,Stdcall Calling Convention}).
+convention, @ref{1f0,,Stdcall Calling Convention}).
@node Stdcall Calling Convention,Win32 Calling Convention,C Calling Convention,Windows Calling Conventions
-@anchor{gnat_ugn/platform_specific_information stdcall-calling-convention}@anchor{1ec}@anchor{gnat_ugn/platform_specific_information id14}@anchor{1ed}
+@anchor{gnat_ugn/platform_specific_information stdcall-calling-convention}@anchor{1f0}@anchor{gnat_ugn/platform_specific_information id16}@anchor{1f1}
@subsubsection @code{Stdcall} Calling Convention
@@ -24306,7 +24362,7 @@ Note that to ease building cross-platform bindings this convention
will be handled as a @code{C} calling convention on non-Windows platforms.
@node Win32 Calling Convention,DLL Calling Convention,Stdcall Calling Convention,Windows Calling Conventions
-@anchor{gnat_ugn/platform_specific_information win32-calling-convention}@anchor{1ee}@anchor{gnat_ugn/platform_specific_information id15}@anchor{1ef}
+@anchor{gnat_ugn/platform_specific_information win32-calling-convention}@anchor{1f2}@anchor{gnat_ugn/platform_specific_information id17}@anchor{1f3}
@subsubsection @code{Win32} Calling Convention
@@ -24314,7 +24370,7 @@ This convention, which is GNAT-specific is fully equivalent to the
@code{Stdcall} calling convention described above.
@node DLL Calling Convention,,Win32 Calling Convention,Windows Calling Conventions
-@anchor{gnat_ugn/platform_specific_information dll-calling-convention}@anchor{1f0}@anchor{gnat_ugn/platform_specific_information id16}@anchor{1f1}
+@anchor{gnat_ugn/platform_specific_information id18}@anchor{1f4}@anchor{gnat_ugn/platform_specific_information dll-calling-convention}@anchor{1f5}
@subsubsection @code{DLL} Calling Convention
@@ -24322,7 +24378,7 @@ This convention, which is GNAT-specific is fully equivalent to the
@code{Stdcall} calling convention described above.
@node Introduction to Dynamic Link Libraries DLLs,Using DLLs with GNAT,Windows Calling Conventions,Mixed-Language Programming on Windows
-@anchor{gnat_ugn/platform_specific_information introduction-to-dynamic-link-libraries-dlls}@anchor{1f2}@anchor{gnat_ugn/platform_specific_information id17}@anchor{1f3}
+@anchor{gnat_ugn/platform_specific_information id19}@anchor{1f6}@anchor{gnat_ugn/platform_specific_information introduction-to-dynamic-link-libraries-dlls}@anchor{1f7}
@subsubsection Introduction to Dynamic Link Libraries (DLLs)
@@ -24406,10 +24462,10 @@ As a side note, an interesting difference between Microsoft DLLs and
Unix shared libraries, is the fact that on most Unix systems all public
routines are exported by default in a Unix shared library, while under
Windows it is possible (but not required) to list exported routines in
-a definition file (see @ref{1f4,,The Definition File}).
+a definition file (see @ref{1f8,,The Definition File}).
@node Using DLLs with GNAT,Building DLLs with GNAT Project files,Introduction to Dynamic Link Libraries DLLs,Mixed-Language Programming on Windows
-@anchor{gnat_ugn/platform_specific_information id18}@anchor{1f5}@anchor{gnat_ugn/platform_specific_information using-dlls-with-gnat}@anchor{1e6}
+@anchor{gnat_ugn/platform_specific_information id20}@anchor{1f9}@anchor{gnat_ugn/platform_specific_information using-dlls-with-gnat}@anchor{1ea}
@subsubsection Using DLLs with GNAT
@@ -24500,7 +24556,7 @@ example a fictitious DLL called @code{API.dll}.
@end menu
@node Creating an Ada Spec for the DLL Services,Creating an Import Library,,Using DLLs with GNAT
-@anchor{gnat_ugn/platform_specific_information creating-an-ada-spec-for-the-dll-services}@anchor{1f6}@anchor{gnat_ugn/platform_specific_information id19}@anchor{1f7}
+@anchor{gnat_ugn/platform_specific_information id21}@anchor{1fa}@anchor{gnat_ugn/platform_specific_information creating-an-ada-spec-for-the-dll-services}@anchor{1fb}
@subsubsection Creating an Ada Spec for the DLL Services
@@ -24540,7 +24596,7 @@ end API;
@end quotation
@node Creating an Import Library,,Creating an Ada Spec for the DLL Services,Using DLLs with GNAT
-@anchor{gnat_ugn/platform_specific_information id20}@anchor{1f8}@anchor{gnat_ugn/platform_specific_information creating-an-import-library}@anchor{1f9}
+@anchor{gnat_ugn/platform_specific_information id22}@anchor{1fc}@anchor{gnat_ugn/platform_specific_information creating-an-import-library}@anchor{1fd}
@subsubsection Creating an Import Library
@@ -24554,7 +24610,7 @@ as in this case it is possible to link directly against the
DLL. Otherwise read on.
@geindex Definition file
-@anchor{gnat_ugn/platform_specific_information the-definition-file}@anchor{1f4}
+@anchor{gnat_ugn/platform_specific_information the-definition-file}@anchor{1f8}
@subsubheading The Definition File
@@ -24602,17 +24658,17 @@ EXPORTS
@end table
Note that you must specify the correct suffix (@code{@@@emph{nn}})
-(see @ref{1e8,,Windows Calling Conventions}) for a Stdcall
+(see @ref{1ec,,Windows Calling Conventions}) for a Stdcall
calling convention function in the exported symbols list.
There can actually be other sections in a definition file, but these
sections are not relevant to the discussion at hand.
-@anchor{gnat_ugn/platform_specific_information create-def-file-automatically}@anchor{1fa}
+@anchor{gnat_ugn/platform_specific_information create-def-file-automatically}@anchor{1fe}
@subsubheading Creating a Definition File Automatically
You can automatically create the definition file @code{API.def}
-(see @ref{1f4,,The Definition File}) from a DLL.
+(see @ref{1f8,,The Definition File}) from a DLL.
For that use the @code{dlltool} program as follows:
@quotation
@@ -24622,7 +24678,7 @@ $ dlltool API.dll -z API.def --export-all-symbols
@end example
Note that if some routines in the DLL have the @code{Stdcall} convention
-(@ref{1e8,,Windows Calling Conventions}) with stripped @code{@@@emph{nn}}
+(@ref{1ec,,Windows Calling Conventions}) with stripped @code{@@@emph{nn}}
suffix then you'll have to edit @code{api.def} to add it, and specify
@code{-k} to @code{gnatdll} when creating the import library.
@@ -24646,13 +24702,13 @@ tells you what symbol is expected. You just have to go back to the
definition file and add the right suffix.
@end itemize
@end quotation
-@anchor{gnat_ugn/platform_specific_information gnat-style-import-library}@anchor{1fb}
+@anchor{gnat_ugn/platform_specific_information gnat-style-import-library}@anchor{1ff}
@subsubheading GNAT-Style Import Library
To create a static import library from @code{API.dll} with the GNAT tools
you should create the .def file, then use @code{gnatdll} tool
-(see @ref{1fc,,Using gnatdll}) as follows:
+(see @ref{200,,Using gnatdll}) as follows:
@quotation
@@ -24668,15 +24724,15 @@ definition file name is @code{xyz.def}, the import library name will
be @code{libxyz.a}. Note that in the previous example option
@code{-e} could have been removed because the name of the definition
file (before the @code{.def} suffix) is the same as the name of the
-DLL (@ref{1fc,,Using gnatdll} for more information about @code{gnatdll}).
+DLL (@ref{200,,Using gnatdll} for more information about @code{gnatdll}).
@end quotation
-@anchor{gnat_ugn/platform_specific_information msvs-style-import-library}@anchor{1fd}
+@anchor{gnat_ugn/platform_specific_information msvs-style-import-library}@anchor{201}
@subsubheading Microsoft-Style Import Library
A Microsoft import library is needed only if you plan to make an
Ada DLL available to applications developed with Microsoft
-tools (@ref{1e5,,Mixed-Language Programming on Windows}).
+tools (@ref{1e9,,Mixed-Language Programming on Windows}).
To create a Microsoft-style import library for @code{API.dll} you
should create the .def file, then build the actual import library using
@@ -24700,7 +24756,7 @@ See the Microsoft documentation for further details about the usage of
@end quotation
@node Building DLLs with GNAT Project files,Building DLLs with GNAT,Using DLLs with GNAT,Mixed-Language Programming on Windows
-@anchor{gnat_ugn/platform_specific_information id21}@anchor{1fe}@anchor{gnat_ugn/platform_specific_information building-dlls-with-gnat-project-files}@anchor{1e7}
+@anchor{gnat_ugn/platform_specific_information id23}@anchor{202}@anchor{gnat_ugn/platform_specific_information building-dlls-with-gnat-project-files}@anchor{1eb}
@subsubsection Building DLLs with GNAT Project files
@@ -24716,7 +24772,7 @@ when inside the @code{DllMain} routine which is used for auto-initialization
of shared libraries, so it is not possible to have library level tasks in SALs.
@node Building DLLs with GNAT,Building DLLs with gnatdll,Building DLLs with GNAT Project files,Mixed-Language Programming on Windows
-@anchor{gnat_ugn/platform_specific_information building-dlls-with-gnat}@anchor{1ff}@anchor{gnat_ugn/platform_specific_information id22}@anchor{200}
+@anchor{gnat_ugn/platform_specific_information building-dlls-with-gnat}@anchor{203}@anchor{gnat_ugn/platform_specific_information id24}@anchor{204}
@subsubsection Building DLLs with GNAT
@@ -24747,7 +24803,7 @@ $ gcc -shared -shared-libgcc -o api.dll obj1.o obj2.o ...
It is important to note that in this case all symbols found in the
object files are automatically exported. It is possible to restrict
the set of symbols to export by passing to @code{gcc} a definition
-file (see @ref{1f4,,The Definition File}).
+file (see @ref{1f8,,The Definition File}).
For example:
@example
@@ -24785,7 +24841,7 @@ $ gnatmake main -Iapilib -bargs -shared -largs -Lapilib -lAPI
@end quotation
@node Building DLLs with gnatdll,Ada DLLs and Finalization,Building DLLs with GNAT,Mixed-Language Programming on Windows
-@anchor{gnat_ugn/platform_specific_information building-dlls-with-gnatdll}@anchor{201}@anchor{gnat_ugn/platform_specific_information id23}@anchor{202}
+@anchor{gnat_ugn/platform_specific_information building-dlls-with-gnatdll}@anchor{205}@anchor{gnat_ugn/platform_specific_information id25}@anchor{206}
@subsubsection Building DLLs with gnatdll
@@ -24793,8 +24849,8 @@ $ gnatmake main -Iapilib -bargs -shared -largs -Lapilib -lAPI
@geindex building
Note that it is preferred to use GNAT Project files
-(@ref{1e7,,Building DLLs with GNAT Project files}) or the built-in GNAT
-DLL support (@ref{1ff,,Building DLLs with GNAT}) or to build DLLs.
+(@ref{1eb,,Building DLLs with GNAT Project files}) or the built-in GNAT
+DLL support (@ref{203,,Building DLLs with GNAT}) or to build DLLs.
This section explains how to build DLLs containing Ada code using
@code{gnatdll}. These DLLs will be referred to as Ada DLLs in the
@@ -24810,20 +24866,20 @@ non-Ada applications are as follows:
You need to mark each Ada entity exported by the DLL with a @code{C} or
@code{Stdcall} calling convention to avoid any Ada name mangling for the
entities exported by the DLL
-(see @ref{203,,Exporting Ada Entities}). You can
+(see @ref{207,,Exporting Ada Entities}). You can
skip this step if you plan to use the Ada DLL only from Ada applications.
@item
Your Ada code must export an initialization routine which calls the routine
@code{adainit} generated by @code{gnatbind} to perform the elaboration of
-the Ada code in the DLL (@ref{204,,Ada DLLs and Elaboration}). The initialization
+the Ada code in the DLL (@ref{208,,Ada DLLs and Elaboration}). The initialization
routine exported by the Ada DLL must be invoked by the clients of the DLL
to initialize the DLL.
@item
When useful, the DLL should also export a finalization routine which calls
routine @code{adafinal} generated by @code{gnatbind} to perform the
-finalization of the Ada code in the DLL (@ref{205,,Ada DLLs and Finalization}).
+finalization of the Ada code in the DLL (@ref{209,,Ada DLLs and Finalization}).
The finalization routine exported by the Ada DLL must be invoked by the
clients of the DLL when the DLL services are no further needed.
@@ -24833,11 +24889,11 @@ of the programming languages to which you plan to make the DLL available.
@item
You must provide a definition file listing the exported entities
-(@ref{1f4,,The Definition File}).
+(@ref{1f8,,The Definition File}).
@item
Finally you must use @code{gnatdll} to produce the DLL and the import
-library (@ref{1fc,,Using gnatdll}).
+library (@ref{200,,Using gnatdll}).
@end itemize
Note that a relocatable DLL stripped using the @code{strip}
@@ -24857,7 +24913,7 @@ chapter of the @emph{GPRbuild User's Guide}.
@end menu
@node Limitations When Using Ada DLLs from Ada,Exporting Ada Entities,,Building DLLs with gnatdll
-@anchor{gnat_ugn/platform_specific_information limitations-when-using-ada-dlls-from-ada}@anchor{206}
+@anchor{gnat_ugn/platform_specific_information limitations-when-using-ada-dlls-from-ada}@anchor{20a}
@subsubsection Limitations When Using Ada DLLs from Ada
@@ -24878,7 +24934,7 @@ It is completely safe to exchange plain elementary, array or record types,
Windows object handles, etc.
@node Exporting Ada Entities,Ada DLLs and Elaboration,Limitations When Using Ada DLLs from Ada,Building DLLs with gnatdll
-@anchor{gnat_ugn/platform_specific_information exporting-ada-entities}@anchor{203}@anchor{gnat_ugn/platform_specific_information id24}@anchor{207}
+@anchor{gnat_ugn/platform_specific_information exporting-ada-entities}@anchor{207}@anchor{gnat_ugn/platform_specific_information id26}@anchor{20b}
@subsubsection Exporting Ada Entities
@@ -24978,10 +25034,10 @@ end API;
Note that if you do not export the Ada entities with a @code{C} or
@code{Stdcall} convention you will have to provide the mangled Ada names
in the definition file of the Ada DLL
-(@ref{208,,Creating the Definition File}).
+(@ref{20c,,Creating the Definition File}).
@node Ada DLLs and Elaboration,,Exporting Ada Entities,Building DLLs with gnatdll
-@anchor{gnat_ugn/platform_specific_information ada-dlls-and-elaboration}@anchor{204}@anchor{gnat_ugn/platform_specific_information id25}@anchor{209}
+@anchor{gnat_ugn/platform_specific_information ada-dlls-and-elaboration}@anchor{208}@anchor{gnat_ugn/platform_specific_information id27}@anchor{20d}
@subsubsection Ada DLLs and Elaboration
@@ -24999,7 +25055,7 @@ the Ada elaboration routine @code{adainit} generated by the GNAT binder
(@ref{b4,,Binding with Non-Ada Main Programs}). See the body of
@code{Initialize_Api} for an example. Note that the GNAT binder is
automatically invoked during the DLL build process by the @code{gnatdll}
-tool (@ref{1fc,,Using gnatdll}).
+tool (@ref{200,,Using gnatdll}).
When a DLL is loaded, Windows systematically invokes a routine called
@code{DllMain}. It would therefore be possible to call @code{adainit}
@@ -25012,7 +25068,7 @@ time), which means that the GNAT run-time will deadlock waiting for the
newly created task to complete its initialization.
@node Ada DLLs and Finalization,Creating a Spec for Ada DLLs,Building DLLs with gnatdll,Mixed-Language Programming on Windows
-@anchor{gnat_ugn/platform_specific_information ada-dlls-and-finalization}@anchor{205}@anchor{gnat_ugn/platform_specific_information id26}@anchor{20a}
+@anchor{gnat_ugn/platform_specific_information id28}@anchor{20e}@anchor{gnat_ugn/platform_specific_information ada-dlls-and-finalization}@anchor{209}
@subsubsection Ada DLLs and Finalization
@@ -25027,10 +25083,10 @@ routine @code{adafinal} generated by the GNAT binder
See the body of @code{Finalize_Api} for an
example. As already pointed out the GNAT binder is automatically invoked
during the DLL build process by the @code{gnatdll} tool
-(@ref{1fc,,Using gnatdll}).
+(@ref{200,,Using gnatdll}).
@node Creating a Spec for Ada DLLs,GNAT and Windows Resources,Ada DLLs and Finalization,Mixed-Language Programming on Windows
-@anchor{gnat_ugn/platform_specific_information id27}@anchor{20b}@anchor{gnat_ugn/platform_specific_information creating-a-spec-for-ada-dlls}@anchor{20c}
+@anchor{gnat_ugn/platform_specific_information id29}@anchor{20f}@anchor{gnat_ugn/platform_specific_information creating-a-spec-for-ada-dlls}@anchor{210}
@subsubsection Creating a Spec for Ada DLLs
@@ -25088,7 +25144,7 @@ end API;
@end menu
@node Creating the Definition File,Using gnatdll,,Creating a Spec for Ada DLLs
-@anchor{gnat_ugn/platform_specific_information id28}@anchor{20d}@anchor{gnat_ugn/platform_specific_information creating-the-definition-file}@anchor{208}
+@anchor{gnat_ugn/platform_specific_information creating-the-definition-file}@anchor{20c}@anchor{gnat_ugn/platform_specific_information id30}@anchor{211}
@subsubsection Creating the Definition File
@@ -25124,7 +25180,7 @@ EXPORTS
@end quotation
@node Using gnatdll,,Creating the Definition File,Creating a Spec for Ada DLLs
-@anchor{gnat_ugn/platform_specific_information id29}@anchor{20e}@anchor{gnat_ugn/platform_specific_information using-gnatdll}@anchor{1fc}
+@anchor{gnat_ugn/platform_specific_information using-gnatdll}@anchor{200}@anchor{gnat_ugn/platform_specific_information id31}@anchor{212}
@subsubsection Using @code{gnatdll}
@@ -25335,7 +25391,7 @@ asks @code{gnatlink} to generate the routines @code{DllMain} and
is loaded into memory.
@item
-@code{gnatdll} uses @code{dlltool} (see @ref{20f,,Using dlltool}) to build the
+@code{gnatdll} uses @code{dlltool} (see @ref{213,,Using dlltool}) to build the
export table (@code{api.exp}). The export table contains the relocation
information in a form which can be used during the final link to ensure
that the Windows loader is able to place the DLL anywhere in memory.
@@ -25374,7 +25430,7 @@ $ gnatbind -n api
$ gnatlink api api.exp -o api.dll -mdll
@end example
@end itemize
-@anchor{gnat_ugn/platform_specific_information using-dlltool}@anchor{20f}
+@anchor{gnat_ugn/platform_specific_information using-dlltool}@anchor{213}
@subsubheading Using @code{dlltool}
@@ -25433,7 +25489,7 @@ DLL in the static import library generated by @code{dlltool} with switch
@item @code{-k}
Kill @code{@@@emph{nn}} from exported names
-(@ref{1e8,,Windows Calling Conventions}
+(@ref{1ec,,Windows Calling Conventions}
for a discussion about @code{Stdcall}-style symbols.
@end table
@@ -25489,7 +25545,7 @@ Use @code{assembler-name} as the assembler. The default is @code{as}.
@end table
@node GNAT and Windows Resources,Using GNAT DLLs from Microsoft Visual Studio Applications,Creating a Spec for Ada DLLs,Mixed-Language Programming on Windows
-@anchor{gnat_ugn/platform_specific_information gnat-and-windows-resources}@anchor{210}@anchor{gnat_ugn/platform_specific_information id30}@anchor{211}
+@anchor{gnat_ugn/platform_specific_information gnat-and-windows-resources}@anchor{214}@anchor{gnat_ugn/platform_specific_information id32}@anchor{215}
@subsubsection GNAT and Windows Resources
@@ -25584,7 +25640,7 @@ the corresponding Microsoft documentation.
@end menu
@node Building Resources,Compiling Resources,,GNAT and Windows Resources
-@anchor{gnat_ugn/platform_specific_information building-resources}@anchor{212}@anchor{gnat_ugn/platform_specific_information id31}@anchor{213}
+@anchor{gnat_ugn/platform_specific_information building-resources}@anchor{216}@anchor{gnat_ugn/platform_specific_information id33}@anchor{217}
@subsubsection Building Resources
@@ -25604,7 +25660,7 @@ complete description of the resource script language can be found in the
Microsoft documentation.
@node Compiling Resources,Using Resources,Building Resources,GNAT and Windows Resources
-@anchor{gnat_ugn/platform_specific_information compiling-resources}@anchor{214}@anchor{gnat_ugn/platform_specific_information id32}@anchor{215}
+@anchor{gnat_ugn/platform_specific_information compiling-resources}@anchor{218}@anchor{gnat_ugn/platform_specific_information id34}@anchor{219}
@subsubsection Compiling Resources
@@ -25646,7 +25702,7 @@ $ windres -i myres.res -o myres.o
@end quotation
@node Using Resources,,Compiling Resources,GNAT and Windows Resources
-@anchor{gnat_ugn/platform_specific_information using-resources}@anchor{216}@anchor{gnat_ugn/platform_specific_information id33}@anchor{217}
+@anchor{gnat_ugn/platform_specific_information using-resources}@anchor{21a}@anchor{gnat_ugn/platform_specific_information id35}@anchor{21b}
@subsubsection Using Resources
@@ -25666,7 +25722,7 @@ $ gnatmake myprog -largs myres.o
@end quotation
@node Using GNAT DLLs from Microsoft Visual Studio Applications,Debugging a DLL,GNAT and Windows Resources,Mixed-Language Programming on Windows
-@anchor{gnat_ugn/platform_specific_information using-gnat-dll-from-msvs}@anchor{218}@anchor{gnat_ugn/platform_specific_information using-gnat-dlls-from-microsoft-visual-studio-applications}@anchor{219}
+@anchor{gnat_ugn/platform_specific_information using-gnat-dll-from-msvs}@anchor{21c}@anchor{gnat_ugn/platform_specific_information using-gnat-dlls-from-microsoft-visual-studio-applications}@anchor{21d}
@subsubsection Using GNAT DLLs from Microsoft Visual Studio Applications
@@ -25700,7 +25756,7 @@ $ gprbuild -p mylib.gpr
@item
Produce a .def file for the symbols you need to interface with, either by
hand or automatically with possibly some manual adjustments
-(see @ref{1fa,,Creating Definition File Automatically}):
+(see @ref{1fe,,Creating Definition File Automatically}):
@end enumerate
@quotation
@@ -25717,7 +25773,7 @@ $ dlltool libmylib.dll -z libmylib.def --export-all-symbols
Make sure that MSVS command-line tools are accessible on the path.
@item
-Create the Microsoft-style import library (see @ref{1fd,,MSVS-Style Import Library}):
+Create the Microsoft-style import library (see @ref{201,,MSVS-Style Import Library}):
@end enumerate
@quotation
@@ -25759,7 +25815,7 @@ or copy the DLL into into the directory containing the .exe.
@end enumerate
@node Debugging a DLL,Setting Stack Size from gnatlink,Using GNAT DLLs from Microsoft Visual Studio Applications,Mixed-Language Programming on Windows
-@anchor{gnat_ugn/platform_specific_information id34}@anchor{21a}@anchor{gnat_ugn/platform_specific_information debugging-a-dll}@anchor{21b}
+@anchor{gnat_ugn/platform_specific_information id36}@anchor{21e}@anchor{gnat_ugn/platform_specific_information debugging-a-dll}@anchor{21f}
@subsubsection Debugging a DLL
@@ -25797,7 +25853,7 @@ tools suite used to build the DLL.
@end menu
@node Program and DLL Both Built with GCC/GNAT,Program Built with Foreign Tools and DLL Built with GCC/GNAT,,Debugging a DLL
-@anchor{gnat_ugn/platform_specific_information program-and-dll-both-built-with-gcc-gnat}@anchor{21c}@anchor{gnat_ugn/platform_specific_information id35}@anchor{21d}
+@anchor{gnat_ugn/platform_specific_information id37}@anchor{220}@anchor{gnat_ugn/platform_specific_information program-and-dll-both-built-with-gcc-gnat}@anchor{221}
@subsubsection Program and DLL Both Built with GCC/GNAT
@@ -25807,7 +25863,7 @@ the process. Let's suppose here that the main procedure is named
@code{ada_main} and that in the DLL there is an entry point named
@code{ada_dll}.
-The DLL (@ref{1f2,,Introduction to Dynamic Link Libraries (DLLs)}) and
+The DLL (@ref{1f7,,Introduction to Dynamic Link Libraries (DLLs)}) and
program must have been built with the debugging information (see GNAT -g
switch). Here are the step-by-step instructions for debugging it:
@@ -25847,7 +25903,7 @@ you can use the standard approach to debug the whole program
(@ref{24,,Running and Debugging Ada Programs}).
@node Program Built with Foreign Tools and DLL Built with GCC/GNAT,,Program and DLL Both Built with GCC/GNAT,Debugging a DLL
-@anchor{gnat_ugn/platform_specific_information id36}@anchor{21e}@anchor{gnat_ugn/platform_specific_information program-built-with-foreign-tools-and-dll-built-with-gcc-gnat}@anchor{21f}
+@anchor{gnat_ugn/platform_specific_information program-built-with-foreign-tools-and-dll-built-with-gcc-gnat}@anchor{222}@anchor{gnat_ugn/platform_specific_information id38}@anchor{223}
@subsubsection Program Built with Foreign Tools and DLL Built with GCC/GNAT
@@ -25864,7 +25920,7 @@ example some C code built with Microsoft Visual C) and that there is a
DLL named @code{test.dll} containing an Ada entry point named
@code{ada_dll}.
-The DLL (see @ref{1f2,,Introduction to Dynamic Link Libraries (DLLs)}) must have
+The DLL (see @ref{1f7,,Introduction to Dynamic Link Libraries (DLLs)}) must have
been built with debugging information (see the GNAT @code{-g} option).
@subsubheading Debugging the DLL Directly
@@ -26003,7 +26059,7 @@ approach to debug a program as described in
@ref{24,,Running and Debugging Ada Programs}.
@node Setting Stack Size from gnatlink,Setting Heap Size from gnatlink,Debugging a DLL,Mixed-Language Programming on Windows
-@anchor{gnat_ugn/platform_specific_information id37}@anchor{220}@anchor{gnat_ugn/platform_specific_information setting-stack-size-from-gnatlink}@anchor{136}
+@anchor{gnat_ugn/platform_specific_information setting-stack-size-from-gnatlink}@anchor{136}@anchor{gnat_ugn/platform_specific_information id39}@anchor{224}
@subsubsection Setting Stack Size from @code{gnatlink}
@@ -26046,7 +26102,7 @@ because the comma is a separator for this option.
@end itemize
@node Setting Heap Size from gnatlink,,Setting Stack Size from gnatlink,Mixed-Language Programming on Windows
-@anchor{gnat_ugn/platform_specific_information setting-heap-size-from-gnatlink}@anchor{137}@anchor{gnat_ugn/platform_specific_information id38}@anchor{221}
+@anchor{gnat_ugn/platform_specific_information setting-heap-size-from-gnatlink}@anchor{137}@anchor{gnat_ugn/platform_specific_information id40}@anchor{225}
@subsubsection Setting Heap Size from @code{gnatlink}
@@ -26079,7 +26135,7 @@ because the comma is a separator for this option.
@end itemize
@node Windows Specific Add-Ons,,Mixed-Language Programming on Windows,Microsoft Windows Topics
-@anchor{gnat_ugn/platform_specific_information windows-specific-add-ons}@anchor{222}@anchor{gnat_ugn/platform_specific_information win32-specific-addons}@anchor{223}
+@anchor{gnat_ugn/platform_specific_information windows-specific-add-ons}@anchor{226}@anchor{gnat_ugn/platform_specific_information win32-specific-addons}@anchor{227}
@subsection Windows Specific Add-Ons
@@ -26092,7 +26148,7 @@ This section describes the Windows specific add-ons.
@end menu
@node Win32Ada,wPOSIX,,Windows Specific Add-Ons
-@anchor{gnat_ugn/platform_specific_information win32ada}@anchor{224}@anchor{gnat_ugn/platform_specific_information id39}@anchor{225}
+@anchor{gnat_ugn/platform_specific_information win32ada}@anchor{228}@anchor{gnat_ugn/platform_specific_information id41}@anchor{229}
@subsubsection Win32Ada
@@ -26123,7 +26179,7 @@ gprbuild p.gpr
@end quotation
@node wPOSIX,,Win32Ada,Windows Specific Add-Ons
-@anchor{gnat_ugn/platform_specific_information wposix}@anchor{226}@anchor{gnat_ugn/platform_specific_information id40}@anchor{227}
+@anchor{gnat_ugn/platform_specific_information id42}@anchor{22a}@anchor{gnat_ugn/platform_specific_information wposix}@anchor{22b}
@subsubsection wPOSIX
@@ -26156,7 +26212,7 @@ gprbuild p.gpr
@end quotation
@node Mac OS Topics,,Microsoft Windows Topics,Platform-Specific Information
-@anchor{gnat_ugn/platform_specific_information mac-os-topics}@anchor{2d}@anchor{gnat_ugn/platform_specific_information id41}@anchor{228}
+@anchor{gnat_ugn/platform_specific_information mac-os-topics}@anchor{2d}@anchor{gnat_ugn/platform_specific_information id43}@anchor{22c}
@section Mac OS Topics
@@ -26171,7 +26227,7 @@ platform.
@end menu
@node Codesigning the Debugger,,,Mac OS Topics
-@anchor{gnat_ugn/platform_specific_information codesigning-the-debugger}@anchor{229}
+@anchor{gnat_ugn/platform_specific_information codesigning-the-debugger}@anchor{22d}
@subsection Codesigning the Debugger
@@ -26252,7 +26308,7 @@ the location where you installed GNAT. Also, be sure that users are
in the Unix group @code{_developer}.
@node Example of Binder Output File,Elaboration Order Handling in GNAT,Platform-Specific Information,Top
-@anchor{gnat_ugn/example_of_binder_output example-of-binder-output-file}@anchor{e}@anchor{gnat_ugn/example_of_binder_output doc}@anchor{22a}@anchor{gnat_ugn/example_of_binder_output id1}@anchor{22b}
+@anchor{gnat_ugn/example_of_binder_output example-of-binder-output-file}@anchor{e}@anchor{gnat_ugn/example_of_binder_output doc}@anchor{22e}@anchor{gnat_ugn/example_of_binder_output id1}@anchor{22f}
@chapter Example of Binder Output File
@@ -27004,7 +27060,7 @@ elaboration code in your own application).
@c -- Example: A |withing| unit has a |with| clause, it |withs| a |withed| unit
@node Elaboration Order Handling in GNAT,Inline Assembler,Example of Binder Output File,Top
-@anchor{gnat_ugn/elaboration_order_handling_in_gnat elaboration-order-handling-in-gnat}@anchor{f}@anchor{gnat_ugn/elaboration_order_handling_in_gnat doc}@anchor{22c}@anchor{gnat_ugn/elaboration_order_handling_in_gnat id1}@anchor{22d}
+@anchor{gnat_ugn/elaboration_order_handling_in_gnat elaboration-order-handling-in-gnat}@anchor{f}@anchor{gnat_ugn/elaboration_order_handling_in_gnat doc}@anchor{230}@anchor{gnat_ugn/elaboration_order_handling_in_gnat id1}@anchor{231}
@chapter Elaboration Order Handling in GNAT
@@ -27037,7 +27093,7 @@ GNAT, either automatically or with explicit programming features.
@end menu
@node Elaboration Code,Elaboration Order,,Elaboration Order Handling in GNAT
-@anchor{gnat_ugn/elaboration_order_handling_in_gnat elaboration-code}@anchor{22e}@anchor{gnat_ugn/elaboration_order_handling_in_gnat id2}@anchor{22f}
+@anchor{gnat_ugn/elaboration_order_handling_in_gnat elaboration-code}@anchor{232}@anchor{gnat_ugn/elaboration_order_handling_in_gnat id2}@anchor{233}
@section Elaboration Code
@@ -27179,7 +27235,7 @@ elaborated.
@end itemize
@node Elaboration Order,Checking the Elaboration Order,Elaboration Code,Elaboration Order Handling in GNAT
-@anchor{gnat_ugn/elaboration_order_handling_in_gnat elaboration-order}@anchor{230}@anchor{gnat_ugn/elaboration_order_handling_in_gnat id3}@anchor{231}
+@anchor{gnat_ugn/elaboration_order_handling_in_gnat elaboration-order}@anchor{234}@anchor{gnat_ugn/elaboration_order_handling_in_gnat id3}@anchor{235}
@section Elaboration Order
@@ -27329,7 +27385,7 @@ avoids ABE problems should be chosen, however a compiler may not always find
such an order due to complications with respect to control and data flow.
@node Checking the Elaboration Order,Controlling the Elaboration Order in Ada,Elaboration Order,Elaboration Order Handling in GNAT
-@anchor{gnat_ugn/elaboration_order_handling_in_gnat id4}@anchor{232}@anchor{gnat_ugn/elaboration_order_handling_in_gnat checking-the-elaboration-order}@anchor{233}
+@anchor{gnat_ugn/elaboration_order_handling_in_gnat id4}@anchor{236}@anchor{gnat_ugn/elaboration_order_handling_in_gnat checking-the-elaboration-order}@anchor{237}
@section Checking the Elaboration Order
@@ -27391,7 +27447,7 @@ order.
@end itemize
@node Controlling the Elaboration Order in Ada,Controlling the Elaboration Order in GNAT,Checking the Elaboration Order,Elaboration Order Handling in GNAT
-@anchor{gnat_ugn/elaboration_order_handling_in_gnat controlling-the-elaboration-order-in-ada}@anchor{234}@anchor{gnat_ugn/elaboration_order_handling_in_gnat id5}@anchor{235}
+@anchor{gnat_ugn/elaboration_order_handling_in_gnat controlling-the-elaboration-order-in-ada}@anchor{238}@anchor{gnat_ugn/elaboration_order_handling_in_gnat id5}@anchor{239}
@section Controlling the Elaboration Order in Ada
@@ -27719,7 +27775,7 @@ is that the program continues to stay in the last state (one or more correct
orders exist) even if maintenance changes the bodies of targets.
@node Controlling the Elaboration Order in GNAT,Common Elaboration-model Traits,Controlling the Elaboration Order in Ada,Elaboration Order Handling in GNAT
-@anchor{gnat_ugn/elaboration_order_handling_in_gnat id6}@anchor{236}@anchor{gnat_ugn/elaboration_order_handling_in_gnat controlling-the-elaboration-order-in-gnat}@anchor{237}
+@anchor{gnat_ugn/elaboration_order_handling_in_gnat id6}@anchor{23a}@anchor{gnat_ugn/elaboration_order_handling_in_gnat controlling-the-elaboration-order-in-gnat}@anchor{23b}
@section Controlling the Elaboration Order in GNAT
@@ -27776,7 +27832,7 @@ effect.
@end itemize
@node Common Elaboration-model Traits,Dynamic Elaboration Model in GNAT,Controlling the Elaboration Order in GNAT,Elaboration Order Handling in GNAT
-@anchor{gnat_ugn/elaboration_order_handling_in_gnat common-elaboration-model-traits}@anchor{238}@anchor{gnat_ugn/elaboration_order_handling_in_gnat id7}@anchor{239}
+@anchor{gnat_ugn/elaboration_order_handling_in_gnat common-elaboration-model-traits}@anchor{23c}@anchor{gnat_ugn/elaboration_order_handling_in_gnat id7}@anchor{23d}
@section Common Elaboration-model Traits
@@ -27845,7 +27901,7 @@ data and control flow. The warnings can be suppressed with compiler switch
@code{-gnatws}.
@node Dynamic Elaboration Model in GNAT,Static Elaboration Model in GNAT,Common Elaboration-model Traits,Elaboration Order Handling in GNAT
-@anchor{gnat_ugn/elaboration_order_handling_in_gnat dynamic-elaboration-model-in-gnat}@anchor{23a}@anchor{gnat_ugn/elaboration_order_handling_in_gnat id8}@anchor{23b}
+@anchor{gnat_ugn/elaboration_order_handling_in_gnat dynamic-elaboration-model-in-gnat}@anchor{23e}@anchor{gnat_ugn/elaboration_order_handling_in_gnat id8}@anchor{23f}
@section Dynamic Elaboration Model in GNAT
@@ -27902,7 +27958,7 @@ is in effect.
@end example
@node Static Elaboration Model in GNAT,SPARK Elaboration Model in GNAT,Dynamic Elaboration Model in GNAT,Elaboration Order Handling in GNAT
-@anchor{gnat_ugn/elaboration_order_handling_in_gnat static-elaboration-model-in-gnat}@anchor{23c}@anchor{gnat_ugn/elaboration_order_handling_in_gnat id9}@anchor{23d}
+@anchor{gnat_ugn/elaboration_order_handling_in_gnat static-elaboration-model-in-gnat}@anchor{240}@anchor{gnat_ugn/elaboration_order_handling_in_gnat id9}@anchor{241}
@section Static Elaboration Model in GNAT
@@ -28045,7 +28101,7 @@ elaborated prior to the body of @code{Static_Model}.
@end itemize
@node SPARK Elaboration Model in GNAT,Mixing Elaboration Models,Static Elaboration Model in GNAT,Elaboration Order Handling in GNAT
-@anchor{gnat_ugn/elaboration_order_handling_in_gnat id10}@anchor{23e}@anchor{gnat_ugn/elaboration_order_handling_in_gnat spark-elaboration-model-in-gnat}@anchor{23f}
+@anchor{gnat_ugn/elaboration_order_handling_in_gnat id10}@anchor{242}@anchor{gnat_ugn/elaboration_order_handling_in_gnat spark-elaboration-model-in-gnat}@anchor{243}
@section SPARK Elaboration Model in GNAT
@@ -28068,7 +28124,7 @@ external, and compiler switch @code{-gnatd.v} is in effect.
@end example
@node Mixing Elaboration Models,Elaboration Circularities,SPARK Elaboration Model in GNAT,Elaboration Order Handling in GNAT
-@anchor{gnat_ugn/elaboration_order_handling_in_gnat mixing-elaboration-models}@anchor{240}@anchor{gnat_ugn/elaboration_order_handling_in_gnat id11}@anchor{241}
+@anchor{gnat_ugn/elaboration_order_handling_in_gnat mixing-elaboration-models}@anchor{244}@anchor{gnat_ugn/elaboration_order_handling_in_gnat id11}@anchor{245}
@section Mixing Elaboration Models
@@ -28112,7 +28168,7 @@ warning: "y.ads" which has static elaboration checks
The warnings can be suppressed by binder switch @code{-ws}.
@node Elaboration Circularities,Resolving Elaboration Circularities,Mixing Elaboration Models,Elaboration Order Handling in GNAT
-@anchor{gnat_ugn/elaboration_order_handling_in_gnat id12}@anchor{242}@anchor{gnat_ugn/elaboration_order_handling_in_gnat elaboration-circularities}@anchor{243}
+@anchor{gnat_ugn/elaboration_order_handling_in_gnat id12}@anchor{246}@anchor{gnat_ugn/elaboration_order_handling_in_gnat elaboration-circularities}@anchor{247}
@section Elaboration Circularities
@@ -28171,7 +28227,7 @@ they @emph{with}, must be elaborated prior to @code{Client}. However, @code{Serv
@code{Client}, and this leads to a circularity.
@node Resolving Elaboration Circularities,Resolving Task Issues,Elaboration Circularities,Elaboration Order Handling in GNAT
-@anchor{gnat_ugn/elaboration_order_handling_in_gnat id13}@anchor{244}@anchor{gnat_ugn/elaboration_order_handling_in_gnat resolving-elaboration-circularities}@anchor{245}
+@anchor{gnat_ugn/elaboration_order_handling_in_gnat id13}@anchor{248}@anchor{gnat_ugn/elaboration_order_handling_in_gnat resolving-elaboration-circularities}@anchor{249}
@section Resolving Elaboration Circularities
@@ -28316,7 +28372,7 @@ run-time checks.
@end itemize
@node Resolving Task Issues,Elaboration-related Compiler Switches,Resolving Elaboration Circularities,Elaboration Order Handling in GNAT
-@anchor{gnat_ugn/elaboration_order_handling_in_gnat id14}@anchor{246}@anchor{gnat_ugn/elaboration_order_handling_in_gnat resolving-task-issues}@anchor{247}
+@anchor{gnat_ugn/elaboration_order_handling_in_gnat id14}@anchor{24a}@anchor{gnat_ugn/elaboration_order_handling_in_gnat resolving-task-issues}@anchor{24b}
@section Resolving Task Issues
@@ -28612,7 +28668,7 @@ static model will verify that no entry calls take place at elaboration time.
@end itemize
@node Elaboration-related Compiler Switches,Summary of Procedures for Elaboration Control,Resolving Task Issues,Elaboration Order Handling in GNAT
-@anchor{gnat_ugn/elaboration_order_handling_in_gnat elaboration-related-compiler-switches}@anchor{248}@anchor{gnat_ugn/elaboration_order_handling_in_gnat id15}@anchor{249}
+@anchor{gnat_ugn/elaboration_order_handling_in_gnat elaboration-related-compiler-switches}@anchor{24c}@anchor{gnat_ugn/elaboration_order_handling_in_gnat id15}@anchor{24d}
@section Elaboration-related Compiler Switches
@@ -28904,7 +28960,7 @@ In the example above, the elaboration of declaration @code{Ptr} is assigned
@end table
@node Summary of Procedures for Elaboration Control,Inspecting the Chosen Elaboration Order,Elaboration-related Compiler Switches,Elaboration Order Handling in GNAT
-@anchor{gnat_ugn/elaboration_order_handling_in_gnat summary-of-procedures-for-elaboration-control}@anchor{24a}@anchor{gnat_ugn/elaboration_order_handling_in_gnat id16}@anchor{24b}
+@anchor{gnat_ugn/elaboration_order_handling_in_gnat summary-of-procedures-for-elaboration-control}@anchor{24e}@anchor{gnat_ugn/elaboration_order_handling_in_gnat id16}@anchor{24f}
@section Summary of Procedures for Elaboration Control
@@ -28949,7 +29005,7 @@ program using the dynamic model by using compiler switch @code{-gnatE}.
@end itemize
@node Inspecting the Chosen Elaboration Order,,Summary of Procedures for Elaboration Control,Elaboration Order Handling in GNAT
-@anchor{gnat_ugn/elaboration_order_handling_in_gnat inspecting-the-chosen-elaboration-order}@anchor{24c}@anchor{gnat_ugn/elaboration_order_handling_in_gnat id17}@anchor{24d}
+@anchor{gnat_ugn/elaboration_order_handling_in_gnat inspecting-the-chosen-elaboration-order}@anchor{250}@anchor{gnat_ugn/elaboration_order_handling_in_gnat id17}@anchor{251}
@section Inspecting the Chosen Elaboration Order
@@ -29086,7 +29142,7 @@ gdbstr (body)
@end example
@node Inline Assembler,GNU Free Documentation License,Elaboration Order Handling in GNAT,Top
-@anchor{gnat_ugn/inline_assembler inline-assembler}@anchor{10}@anchor{gnat_ugn/inline_assembler doc}@anchor{24e}@anchor{gnat_ugn/inline_assembler id1}@anchor{24f}
+@anchor{gnat_ugn/inline_assembler inline-assembler}@anchor{10}@anchor{gnat_ugn/inline_assembler doc}@anchor{252}@anchor{gnat_ugn/inline_assembler id1}@anchor{253}
@chapter Inline Assembler
@@ -29145,7 +29201,7 @@ and with assembly language programming.
@end menu
@node Basic Assembler Syntax,A Simple Example of Inline Assembler,,Inline Assembler
-@anchor{gnat_ugn/inline_assembler id2}@anchor{250}@anchor{gnat_ugn/inline_assembler basic-assembler-syntax}@anchor{251}
+@anchor{gnat_ugn/inline_assembler id2}@anchor{254}@anchor{gnat_ugn/inline_assembler basic-assembler-syntax}@anchor{255}
@section Basic Assembler Syntax
@@ -29261,7 +29317,7 @@ Intel: Destination first; for example @code{mov eax, 4}@w{ }
@node A Simple Example of Inline Assembler,Output Variables in Inline Assembler,Basic Assembler Syntax,Inline Assembler
-@anchor{gnat_ugn/inline_assembler a-simple-example-of-inline-assembler}@anchor{252}@anchor{gnat_ugn/inline_assembler id3}@anchor{253}
+@anchor{gnat_ugn/inline_assembler a-simple-example-of-inline-assembler}@anchor{256}@anchor{gnat_ugn/inline_assembler id3}@anchor{257}
@section A Simple Example of Inline Assembler
@@ -29410,7 +29466,7 @@ If there are no errors, @code{as} will generate an object file
@code{nothing.out}.
@node Output Variables in Inline Assembler,Input Variables in Inline Assembler,A Simple Example of Inline Assembler,Inline Assembler
-@anchor{gnat_ugn/inline_assembler id4}@anchor{254}@anchor{gnat_ugn/inline_assembler output-variables-in-inline-assembler}@anchor{255}
+@anchor{gnat_ugn/inline_assembler id4}@anchor{258}@anchor{gnat_ugn/inline_assembler output-variables-in-inline-assembler}@anchor{259}
@section Output Variables in Inline Assembler
@@ -29777,7 +29833,7 @@ end Get_Flags_3;
@end quotation
@node Input Variables in Inline Assembler,Inlining Inline Assembler Code,Output Variables in Inline Assembler,Inline Assembler
-@anchor{gnat_ugn/inline_assembler id5}@anchor{256}@anchor{gnat_ugn/inline_assembler input-variables-in-inline-assembler}@anchor{257}
+@anchor{gnat_ugn/inline_assembler id5}@anchor{25a}@anchor{gnat_ugn/inline_assembler input-variables-in-inline-assembler}@anchor{25b}
@section Input Variables in Inline Assembler
@@ -29866,7 +29922,7 @@ _increment__incr.1:
@end quotation
@node Inlining Inline Assembler Code,Other Asm Functionality,Input Variables in Inline Assembler,Inline Assembler
-@anchor{gnat_ugn/inline_assembler id6}@anchor{258}@anchor{gnat_ugn/inline_assembler inlining-inline-assembler-code}@anchor{259}
+@anchor{gnat_ugn/inline_assembler id6}@anchor{25c}@anchor{gnat_ugn/inline_assembler inlining-inline-assembler-code}@anchor{25d}
@section Inlining Inline Assembler Code
@@ -29937,7 +29993,7 @@ movl %esi,%eax
thus saving the overhead of stack frame setup and an out-of-line call.
@node Other Asm Functionality,,Inlining Inline Assembler Code,Inline Assembler
-@anchor{gnat_ugn/inline_assembler other-asm-functionality}@anchor{25a}@anchor{gnat_ugn/inline_assembler id7}@anchor{25b}
+@anchor{gnat_ugn/inline_assembler other-asm-functionality}@anchor{25e}@anchor{gnat_ugn/inline_assembler id7}@anchor{25f}
@section Other @code{Asm} Functionality
@@ -29952,7 +30008,7 @@ and @code{Volatile}, which inhibits unwanted optimizations.
@end menu
@node The Clobber Parameter,The Volatile Parameter,,Other Asm Functionality
-@anchor{gnat_ugn/inline_assembler the-clobber-parameter}@anchor{25c}@anchor{gnat_ugn/inline_assembler id8}@anchor{25d}
+@anchor{gnat_ugn/inline_assembler the-clobber-parameter}@anchor{260}@anchor{gnat_ugn/inline_assembler id8}@anchor{261}
@subsection The @code{Clobber} Parameter
@@ -30016,7 +30072,7 @@ Use 'register' name @code{memory} if you changed a memory location
@end itemize
@node The Volatile Parameter,,The Clobber Parameter,Other Asm Functionality
-@anchor{gnat_ugn/inline_assembler the-volatile-parameter}@anchor{25e}@anchor{gnat_ugn/inline_assembler id9}@anchor{25f}
+@anchor{gnat_ugn/inline_assembler the-volatile-parameter}@anchor{262}@anchor{gnat_ugn/inline_assembler id9}@anchor{263}
@subsection The @code{Volatile} Parameter
@@ -30052,7 +30108,7 @@ to @code{True} only if the compiler's optimizations have created
problems.
@node GNU Free Documentation License,Index,Inline Assembler,Top
-@anchor{share/gnu_free_documentation_license gnu-fdl}@anchor{1}@anchor{share/gnu_free_documentation_license doc}@anchor{260}@anchor{share/gnu_free_documentation_license gnu-free-documentation-license}@anchor{261}
+@anchor{share/gnu_free_documentation_license gnu-fdl}@anchor{1}@anchor{share/gnu_free_documentation_license doc}@anchor{264}@anchor{share/gnu_free_documentation_license gnu-free-documentation-license}@anchor{265}
@chapter GNU Free Documentation License
diff --git a/gcc/ada/gnatbind.adb b/gcc/ada/gnatbind.adb
index baba9feef7c..4f5197d82bd 100644
--- a/gcc/ada/gnatbind.adb
+++ b/gcc/ada/gnatbind.adb
@@ -330,9 +330,7 @@ procedure Gnatbind is
then
Output_File_Name_Seen := True;
- if Argv'Length = 0
- or else (Argv'Length >= 1 and then Argv (1) = '-')
- then
+ if Argv'Length = 0 or else Argv (1) = '-' then
Fail ("output File_Name missing after -o");
else
diff --git a/gcc/ada/init.c b/gcc/ada/init.c
index 4071bb461e7..608f41fd748 100644
--- a/gcc/ada/init.c
+++ b/gcc/ada/init.c
@@ -2516,6 +2516,108 @@ __gnat_install_handler (void)
__gnat_handler_installed = 1;
}
+#elif defined(__QNX__)
+
+/***************/
+/* QNX Section */
+/***************/
+
+#include <signal.h>
+#include <unistd.h>
+#include <string.h>
+#include "sigtramp.h"
+
+void
+__gnat_map_signal (int sig,
+ siginfo_t *si ATTRIBUTE_UNUSED,
+ void *mcontext ATTRIBUTE_UNUSED)
+{
+ struct Exception_Data *exception;
+ const char *msg;
+
+ switch(sig)
+ {
+ case SIGFPE:
+ exception = &constraint_error;
+ msg = "SIGFPE";
+ break;
+ case SIGILL:
+ exception = &constraint_error;
+ msg = "SIGILL";
+ break;
+ case SIGSEGV:
+ exception = &storage_error;
+ msg = "stack overflow or erroneous memory access";
+ break;
+ case SIGBUS:
+ exception = &constraint_error;
+ msg = "SIGBUS";
+ break;
+ default:
+ exception = &program_error;
+ msg = "unhandled signal";
+ }
+
+ Raise_From_Signal_Handler (exception, msg);
+}
+
+static void
+__gnat_error_handler (int sig, siginfo_t *si, void *ucontext)
+{
+ __gnat_sigtramp (sig, (void *) si, (void *) ucontext,
+ (__sigtramphandler_t *)&__gnat_map_signal);
+}
+
+/* This must be in keeping with System.OS_Interface.Alternate_Stack_Size. */
+/* sigaltstack is currently not supported by QNX7 */
+char __gnat_alternate_stack[0];
+
+void
+__gnat_install_handler (void)
+{
+ struct sigaction act;
+ int err;
+
+ act.sa_handler = __gnat_error_handler;
+ act.sa_flags = SA_NODEFER | SA_SIGINFO;
+ sigemptyset (&act.sa_mask);
+
+ /* Do not install handlers if interrupt state is "System" */
+ if (__gnat_get_interrupt_state (SIGFPE) != 's') {
+ err = sigaction (SIGFPE, &act, NULL);
+ if (err == -1) {
+ err = errno;
+ perror ("error while attaching SIGFPE");
+ perror (strerror (err));
+ }
+ }
+ if (__gnat_get_interrupt_state (SIGILL) != 's') {
+ sigaction (SIGILL, &act, NULL);
+ if (err == -1) {
+ err = errno;
+ perror ("error while attaching SIGFPE");
+ perror (strerror (err));
+ }
+ }
+ if (__gnat_get_interrupt_state (SIGSEGV) != 's') {
+ sigaction (SIGSEGV, &act, NULL);
+ if (err == -1) {
+ err = errno;
+ perror ("error while attaching SIGFPE");
+ perror (strerror (err));
+ }
+ }
+ if (__gnat_get_interrupt_state (SIGBUS) != 's') {
+ sigaction (SIGBUS, &act, NULL);
+ if (err == -1) {
+ err = errno;
+ perror ("error while attaching SIGFPE");
+ perror (strerror (err));
+ }
+ }
+ __gnat_handler_installed = 1;
+}
+
#elif defined (__DJGPP__)
void
@@ -2648,7 +2750,7 @@ __gnat_install_handler (void)
#if defined (_WIN32) || defined (__INTERIX) \
|| defined (__Lynx__) || defined(__NetBSD__) || defined(__FreeBSD__) \
- || defined (__OpenBSD__) || defined (__DragonFly__)
+ || defined (__OpenBSD__) || defined (__DragonFly__) || defined(__QNX__)
#define HAVE_GNAT_INIT_FLOAT
diff --git a/gcc/ada/inline.adb b/gcc/ada/inline.adb
index 7096f7c7431..f97fce782f4 100644
--- a/gcc/ada/inline.adb
+++ b/gcc/ada/inline.adb
@@ -2224,13 +2224,13 @@ package body Inline is
Exit_Lab : Entity_Id := Empty;
F : Entity_Id;
A : Node_Id;
- Lab_Decl : Node_Id;
+ Lab_Decl : Node_Id := Empty;
Lab_Id : Node_Id;
New_A : Node_Id;
- Num_Ret : Nat := 0;
+ Num_Ret : Nat := 0;
Ret_Type : Entity_Id;
- Targ : Node_Id;
+ Targ : Node_Id := Empty;
-- The target of the call. If context is an assignment statement then
-- this is the left-hand side of the assignment, else it is a temporary
-- to which the return value is assigned prior to rewriting the call.
diff --git a/gcc/ada/lib-writ.adb b/gcc/ada/lib-writ.adb
index 47109b4e3f9..addc9a083c5 100644
--- a/gcc/ada/lib-writ.adb
+++ b/gcc/ada/lib-writ.adb
@@ -1567,14 +1567,6 @@ package body Lib.Writ is
SCO_Output;
end if;
- -- Output SPARK cross-reference information if needed
-
- if Opt.Xref_Active and then GNATprove_Mode then
- SPARK_Specific.Collect_SPARK_Xrefs (Sdep_Table => Sdep_Table,
- Num_Sdep => Num_Sdep);
- SPARK_Specific.Output_SPARK_Xrefs;
- end if;
-
-- Output final blank line and we are done. This final blank line is
-- probably junk, but we don't feel like making an incompatible change.
diff --git a/gcc/ada/lib-xref-spark_specific.adb b/gcc/ada/lib-xref-spark_specific.adb
index 4d221749907..52958328b1e 100644
--- a/gcc/ada/lib-xref-spark_specific.adb
+++ b/gcc/ada/lib-xref-spark_specific.adb
@@ -27,8 +27,6 @@ with Einfo; use Einfo;
with Nmake; use Nmake;
with SPARK_Xrefs; use SPARK_Xrefs;
-with GNAT.HTable;
-
separate (Lib.Xref)
package body SPARK_Specific is
@@ -59,16 +57,10 @@ package body SPARK_Specific is
's' => True,
others => False);
- type Entity_Hashed_Range is range 0 .. 255;
- -- Size of hash table headers
-
---------------------
-- Local Variables --
---------------------
- Heap : Entity_Id := Empty;
- -- A special entity which denotes the heap object
-
package Drefs is new Table.Table (
Table_Component_Type => Xref_Entry,
Table_Index_Type => Xref_Entry_Number,
@@ -81,243 +73,13 @@ package body SPARK_Specific is
-- "Heap". These references are added to the regular references when
-- computing SPARK cross-references.
- -----------------------
- -- Local Subprograms --
- -----------------------
-
- procedure Add_SPARK_File (Uspec, Ubody : Unit_Number_Type; Dspec : Nat);
- -- Add file and corresponding scopes for unit to the tables
- -- SPARK_File_Table and SPARK_Scope_Table. When two units are present
- -- for the same compilation unit, as it happens for library-level
- -- instantiations of generics, then Ubody is the number of the body
- -- unit; otherwise it is No_Unit.
-
- procedure Add_SPARK_Xrefs;
- -- Filter table Xrefs to add all references used in SPARK to the table
- -- SPARK_Xref_Table.
-
- function Entity_Hash (E : Entity_Id) return Entity_Hashed_Range;
- -- Hash function for hash table
-
- --------------------
- -- Add_SPARK_File --
- --------------------
-
- procedure Add_SPARK_File (Uspec, Ubody : Unit_Number_Type; Dspec : Nat) is
- File : constant Source_File_Index := Source_Index (Uspec);
- From : constant Scope_Index := SPARK_Scope_Table.Last + 1;
-
- Scope_Id : Pos := 1;
-
- procedure Add_SPARK_Scope (N : Node_Id);
- -- Add scope N to the table SPARK_Scope_Table
-
- procedure Detect_And_Add_SPARK_Scope (N : Node_Id);
- -- Call Add_SPARK_Scope on scopes
-
- ---------------------
- -- Add_SPARK_Scope --
- ---------------------
-
- procedure Add_SPARK_Scope (N : Node_Id) is
- E : constant Entity_Id := Defining_Entity (N);
- Loc : constant Source_Ptr := Sloc (E);
-
- -- The character describing the kind of scope is chosen to be the
- -- same as the one describing the corresponding entity in cross
- -- references, see Xref_Entity_Letters in lib-xrefs.ads
-
- Typ : Character;
-
- begin
- -- Ignore scopes without a proper location
-
- if Sloc (N) = No_Location then
- return;
- end if;
-
- case Ekind (E) is
- when E_Entry
- | E_Entry_Family
- | E_Generic_Function
- | E_Generic_Package
- | E_Generic_Procedure
- | E_Package
- | E_Protected_Type
- | E_Task_Type
- =>
- Typ := Xref_Entity_Letters (Ekind (E));
-
- when E_Function
- | E_Procedure
- =>
- -- In SPARK we need to distinguish protected functions and
- -- procedures from ordinary subprograms, but there are no
- -- special Xref letters for them. Since this distiction is
- -- only needed to detect protected calls, we pretend that
- -- such calls are entry calls.
-
- if Ekind (Scope (E)) = E_Protected_Type then
- Typ := Xref_Entity_Letters (E_Entry);
- else
- Typ := Xref_Entity_Letters (Ekind (E));
- end if;
-
- when E_Package_Body
- | E_Protected_Body
- | E_Subprogram_Body
- | E_Task_Body
- =>
- Typ := Xref_Entity_Letters (Ekind (Unique_Entity (E)));
-
- when E_Void =>
-
- -- Compilation of prj-attr.adb with -gnatn creates a node with
- -- entity E_Void for the package defined at a-charac.ads16:13.
- -- ??? TBD
-
- return;
-
- when others =>
- raise Program_Error;
- end case;
-
- -- File_Num and Scope_Num are filled later. From_Xref and To_Xref
- -- are filled even later, but are initialized to represent an empty
- -- range.
-
- SPARK_Scope_Table.Append
- ((Scope_Name => new String'(Unique_Name (E)),
- File_Num => Dspec,
- Scope_Num => Scope_Id,
- Spec_File_Num => 0,
- Spec_Scope_Num => 0,
- Line => Nat (Get_Logical_Line_Number (Loc)),
- Stype => Typ,
- Col => Nat (Get_Column_Number (Loc)),
- From_Xref => 1,
- To_Xref => 0,
- Scope_Entity => E));
-
- Scope_Id := Scope_Id + 1;
- end Add_SPARK_Scope;
-
- --------------------------------
- -- Detect_And_Add_SPARK_Scope --
- --------------------------------
-
- procedure Detect_And_Add_SPARK_Scope (N : Node_Id) is
- begin
- -- Entries
-
- if Nkind_In (N, N_Entry_Body, N_Entry_Declaration)
-
- -- Packages
-
- or else Nkind_In (N, N_Package_Body,
- N_Package_Declaration)
- -- Protected units
-
- or else Nkind_In (N, N_Protected_Body,
- N_Protected_Type_Declaration)
-
- -- Subprograms
-
- or else Nkind_In (N, N_Subprogram_Body,
- N_Subprogram_Declaration)
-
- -- Task units
-
- or else Nkind_In (N, N_Task_Body,
- N_Task_Type_Declaration)
- then
- Add_SPARK_Scope (N);
- end if;
- end Detect_And_Add_SPARK_Scope;
-
- procedure Traverse_Scopes is new
- Traverse_Compilation_Unit (Detect_And_Add_SPARK_Scope);
-
- -- Local variables
-
- File_Name : String_Ptr;
- Unit_File_Name : String_Ptr;
-
- -- Start of processing for Add_SPARK_File
-
- begin
- -- Source file could be inexistant as a result of an error, if option
- -- gnatQ is used.
-
- if File <= No_Source_File then
- return;
- end if;
-
- -- Subunits are traversed as part of the top-level unit to which they
- -- belong.
-
- if Nkind (Unit (Cunit (Uspec))) = N_Subunit then
- return;
- end if;
-
- Traverse_Scopes (CU => Cunit (Uspec), Inside_Stubs => True);
-
- -- When two units are present for the same compilation unit, as it
- -- happens for library-level instantiations of generics, then add all
- -- scopes to the same SPARK file.
-
- if Ubody /= No_Unit then
- Traverse_Scopes (CU => Cunit (Ubody), Inside_Stubs => True);
- end if;
-
- -- Make entry for new file in file table
-
- Get_Name_String (Reference_Name (File));
- File_Name := new String'(Name_Buffer (1 .. Name_Len));
-
- -- For subunits, also retrieve the file name of the unit. Only do so if
- -- unit has an associated compilation unit.
-
- if Present (Cunit (Unit (File)))
- and then Nkind (Unit (Cunit (Unit (File)))) = N_Subunit
- then
- Get_Name_String (Reference_Name (Main_Source_File));
- Unit_File_Name := new String'(Name_Buffer (1 .. Name_Len));
- else
- Unit_File_Name := null;
- end if;
-
- SPARK_File_Table.Append (
- (File_Name => File_Name,
- Unit_File_Name => Unit_File_Name,
- File_Num => Dspec,
- From_Scope => From,
- To_Scope => SPARK_Scope_Table.Last));
- end Add_SPARK_File;
-
- ---------------------
- -- Add_SPARK_Xrefs --
- ---------------------
-
- procedure Add_SPARK_Xrefs is
- function Entity_Of_Scope (S : Scope_Index) return Entity_Id;
- -- Return the entity which maps to the input scope index
-
- function Get_Entity_Type (E : Entity_Id) return Character;
- -- Return a character representing the type of entity
-
- function Get_Scope_Num (E : Entity_Id) return Nat;
- -- Return the scope number associated with the entity E
+ -------------------------
+ -- Iterate_SPARK_Xrefs --
+ -------------------------
- function Is_Constant_Object_Without_Variable_Input
- (E : Entity_Id) return Boolean;
- -- Return True if E is known to have no variable input, as defined in
- -- SPARK RM.
+ procedure Iterate_SPARK_Xrefs is
- function Is_Future_Scope_Entity
- (E : Entity_Id;
- S : Scope_Index) return Boolean;
- -- Check whether entity E is in SPARK_Scope_Table at index S or higher
+ procedure Add_SPARK_Xref (Index : Int; Xref : Xref_Entry);
function Is_SPARK_Reference
(E : Entity_Id;
@@ -329,168 +91,29 @@ package body SPARK_Specific is
-- Return whether the entity or reference scope meets requirements for
-- being a SPARK scope.
- function Lt (Op1 : Natural; Op2 : Natural) return Boolean;
- -- Comparison function for Sort call
-
- procedure Move (From : Natural; To : Natural);
- -- Move procedure for Sort call
-
- procedure Set_Scope_Num (E : Entity_Id; Num : Nat);
- -- Associate entity E with the scope number Num
-
- procedure Update_Scope_Range
- (S : Scope_Index;
- From : Xref_Index;
- To : Xref_Index);
- -- Update the scope which maps to S with the new range From .. To
-
- package Sorting is new GNAT.Heap_Sort_G (Move, Lt);
-
- No_Scope : constant Nat := 0;
- -- Initial scope counter
-
- package Scopes is new GNAT.HTable.Simple_HTable
- (Header_Num => Entity_Hashed_Range,
- Element => Nat,
- No_Element => No_Scope,
- Key => Entity_Id,
- Hash => Entity_Hash,
- Equal => "=");
- -- Package used to build a correspondence between entities and scope
- -- numbers used in SPARK cross references.
-
- Nrefs : Nat := Xrefs.Last;
- -- Number of references in table. This value may get reset (reduced)
- -- when we eliminate duplicate reference entries as well as references
- -- not suitable for local cross-references.
-
- Nrefs_Add : constant Nat := Drefs.Last;
- -- Number of additional references which correspond to dereferences in
- -- the source code.
-
- Rnums : array (0 .. Nrefs + Nrefs_Add) of Nat;
- -- This array contains numbers of references in the Xrefs table. This
- -- list is sorted in output order. The extra 0'th entry is convenient
- -- for the call to sort. When we sort the table, we move the indices in
- -- Rnums around, but we do not move the original table entries.
-
- ---------------------
- -- Entity_Of_Scope --
- ---------------------
-
- function Entity_Of_Scope (S : Scope_Index) return Entity_Id is
- begin
- return SPARK_Scope_Table.Table (S).Scope_Entity;
- end Entity_Of_Scope;
-
- ---------------------
- -- Get_Entity_Type --
- ---------------------
-
- function Get_Entity_Type (E : Entity_Id) return Character is
- begin
- case Ekind (E) is
- when E_Out_Parameter => return '<';
- when E_In_Out_Parameter => return '=';
- when E_In_Parameter => return '>';
- when others => return '*';
- end case;
- end Get_Entity_Type;
-
- -------------------
- -- Get_Scope_Num --
- -------------------
-
- function Get_Scope_Num (E : Entity_Id) return Nat renames Scopes.Get;
-
- -----------------------------------------------
- -- Is_Constant_Object_Without_Variable_Input --
- -----------------------------------------------
-
- function Is_Constant_Object_Without_Variable_Input
- (E : Entity_Id) return Boolean
- is
- begin
- case Ekind (E) is
-
- -- A constant is known to have no variable input if its
- -- initializing expression is static (a value which is
- -- compile-time-known is not guaranteed to have no variable input
- -- as defined in the SPARK RM). Otherwise, the constant may or not
- -- have variable input.
-
- when E_Constant =>
- declare
- Decl : Node_Id;
- begin
- if Present (Full_View (E)) then
- Decl := Parent (Full_View (E));
- else
- Decl := Parent (E);
- end if;
-
- if Is_Imported (E) then
- return False;
- else
- pragma Assert (Present (Expression (Decl)));
- return Is_Static_Expression (Expression (Decl));
- end if;
- end;
-
- when E_In_Parameter
- | E_Loop_Parameter
- =>
- return True;
-
- when others =>
- return False;
- end case;
- end Is_Constant_Object_Without_Variable_Input;
-
- ----------------------------
- -- Is_Future_Scope_Entity --
- ----------------------------
-
- function Is_Future_Scope_Entity
- (E : Entity_Id;
- S : Scope_Index) return Boolean
- is
- function Is_Past_Scope_Entity return Boolean;
- -- Check whether entity E is in SPARK_Scope_Table at index strictly
- -- lower than S.
-
- --------------------------
- -- Is_Past_Scope_Entity --
- --------------------------
-
- function Is_Past_Scope_Entity return Boolean is
- begin
- for Index in SPARK_Scope_Table.First .. S - 1 loop
- if SPARK_Scope_Table.Table (Index).Scope_Entity = E then
- return True;
- end if;
- end loop;
-
- return False;
- end Is_Past_Scope_Entity;
-
- -- Start of processing for Is_Future_Scope_Entity
+ --------------------
+ -- Add_SPARK_Xref --
+ --------------------
+ procedure Add_SPARK_Xref (Index : Int; Xref : Xref_Entry) is
+ Ref : Xref_Key renames Xref.Key;
begin
- for Index in S .. SPARK_Scope_Table.Last loop
- if SPARK_Scope_Table.Table (Index).Scope_Entity = E then
- return True;
- end if;
- end loop;
+ -- Eliminate entries not appropriate for SPARK
- -- If this assertion fails, this means that the scope which we are
- -- looking for has been treated already, which reveals a problem in
- -- the order of cross-references.
-
- pragma Assert (not Is_Past_Scope_Entity);
+ if SPARK_Entities (Ekind (Ref.Ent))
+ and then SPARK_References (Ref.Typ)
+ and then Is_SPARK_Scope (Ref.Ent_Scope)
+ and then Is_SPARK_Scope (Ref.Ref_Scope)
+ and then Is_SPARK_Reference (Ref.Ent, Ref.Typ)
+ then
+ Process
+ (Index,
+ (Entity => Ref.Ent,
+ Ref_Scope => Ref.Ref_Scope,
+ Rtype => Ref.Typ));
+ end if;
- return False;
- end Is_Future_Scope_Entity;
+ end Add_SPARK_Xref;
------------------------
-- Is_SPARK_Reference --
@@ -528,525 +151,22 @@ package body SPARK_Specific is
begin
return Present (E)
and then not Is_Generic_Unit (E)
- and then (not Can_Be_Renamed or else No (Renamed_Entity (E)))
- and then Get_Scope_Num (E) /= No_Scope;
+ and then (not Can_Be_Renamed or else No (Renamed_Entity (E)));
end Is_SPARK_Scope;
- --------
- -- Lt --
- --------
-
- function Lt (Op1 : Natural; Op2 : Natural) return Boolean is
- T1 : constant Xref_Entry := Xrefs.Table (Rnums (Nat (Op1)));
- T2 : constant Xref_Entry := Xrefs.Table (Rnums (Nat (Op2)));
-
- begin
- -- First test: if entity is in different unit, sort by unit. Note:
- -- that we use Ent_Scope_File rather than Eun, as Eun may refer to
- -- the file where the generic scope is defined, which may differ from
- -- the file where the enclosing scope is defined. It is the latter
- -- which matters for a correct order here.
-
- if T1.Ent_Scope_File /= T2.Ent_Scope_File then
- return Dependency_Num (T1.Ent_Scope_File) <
- Dependency_Num (T2.Ent_Scope_File);
-
- -- Second test: within same unit, sort by location of the scope of
- -- the entity definition.
-
- elsif Get_Scope_Num (T1.Key.Ent_Scope) /=
- Get_Scope_Num (T2.Key.Ent_Scope)
- then
- return Get_Scope_Num (T1.Key.Ent_Scope) <
- Get_Scope_Num (T2.Key.Ent_Scope);
-
- -- Third test: within same unit and scope, sort by location of
- -- entity definition.
-
- elsif T1.Def /= T2.Def then
- return T1.Def < T2.Def;
-
- else
- -- Both entities must be equal at this point
-
- pragma Assert (T1.Key.Ent = T2.Key.Ent);
- pragma Assert (T1.Key.Ent_Scope = T2.Key.Ent_Scope);
- pragma Assert (T1.Ent_Scope_File = T2.Ent_Scope_File);
-
- -- Fourth test: if reference is in same unit as entity definition,
- -- sort first.
-
- if T1.Key.Lun /= T2.Key.Lun
- and then T1.Ent_Scope_File = T1.Key.Lun
- then
- return True;
-
- elsif T1.Key.Lun /= T2.Key.Lun
- and then T2.Ent_Scope_File = T2.Key.Lun
- then
- return False;
-
- -- Fifth test: if reference is in same unit and same scope as
- -- entity definition, sort first.
-
- elsif T1.Ent_Scope_File = T1.Key.Lun
- and then T1.Key.Ref_Scope /= T2.Key.Ref_Scope
- and then T1.Key.Ent_Scope = T1.Key.Ref_Scope
- then
- return True;
-
- elsif T2.Ent_Scope_File = T2.Key.Lun
- and then T1.Key.Ref_Scope /= T2.Key.Ref_Scope
- and then T2.Key.Ent_Scope = T2.Key.Ref_Scope
- then
- return False;
-
- -- Sixth test: for same entity, sort by reference location unit
-
- elsif T1.Key.Lun /= T2.Key.Lun then
- return Dependency_Num (T1.Key.Lun) <
- Dependency_Num (T2.Key.Lun);
-
- -- Seventh test: for same entity, sort by reference location scope
-
- elsif Get_Scope_Num (T1.Key.Ref_Scope) /=
- Get_Scope_Num (T2.Key.Ref_Scope)
- then
- return Get_Scope_Num (T1.Key.Ref_Scope) <
- Get_Scope_Num (T2.Key.Ref_Scope);
-
- -- Eighth test: order of location within referencing unit
-
- elsif T1.Key.Loc /= T2.Key.Loc then
- return T1.Key.Loc < T2.Key.Loc;
-
- -- Finally, for two locations at the same address prefer the one
- -- that does NOT have the type 'r', so that a modification or
- -- extension takes preference, when there are more than one
- -- reference at the same location. As a result, in the case of
- -- entities that are in-out actuals, the read reference follows
- -- the modify reference.
-
- else
- return T2.Key.Typ = 'r';
- end if;
- end if;
- end Lt;
-
- ----------
- -- Move --
- ----------
-
- procedure Move (From : Natural; To : Natural) is
- begin
- Rnums (Nat (To)) := Rnums (Nat (From));
- end Move;
-
- -------------------
- -- Set_Scope_Num --
- -------------------
-
- procedure Set_Scope_Num (E : Entity_Id; Num : Nat) renames Scopes.Set;
-
- ------------------------
- -- Update_Scope_Range --
- ------------------------
-
- procedure Update_Scope_Range
- (S : Scope_Index;
- From : Xref_Index;
- To : Xref_Index)
- is
- begin
- SPARK_Scope_Table.Table (S).From_Xref := From;
- SPARK_Scope_Table.Table (S).To_Xref := To;
- end Update_Scope_Range;
-
- -- Local variables
-
- Col : Nat;
- From_Index : Xref_Index;
- Line : Nat;
- Prev_Loc : Source_Ptr;
- Prev_Typ : Character;
- Ref_Count : Nat;
- Ref_Id : Entity_Id;
- Ref_Name : String_Ptr;
- Scope_Id : Scope_Index;
-
-- Start of processing for Add_SPARK_Xrefs
begin
- for Index in SPARK_Scope_Table.First .. SPARK_Scope_Table.Last loop
- declare
- S : SPARK_Scope_Record renames SPARK_Scope_Table.Table (Index);
- begin
- Set_Scope_Num (S.Scope_Entity, S.Scope_Num);
- end;
- end loop;
-
- declare
- Drefs_Table : Drefs.Table_Type
- renames Drefs.Table (Drefs.First .. Drefs.Last);
- begin
- Xrefs.Append_All (Xrefs.Table_Type (Drefs_Table));
- Nrefs := Nrefs + Drefs_Table'Length;
- end;
-
- -- Capture the definition Sloc values. As in the case of normal cross
- -- references, we have to wait until now to get the correct value.
-
- for Index in 1 .. Nrefs loop
- Xrefs.Table (Index).Def := Sloc (Xrefs.Table (Index).Key.Ent);
- end loop;
-
- -- Eliminate entries not appropriate for SPARK. Done prior to sorting
- -- cross-references, as it discards useless references which do not have
- -- a proper format for the comparison function (like no location).
-
- Ref_Count := Nrefs;
- Nrefs := 0;
+ -- Expose cross-references from private frontend tables to the backend
- for Index in 1 .. Ref_Count loop
- declare
- Ref : Xref_Key renames Xrefs.Table (Index).Key;
-
- begin
- if SPARK_Entities (Ekind (Ref.Ent))
- and then SPARK_References (Ref.Typ)
- and then Is_SPARK_Scope (Ref.Ent_Scope)
- and then Is_SPARK_Scope (Ref.Ref_Scope)
- and then Is_SPARK_Reference (Ref.Ent, Ref.Typ)
-
- -- Discard references from unknown scopes, e.g. generic scopes
-
- and then Get_Scope_Num (Ref.Ent_Scope) /= No_Scope
- and then Get_Scope_Num (Ref.Ref_Scope) /= No_Scope
-
- -- Discard references to loop parameters introduced within
- -- expression functions, as they give two references: one from
- -- the analysis of the expression function itself and one from
- -- the analysis of the expanded body. We don't lose any globals
- -- by discarding them, because such loop parameters can only be
- -- accessed locally from within the expression function body.
-
- and then not
- (Ekind (Ref.Ent) = E_Loop_Parameter
- and then Scope_Within
- (Ref.Ent, Unique_Entity (Ref.Ref_Scope))
- and then Is_Expression_Function (Ref.Ref_Scope))
- then
- Nrefs := Nrefs + 1;
- Rnums (Nrefs) := Index;
- end if;
- end;
+ for Index in Drefs.First .. Drefs.Last loop
+ Add_SPARK_Xref (Index, Drefs.Table (Index));
end loop;
- -- Sort the references
-
- Sorting.Sort (Integer (Nrefs));
-
- -- Eliminate duplicate entries
-
- -- We need this test for Ref_Count because if we force ALI file
- -- generation in case of errors detected, it may be the case that
- -- Nrefs is 0, so we should not reset it here.
-
- if Nrefs >= 2 then
- Ref_Count := Nrefs;
- Nrefs := 1;
-
- for Index in 2 .. Ref_Count loop
- if Xrefs.Table (Rnums (Index)) /= Xrefs.Table (Rnums (Nrefs)) then
- Nrefs := Nrefs + 1;
- Rnums (Nrefs) := Rnums (Index);
- end if;
- end loop;
- end if;
-
- -- Eliminate the reference if it is at the same location as the previous
- -- one, unless it is a read-reference indicating that the entity is an
- -- in-out actual in a call.
-
- Ref_Count := Nrefs;
- Nrefs := 0;
- Prev_Loc := No_Location;
- Prev_Typ := 'm';
-
- for Index in 1 .. Ref_Count loop
- declare
- Ref : Xref_Key renames Xrefs.Table (Rnums (Index)).Key;
-
- begin
- if Ref.Loc /= Prev_Loc
- or else (Prev_Typ = 'm' and then Ref.Typ = 'r')
- then
- Prev_Loc := Ref.Loc;
- Prev_Typ := Ref.Typ;
- Nrefs := Nrefs + 1;
- Rnums (Nrefs) := Rnums (Index);
- end if;
- end;
+ for Index in Xrefs.First .. Xrefs.Last loop
+ Add_SPARK_Xref (-Index, Xrefs.Table (Index));
end loop;
-
- -- The two steps have eliminated all references, nothing to do
-
- if SPARK_Scope_Table.Last = 0 then
- return;
- end if;
-
- Ref_Id := Empty;
- Scope_Id := 1;
- From_Index := 1;
-
- -- Loop to output references
-
- for Refno in 1 .. Nrefs loop
- declare
- Ref_Entry : Xref_Entry renames Xrefs.Table (Rnums (Refno));
- Ref : Xref_Key renames Ref_Entry.Key;
- Typ : Character;
-
- begin
- -- If this assertion fails, the scope which we are looking for is
- -- not in SPARK scope table, which reveals either a problem in the
- -- construction of the scope table, or an erroneous scope for the
- -- current cross-reference.
-
- pragma Assert (Is_Future_Scope_Entity (Ref.Ent_Scope, Scope_Id));
-
- -- Update the range of cross references to which the current scope
- -- refers to. This may be the empty range only for the first scope
- -- considered.
-
- if Ref.Ent_Scope /= Entity_Of_Scope (Scope_Id) then
- Update_Scope_Range
- (S => Scope_Id,
- From => From_Index,
- To => SPARK_Xref_Table.Last);
-
- From_Index := SPARK_Xref_Table.Last + 1;
- end if;
-
- while Ref.Ent_Scope /= Entity_Of_Scope (Scope_Id) loop
- Scope_Id := Scope_Id + 1;
- pragma Assert (Scope_Id <= SPARK_Scope_Table.Last);
- end loop;
-
- if Ref.Ent /= Ref_Id then
- Ref_Name := new String'(Unique_Name (Ref.Ent));
- end if;
-
- if Ref.Ent = Heap then
- Line := 0;
- Col := 0;
- else
- Line := Nat (Get_Logical_Line_Number (Ref_Entry.Def));
- Col := Nat (Get_Column_Number (Ref_Entry.Def));
- end if;
-
- -- References to constant objects without variable inputs (see
- -- SPARK RM 3.3.1) are considered specially in SPARK section,
- -- because these will be translated as constants in the
- -- intermediate language for formal verification, and should
- -- therefore never appear in frame conditions. Other constants may
- -- later be treated the same, up to GNATprove to decide based on
- -- its flow analysis.
-
- if Is_Constant_Object_Without_Variable_Input (Ref.Ent) then
- Typ := 'c';
- else
- Typ := Ref.Typ;
- end if;
-
- SPARK_Xref_Table.Append (
- (Entity_Name => Ref_Name,
- Entity_Line => Line,
- Etype => Get_Entity_Type (Ref.Ent),
- Entity_Col => Col,
- File_Num => Dependency_Num (Ref.Lun),
- Scope_Num => Get_Scope_Num (Ref.Ref_Scope),
- Line => Nat (Get_Logical_Line_Number (Ref.Loc)),
- Rtype => Typ,
- Col => Nat (Get_Column_Number (Ref.Loc))));
- end;
- end loop;
-
- -- Update the range of cross references to which the scope refers to
-
- Update_Scope_Range
- (S => Scope_Id,
- From => From_Index,
- To => SPARK_Xref_Table.Last);
- end Add_SPARK_Xrefs;
-
- -------------------------
- -- Collect_SPARK_Xrefs --
- -------------------------
-
- procedure Collect_SPARK_Xrefs
- (Sdep_Table : Unit_Ref_Table;
- Num_Sdep : Nat)
- is
- Sdep : Pos;
- Sdep_Next : Pos;
- -- Index of the current and next source dependency
-
- Sdep_File : Pos;
- -- Index of the file to which the scopes need to be assigned; for
- -- library-level instances of generic units this points to the unit
- -- of the body, because this is where references are assigned to.
-
- Ubody : Unit_Number_Type;
- Uspec : Unit_Number_Type;
- -- Unit numbers for the dependency spec and possibly its body (only in
- -- the case of library-level instance of a generic package).
-
- begin
- -- Cross-references should have been computed first
-
- pragma Assert (Xrefs.Last /= 0);
-
- Initialize_SPARK_Tables;
-
- -- Generate file and scope SPARK cross-reference information
-
- Sdep := 1;
- while Sdep <= Num_Sdep loop
-
- -- Skip dependencies with no entity node, e.g. configuration files
- -- with pragmas (.adc) or target description (.atp), since they
- -- present no interest for SPARK cross references.
-
- if No (Cunit_Entity (Sdep_Table (Sdep))) then
- Sdep_Next := Sdep + 1;
-
- -- For library-level instantiation of a generic, two consecutive
- -- units refer to the same compilation unit node and entity (one to
- -- body, one to spec). In that case, treat them as a single unit for
- -- the sake of SPARK cross references by passing to Add_SPARK_File.
-
- else
- if Sdep < Num_Sdep
- and then Cunit_Entity (Sdep_Table (Sdep)) =
- Cunit_Entity (Sdep_Table (Sdep + 1))
- then
- declare
- Cunit1 : Node_Id renames Cunit (Sdep_Table (Sdep));
- Cunit2 : Node_Id renames Cunit (Sdep_Table (Sdep + 1));
-
- begin
- -- Both Cunits point to compilation unit nodes
-
- pragma Assert
- (Nkind (Cunit1) = N_Compilation_Unit
- and then Nkind (Cunit2) = N_Compilation_Unit);
-
- -- Do not depend on the sorting order, which is based on
- -- Unit_Name, and for library-level instances of nested
- -- generic packages they are equal.
-
- -- If declaration comes before the body
-
- if Nkind (Unit (Cunit1)) = N_Package_Declaration
- and then Nkind (Unit (Cunit2)) = N_Package_Body
- then
- Uspec := Sdep_Table (Sdep);
- Ubody := Sdep_Table (Sdep + 1);
-
- Sdep_File := Sdep + 1;
-
- -- If body comes before declaration
-
- elsif Nkind (Unit (Cunit1)) = N_Package_Body
- and then Nkind (Unit (Cunit2)) = N_Package_Declaration
- then
- Uspec := Sdep_Table (Sdep + 1);
- Ubody := Sdep_Table (Sdep);
-
- Sdep_File := Sdep;
-
- -- Otherwise it is an error
-
- else
- raise Program_Error;
- end if;
-
- Sdep_Next := Sdep + 2;
- end;
-
- -- ??? otherwise?
-
- else
- Uspec := Sdep_Table (Sdep);
- Ubody := No_Unit;
-
- Sdep_File := Sdep;
- Sdep_Next := Sdep + 1;
- end if;
-
- Add_SPARK_File
- (Uspec => Uspec,
- Ubody => Ubody,
- Dspec => Sdep_File);
- end if;
-
- Sdep := Sdep_Next;
- end loop;
-
- -- Fill in the spec information when relevant
-
- declare
- package Entity_Hash_Table is new
- GNAT.HTable.Simple_HTable
- (Header_Num => Entity_Hashed_Range,
- Element => Scope_Index,
- No_Element => 0,
- Key => Entity_Id,
- Hash => Entity_Hash,
- Equal => "=");
-
- begin
- -- Fill in the hash-table
-
- for S in SPARK_Scope_Table.First .. SPARK_Scope_Table.Last loop
- declare
- Srec : SPARK_Scope_Record renames SPARK_Scope_Table.Table (S);
- begin
- Entity_Hash_Table.Set (Srec.Scope_Entity, S);
- end;
- end loop;
-
- -- Use the hash-table to locate spec entities
-
- for S in SPARK_Scope_Table.First .. SPARK_Scope_Table.Last loop
- declare
- Srec : SPARK_Scope_Record renames SPARK_Scope_Table.Table (S);
-
- Spec_Entity : constant Entity_Id :=
- Unique_Entity (Srec.Scope_Entity);
- Spec_Scope : constant Scope_Index :=
- Entity_Hash_Table.Get (Spec_Entity);
-
- begin
- -- Generic spec may be missing in which case Spec_Scope is zero
-
- if Spec_Entity /= Srec.Scope_Entity
- and then Spec_Scope /= 0
- then
- Srec.Spec_File_Num :=
- SPARK_Scope_Table.Table (Spec_Scope).File_Num;
- Srec.Spec_Scope_Num :=
- SPARK_Scope_Table.Table (Spec_Scope).Scope_Num;
- end if;
- end;
- end loop;
- end;
-
- -- Generate SPARK cross-reference information
-
- Add_SPARK_Xrefs;
- end Collect_SPARK_Xrefs;
+ end Iterate_SPARK_Xrefs;
-------------------------------------
-- Enclosing_Subprogram_Or_Package --
@@ -1143,16 +263,6 @@ package body SPARK_Specific is
return Context;
end Enclosing_Subprogram_Or_Library_Package;
- -----------------
- -- Entity_Hash --
- -----------------
-
- function Entity_Hash (E : Entity_Id) return Entity_Hashed_Range is
- begin
- return
- Entity_Hashed_Range (E mod (Entity_Id (Entity_Hashed_Range'Last) + 1));
- end Entity_Hash;
-
--------------------------
-- Generate_Dereference --
--------------------------
@@ -1221,332 +331,4 @@ package body SPARK_Specific is
end if;
end Generate_Dereference;
- -------------------------------
- -- Traverse_Compilation_Unit --
- -------------------------------
-
- procedure Traverse_Compilation_Unit
- (CU : Node_Id;
- Inside_Stubs : Boolean)
- is
- procedure Traverse_Block (N : Node_Id);
- procedure Traverse_Declaration_Or_Statement (N : Node_Id);
- procedure Traverse_Declarations_And_HSS (N : Node_Id);
- procedure Traverse_Declarations_Or_Statements (L : List_Id);
- procedure Traverse_Handled_Statement_Sequence (N : Node_Id);
- procedure Traverse_Package_Body (N : Node_Id);
- procedure Traverse_Visible_And_Private_Parts (N : Node_Id);
- procedure Traverse_Protected_Body (N : Node_Id);
- procedure Traverse_Subprogram_Body (N : Node_Id);
- procedure Traverse_Task_Body (N : Node_Id);
-
- -- Traverse corresponding construct, calling Process on all declarations
-
- --------------------
- -- Traverse_Block --
- --------------------
-
- procedure Traverse_Block (N : Node_Id) renames
- Traverse_Declarations_And_HSS;
-
- ---------------------------------------
- -- Traverse_Declaration_Or_Statement --
- ---------------------------------------
-
- procedure Traverse_Declaration_Or_Statement (N : Node_Id) is
- function Traverse_Stub (N : Node_Id) return Boolean;
- -- Returns True iff stub N should be traversed
-
- function Traverse_Stub (N : Node_Id) return Boolean is
- begin
- pragma Assert (Nkind_In (N, N_Package_Body_Stub,
- N_Protected_Body_Stub,
- N_Subprogram_Body_Stub,
- N_Task_Body_Stub));
-
- return Inside_Stubs and then Present (Library_Unit (N));
- end Traverse_Stub;
-
- -- Start of processing for Traverse_Declaration_Or_Statement
-
- begin
- case Nkind (N) is
- when N_Package_Declaration =>
- Traverse_Visible_And_Private_Parts (Specification (N));
-
- when N_Package_Body =>
- Traverse_Package_Body (N);
-
- when N_Package_Body_Stub =>
- if Traverse_Stub (N) then
- Traverse_Package_Body (Get_Body_From_Stub (N));
- end if;
-
- when N_Subprogram_Body =>
- Traverse_Subprogram_Body (N);
-
- when N_Entry_Body =>
- Traverse_Subprogram_Body (N);
-
- when N_Subprogram_Body_Stub =>
- if Traverse_Stub (N) then
- Traverse_Subprogram_Body (Get_Body_From_Stub (N));
- end if;
-
- when N_Protected_Body =>
- Traverse_Protected_Body (N);
-
- when N_Protected_Body_Stub =>
- if Traverse_Stub (N) then
- Traverse_Protected_Body (Get_Body_From_Stub (N));
- end if;
-
- when N_Protected_Type_Declaration =>
- Traverse_Visible_And_Private_Parts (Protected_Definition (N));
-
- when N_Task_Type_Declaration =>
-
- -- Task type definition is optional (unlike protected type
- -- definition, which is mandatory).
-
- declare
- Task_Def : constant Node_Id := Task_Definition (N);
- begin
- if Present (Task_Def) then
- Traverse_Visible_And_Private_Parts (Task_Def);
- end if;
- end;
-
- when N_Task_Body =>
- Traverse_Task_Body (N);
-
- when N_Task_Body_Stub =>
- if Traverse_Stub (N) then
- Traverse_Task_Body (Get_Body_From_Stub (N));
- end if;
-
- when N_Block_Statement =>
- Traverse_Block (N);
-
- when N_If_Statement =>
-
- -- Traverse the statements in the THEN part
-
- Traverse_Declarations_Or_Statements (Then_Statements (N));
-
- -- Loop through ELSIF parts if present
-
- if Present (Elsif_Parts (N)) then
- declare
- Elif : Node_Id := First (Elsif_Parts (N));
-
- begin
- while Present (Elif) loop
- Traverse_Declarations_Or_Statements
- (Then_Statements (Elif));
- Next (Elif);
- end loop;
- end;
- end if;
-
- -- Finally traverse the ELSE statements if present
-
- Traverse_Declarations_Or_Statements (Else_Statements (N));
-
- when N_Case_Statement =>
-
- -- Process case branches
-
- declare
- Alt : Node_Id := First (Alternatives (N));
- begin
- loop
- Traverse_Declarations_Or_Statements (Statements (Alt));
- Next (Alt);
- exit when No (Alt);
- end loop;
- end;
-
- when N_Extended_Return_Statement =>
- Traverse_Handled_Statement_Sequence
- (Handled_Statement_Sequence (N));
-
- when N_Loop_Statement =>
- Traverse_Declarations_Or_Statements (Statements (N));
-
- -- Generic declarations are ignored
-
- when others =>
- null;
- end case;
- end Traverse_Declaration_Or_Statement;
-
- -----------------------------------
- -- Traverse_Declarations_And_HSS --
- -----------------------------------
-
- procedure Traverse_Declarations_And_HSS (N : Node_Id) is
- begin
- Traverse_Declarations_Or_Statements (Declarations (N));
- Traverse_Handled_Statement_Sequence (Handled_Statement_Sequence (N));
- end Traverse_Declarations_And_HSS;
-
- -----------------------------------------
- -- Traverse_Declarations_Or_Statements --
- -----------------------------------------
-
- procedure Traverse_Declarations_Or_Statements (L : List_Id) is
- N : Node_Id;
-
- begin
- -- Loop through statements or declarations
-
- N := First (L);
- while Present (N) loop
-
- -- Call Process on all declarations
-
- if Nkind (N) in N_Declaration
- or else Nkind (N) in N_Later_Decl_Item
- or else Nkind (N) = N_Entry_Body
- then
- if Nkind (N) in N_Body_Stub then
- Process (Get_Body_From_Stub (N));
- else
- Process (N);
- end if;
- end if;
-
- Traverse_Declaration_Or_Statement (N);
-
- Next (N);
- end loop;
- end Traverse_Declarations_Or_Statements;
-
- -----------------------------------------
- -- Traverse_Handled_Statement_Sequence --
- -----------------------------------------
-
- procedure Traverse_Handled_Statement_Sequence (N : Node_Id) is
- Handler : Node_Id;
-
- begin
- if Present (N) then
- Traverse_Declarations_Or_Statements (Statements (N));
-
- if Present (Exception_Handlers (N)) then
- Handler := First (Exception_Handlers (N));
- while Present (Handler) loop
- Traverse_Declarations_Or_Statements (Statements (Handler));
- Next (Handler);
- end loop;
- end if;
- end if;
- end Traverse_Handled_Statement_Sequence;
-
- ---------------------------
- -- Traverse_Package_Body --
- ---------------------------
-
- procedure Traverse_Package_Body (N : Node_Id) is
- Spec_E : constant Entity_Id := Unique_Defining_Entity (N);
-
- begin
- case Ekind (Spec_E) is
- when E_Package =>
- Traverse_Declarations_And_HSS (N);
-
- when E_Generic_Package =>
- null;
-
- when others =>
- raise Program_Error;
- end case;
- end Traverse_Package_Body;
-
- -----------------------------
- -- Traverse_Protected_Body --
- -----------------------------
-
- procedure Traverse_Protected_Body (N : Node_Id) is
- begin
- Traverse_Declarations_Or_Statements (Declarations (N));
- end Traverse_Protected_Body;
-
- ------------------------------
- -- Traverse_Subprogram_Body --
- ------------------------------
-
- procedure Traverse_Subprogram_Body (N : Node_Id) is
- Spec_E : constant Entity_Id := Unique_Defining_Entity (N);
-
- begin
- case Ekind (Spec_E) is
- when Entry_Kind
- | E_Function
- | E_Procedure
- =>
- Traverse_Declarations_And_HSS (N);
-
- when Generic_Subprogram_Kind =>
- null;
-
- when others =>
- raise Program_Error;
- end case;
- end Traverse_Subprogram_Body;
-
- ------------------------
- -- Traverse_Task_Body --
- ------------------------
-
- procedure Traverse_Task_Body (N : Node_Id) renames
- Traverse_Declarations_And_HSS;
-
- ----------------------------------------
- -- Traverse_Visible_And_Private_Parts --
- ----------------------------------------
-
- procedure Traverse_Visible_And_Private_Parts (N : Node_Id) is
- begin
- Traverse_Declarations_Or_Statements (Visible_Declarations (N));
- Traverse_Declarations_Or_Statements (Private_Declarations (N));
- end Traverse_Visible_And_Private_Parts;
-
- -- Local variables
-
- Lu : Node_Id;
-
- -- Start of processing for Traverse_Compilation_Unit
-
- begin
- -- Get Unit (checking case of subunit)
-
- Lu := Unit (CU);
-
- if Nkind (Lu) = N_Subunit then
- Lu := Proper_Body (Lu);
- end if;
-
- -- Do not add scopes for generic units
-
- if Nkind (Lu) = N_Package_Body
- and then Ekind (Corresponding_Spec (Lu)) in Generic_Unit_Kind
- then
- return;
- end if;
-
- -- Call Process on all declarations
-
- if Nkind (Lu) in N_Declaration
- or else Nkind (Lu) in N_Later_Decl_Item
- then
- Process (Lu);
- end if;
-
- -- Traverse the unit
-
- Traverse_Declaration_Or_Statement (Lu);
- end Traverse_Compilation_Unit;
-
end SPARK_Specific;
diff --git a/gcc/ada/lib-xref.adb b/gcc/ada/lib-xref.adb
index eb6ac0a629f..513d5924126 100644
--- a/gcc/ada/lib-xref.adb
+++ b/gcc/ada/lib-xref.adb
@@ -27,6 +27,7 @@ with Atree; use Atree;
with Csets; use Csets;
with Elists; use Elists;
with Errout; use Errout;
+with Lib.Util; use Lib.Util;
with Nlists; use Nlists;
with Opt; use Opt;
with Restrict; use Restrict;
diff --git a/gcc/ada/lib-xref.ads b/gcc/ada/lib-xref.ads
index d4216396c9c..0baa896253e 100644
--- a/gcc/ada/lib-xref.ads
+++ b/gcc/ada/lib-xref.ads
@@ -26,9 +26,8 @@
-- This package contains for collecting and outputting cross-reference
-- information.
-with Einfo; use Einfo;
-with Lib.Util; use Lib.Util;
-with Put_SPARK_Xrefs;
+with Einfo; use Einfo;
+with SPARK_Xrefs;
package Lib.Xref is
@@ -640,26 +639,15 @@ package Lib.Xref is
-- This procedure is called to record a dereference. N is the location
-- of the dereference.
- procedure Collect_SPARK_Xrefs
- (Sdep_Table : Unit_Ref_Table;
- Num_Sdep : Nat);
- -- Collect SPARK cross-reference information from library units (for
- -- files and scopes) and from shared cross-references. Fill in the
- -- tables in library package called SPARK_Xrefs.
-
- procedure Output_SPARK_Xrefs is new Put_SPARK_Xrefs;
- -- Output SPARK cross-reference information to the ALI files, based on
- -- the information collected in the tables in library package called
- -- SPARK_Xrefs, and using routines in Lib.Util.
-
generic
- with procedure Process (N : Node_Id) is <>;
- procedure Traverse_Compilation_Unit
- (CU : Node_Id;
- Inside_Stubs : Boolean);
- -- Call Process on all declarations within compilation unit CU. If
- -- Inside_Stubs is True, then the body of stubs is also traversed.
- -- Generic declarations are ignored.
+ with procedure Process
+ (Index : Int;
+ Xref : SPARK_Xrefs.SPARK_Xref_Record);
+ procedure Iterate_SPARK_Xrefs;
+ -- Call Process on cross-references relevant to the SPARK backend with
+ -- parameter Xref holding the relevant subset of the xref entry and
+ -- Index holding the position in the original tables with references
+ -- (if positive) or dereferences (if negative).
end SPARK_Specific;
diff --git a/gcc/ada/libgnarl/a-intnam__qnx.ads b/gcc/ada/libgnarl/a-intnam__qnx.ads
new file mode 100644
index 00000000000..ab45b381863
--- /dev/null
+++ b/gcc/ada/libgnarl/a-intnam__qnx.ads
@@ -0,0 +1,146 @@
+------------------------------------------------------------------------------
+-- --
+-- GNAT RUN-TIME LIBRARY (GNARL) COMPONENTS --
+-- --
+-- A D A . I N T E R R U P T S . N A M E S --
+-- --
+-- S p e c --
+-- --
+-- Copyright (C) 1991-2017, Free Software Foundation, Inc. --
+-- --
+-- GNARL is free software; you can redistribute it and/or modify it under --
+-- terms of the GNU General Public License as published by the Free Soft- --
+-- ware Foundation; either version 3, or (at your option) any later ver- --
+-- sion. GNAT is distributed in the hope that it will be useful, but WITH- --
+-- OUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY --
+-- or FITNESS FOR A PARTICULAR PURPOSE. --
+-- --
+-- As a special exception under Section 7 of GPL version 3, you are granted --
+-- additional permissions described in the GCC Runtime Library Exception, --
+-- version 3.1, as published by the Free Software Foundation. --
+-- --
+-- You should have received a copy of the GNU General Public License and --
+-- a copy of the GCC Runtime Library Exception along with this program; --
+-- see the files COPYING3 and COPYING.RUNTIME respectively. If not, see --
+-- <http://www.gnu.org/licenses/>. --
+-- --
+-- GNARL was developed by the GNARL team at Florida State University. --
+-- Extensive contributions were provided by Ada Core Technologies, Inc. --
+-- --
+------------------------------------------------------------------------------
+
+-- This is a QNX version of this package
+
+-- The pragma Unreserve_All_Interrupts affects the following signal(s):
+
+-- SIGINT: made available for Ada handler
+
+-- This target-dependent package spec contains names of interrupts
+-- supported by the local system.
+
+with System.OS_Interface;
+
+package Ada.Interrupts.Names is
+
+ -- All identifiers in this unit are implementation defined
+
+ pragma Implementation_Defined;
+
+ -- Beware that the mapping of names to signals may be many-to-one. There
+ -- may be aliases. Also, for all signal names that are not supported on the
+ -- current system the value of the corresponding constant will be zero.
+
+ SIGHUP : constant Interrupt_ID :=
+ System.OS_Interface.SIGHUP; -- hangup
+
+ SIGINT : constant Interrupt_ID :=
+ System.OS_Interface.SIGINT; -- interrupt (rubout)
+
+ SIGQUIT : constant Interrupt_ID :=
+ System.OS_Interface.SIGQUIT; -- quit (ASCD FS)
+
+ SIGILL : constant Interrupt_ID :=
+ System.OS_Interface.SIGILL; -- illegal instruction (not reset)
+
+ SIGTRAP : constant Interrupt_ID :=
+ System.OS_Interface.SIGTRAP; -- trace trap (not reset)
+
+ SIGIOT : constant Interrupt_ID :=
+ System.OS_Interface.SIGIOT; -- IOT instruction
+
+ SIGABRT : constant Interrupt_ID := -- used by abort,
+ System.OS_Interface.SIGABRT; -- replace SIGIOT in the future
+
+ SIGFPE : constant Interrupt_ID :=
+ System.OS_Interface.SIGFPE; -- floating point exception
+
+ SIGKILL : constant Interrupt_ID :=
+ System.OS_Interface.SIGKILL; -- kill (cannot be caught or ignored)
+
+ SIGBUS : constant Interrupt_ID :=
+ System.OS_Interface.SIGBUS; -- bus error
+
+ SIGSEGV : constant Interrupt_ID :=
+ System.OS_Interface.SIGSEGV; -- segmentation violation
+
+ SIGPIPE : constant Interrupt_ID := -- write on a pipe with
+ System.OS_Interface.SIGPIPE; -- no one to read it
+
+ SIGALRM : constant Interrupt_ID :=
+ System.OS_Interface.SIGALRM; -- alarm clock
+
+ SIGTERM : constant Interrupt_ID :=
+ System.OS_Interface.SIGTERM; -- software termination signal from kill
+
+ SIGUSR1 : constant Interrupt_ID :=
+ System.OS_Interface.SIGUSR1; -- user defined signal 1
+
+ SIGUSR2 : constant Interrupt_ID :=
+ System.OS_Interface.SIGUSR2; -- user defined signal 2
+
+ SIGCLD : constant Interrupt_ID :=
+ System.OS_Interface.SIGCLD; -- child status change
+
+ SIGCHLD : constant Interrupt_ID :=
+ System.OS_Interface.SIGCHLD; -- 4.3BSD's/POSIX name for SIGCLD
+
+ SIGWINCH : constant Interrupt_ID :=
+ System.OS_Interface.SIGWINCH; -- window size change
+
+ SIGURG : constant Interrupt_ID :=
+ System.OS_Interface.SIGURG; -- urgent condition on IO channel
+
+ SIGPOLL : constant Interrupt_ID :=
+ System.OS_Interface.SIGPOLL; -- pollable event occurred
+
+ SIGIO : constant Interrupt_ID := -- input/output possible,
+ System.OS_Interface.SIGIO; -- SIGPOLL alias (Solaris)
+
+ SIGSTOP : constant Interrupt_ID :=
+ System.OS_Interface.SIGSTOP; -- stop (cannot be caught or ignored)
+
+ SIGTSTP : constant Interrupt_ID :=
+ System.OS_Interface.SIGTSTP; -- user stop requested from tty
+
+ SIGCONT : constant Interrupt_ID :=
+ System.OS_Interface.SIGCONT; -- stopped process has been continued
+
+ SIGTTIN : constant Interrupt_ID :=
+ System.OS_Interface.SIGTTIN; -- background tty read attempted
+
+ SIGTTOU : constant Interrupt_ID :=
+ System.OS_Interface.SIGTTOU; -- background tty write attempted
+
+ SIGVTALRM : constant Interrupt_ID :=
+ System.OS_Interface.SIGVTALRM; -- virtual timer expired
+
+ SIGPROF : constant Interrupt_ID :=
+ System.OS_Interface.SIGPROF; -- profiling timer expired
+
+ SIGXCPU : constant Interrupt_ID :=
+ System.OS_Interface.SIGXCPU; -- CPU time limit exceeded
+
+ SIGXFSZ : constant Interrupt_ID :=
+ System.OS_Interface.SIGXFSZ; -- filesize limit exceeded
+
+end Ada.Interrupts.Names;
diff --git a/gcc/ada/libgnarl/g-thread.adb b/gcc/ada/libgnarl/g-thread.adb
index 90d51afb8c9..59c444b1f9e 100644
--- a/gcc/ada/libgnarl/g-thread.adb
+++ b/gcc/ada/libgnarl/g-thread.adb
@@ -33,6 +33,7 @@ with Ada.Task_Identification; use Ada.Task_Identification;
with System.Task_Primitives.Operations;
with System.Tasking;
with System.Tasking.Stages; use System.Tasking.Stages;
+with System.Tasking.Utilities;
with System.OS_Interface; use System.OS_Interface;
with System.Soft_Links; use System.Soft_Links;
with Ada.Unchecked_Conversion;
@@ -172,6 +173,15 @@ package body GNAT.Threads is
Thr.all := Task_Primitives.Operations.Get_Thread_Id (To_Id (Id));
end Get_Thread;
+ ----------------------
+ -- Make_Independent --
+ ----------------------
+
+ function Make_Independent return Boolean is
+ begin
+ return System.Tasking.Utilities.Make_Independent;
+ end Make_Independent;
+
----------------
-- To_Task_Id --
----------------
diff --git a/gcc/ada/libgnarl/g-thread.ads b/gcc/ada/libgnarl/g-thread.ads
index e2fd748dc1d..027b7c2f747 100644
--- a/gcc/ada/libgnarl/g-thread.ads
+++ b/gcc/ada/libgnarl/g-thread.ads
@@ -146,4 +146,15 @@ package GNAT.Threads is
-- Given a low level Id, as returned by Create_Thread, return a Task_Id,
-- so that operations in Ada.Task_Identification can be used.
+ function Make_Independent return Boolean;
+ -- If a procedure loads a shared library containing tasks, and that
+ -- procedure is considered to be a master by the compiler (because it
+ -- contains tasks or class-wide objects that might contain tasks),
+ -- then the tasks in the shared library need to call Make_Independent
+ -- because otherwise they will depend on the procedure that loaded the
+ -- shared library.
+ --
+ -- See System.Tasking.Utilities.Make_Independent in s-tasuti.ads for
+ -- further documentation.
+
end GNAT.Threads;
diff --git a/gcc/ada/libgnarl/s-intman__qnx.adb b/gcc/ada/libgnarl/s-intman__qnx.adb
new file mode 100644
index 00000000000..ae33d69fae3
--- /dev/null
+++ b/gcc/ada/libgnarl/s-intman__qnx.adb
@@ -0,0 +1,298 @@
+------------------------------------------------------------------------------
+-- --
+-- GNAT RUN-TIME LIBRARY (GNARL) COMPONENTS --
+-- --
+-- S Y S T E M . I N T E R R U P T _ M A N A G E M E N T --
+-- --
+-- B o d y --
+-- --
+-- Copyright (C) 1992-2017, Free Software Foundation, Inc. --
+-- --
+-- GNARL is free software; you can redistribute it and/or modify it under --
+-- terms of the GNU General Public License as published by the Free Soft- --
+-- ware Foundation; either version 3, or (at your option) any later ver- --
+-- sion. GNAT is distributed in the hope that it will be useful, but WITH- --
+-- OUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY --
+-- or FITNESS FOR A PARTICULAR PURPOSE. --
+-- --
+-- As a special exception under Section 7 of GPL version 3, you are granted --
+-- additional permissions described in the GCC Runtime Library Exception, --
+-- version 3.1, as published by the Free Software Foundation. --
+-- --
+-- You should have received a copy of the GNU General Public License and --
+-- a copy of the GCC Runtime Library Exception along with this program; --
+-- see the files COPYING3 and COPYING.RUNTIME respectively. If not, see --
+-- <http://www.gnu.org/licenses/>. --
+-- --
+-- GNARL was developed by the GNARL team at Florida State University. --
+-- Extensive contributions were provided by Ada Core Technologies, Inc. --
+-- --
+------------------------------------------------------------------------------
+
+-- This is the QNX/Neutrino threads version of this package
+
+-- Make a careful study of all signals available under the OS, to see which
+-- need to be reserved, kept always unmasked, or kept always unmasked. Be on
+-- the lookout for special signals that may be used by the thread library.
+
+-- Since this is a multi target file, the signal <-> exception mapping
+-- is simple minded. If you need a more precise and target specific
+-- signal handling, create a new s-intman.adb that will fit your needs.
+
+-- This file assumes that:
+
+-- SIGFPE, SIGILL, SIGSEGV and SIGBUS exist. They are mapped as follows:
+-- SIGPFE => Constraint_Error
+-- SIGILL => Program_Error
+-- SIGSEGV => Storage_Error
+-- SIGBUS => Storage_Error
+
+-- SIGINT exists and will be kept unmasked unless the pragma
+-- Unreserve_All_Interrupts is specified anywhere in the application.
+
+-- System.OS_Interface contains the following:
+-- SIGADAABORT: the signal that will be used to abort tasks.
+-- Unmasked: the OS specific set of signals that should be unmasked in
+-- all the threads. SIGADAABORT is unmasked by
+-- default
+-- Reserved: the OS specific set of signals that are reserved.
+
+with System.Task_Primitives;
+
+package body System.Interrupt_Management is
+
+ use Interfaces.C;
+ use System.OS_Interface;
+
+ type Interrupt_List is array (Interrupt_ID range <>) of Interrupt_ID;
+ Exception_Interrupts : constant Interrupt_List :=
+ (SIGFPE, SIGILL, SIGSEGV, SIGBUS);
+
+ Unreserve_All_Interrupts : Interfaces.C.int;
+ pragma Import
+ (C, Unreserve_All_Interrupts, "__gl_unreserve_all_interrupts");
+
+ -----------------------
+ -- Local Subprograms --
+ -----------------------
+
+ procedure Signal_Trampoline
+ (signo : Signal;
+ siginfo : System.Address;
+ ucontext : System.Address;
+ handler : System.Address);
+ pragma Import (C, Signal_Trampoline, "__gnat_sigtramp");
+ -- Pass the real handler to a speical function that handles unwinding by
+ -- skipping over the kernel signal frame (which doesn't contain any unwind
+ -- information).
+
+ procedure Map_Signal
+ (signo : Signal;
+ siginfo : System.Address;
+ ucontext : System.Address);
+ pragma Import (C, Map_Signal, "__gnat_map_signal");
+
+ function State (Int : Interrupt_ID) return Character;
+ pragma Import (C, State, "__gnat_get_interrupt_state");
+ -- Get interrupt state. Defined in init.c The input argument is the
+ -- interrupt number, and the result is one of the following:
+
+ User : constant Character := 'u';
+ Runtime : constant Character := 'r';
+ Default : constant Character := 's';
+ -- 'n' this interrupt not set by any Interrupt_State pragma
+ -- 'u' Interrupt_State pragma set state to User
+ -- 'r' Interrupt_State pragma set state to Runtime
+ -- 's' Interrupt_State pragma set state to System (use "default"
+ -- system handler)
+
+ procedure Notify_Exception
+ (signo : Signal;
+ siginfo : System.Address;
+ ucontext : System.Address);
+ -- This function identifies the Ada exception to be raised using the
+ -- information when the system received a synchronous signal. Since this
+ -- function is machine and OS dependent, different code has to be provided
+ -- for different target.
+
+ ----------------------
+ -- Notify_Exception --
+ ----------------------
+
+ Signal_Mask : aliased sigset_t;
+ -- The set of signals handled by Notify_Exception
+
+ procedure Notify_Exception
+ (signo : Signal;
+ siginfo : System.Address;
+ ucontext : System.Address)
+ is
+ Result : Interfaces.C.int;
+
+ begin
+ -- With the __builtin_longjmp, the signal mask is not restored, so we
+ -- need to restore it explicitly.
+
+ Result := pthread_sigmask (SIG_UNBLOCK, Signal_Mask'Access, null);
+ pragma Assert (Result = 0);
+
+ -- Perform the necessary context adjustments prior to a raise
+ -- from a signal handler.
+
+ Adjust_Context_For_Raise (signo, ucontext);
+
+ -- Check that treatment of exception propagation here is consistent with
+ -- treatment of the abort signal in System.Task_Primitives.Operations.
+
+ Signal_Trampoline (signo, siginfo, ucontext, Map_Signal'Address);
+ end Notify_Exception;
+
+ ----------------
+ -- Initialize --
+ ----------------
+
+ Initialized : Boolean := False;
+
+ procedure Initialize is
+ act : aliased struct_sigaction;
+ old_act : aliased struct_sigaction;
+ Result : System.OS_Interface.int;
+
+ Use_Alternate_Stack : constant Boolean :=
+ System.Task_Primitives.Alternate_Stack_Size /= 0;
+ -- Whether to use an alternate signal stack for stack overflows
+
+ begin
+ if Initialized then
+ return;
+ end if;
+
+ Initialized := True;
+
+ -- Need to call pthread_init very early because it is doing signal
+ -- initializations.
+
+ pthread_init;
+
+ Abort_Task_Interrupt := SIGADAABORT;
+
+ act.sa_handler := Notify_Exception'Address;
+
+ -- Setting SA_SIGINFO asks the kernel to pass more than just the signal
+ -- number argument to the handler when it is called. The set of extra
+ -- parameters includes a pointer to the interrupted context, which the
+ -- ZCX propagation scheme needs.
+
+ -- Most man pages for sigaction mention that sa_sigaction should be set
+ -- instead of sa_handler when SA_SIGINFO is on. In practice, the two
+ -- fields are actually union'ed and located at the same offset.
+
+ -- On some targets, we set sa_flags to SA_NODEFER so that during the
+ -- handler execution we do not change the Signal_Mask to be masked for
+ -- the Signal.
+
+ -- This is a temporary fix to the problem that the Signal_Mask is not
+ -- restored after the exception (longjmp) from the handler. The right
+ -- fix should be made in sigsetjmp so that we save the Signal_Set and
+ -- restore it after a longjmp.
+
+ -- Since SA_NODEFER is obsolete, instead we reset explicitly the mask
+ -- in the exception handler.
+
+ Result := sigemptyset (Signal_Mask'Access);
+ pragma Assert (Result = 0);
+
+ -- Add signals that map to Ada exceptions to the mask
+
+ for J in Exception_Interrupts'Range loop
+ if State (Exception_Interrupts (J)) /= Default then
+ Result :=
+ sigaddset (Signal_Mask'Access, Signal (Exception_Interrupts (J)));
+ pragma Assert (Result = 0);
+ end if;
+ end loop;
+
+ act.sa_mask := Signal_Mask;
+
+ pragma Assert (Keep_Unmasked = (Interrupt_ID'Range => False));
+ pragma Assert (Reserve = (Interrupt_ID'Range => False));
+
+ -- Process state of exception signals
+
+ for J in Exception_Interrupts'Range loop
+ if State (Exception_Interrupts (J)) /= User then
+ Keep_Unmasked (Exception_Interrupts (J)) := True;
+ Reserve (Exception_Interrupts (J)) := True;
+
+ if State (Exception_Interrupts (J)) /= Default then
+ act.sa_flags := SA_SIGINFO;
+
+ if Use_Alternate_Stack
+ and then Exception_Interrupts (J) = SIGSEGV
+ then
+ act.sa_flags := act.sa_flags + SA_ONSTACK;
+ end if;
+
+ Result :=
+ sigaction
+ (Signal (Exception_Interrupts (J)), act'Unchecked_Access,
+ old_act'Unchecked_Access);
+ pragma Assert (Result = 0);
+ end if;
+ end if;
+ end loop;
+
+ if State (Abort_Task_Interrupt) /= User then
+ Keep_Unmasked (Abort_Task_Interrupt) := True;
+ Reserve (Abort_Task_Interrupt) := True;
+ end if;
+
+ -- Set SIGINT to unmasked state as long as it is not in "User" state.
+ -- Check for Unreserve_All_Interrupts last.
+
+ if State (SIGINT) /= User then
+ Keep_Unmasked (SIGINT) := True;
+ Reserve (SIGINT) := True;
+ end if;
+
+ -- Check all signals for state that requires keeping them unmasked and
+ -- reserved.
+
+ for J in Interrupt_ID'Range loop
+ if State (J) = Default or else State (J) = Runtime then
+ Keep_Unmasked (J) := True;
+ Reserve (J) := True;
+ end if;
+ end loop;
+
+ -- Add the set of signals that must always be unmasked for this target
+
+ for J in Unmasked'Range loop
+ Keep_Unmasked (Interrupt_ID (Unmasked (J))) := True;
+ Reserve (Interrupt_ID (Unmasked (J))) := True;
+ end loop;
+
+ -- Add target-specific reserved signals
+
+ if Reserved'Length > 0 then
+ for J in Reserved'Range loop
+ Reserve (Interrupt_ID (Reserved (J))) := True;
+ end loop;
+ end if;
+
+ -- Process pragma Unreserve_All_Interrupts. This overrides any settings
+ -- due to pragma Interrupt_State:
+
+ if Unreserve_All_Interrupts /= 0 then
+ Keep_Unmasked (SIGINT) := False;
+ Reserve (SIGINT) := False;
+ end if;
+
+ -- We do not really have Signal 0. We just use this value to identify
+ -- non-existent signals (see s-intnam.ads). Therefore, Signal should not
+ -- be used in all signal related operations hence mark it as reserved.
+
+ Reserve (0) := True;
+ end Initialize;
+
+end System.Interrupt_Management;
diff --git a/gcc/ada/libgnarl/s-osinte__qnx.adb b/gcc/ada/libgnarl/s-osinte__qnx.adb
new file mode 100644
index 00000000000..bc9ec4446c7
--- /dev/null
+++ b/gcc/ada/libgnarl/s-osinte__qnx.adb
@@ -0,0 +1,109 @@
+------------------------------------------------------------------------------
+-- --
+-- GNAT RUN-TIME LIBRARY (GNARL) COMPONENTS --
+-- --
+-- S Y S T E M . O S _ I N T E R F A C E --
+-- --
+-- B o d y --
+-- --
+-- Copyright (C) 1991-2017, Florida State University --
+-- Copyright (C) 1995-2017, AdaCore --
+-- --
+-- GNAT is free software; you can redistribute it and/or modify it under --
+-- terms of the GNU General Public License as published by the Free Soft- --
+-- ware Foundation; either version 3, or (at your option) any later ver- --
+-- sion. GNAT is distributed in the hope that it will be useful, but WITH- --
+-- OUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY --
+-- or FITNESS FOR A PARTICULAR PURPOSE. --
+-- --
+-- As a special exception under Section 7 of GPL version 3, you are granted --
+-- additional permissions described in the GCC Runtime Library Exception, --
+-- version 3.1, as published by the Free Software Foundation. --
+-- --
+-- You should have received a copy of the GNU General Public License and --
+-- a copy of the GCC Runtime Library Exception along with this program; --
+-- see the files COPYING3 and COPYING.RUNTIME respectively. If not, see --
+-- <http://www.gnu.org/licenses/>. --
+-- --
+-- GNARL was developed by the GNARL team at Florida State University. --
+-- Extensive contributions were provided by Ada Core Technologies, Inc. --
+-- --
+------------------------------------------------------------------------------
+
+-- This version is for QNX operating systems
+
+pragma Polling (Off);
+-- Turn off polling, we do not want ATC polling to take place during
+-- tasking operations. It causes infinite loops and other problems.
+
+-- This package encapsulates all direct interfaces to OS services
+-- that are needed by children of System.
+
+with Interfaces.C; use Interfaces.C;
+package body System.OS_Interface is
+
+ --------------------
+ -- Get_Stack_Base --
+ --------------------
+
+ function Get_Stack_Base (thread : pthread_t) return Address is
+ pragma Warnings (Off, thread);
+
+ begin
+ return Null_Address;
+ end Get_Stack_Base;
+
+ ------------------
+ -- pthread_init --
+ ------------------
+
+ procedure pthread_init is
+ begin
+ null;
+ end pthread_init;
+
+ -----------------
+ -- To_Duration --
+ -----------------
+
+ function To_Duration (TS : timespec) return Duration is
+ begin
+ return Duration (TS.tv_sec) + Duration (TS.tv_nsec) / 10#1#E9;
+ end To_Duration;
+
+ ------------------------
+ -- To_Target_Priority --
+ ------------------------
+
+ function To_Target_Priority
+ (Prio : System.Any_Priority) return Interfaces.C.int
+ is
+ begin
+ return Interfaces.C.int (Prio + 1);
+ end To_Target_Priority;
+
+ -----------------
+ -- To_Timespec --
+ -----------------
+
+ function To_Timespec (D : Duration) return timespec is
+ S : time_t;
+ F : Duration;
+
+ begin
+ S := time_t (Long_Long_Integer (D));
+ F := D - Duration (S);
+
+ -- If F has negative value due to a round-up, adjust for positive F
+ -- value.
+
+ if F < 0.0 then
+ S := S - 1;
+ F := F + 1.0;
+ end if;
+
+ return timespec'(tv_sec => S,
+ tv_nsec => long (Long_Long_Integer (F * 10#1#E9)));
+ end To_Timespec;
+
+end System.OS_Interface;
diff --git a/gcc/ada/libgnarl/s-osinte__qnx.ads b/gcc/ada/libgnarl/s-osinte__qnx.ads
new file mode 100644
index 00000000000..14416cc7ab7
--- /dev/null
+++ b/gcc/ada/libgnarl/s-osinte__qnx.ads
@@ -0,0 +1,617 @@
+------------------------------------------------------------------------------
+-- --
+-- GNAT RUN-TIME LIBRARY (GNARL) COMPONENTS --
+-- --
+-- S Y S T E M . O S _ I N T E R F A C E --
+-- --
+-- S p e c --
+-- --
+-- Copyright (C) 1995-2017, Free Software Foundation, Inc. --
+-- --
+-- GNAT is free software; you can redistribute it and/or modify it under --
+-- terms of the GNU General Public License as published by the Free Soft- --
+-- ware Foundation; either version 3, or (at your option) any later ver- --
+-- sion. GNAT is distributed in the hope that it will be useful, but WITH- --
+-- OUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY --
+-- or FITNESS FOR A PARTICULAR PURPOSE. --
+-- --
+-- As a special exception under Section 7 of GPL version 3, you are granted --
+-- additional permissions described in the GCC Runtime Library Exception, --
+-- version 3.1, as published by the Free Software Foundation. --
+-- --
+-- You should have received a copy of the GNU General Public License and --
+-- a copy of the GCC Runtime Library Exception along with this program; --
+-- see the files COPYING3 and COPYING.RUNTIME respectively. If not, see --
+-- <http://www.gnu.org/licenses/>. --
+-- --
+-- GNARL was developed by the GNARL team at Florida State University. --
+-- Extensive contributions were provided by Ada Core Technologies, Inc. --
+-- --
+------------------------------------------------------------------------------
+
+-- This is a QNX/Neutrino version of this package
+
+-- This package encapsulates all direct interfaces to OS services
+-- that are needed by the tasking run-time (libgnarl).
+
+-- PLEASE DO NOT add any with-clauses to this package or remove the pragma
+-- Preelaborate. This package is designed to be a bottom-level (leaf) package.
+
+with Ada.Unchecked_Conversion;
+with Interfaces.C;
+with System.OS_Constants;
+
+package System.OS_Interface is
+ pragma Preelaborate;
+
+ subtype int is Interfaces.C.int;
+ subtype char is Interfaces.C.char;
+ subtype short is Interfaces.C.short;
+ subtype long is Interfaces.C.long;
+ subtype unsigned is Interfaces.C.unsigned;
+ subtype unsigned_short is Interfaces.C.unsigned_short;
+ subtype unsigned_long is Interfaces.C.unsigned_long;
+ subtype unsigned_char is Interfaces.C.unsigned_char;
+ subtype plain_char is Interfaces.C.plain_char;
+ subtype size_t is Interfaces.C.size_t;
+
+ -----------
+ -- Errno --
+ -----------
+
+ function errno return int;
+ pragma Import (C, errno, "__get_errno");
+
+ EPERM : constant := 1;
+ EINTR : constant := 4;
+ EAGAIN : constant := 11;
+ ENOMEM : constant := 12;
+ EINVAL : constant := 22;
+ ETIMEDOUT : constant := 260;
+
+ -------------
+ -- Signals --
+ -------------
+
+ Max_Interrupt : constant := 64;
+ type Signal is new int range 0 .. Max_Interrupt;
+ for Signal'Size use int'Size;
+
+ SIGHUP : constant := 1;
+ SIGINT : constant := 2;
+ SIGQUIT : constant := 3;
+ SIGILL : constant := 4;
+ SIGTRAP : constant := 5;
+ SIGIOT : constant := 6;
+ SIGABRT : constant := 6;
+ SIGDEADLK : constant := 7;
+ SIGFPE : constant := 8;
+ SIGKILL : constant := 9;
+ SIGBUS : constant := 10;
+ SIGSEGV : constant := 11;
+ SIGSYS : constant := 12;
+ SIGPIPE : constant := 13;
+ SIGALRM : constant := 14;
+ SIGTERM : constant := 15;
+ SIGUSR1 : constant := 16;
+ SIGUSR2 : constant := 17;
+ SIGCLD : constant := 18;
+ SIGCHLD : constant := 18;
+ SIGPWR : constant := 19;
+ SIGWINCH : constant := 20;
+ SIGURG : constant := 21;
+ SIGPOLL : constant := 22;
+ SIGIO : constant := 22;
+ SIGSTOP : constant := 23;
+ SIGTSTP : constant := 24;
+ SIGCONT : constant := 25;
+ SIGTTIN : constant := 26;
+ SIGTTOU : constant := 27;
+ SIGVTALRM : constant := 28;
+ SIGPROF : constant := 29;
+ SIGXCPU : constant := 30;
+ SIGXFSZ : constant := 31;
+
+ SIGRTMIN : constant := 41;
+ SITRTMAX : constant := 56;
+
+ SIGSELECT : constant := 57;
+ SIGPHOTON : constant := 58;
+
+ SIGADAABORT : constant := SIGABRT;
+ -- Change this to use another signal for task abort. SIGTERM might be a
+ -- good one.
+
+ type Signal_Set is array (Natural range <>) of Signal;
+
+ Unmasked : constant Signal_Set := (
+ SIGTRAP,
+ -- To enable debugging on multithreaded applications, mark SIGTRAP to
+ -- be kept unmasked.
+
+ SIGBUS,
+
+ SIGTTIN, SIGTTOU, SIGTSTP,
+ -- Keep these three signals unmasked so that background processes and IO
+ -- behaves as normal "C" applications
+
+ SIGPROF,
+ -- To avoid confusing the profiler
+
+ SIGKILL, SIGSTOP);
+ -- These two signals actually can't be masked (POSIX won't allow it)
+
+ Reserved : constant Signal_Set := (SIGABRT, SIGKILL, SIGSTOP, SIGSEGV);
+
+ type sigset_t is private;
+
+ function sigaddset (set : access sigset_t; sig : Signal) return int;
+ pragma Import (C, sigaddset, "sigaddset");
+
+ function sigdelset (set : access sigset_t; sig : Signal) return int;
+ pragma Import (C, sigdelset, "sigdelset");
+
+ function sigfillset (set : access sigset_t) return int;
+ pragma Import (C, sigfillset, "sigfillset");
+
+ function sigismember (set : access sigset_t; sig : Signal) return int;
+ pragma Import (C, sigismember, "sigismember");
+
+ function sigemptyset (set : access sigset_t) return int;
+ pragma Import (C, sigemptyset, "sigemptyset");
+
+ type pad7 is array (1 .. 7) of int;
+ type siginfo_t is record
+ si_signo : int;
+ si_code : int;
+ si_errno : int;
+ X_data : pad7;
+ end record;
+ pragma Convention (C, siginfo_t);
+
+ type struct_sigaction is record
+ sa_handler : System.Address;
+ sa_flags : int;
+ sa_mask : sigset_t;
+ end record;
+ pragma Convention (C, struct_sigaction);
+
+ type struct_sigaction_ptr is access all struct_sigaction;
+
+ SIG_BLOCK : constant := 0;
+ SIG_UNBLOCK : constant := 1;
+ SIG_SETMASK : constant := 2;
+ SIG_PENDING : constant := 5;
+
+ SA_NOCLDSTOP : constant := 16#0001#;
+ SA_SIGINFO : constant := 16#0002#;
+ SA_RESETHAND : constant := 16#0004#;
+ SA_ONSTACK : constant := 16#0008#;
+ SA_NODEFER : constant := 16#0010#;
+ SA_NOCLDWAIT : constant := 16#0020#;
+
+ SS_ONSTACK : constant := 1;
+ SS_DISABLE : constant := 2;
+
+ SIG_DFL : constant := 0;
+ SIG_IGN : constant := 1;
+
+ function sigaction
+ (sig : Signal;
+ act : struct_sigaction_ptr;
+ oact : struct_sigaction_ptr) return int;
+ pragma Import (C, sigaction, "sigaction");
+
+ ----------
+ -- Time --
+ ----------
+
+ Time_Slice_Supported : constant Boolean := True;
+ -- Indicates whether time slicing is supported
+
+ type timespec is private;
+
+ type clockid_t is new int;
+
+ function clock_gettime
+ (clock_id : clockid_t; tp : access timespec) return int;
+ pragma Import (C, clock_gettime, "clock_gettime");
+
+ function clock_getres
+ (clock_id : clockid_t;
+ res : access timespec) return int;
+ pragma Import (C, clock_getres, "clock_getres");
+
+ function To_Duration (TS : timespec) return Duration;
+ pragma Inline (To_Duration);
+
+ function To_Timespec (D : Duration) return timespec;
+ pragma Inline (To_Timespec);
+
+ -------------------------
+ -- Priority Scheduling --
+ -------------------------
+
+ SCHED_FIFO : constant := 1;
+ SCHED_RR : constant := 2;
+ SCHED_OTHER : constant := 3;
+
+ function To_Target_Priority
+ (Prio : System.Any_Priority) return Interfaces.C.int
+ with Inline_Always;
+ -- Maps System.Any_Priority to a POSIX priority
+
+ -------------
+ -- Process --
+ -------------
+
+ type pid_t is private;
+
+ function kill (pid : pid_t; sig : Signal) return int;
+ pragma Import (C, kill, "kill");
+
+ function getpid return pid_t;
+ pragma Import (C, getpid, "getpid");
+
+ -------------
+ -- Threads --
+ -------------
+
+ type Thread_Body is access
+ function (arg : System.Address) return System.Address;
+ pragma Convention (C, Thread_Body);
+
+ function Thread_Body_Access is new
+ Ada.Unchecked_Conversion (System.Address, Thread_Body);
+
+ type pthread_t is new int;
+ subtype Thread_Id is pthread_t;
+
+ type pthread_mutex_t is limited private;
+ type pthread_cond_t is limited private;
+ type pthread_attr_t is limited private;
+ type pthread_mutexattr_t is limited private;
+ type pthread_condattr_t is limited private;
+ type pthread_key_t is private;
+
+ PTHREAD_CREATE_DETACHED : constant := 1;
+
+ PTHREAD_SCOPE_PROCESS : constant := 4;
+ PTHREAD_SCOPE_SYSTEM : constant := 0;
+
+ PTHREAD_INHERIT_SCHED : constant := 0;
+ PTHREAD_EXPLICIT_SCHED : constant := 2;
+
+ -- Read/Write lock not supported on Android.
+
+ subtype pthread_rwlock_t is pthread_mutex_t;
+ subtype pthread_rwlockattr_t is pthread_mutexattr_t;
+
+ -----------
+ -- Stack --
+ -----------
+
+ type stack_t is record
+ ss_sp : System.Address;
+ ss_flags : int;
+ ss_size : size_t;
+ end record;
+ pragma Convention (C, stack_t);
+
+ function sigaltstack
+ (ss : not null access stack_t;
+ oss : access stack_t) return int
+ is (0);
+ -- Not supported on QNX
+
+ Alternate_Stack : aliased System.Address;
+ -- Dummy definition: alternate stack not available due to missing
+ -- sigaltstack in QNX
+
+ Alternate_Stack_Size : constant := 0;
+ -- This must be kept in sync with init.c:__gnat_alternate_stack
+
+ Stack_Base_Available : constant Boolean := False;
+ -- Indicates whether the stack base is available on this target
+
+ function Get_Stack_Base (thread : pthread_t) return System.Address
+ with Inline_Always;
+ -- This is a dummy procedure to share some GNULLI files
+
+ function Get_Page_Size return int;
+ pragma Import (C, Get_Page_Size, "getpagesize");
+ -- Returns the size of a page
+
+ PROT_NONE : constant := 16#00_00#;
+ PROT_READ : constant := 16#01_00#;
+ PROT_WRITE : constant := 16#02_00#;
+ PROT_EXEC : constant := 16#04_00#;
+ PROT_ALL : constant := PROT_READ + PROT_WRITE + PROT_EXEC;
+ PROT_ON : constant := PROT_READ;
+ PROT_OFF : constant := PROT_ALL;
+
+ function mprotect (addr : Address; len : size_t; prot : int) return int;
+ pragma Import (C, mprotect);
+
+ ---------------------------------------
+ -- Nonstandard Thread Initialization --
+ ---------------------------------------
+
+ procedure pthread_init with Inline_Always;
+
+ -------------------------
+ -- POSIX.1c Section 3 --
+ -------------------------
+
+ function sigwait (set : access sigset_t; sig : access Signal) return int;
+ pragma Import (C, sigwait, "sigwait");
+
+ function pthread_kill (thread : pthread_t; sig : Signal) return int;
+ pragma Import (C, pthread_kill, "pthread_kill");
+
+ function pthread_sigmask
+ (how : int;
+ set : access sigset_t;
+ oset : access sigset_t) return int;
+ pragma Import (C, pthread_sigmask, "pthread_sigmask");
+
+ --------------------------
+ -- POSIX.1c Section 11 --
+ --------------------------
+
+ function pthread_mutexattr_init
+ (attr : access pthread_mutexattr_t) return int;
+ pragma Import (C, pthread_mutexattr_init, "pthread_mutexattr_init");
+
+ function pthread_mutexattr_destroy
+ (attr : access pthread_mutexattr_t) return int;
+ pragma Import (C, pthread_mutexattr_destroy, "pthread_mutexattr_destroy");
+
+ function pthread_mutex_init
+ (mutex : access pthread_mutex_t;
+ attr : access pthread_mutexattr_t) return int;
+ pragma Import (C, pthread_mutex_init, "pthread_mutex_init");
+
+ function pthread_mutex_destroy (mutex : access pthread_mutex_t) return int;
+ pragma Import (C, pthread_mutex_destroy, "pthread_mutex_destroy");
+
+ function pthread_mutex_lock (mutex : access pthread_mutex_t) return int;
+ pragma Import (C, pthread_mutex_lock, "pthread_mutex_lock");
+
+ function pthread_mutex_unlock (mutex : access pthread_mutex_t) return int;
+ pragma Import (C, pthread_mutex_unlock, "pthread_mutex_unlock");
+
+ function pthread_mutex_setprioceiling
+ (mutex : access pthread_mutex_t;
+ prioceiling : int;
+ old_ceiling : access int) return int;
+ pragma Import (C, pthread_mutex_setprioceiling);
+
+ function pthread_condattr_init
+ (attr : access pthread_condattr_t) return int;
+ pragma Import (C, pthread_condattr_init, "pthread_condattr_init");
+
+ function pthread_condattr_destroy
+ (attr : access pthread_condattr_t) return int;
+ pragma Import (C, pthread_condattr_destroy, "pthread_condattr_destroy");
+
+ function pthread_cond_init
+ (cond : access pthread_cond_t;
+ attr : access pthread_condattr_t) return int;
+ pragma Import (C, pthread_cond_init, "pthread_cond_init");
+
+ function pthread_cond_destroy (cond : access pthread_cond_t) return int;
+ pragma Import (C, pthread_cond_destroy, "pthread_cond_destroy");
+
+ function pthread_cond_signal (cond : access pthread_cond_t) return int;
+ pragma Import (C, pthread_cond_signal, "pthread_cond_signal");
+
+ function pthread_cond_wait
+ (cond : access pthread_cond_t;
+ mutex : access pthread_mutex_t) return int;
+ pragma Import (C, pthread_cond_wait, "pthread_cond_wait");
+
+ function pthread_cond_timedwait
+ (cond : access pthread_cond_t;
+ mutex : access pthread_mutex_t;
+ abstime : access timespec) return int;
+ pragma Import (C, pthread_cond_timedwait, "pthread_cond_timedwait");
+
+ Relative_Timed_Wait : constant Boolean := False;
+ -- pthread_cond_timedwait requires an absolute delay time
+
+ --------------------------
+ -- POSIX.1c Section 13 --
+ --------------------------
+
+ PTHREAD_PRIO_INHERIT : constant := 0;
+ PTHREAD_PRIO_NONE : constant := 1;
+ PTHREAD_PRIO_PROTECT : constant := 2;
+
+ function pthread_mutexattr_setprotocol
+ (attr : access pthread_mutexattr_t;
+ protocol : int) return int;
+ pragma Import (C, pthread_mutexattr_setprotocol);
+
+ function pthread_mutexattr_getprotocol
+ (attr : access pthread_mutexattr_t;
+ protocol : access int) return int;
+ pragma Import (C, pthread_mutexattr_getprotocol);
+
+ function pthread_mutexattr_setprioceiling
+ (attr : access pthread_mutexattr_t;
+ prioceiling : int) return int;
+ pragma Import (C, pthread_mutexattr_setprioceiling);
+
+ function pthread_mutexattr_getprioceiling
+ (attr : access pthread_mutexattr_t;
+ prioceiling : access int) return int;
+ pragma Import (C, pthread_mutexattr_getprioceiling);
+
+ function pthread_mutex_getprioceiling
+ (attr : access pthread_mutex_t;
+ prioceiling : access int) return int;
+ pragma Import (C, pthread_mutex_getprioceiling);
+
+ type pad8 is array (1 .. 8) of int;
+ pragma Convention (C, pad8);
+
+ type struct_sched_param is record
+ sched_priority : int := 0; -- scheduling priority
+ sched_curpriority : int := 0;
+ reserved : pad8 := (others => 0);
+ end record;
+ pragma Convention (C, struct_sched_param);
+
+ function pthread_setschedparam
+ (thread : pthread_t;
+ policy : int;
+ param : access struct_sched_param) return int;
+ pragma Import (C, pthread_setschedparam, "pthread_setschedparam");
+
+ function pthread_getschedparam
+ (thread : pthread_t;
+ policy : access int;
+ param : access struct_sched_param) return int;
+ pragma Import (C, pthread_getschedparam, "pthread_getschedparam");
+
+ function pthread_setschedprio
+ (thread : pthread_t;
+ priority : int) return int;
+ pragma Import (C, pthread_setschedprio);
+
+ function pthread_attr_setschedparam
+ (attr : access pthread_attr_t;
+ param : access struct_sched_param) return int;
+ pragma Import (C, pthread_attr_setschedparam);
+
+ function pthread_attr_setinheritsched
+ (attr : access pthread_attr_t;
+ inheritsched : int) return int;
+ pragma Import (C, pthread_attr_setinheritsched);
+
+ function pthread_attr_setscope
+ (attr : access pthread_attr_t;
+ scope : int) return int;
+ pragma Import (C, pthread_attr_setscope, "pthread_attr_setscope");
+
+ function pthread_attr_setschedpolicy
+ (attr : access pthread_attr_t;
+ policy : int) return int;
+ pragma Import
+ (C, pthread_attr_setschedpolicy, "pthread_attr_setschedpolicy");
+
+ function sched_yield return int;
+ pragma Import (C, sched_yield, "sched_yield");
+
+ ---------------------------
+ -- P1003.1c - Section 16 --
+ ---------------------------
+
+ function pthread_attr_init
+ (attributes : access pthread_attr_t) return int;
+ pragma Import (C, pthread_attr_init, "pthread_attr_init");
+
+ function pthread_attr_destroy
+ (attributes : access pthread_attr_t) return int;
+ pragma Import (C, pthread_attr_destroy, "pthread_attr_destroy");
+
+ function pthread_attr_setdetachstate
+ (attr : access pthread_attr_t;
+ detachstate : int) return int;
+ pragma Import (C, pthread_attr_setdetachstate);
+
+ function pthread_attr_setstacksize
+ (attr : access pthread_attr_t;
+ stacksize : size_t) return int;
+ pragma Import (C, pthread_attr_setstacksize);
+
+ function pthread_create
+ (thread : access pthread_t;
+ attributes : access pthread_attr_t;
+ start_routine : Thread_Body;
+ arg : System.Address) return int;
+ pragma Import (C, pthread_create, "pthread_create");
+
+ procedure pthread_exit (status : System.Address);
+ pragma Import (C, pthread_exit, "pthread_exit");
+
+ function pthread_self return pthread_t;
+ pragma Import (C, pthread_self, "pthread_self");
+
+ function lwp_self return System.Address;
+ pragma Import (C, lwp_self, "pthread_self");
+
+ --------------------------
+ -- POSIX.1c Section 17 --
+ --------------------------
+
+ function pthread_setspecific
+ (key : pthread_key_t;
+ value : System.Address) return int;
+ pragma Import (C, pthread_setspecific, "pthread_setspecific");
+
+ function pthread_getspecific (key : pthread_key_t) return System.Address;
+ pragma Import (C, pthread_getspecific, "pthread_getspecific");
+
+ type destructor_pointer is access procedure (arg : System.Address);
+ pragma Convention (C, destructor_pointer);
+
+ function pthread_key_create
+ (key : access pthread_key_t;
+ destructor : destructor_pointer) return int;
+ pragma Import (C, pthread_key_create, "pthread_key_create");
+
+private
+
+ type sigset_t is array (1 .. 2) of Interfaces.Unsigned_32;
+ pragma Convention (C, sigset_t);
+
+ type pid_t is new int;
+
+ type time_t is new long;
+
+ type timespec is record
+ tv_sec : time_t;
+ tv_nsec : long;
+ end record;
+ pragma Convention (C, timespec);
+
+ type unsigned_long_long_t is mod 2 ** 64;
+ -- Local type only used to get the alignment of this type below
+
+ subtype char_array is Interfaces.C.char_array;
+
+ type pthread_attr_t is record
+ Data : char_array (1 .. OS_Constants.PTHREAD_ATTR_SIZE);
+ end record;
+ pragma Convention (C, pthread_attr_t);
+ for pthread_attr_t'Alignment use Interfaces.C.unsigned_long'Alignment;
+
+ type pthread_condattr_t is record
+ Data : char_array (1 .. OS_Constants.PTHREAD_CONDATTR_SIZE);
+ end record;
+ pragma Convention (C, pthread_condattr_t);
+ for pthread_condattr_t'Alignment use Interfaces.C.int'Alignment;
+
+ type pthread_mutexattr_t is record
+ Data : char_array (1 .. OS_Constants.PTHREAD_MUTEXATTR_SIZE);
+ end record;
+ pragma Convention (C, pthread_mutexattr_t);
+ for pthread_mutexattr_t'Alignment use Interfaces.C.int'Alignment;
+
+ type pthread_mutex_t is record
+ Data : char_array (1 .. OS_Constants.PTHREAD_MUTEX_SIZE);
+ end record;
+ pragma Convention (C, pthread_mutex_t);
+ for pthread_mutex_t'Alignment use Interfaces.C.unsigned_long'Alignment;
+
+ type pthread_cond_t is record
+ Data : char_array (1 .. OS_Constants.PTHREAD_COND_SIZE);
+ end record;
+ pragma Convention (C, pthread_cond_t);
+ for pthread_cond_t'Alignment use unsigned_long_long_t'Alignment;
+
+ type pthread_key_t is new int;
+
+end System.OS_Interface;
diff --git a/gcc/ada/libgnarl/s-qnx.ads b/gcc/ada/libgnarl/s-qnx.ads
new file mode 100644
index 00000000000..2097f778624
--- /dev/null
+++ b/gcc/ada/libgnarl/s-qnx.ads
@@ -0,0 +1,122 @@
+------------------------------------------------------------------------------
+-- --
+-- GNU ADA RUN-TIME LIBRARY (GNARL) COMPONENTS --
+-- --
+-- S Y S T E M . Q N X --
+-- --
+-- S p e c --
+-- --
+-- Copyright (C) 2017, Free Software Foundation, Inc. --
+-- --
+-- GNARL is free software; you can redistribute it and/or modify it under --
+-- terms of the GNU General Public License as published by the Free Soft- --
+-- ware Foundation; either version 3, or (at your option) any later ver- --
+-- sion. GNAT is distributed in the hope that it will be useful, but WITH- --
+-- OUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY --
+-- or FITNESS FOR A PARTICULAR PURPOSE. --
+-- --
+-- As a special exception under Section 7 of GPL version 3, you are granted --
+-- additional permissions described in the GCC Runtime Library Exception, --
+-- version 3.1, as published by the Free Software Foundation. --
+-- --
+-- You should have received a copy of the GNU General Public License and --
+-- a copy of the GCC Runtime Library Exception along with this program; --
+-- see the files COPYING3 and COPYING.RUNTIME respectively. If not, see --
+-- <http://www.gnu.org/licenses/>. --
+-- --
+-- --
+------------------------------------------------------------------------------
+
+-- This is the default version of this package
+
+-- This package encapsulates cpu specific differences between implementations
+-- of QNX, in order to share s-osinte-linux.ads.
+
+-- PLEASE DO NOT add any with-clauses to this package or remove the pragma
+-- Preelaborate. This package is designed to be a bottom-level (leaf) package
+
+with Interfaces.C;
+
+package System.QNX is
+ pragma Preelaborate;
+
+ ----------
+ -- Time --
+ ----------
+
+ subtype long is Interfaces.C.long;
+ subtype suseconds_t is Interfaces.C.long;
+ subtype time_t is Interfaces.C.long;
+ subtype clockid_t is Interfaces.C.int;
+
+ type timespec is record
+ tv_sec : time_t;
+ tv_nsec : long;
+ end record;
+ pragma Convention (C, timespec);
+
+ type timeval is record
+ tv_sec : time_t;
+ tv_usec : suseconds_t;
+ end record;
+ pragma Convention (C, timeval);
+
+ -----------
+ -- Errno --
+ -----------
+
+ EAGAIN : constant := 11;
+ EINTR : constant := 4;
+ EINVAL : constant := 22;
+ ENOMEM : constant := 12;
+ EPERM : constant := 1;
+ ETIMEDOUT : constant := 110;
+
+ -------------
+ -- Signals --
+ -------------
+
+ SIGHUP : constant := 1; -- hangup
+ SIGINT : constant := 2; -- interrupt (rubout)
+ SIGQUIT : constant := 3; -- quit (ASCD FS)
+ SIGILL : constant := 4; -- illegal instruction (not reset)
+ SIGTRAP : constant := 5; -- trace trap (not reset)
+ SIGIOT : constant := 6; -- IOT instruction
+ SIGABRT : constant := 6; -- used by abort, replace SIGIOT in the future
+ SIGEMT : constant := 7; -- EMT instruction
+ SIGDEADLK : constant := 7; -- Mutex deadlock
+ SIGFPE : constant := 8; -- floating point exception
+ SIGKILL : constant := 9; -- kill (cannot be caught or ignored)
+ SIGSEGV : constant := 11; -- segmentation violation
+ SIGPIPE : constant := 13; -- write on a pipe with no one to read it
+ SIGALRM : constant := 14; -- alarm clock
+ SIGTERM : constant := 15; -- software termination signal from kill
+ SIGUSR1 : constant := 16; -- user defined signal 1
+ SIGUSR2 : constant := 17; -- user defined signal 2
+ SIGCHLD : constant := 18; -- child status change
+ SIGCLD : constant := 18; -- alias for SIGCHLD
+ SIGPWR : constant := 19; -- power-fail restart
+ SIGWINCH : constant := 20; -- window size change
+ SIGURG : constant := 21; -- urgent condition on IO channel
+ SIGPOLL : constant := 22; -- pollable event occurred
+ SIGIO : constant := 22; -- I/O now possible (4.2 BSD)
+ SIGSTOP : constant := 23; -- stop (cannot be caught or ignored)
+ SIGTSTP : constant := 24; -- user stop requested from tty
+ SIGCONT : constant := 25; -- stopped process has been continued
+ SIGTTIN : constant := 26; -- background tty read attempted
+ SIGTTOU : constant := 27; -- background tty write attempted
+ SIGVTALRM : constant := 28; -- virtual timer expired
+ SIGPROF : constant := 29; -- profiling timer expired
+ SIGXCPU : constant := 30; -- CPU time limit exceeded
+ SIGXFSZ : constant := 31; -- filesize limit exceeded
+
+ -- struct_sigaction offsets
+
+ sa_handler_pos : constant := 0;
+ sa_mask_pos : constant := Standard'Address_Size / 8;
+ sa_flags_pos : constant := 128 + sa_mask_pos;
+
+ SA_SIGINFO : constant := 16#04#;
+ SA_ONSTACK : constant := 16#08000000#;
+
+end System.QNX;
diff --git a/gcc/ada/libgnarl/s-taprop__linux.adb b/gcc/ada/libgnarl/s-taprop__linux.adb
index 5da10824a15..2efdc978ff2 100644
--- a/gcc/ada/libgnarl/s-taprop__linux.adb
+++ b/gcc/ada/libgnarl/s-taprop__linux.adb
@@ -141,9 +141,9 @@ package body System.Task_Primitives.Operations is
function Monotonic_Clock return Duration;
pragma Inline (Monotonic_Clock);
- -- Returns "absolute" time, represented as an offset relative to "the
- -- Epoch", which is Jan 1, 1970. This clock implementation is immune to
- -- the system's clock changes.
+ -- Returns an absolute time, represented as an offset relative to some
+ -- unspecified starting point, typically system boot time. This clock is
+ -- not affected by discontinuous jumps in the system time.
function RT_Resolution return Duration;
pragma Inline (RT_Resolution);
diff --git a/gcc/ada/libgnarl/s-taprop__mingw.adb b/gcc/ada/libgnarl/s-taprop__mingw.adb
index b14444ad185..8517bbe86ec 100644
--- a/gcc/ada/libgnarl/s-taprop__mingw.adb
+++ b/gcc/ada/libgnarl/s-taprop__mingw.adb
@@ -796,7 +796,17 @@ package body System.Task_Primitives.Operations is
raise Invalid_CPU_Number;
end if;
- Self_ID.Common.LL.Thread := GetCurrentThread;
+ -- Initialize the thread here only if not set. This is done for a
+ -- foreign task but is not needed when a real thread-id is already
+ -- set in Create_Task. Note that we do want to keep the real thread-id
+ -- as it is the only way to free the associated resource. Another way
+ -- to say this is that a pseudo thread-id from a foreign thread won't
+ -- allow for freeing resources.
+
+ if Self_ID.Common.LL.Thread = Null_Thread_Id then
+ Self_ID.Common.LL.Thread := GetCurrentThread;
+ end if;
+
Self_ID.Common.LL.Thread_Id := GetCurrentThreadId;
Get_Stack_Bounds
@@ -976,7 +986,7 @@ package body System.Task_Primitives.Operations is
Known_Tasks (T.Known_Tasks_Index) := null;
end if;
- if T.Common.LL.Thread /= 0 then
+ if T.Common.LL.Thread /= Null_Thread_Id then
-- This task has been activated. Close the thread handle. This
-- is needed to release system resources.
diff --git a/gcc/ada/libgnarl/s-taprop__posix.adb b/gcc/ada/libgnarl/s-taprop__posix.adb
index d9ee078b364..b1d619f16b5 100644
--- a/gcc/ada/libgnarl/s-taprop__posix.adb
+++ b/gcc/ada/libgnarl/s-taprop__posix.adb
@@ -149,9 +149,9 @@ package body System.Task_Primitives.Operations is
function Monotonic_Clock return Duration;
pragma Inline (Monotonic_Clock);
- -- Returns "absolute" time, represented as an offset relative to "the
- -- Epoch", which is Jan 1, 1970. This clock implementation is immune to
- -- the system's clock changes.
+ -- Returns an absolute time, represented as an offset relative to some
+ -- unspecified starting point, typically system boot time. This clock
+ -- is not affected by discontinuous jumps in the system time.
function RT_Resolution return Duration;
pragma Inline (RT_Resolution);
diff --git a/gcc/ada/libgnarl/s-taprop__qnx.adb b/gcc/ada/libgnarl/s-taprop__qnx.adb
new file mode 100644
index 00000000000..4ec033046c5
--- /dev/null
+++ b/gcc/ada/libgnarl/s-taprop__qnx.adb
@@ -0,0 +1,1355 @@
+------------------------------------------------------------------------------
+-- --
+-- GNAT RUN-TIME LIBRARY (GNARL) COMPONENTS --
+-- --
+-- S Y S T E M . T A S K _ P R I M I T I V E S . O P E R A T I O N S --
+-- --
+-- B o d y --
+-- --
+-- Copyright (C) 1992-2017, Free Software Foundation, Inc. --
+-- --
+-- GNARL is free software; you can redistribute it and/or modify it under --
+-- terms of the GNU General Public License as published by the Free Soft- --
+-- ware Foundation; either version 3, or (at your option) any later ver- --
+-- sion. GNAT is distributed in the hope that it will be useful, but WITH- --
+-- OUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY --
+-- or FITNESS FOR A PARTICULAR PURPOSE. --
+-- --
+-- As a special exception under Section 7 of GPL version 3, you are granted --
+-- additional permissions described in the GCC Runtime Library Exception, --
+-- version 3.1, as published by the Free Software Foundation. --
+-- --
+-- You should have received a copy of the GNU General Public License and --
+-- a copy of the GCC Runtime Library Exception along with this program; --
+-- see the files COPYING3 and COPYING.RUNTIME respectively. If not, see --
+-- <http://www.gnu.org/licenses/>. --
+-- --
+-- GNARL was developed by the GNARL team at Florida State University. --
+-- Extensive contributions were provided by Ada Core Technologies, Inc. --
+-- --
+------------------------------------------------------------------------------
+
+-- This is a POSIX-like version of this package
+
+-- This package contains all the GNULL primitives that interface directly with
+-- the underlying OS.
+
+-- Note: this file can only be used for POSIX compliant systems that implement
+-- SCHED_FIFO and Ceiling Locking correctly.
+
+-- For configurations where SCHED_FIFO and priority ceiling are not a
+-- requirement, this file can also be used (e.g AiX threads)
+
+pragma Polling (Off);
+-- Turn off polling, we do not want ATC polling to take place during tasking
+-- operations. It causes infinite loops and other problems.
+
+with Ada.Unchecked_Conversion;
+
+with Interfaces.C;
+
+with System.Tasking.Debug;
+with System.Interrupt_Management;
+with System.OS_Constants;
+with System.OS_Primitives;
+with System.Task_Info;
+
+with System.Soft_Links;
+-- We use System.Soft_Links instead of System.Tasking.Initialization
+-- because the later is a higher level package that we shouldn't depend on.
+-- For example when using the restricted run time, it is replaced by
+-- System.Tasking.Restricted.Stages.
+
+package body System.Task_Primitives.Operations is
+
+ package OSC renames System.OS_Constants;
+ package SSL renames System.Soft_Links;
+
+ use System.Tasking.Debug;
+ use System.Tasking;
+ use Interfaces.C;
+ use System.OS_Interface;
+ use System.Parameters;
+ use System.OS_Primitives;
+
+ ----------------
+ -- Local Data --
+ ----------------
+
+ -- The followings are logically constants, but need to be initialized
+ -- at run time.
+
+ Single_RTS_Lock : aliased RTS_Lock;
+ -- This is a lock to allow only one thread of control in the RTS at
+ -- a time; it is used to execute in mutual exclusion from all other tasks.
+ -- Used mainly in Single_Lock mode, but also to protect All_Tasks_List
+
+ Environment_Task_Id : Task_Id;
+ -- A variable to hold Task_Id for the environment task
+
+ Locking_Policy : Character;
+ pragma Import (C, Locking_Policy, "__gl_locking_policy");
+ -- Value of the pragma Locking_Policy:
+ -- 'C' for Ceiling_Locking
+ -- 'I' for Inherit_Locking
+ -- ' ' for none.
+
+ Unblocked_Signal_Mask : aliased sigset_t;
+ -- The set of signals that should unblocked in all tasks
+
+ -- The followings are internal configuration constants needed
+
+ Next_Serial_Number : Task_Serial_Number := 100;
+ -- We start at 100, to reserve some special values for
+ -- using in error checking.
+
+ Time_Slice_Val : Integer;
+ pragma Import (C, Time_Slice_Val, "__gl_time_slice_val");
+
+ Dispatching_Policy : Character;
+ pragma Import (C, Dispatching_Policy, "__gl_task_dispatching_policy");
+
+ Foreign_Task_Elaborated : aliased Boolean := True;
+ -- Used to identified fake tasks (i.e., non-Ada Threads)
+
+ Use_Alternate_Stack : constant Boolean := Alternate_Stack_Size /= 0;
+ -- Whether to use an alternate signal stack for stack overflows
+
+ Abort_Handler_Installed : Boolean := False;
+ -- True if a handler for the abort signal is installed
+
+ type RTS_Lock_Ptr is not null access all RTS_Lock;
+
+ function Init_Mutex (L : RTS_Lock_Ptr; Prio : Any_Priority) return int;
+ -- Initialize the mutex L. If Ceiling_Support is True, then set the ceiling
+ -- to Prio. Returns 0 for success, or ENOMEM for out-of-memory.
+
+ function Get_Policy (Prio : System.Any_Priority) return Character;
+ pragma Import (C, Get_Policy, "__gnat_get_specific_dispatching");
+ -- Get priority specific dispatching policy
+
+ --------------------
+ -- Local Packages --
+ --------------------
+
+ package Specific is
+
+ procedure Initialize (Environment_Task : Task_Id);
+ pragma Inline (Initialize);
+ -- Initialize various data needed by this package
+
+ function Is_Valid_Task return Boolean;
+ pragma Inline (Is_Valid_Task);
+ -- Does executing thread have a TCB?
+
+ procedure Set (Self_Id : Task_Id);
+ pragma Inline (Set);
+ -- Set the self id for the current task
+
+ function Self return Task_Id;
+ pragma Inline (Self);
+ -- Return a pointer to the Ada Task Control Block of the calling task
+
+ end Specific;
+
+ package body Specific is separate;
+ -- The body of this package is target specific
+
+ package Monotonic is
+
+ function Monotonic_Clock return Duration;
+ pragma Inline (Monotonic_Clock);
+ -- Returns an absolute time, represented as an offset relative to some
+ -- unspecified starting point, typically system boot time. This clock
+ -- is not affected by discontinuous jumps in the system time.
+
+ function RT_Resolution return Duration;
+ pragma Inline (RT_Resolution);
+ -- Returns resolution of the underlying clock used to implement RT_Clock
+
+ procedure Timed_Sleep
+ (Self_ID : ST.Task_Id;
+ Time : Duration;
+ Mode : ST.Delay_Modes;
+ Reason : System.Tasking.Task_States;
+ Timedout : out Boolean;
+ Yielded : out Boolean);
+ -- Combination of Sleep (above) and Timed_Delay
+
+ procedure Timed_Delay
+ (Self_ID : ST.Task_Id;
+ Time : Duration;
+ Mode : ST.Delay_Modes);
+ -- Implement the semantics of the delay statement.
+ -- The caller should be abort-deferred and should not hold any locks.
+
+ end Monotonic;
+
+ package body Monotonic is separate;
+
+ ----------------------------------
+ -- ATCB allocation/deallocation --
+ ----------------------------------
+
+ package body ATCB_Allocation is separate;
+ -- The body of this package is shared across several targets
+
+ ---------------------------------
+ -- Support for foreign threads --
+ ---------------------------------
+
+ function Register_Foreign_Thread
+ (Thread : Thread_Id;
+ Sec_Stack_Size : Size_Type := Unspecified_Size) return Task_Id;
+ -- Allocate and initialize a new ATCB for the current Thread. The size of
+ -- the secondary stack can be optionally specified.
+
+ function Register_Foreign_Thread
+ (Thread : Thread_Id;
+ Sec_Stack_Size : Size_Type := Unspecified_Size)
+ return Task_Id is separate;
+
+ -----------------------
+ -- Local Subprograms --
+ -----------------------
+
+ procedure Abort_Handler (Sig : Signal);
+ -- Signal handler used to implement asynchronous abort.
+ -- See also comment before body, below.
+
+ function To_Address is
+ new Ada.Unchecked_Conversion (Task_Id, System.Address);
+
+ function GNAT_pthread_condattr_setup
+ (attr : access pthread_condattr_t) return int;
+ pragma Import (C,
+ GNAT_pthread_condattr_setup, "__gnat_pthread_condattr_setup");
+
+ -------------------
+ -- Abort_Handler --
+ -------------------
+
+ -- Target-dependent binding of inter-thread Abort signal to the raising of
+ -- the Abort_Signal exception.
+
+ -- The technical issues and alternatives here are essentially the
+ -- same as for raising exceptions in response to other signals
+ -- (e.g. Storage_Error). See code and comments in the package body
+ -- System.Interrupt_Management.
+
+ -- Some implementations may not allow an exception to be propagated out of
+ -- a handler, and others might leave the signal or interrupt that invoked
+ -- this handler masked after the exceptional return to the application
+ -- code.
+
+ -- GNAT exceptions are originally implemented using setjmp()/longjmp(). On
+ -- most UNIX systems, this will allow transfer out of a signal handler,
+ -- which is usually the only mechanism available for implementing
+ -- asynchronous handlers of this kind. However, some systems do not
+ -- restore the signal mask on longjmp(), leaving the abort signal masked.
+
+ procedure Abort_Handler (Sig : Signal) is
+ pragma Unreferenced (Sig);
+
+ T : constant Task_Id := Self;
+ Old_Set : aliased sigset_t;
+
+ Result : Interfaces.C.int;
+ pragma Warnings (Off, Result);
+
+ begin
+ -- It's not safe to raise an exception when using GCC ZCX mechanism.
+ -- Note that we still need to install a signal handler, since in some
+ -- cases (e.g. shutdown of the Server_Task in System.Interrupts) we
+ -- need to send the Abort signal to a task.
+
+ if ZCX_By_Default then
+ return;
+ end if;
+
+ if T.Deferral_Level = 0
+ and then T.Pending_ATC_Level < T.ATC_Nesting_Level and then
+ not T.Aborting
+ then
+ T.Aborting := True;
+
+ -- Make sure signals used for RTS internal purpose are unmasked
+
+ Result := pthread_sigmask (SIG_UNBLOCK,
+ Unblocked_Signal_Mask'Access, Old_Set'Access);
+ pragma Assert (Result = 0);
+
+ raise Standard'Abort_Signal;
+ end if;
+ end Abort_Handler;
+
+ -----------------
+ -- Stack_Guard --
+ -----------------
+
+ procedure Stack_Guard (T : ST.Task_Id; On : Boolean) is
+ Stack_Base : constant Address := Get_Stack_Base (T.Common.LL.Thread);
+ Page_Size : Address;
+ Res : Interfaces.C.int;
+
+ begin
+ if Stack_Base_Available then
+
+ -- Compute the guard page address
+
+ Page_Size := Address (Get_Page_Size);
+ Res :=
+ mprotect
+ (Stack_Base - (Stack_Base mod Page_Size) + Page_Size,
+ size_t (Page_Size),
+ prot => (if On then PROT_ON else PROT_OFF));
+ pragma Assert (Res = 0);
+ end if;
+ end Stack_Guard;
+
+ --------------------
+ -- Get_Thread_Id --
+ --------------------
+
+ function Get_Thread_Id (T : ST.Task_Id) return OSI.Thread_Id is
+ begin
+ return T.Common.LL.Thread;
+ end Get_Thread_Id;
+
+ ----------
+ -- Self --
+ ----------
+
+ function Self return Task_Id renames Specific.Self;
+
+ ----------------
+ -- Init_Mutex --
+ ----------------
+
+ function Init_Mutex (L : RTS_Lock_Ptr; Prio : Any_Priority) return int
+ is
+ Attributes : aliased pthread_mutexattr_t;
+ Result : int;
+ Result_2 : aliased int;
+
+ begin
+ Result := pthread_mutexattr_init (Attributes'Access);
+ pragma Assert (Result = 0 or else Result = ENOMEM);
+
+ if Result = ENOMEM then
+ return Result;
+ end if;
+
+ if Locking_Policy = 'C' then
+ Result := pthread_mutexattr_setprotocol
+ (Attributes'Access, PTHREAD_PRIO_PROTECT);
+ pragma Assert (Result = 0);
+
+ Result := pthread_mutexattr_getprotocol
+ (Attributes'Access, Result_2'Access);
+ if Result_2 /= PTHREAD_PRIO_PROTECT then
+ raise Program_Error with "setprotocol failed";
+ end if;
+
+ Result := pthread_mutexattr_setprioceiling
+ (Attributes'Access, To_Target_Priority (Prio));
+ pragma Assert (Result = 0);
+
+ elsif Locking_Policy = 'I' then
+ Result := pthread_mutexattr_setprotocol
+ (Attributes'Access, PTHREAD_PRIO_INHERIT);
+ pragma Assert (Result = 0);
+ end if;
+
+ Result := pthread_mutex_init (L, Attributes'Access);
+ pragma Assert (Result = 0 or else Result = ENOMEM);
+
+ Result_2 := pthread_mutexattr_destroy (Attributes'Access);
+ pragma Assert (Result_2 = 0);
+
+ return Result;
+ end Init_Mutex;
+
+ ---------------------
+ -- Initialize_Lock --
+ ---------------------
+
+ -- Note: mutexes and cond_variables needed per-task basis are initialized
+ -- in Initialize_TCB and the Storage_Error is handled. Other mutexes (such
+ -- as RTS_Lock, Memory_Lock...) used in RTS is initialized before any
+ -- status change of RTS. Therefore raising Storage_Error in the following
+ -- routines should be able to be handled safely.
+
+ procedure Initialize_Lock
+ (Prio : System.Any_Priority;
+ L : not null access Lock)
+ is
+ begin
+ if Init_Mutex (L.WO'Access, Prio) = ENOMEM then
+ raise Storage_Error with "Failed to allocate a lock";
+ end if;
+ end Initialize_Lock;
+
+ procedure Initialize_Lock
+ (L : not null access RTS_Lock; Level : Lock_Level)
+ is
+ pragma Unreferenced (Level);
+
+ begin
+ if Init_Mutex (L.all'Access, Any_Priority'Last) = ENOMEM then
+ raise Storage_Error with "Failed to allocate a lock";
+ end if;
+ end Initialize_Lock;
+
+ -------------------
+ -- Finalize_Lock --
+ -------------------
+
+ procedure Finalize_Lock (L : not null access Lock) is
+ Result : Interfaces.C.int;
+ begin
+ Result := pthread_mutex_destroy (L.WO'Access);
+ pragma Assert (Result = 0);
+ end Finalize_Lock;
+
+ procedure Finalize_Lock (L : not null access RTS_Lock) is
+ Result : Interfaces.C.int;
+ begin
+ Result := pthread_mutex_destroy (L);
+ pragma Assert (Result = 0);
+ end Finalize_Lock;
+
+ ----------------
+ -- Write_Lock --
+ ----------------
+
+ procedure Write_Lock
+ (L : not null access Lock; Ceiling_Violation : out Boolean)
+ is
+ Self : constant pthread_t := pthread_self;
+ Result : int;
+ Policy : aliased int;
+ Ceiling : aliased int;
+ Sched : aliased struct_sched_param;
+
+ begin
+ Result := pthread_mutex_lock (L.WO'Access);
+
+ -- The cause of EINVAL is a priority ceiling violation
+
+ Ceiling_Violation := Result = EINVAL;
+ pragma Assert (Result = 0 or else Ceiling_Violation);
+
+ -- Workaround bug in QNX on ceiling locks: tasks with priority higher
+ -- than the ceiling priority don't receive EINVAL upon trying to lock.
+ if Result = 0 then
+ Result := pthread_getschedparam (Self, Policy'Access, Sched'Access);
+ pragma Assert (Result = 0);
+ Result := pthread_mutex_getprioceiling (L.WO'Access, Ceiling'Access);
+ pragma Assert (Result = 0);
+
+ -- Ceiling = 0 means no Ceiling Priority policy is set on this mutex
+ -- Else, Ceiling < current priority means Ceiling violation
+ -- (otherwise the current priority == ceiling)
+ if Ceiling > 0 and then Ceiling < Sched.sched_curpriority then
+ Ceiling_Violation := True;
+ Result := pthread_mutex_unlock (L.WO'Access);
+ pragma Assert (Result = 0);
+ end if;
+ end if;
+ end Write_Lock;
+
+ procedure Write_Lock
+ (L : not null access RTS_Lock;
+ Global_Lock : Boolean := False)
+ is
+ Result : Interfaces.C.int;
+ begin
+ if not Single_Lock or else Global_Lock then
+ Result := pthread_mutex_lock (L);
+ pragma Assert (Result = 0);
+ end if;
+ end Write_Lock;
+
+ procedure Write_Lock (T : Task_Id) is
+ Result : Interfaces.C.int;
+ begin
+ if not Single_Lock then
+ Result := pthread_mutex_lock (T.Common.LL.L'Access);
+ pragma Assert (Result = 0);
+ end if;
+ end Write_Lock;
+
+ ---------------
+ -- Read_Lock --
+ ---------------
+
+ procedure Read_Lock
+ (L : not null access Lock; Ceiling_Violation : out Boolean) is
+ begin
+ Write_Lock (L, Ceiling_Violation);
+ end Read_Lock;
+
+ ------------
+ -- Unlock --
+ ------------
+
+ procedure Unlock (L : not null access Lock) is
+ Result : Interfaces.C.int;
+ begin
+ Result := pthread_mutex_unlock (L.WO'Access);
+ pragma Assert (Result = 0);
+ end Unlock;
+
+ procedure Unlock
+ (L : not null access RTS_Lock; Global_Lock : Boolean := False)
+ is
+ Result : Interfaces.C.int;
+ begin
+ if not Single_Lock or else Global_Lock then
+ Result := pthread_mutex_unlock (L);
+ pragma Assert (Result = 0);
+ end if;
+ end Unlock;
+
+ procedure Unlock (T : Task_Id) is
+ Result : Interfaces.C.int;
+ begin
+ if not Single_Lock then
+ Result := pthread_mutex_unlock (T.Common.LL.L'Access);
+ pragma Assert (Result = 0);
+ end if;
+ end Unlock;
+
+ -----------------
+ -- Set_Ceiling --
+ -----------------
+
+ procedure Set_Ceiling
+ (L : not null access Lock;
+ Prio : System.Any_Priority)
+ is
+ Result : Interfaces.C.int;
+ begin
+ Result := pthread_mutex_setprioceiling
+ (L.WO'Access, To_Target_Priority (Prio), null);
+ pragma Assert (Result = 0);
+ end Set_Ceiling;
+
+ -----------
+ -- Sleep --
+ -----------
+
+ procedure Sleep
+ (Self_ID : Task_Id;
+ Reason : System.Tasking.Task_States)
+ is
+ pragma Unreferenced (Reason);
+
+ Result : Interfaces.C.int;
+
+ begin
+ Result :=
+ pthread_cond_wait
+ (cond => Self_ID.Common.LL.CV'Access,
+ mutex => (if Single_Lock
+ then Single_RTS_Lock'Access
+ else Self_ID.Common.LL.L'Access));
+
+ -- EINTR is not considered a failure
+
+ pragma Assert (Result = 0 or else Result = EINTR);
+ end Sleep;
+
+ -----------------
+ -- Timed_Sleep --
+ -----------------
+
+ -- This is for use within the run-time system, so abort is
+ -- assumed to be already deferred, and the caller should be
+ -- holding its own ATCB lock.
+
+ procedure Timed_Sleep
+ (Self_ID : Task_Id;
+ Time : Duration;
+ Mode : ST.Delay_Modes;
+ Reason : Task_States;
+ Timedout : out Boolean;
+ Yielded : out Boolean) renames Monotonic.Timed_Sleep;
+
+ -----------------
+ -- Timed_Delay --
+ -----------------
+
+ -- This is for use in implementing delay statements, so we assume the
+ -- caller is abort-deferred but is holding no locks.
+
+ procedure Timed_Delay
+ (Self_ID : Task_Id;
+ Time : Duration;
+ Mode : ST.Delay_Modes) renames Monotonic.Timed_Delay;
+
+ ---------------------
+ -- Monotonic_Clock --
+ ---------------------
+
+ function Monotonic_Clock return Duration renames Monotonic.Monotonic_Clock;
+
+ -------------------
+ -- RT_Resolution --
+ -------------------
+
+ function RT_Resolution return Duration renames Monotonic.RT_Resolution;
+
+ ------------
+ -- Wakeup --
+ ------------
+
+ procedure Wakeup (T : Task_Id; Reason : System.Tasking.Task_States) is
+ pragma Unreferenced (Reason);
+ Result : Interfaces.C.int;
+ begin
+ Result := pthread_cond_signal (T.Common.LL.CV'Access);
+ pragma Assert (Result = 0);
+ end Wakeup;
+
+ -----------
+ -- Yield --
+ -----------
+
+ procedure Yield (Do_Yield : Boolean := True) is
+ Result : Interfaces.C.int;
+ pragma Unreferenced (Result);
+ begin
+ if Do_Yield then
+ Result := sched_yield;
+ end if;
+ end Yield;
+
+ ------------------
+ -- Set_Priority --
+ ------------------
+
+ procedure Set_Priority
+ (T : Task_Id;
+ Prio : System.Any_Priority;
+ Loss_Of_Inheritance : Boolean := False)
+ is
+ pragma Unreferenced (Loss_Of_Inheritance);
+ Result : Interfaces.C.int;
+ Old : constant System.Any_Priority := T.Common.Current_Priority;
+
+ begin
+ T.Common.Current_Priority := Prio;
+ Result := pthread_setschedprio
+ (T.Common.LL.Thread, To_Target_Priority (Prio));
+ pragma Assert (Result = 0);
+
+ if T.Common.LL.Thread = pthread_self
+ and then Old > Prio
+ then
+ -- When lowering the priority via a pthread_setschedprio, QNX ensures
+ -- that the running thread remains in the head of the FIFO for tne
+ -- new priority. Annex D expects the thread to be requeued so let's
+ -- yield to the other threads of the same priority.
+ Result := sched_yield;
+ pragma Assert (Result = 0);
+ end if;
+ end Set_Priority;
+
+ ------------------
+ -- Get_Priority --
+ ------------------
+
+ function Get_Priority (T : Task_Id) return System.Any_Priority is
+ begin
+ return T.Common.Current_Priority;
+ end Get_Priority;
+
+ ----------------
+ -- Enter_Task --
+ ----------------
+
+ procedure Enter_Task (Self_ID : Task_Id) is
+ begin
+ Self_ID.Common.LL.Thread := pthread_self;
+ Self_ID.Common.LL.LWP := lwp_self;
+
+ Specific.Set (Self_ID);
+
+ if Use_Alternate_Stack then
+ declare
+ Stack : aliased stack_t;
+ Result : Interfaces.C.int;
+ begin
+ Stack.ss_sp := Self_ID.Common.Task_Alternate_Stack;
+ Stack.ss_size := Alternate_Stack_Size;
+ Stack.ss_flags := 0;
+ Result := sigaltstack (Stack'Access, null);
+ pragma Assert (Result = 0);
+ end;
+ end if;
+ end Enter_Task;
+
+ -------------------
+ -- Is_Valid_Task --
+ -------------------
+
+ function Is_Valid_Task return Boolean renames Specific.Is_Valid_Task;
+
+ -----------------------------
+ -- Register_Foreign_Thread --
+ -----------------------------
+
+ function Register_Foreign_Thread return Task_Id is
+ begin
+ if Is_Valid_Task then
+ return Self;
+ else
+ return Register_Foreign_Thread (pthread_self);
+ end if;
+ end Register_Foreign_Thread;
+
+ --------------------
+ -- Initialize_TCB --
+ --------------------
+
+ procedure Initialize_TCB (Self_ID : Task_Id; Succeeded : out Boolean)
+ is
+ Result : Interfaces.C.int;
+ Cond_Attr : aliased pthread_condattr_t;
+
+ begin
+ -- Give the task a unique serial number
+
+ Self_ID.Serial_Number := Next_Serial_Number;
+ Next_Serial_Number := Next_Serial_Number + 1;
+ pragma Assert (Next_Serial_Number /= 0);
+
+ if not Single_Lock then
+ Result := Init_Mutex (Self_ID.Common.LL.L'Access, Any_Priority'Last);
+ pragma Assert (Result = 0);
+
+ if Result /= 0 then
+ Succeeded := False;
+ return;
+ end if;
+ end if;
+
+ Result := pthread_condattr_init (Cond_Attr'Access);
+ pragma Assert (Result = 0 or else Result = ENOMEM);
+
+ if Result = 0 then
+ Result := GNAT_pthread_condattr_setup (Cond_Attr'Access);
+ pragma Assert (Result = 0);
+
+ Result :=
+ pthread_cond_init
+ (Self_ID.Common.LL.CV'Access, Cond_Attr'Access);
+ pragma Assert (Result = 0 or else Result = ENOMEM);
+ end if;
+
+ if Result = 0 then
+ Succeeded := True;
+ else
+ if not Single_Lock then
+ Result := pthread_mutex_destroy (Self_ID.Common.LL.L'Access);
+ pragma Assert (Result = 0);
+ end if;
+
+ Succeeded := False;
+ end if;
+
+ Result := pthread_condattr_destroy (Cond_Attr'Access);
+ pragma Assert (Result = 0);
+ end Initialize_TCB;
+
+ -----------------
+ -- Create_Task --
+ -----------------
+
+ procedure Create_Task
+ (T : Task_Id;
+ Wrapper : System.Address;
+ Stack_Size : System.Parameters.Size_Type;
+ Priority : System.Any_Priority;
+ Succeeded : out Boolean)
+ is
+ Attributes : aliased pthread_attr_t;
+ Adjusted_Stack_Size : Interfaces.C.size_t;
+ Page_Size : constant Interfaces.C.size_t :=
+ Interfaces.C.size_t (Get_Page_Size);
+ Sched_Param : aliased struct_sched_param;
+ Result : Interfaces.C.int;
+
+ Priority_Specific_Policy : constant Character := Get_Policy (Priority);
+ -- Upper case first character of the policy name corresponding to the
+ -- task as set by a Priority_Specific_Dispatching pragma.
+
+ function Thread_Body_Access is new
+ Ada.Unchecked_Conversion (System.Address, Thread_Body);
+
+ begin
+ Adjusted_Stack_Size :=
+ Interfaces.C.size_t (Stack_Size + Alternate_Stack_Size);
+
+ if Stack_Base_Available then
+
+ -- If Stack Checking is supported then allocate 2 additional pages:
+
+ -- In the worst case, stack is allocated at something like
+ -- N * Get_Page_Size - epsilon, we need to add the size for 2 pages
+ -- to be sure the effective stack size is greater than what
+ -- has been asked.
+
+ Adjusted_Stack_Size := Adjusted_Stack_Size + 2 * Page_Size;
+ end if;
+
+ -- Round stack size as this is required by some OSes (Darwin)
+
+ Adjusted_Stack_Size := Adjusted_Stack_Size + Page_Size - 1;
+ Adjusted_Stack_Size :=
+ Adjusted_Stack_Size - Adjusted_Stack_Size mod Page_Size;
+
+ Result := pthread_attr_init (Attributes'Access);
+ pragma Assert (Result = 0 or else Result = ENOMEM);
+
+ if Result /= 0 then
+ Succeeded := False;
+ return;
+ end if;
+
+ Result :=
+ pthread_attr_setdetachstate
+ (Attributes'Access, PTHREAD_CREATE_DETACHED);
+ pragma Assert (Result = 0);
+
+ Result :=
+ pthread_attr_setstacksize
+ (Attributes'Access, Adjusted_Stack_Size);
+ pragma Assert (Result = 0);
+
+ -- Set thread priority
+ T.Common.Current_Priority := Priority;
+ Sched_Param.sched_priority := To_Target_Priority (Priority);
+
+ Result := pthread_attr_setinheritsched
+ (Attributes'Access, PTHREAD_EXPLICIT_SCHED);
+ pragma Assert (Result = 0);
+
+ Result := pthread_attr_setschedparam
+ (Attributes'Access, Sched_Param'Access);
+ pragma Assert (Result = 0);
+
+ if Time_Slice_Supported
+ and then (Dispatching_Policy = 'R'
+ or else Priority_Specific_Policy = 'R'
+ or else Time_Slice_Val > 0)
+ then
+ Result := pthread_attr_setschedpolicy
+ (Attributes'Access, SCHED_RR);
+
+ elsif Dispatching_Policy = 'F'
+ or else Priority_Specific_Policy = 'F'
+ or else Time_Slice_Val = 0
+ then
+ Result := pthread_attr_setschedpolicy
+ (Attributes'Access, SCHED_FIFO);
+
+ else
+ Result := pthread_attr_setschedpolicy
+ (Attributes'Access, SCHED_OTHER);
+ end if;
+
+ pragma Assert (Result = 0);
+
+ -- Since the initial signal mask of a thread is inherited from the
+ -- creator, and the Environment task has all its signals masked, we
+ -- do not need to manipulate caller's signal mask at this point.
+ -- All tasks in RTS will have All_Tasks_Mask initially.
+
+ -- Note: the use of Unrestricted_Access in the following call is needed
+ -- because otherwise we have an error of getting a access-to-volatile
+ -- value which points to a non-volatile object. But in this case it is
+ -- safe to do this, since we know we have no problems with aliasing and
+ -- Unrestricted_Access bypasses this check.
+
+ Result := pthread_create
+ (T.Common.LL.Thread'Unrestricted_Access,
+ Attributes'Access,
+ Thread_Body_Access (Wrapper),
+ To_Address (T));
+ pragma Assert (Result = 0 or else Result = EAGAIN);
+
+ Succeeded := Result = 0;
+
+ Result := pthread_attr_destroy (Attributes'Access);
+ pragma Assert (Result = 0);
+ end Create_Task;
+
+ ------------------
+ -- Finalize_TCB --
+ ------------------
+
+ procedure Finalize_TCB (T : Task_Id) is
+ Result : Interfaces.C.int;
+
+ begin
+ if not Single_Lock then
+ Result := pthread_mutex_destroy (T.Common.LL.L'Access);
+ pragma Assert (Result = 0);
+ end if;
+
+ Result := pthread_cond_destroy (T.Common.LL.CV'Access);
+ pragma Assert (Result = 0);
+
+ if T.Known_Tasks_Index /= -1 then
+ Known_Tasks (T.Known_Tasks_Index) := null;
+ end if;
+
+ ATCB_Allocation.Free_ATCB (T);
+ end Finalize_TCB;
+
+ ---------------
+ -- Exit_Task --
+ ---------------
+
+ procedure Exit_Task is
+ begin
+ -- Mark this task as unknown, so that if Self is called, it won't
+ -- return a dangling pointer.
+
+ Specific.Set (null);
+ end Exit_Task;
+
+ ----------------
+ -- Abort_Task --
+ ----------------
+
+ procedure Abort_Task (T : Task_Id) is
+ Result : Interfaces.C.int;
+ begin
+ if Abort_Handler_Installed then
+ Result :=
+ pthread_kill
+ (T.Common.LL.Thread,
+ Signal (System.Interrupt_Management.Abort_Task_Interrupt));
+ pragma Assert (Result = 0);
+ end if;
+ end Abort_Task;
+
+ ----------------
+ -- Initialize --
+ ----------------
+
+ procedure Initialize (S : in out Suspension_Object) is
+ Mutex_Attr : aliased pthread_mutexattr_t;
+ Cond_Attr : aliased pthread_condattr_t;
+ Result : Interfaces.C.int;
+
+ begin
+ -- Initialize internal state (always to False (RM D.10 (6)))
+
+ S.State := False;
+ S.Waiting := False;
+
+ -- Initialize internal mutex
+
+ Result := pthread_mutexattr_init (Mutex_Attr'Access);
+ pragma Assert (Result = 0 or else Result = ENOMEM);
+
+ if Result = ENOMEM then
+ raise Storage_Error;
+ end if;
+
+ Result := pthread_mutex_init (S.L'Access, Mutex_Attr'Access);
+ pragma Assert (Result = 0 or else Result = ENOMEM);
+
+ if Result = ENOMEM then
+ Result := pthread_mutexattr_destroy (Mutex_Attr'Access);
+ pragma Assert (Result = 0);
+
+ raise Storage_Error;
+ end if;
+
+ Result := pthread_mutexattr_destroy (Mutex_Attr'Access);
+ pragma Assert (Result = 0);
+
+ -- Initialize internal condition variable
+
+ Result := pthread_condattr_init (Cond_Attr'Access);
+ pragma Assert (Result = 0 or else Result = ENOMEM);
+
+ if Result /= 0 then
+ Result := pthread_mutex_destroy (S.L'Access);
+ pragma Assert (Result = 0);
+
+ -- Storage_Error is propagated as intended if the allocation of the
+ -- underlying OS entities fails.
+
+ raise Storage_Error;
+
+ else
+ Result := GNAT_pthread_condattr_setup (Cond_Attr'Access);
+ pragma Assert (Result = 0);
+ end if;
+
+ Result := pthread_cond_init (S.CV'Access, Cond_Attr'Access);
+ pragma Assert (Result = 0 or else Result = ENOMEM);
+
+ if Result /= 0 then
+ Result := pthread_mutex_destroy (S.L'Access);
+ pragma Assert (Result = 0);
+
+ Result := pthread_condattr_destroy (Cond_Attr'Access);
+ pragma Assert (Result = 0);
+
+ -- Storage_Error is propagated as intended if the allocation of the
+ -- underlying OS entities fails.
+
+ raise Storage_Error;
+ end if;
+
+ Result := pthread_condattr_destroy (Cond_Attr'Access);
+ pragma Assert (Result = 0);
+ end Initialize;
+
+ --------------
+ -- Finalize --
+ --------------
+
+ procedure Finalize (S : in out Suspension_Object) is
+ Result : Interfaces.C.int;
+
+ begin
+ -- Destroy internal mutex
+
+ Result := pthread_mutex_destroy (S.L'Access);
+ pragma Assert (Result = 0);
+
+ -- Destroy internal condition variable
+
+ Result := pthread_cond_destroy (S.CV'Access);
+ pragma Assert (Result = 0);
+ end Finalize;
+
+ -------------------
+ -- Current_State --
+ -------------------
+
+ function Current_State (S : Suspension_Object) return Boolean is
+ begin
+ -- We do not want to use lock on this read operation. State is marked
+ -- as Atomic so that we ensure that the value retrieved is correct.
+
+ return S.State;
+ end Current_State;
+
+ ---------------
+ -- Set_False --
+ ---------------
+
+ procedure Set_False (S : in out Suspension_Object) is
+ Result : Interfaces.C.int;
+
+ begin
+ SSL.Abort_Defer.all;
+
+ Result := pthread_mutex_lock (S.L'Access);
+ pragma Assert (Result = 0);
+
+ S.State := False;
+
+ Result := pthread_mutex_unlock (S.L'Access);
+ pragma Assert (Result = 0);
+
+ SSL.Abort_Undefer.all;
+ end Set_False;
+
+ --------------
+ -- Set_True --
+ --------------
+
+ procedure Set_True (S : in out Suspension_Object) is
+ Result : Interfaces.C.int;
+
+ begin
+ SSL.Abort_Defer.all;
+
+ Result := pthread_mutex_lock (S.L'Access);
+ pragma Assert (Result = 0);
+
+ -- If there is already a task waiting on this suspension object then
+ -- we resume it, leaving the state of the suspension object to False,
+ -- as it is specified in (RM D.10(9)). Otherwise, it just leaves
+ -- the state to True.
+
+ if S.Waiting then
+ S.Waiting := False;
+ S.State := False;
+
+ Result := pthread_cond_signal (S.CV'Access);
+ pragma Assert (Result = 0);
+
+ else
+ S.State := True;
+ end if;
+
+ Result := pthread_mutex_unlock (S.L'Access);
+ pragma Assert (Result = 0);
+
+ SSL.Abort_Undefer.all;
+ end Set_True;
+
+ ------------------------
+ -- Suspend_Until_True --
+ ------------------------
+
+ procedure Suspend_Until_True (S : in out Suspension_Object) is
+ Result : Interfaces.C.int;
+
+ begin
+ SSL.Abort_Defer.all;
+
+ Result := pthread_mutex_lock (S.L'Access);
+ pragma Assert (Result = 0);
+
+ if S.Waiting then
+
+ -- Program_Error must be raised upon calling Suspend_Until_True
+ -- if another task is already waiting on that suspension object
+ -- (RM D.10(10)).
+
+ Result := pthread_mutex_unlock (S.L'Access);
+ pragma Assert (Result = 0);
+
+ SSL.Abort_Undefer.all;
+
+ raise Program_Error;
+
+ else
+ -- Suspend the task if the state is False. Otherwise, the task
+ -- continues its execution, and the state of the suspension object
+ -- is set to False (ARM D.10 par. 9).
+
+ if S.State then
+ S.State := False;
+ else
+ S.Waiting := True;
+
+ loop
+ -- Loop in case pthread_cond_wait returns earlier than expected
+ -- (e.g. in case of EINTR caused by a signal).
+
+ Result := pthread_cond_wait (S.CV'Access, S.L'Access);
+ pragma Assert (Result = 0 or else Result = EINTR);
+
+ exit when not S.Waiting;
+ end loop;
+ end if;
+
+ Result := pthread_mutex_unlock (S.L'Access);
+ pragma Assert (Result = 0);
+
+ SSL.Abort_Undefer.all;
+ end if;
+ end Suspend_Until_True;
+
+ ----------------
+ -- Check_Exit --
+ ----------------
+
+ -- Dummy version
+
+ function Check_Exit (Self_ID : ST.Task_Id) return Boolean is
+ pragma Unreferenced (Self_ID);
+ begin
+ return True;
+ end Check_Exit;
+
+ --------------------
+ -- Check_No_Locks --
+ --------------------
+
+ function Check_No_Locks (Self_ID : ST.Task_Id) return Boolean is
+ pragma Unreferenced (Self_ID);
+ begin
+ return True;
+ end Check_No_Locks;
+
+ ----------------------
+ -- Environment_Task --
+ ----------------------
+
+ function Environment_Task return Task_Id is
+ begin
+ return Environment_Task_Id;
+ end Environment_Task;
+
+ --------------
+ -- Lock_RTS --
+ --------------
+
+ procedure Lock_RTS is
+ begin
+ Write_Lock (Single_RTS_Lock'Access, Global_Lock => True);
+ end Lock_RTS;
+
+ ----------------
+ -- Unlock_RTS --
+ ----------------
+
+ procedure Unlock_RTS is
+ begin
+ Unlock (Single_RTS_Lock'Access, Global_Lock => True);
+ end Unlock_RTS;
+
+ ------------------
+ -- Suspend_Task --
+ ------------------
+
+ function Suspend_Task
+ (T : ST.Task_Id;
+ Thread_Self : Thread_Id) return Boolean
+ is
+ pragma Unreferenced (T, Thread_Self);
+ begin
+ return False;
+ end Suspend_Task;
+
+ -----------------
+ -- Resume_Task --
+ -----------------
+
+ function Resume_Task
+ (T : ST.Task_Id;
+ Thread_Self : Thread_Id) return Boolean
+ is
+ pragma Unreferenced (T, Thread_Self);
+ begin
+ return False;
+ end Resume_Task;
+
+ --------------------
+ -- Stop_All_Tasks --
+ --------------------
+
+ procedure Stop_All_Tasks is
+ begin
+ null;
+ end Stop_All_Tasks;
+
+ ---------------
+ -- Stop_Task --
+ ---------------
+
+ function Stop_Task (T : ST.Task_Id) return Boolean is
+ pragma Unreferenced (T);
+ begin
+ return False;
+ end Stop_Task;
+
+ -------------------
+ -- Continue_Task --
+ -------------------
+
+ function Continue_Task (T : ST.Task_Id) return Boolean is
+ pragma Unreferenced (T);
+ begin
+ return False;
+ end Continue_Task;
+
+ ----------------
+ -- Initialize --
+ ----------------
+
+ procedure Initialize (Environment_Task : Task_Id) is
+ act : aliased struct_sigaction;
+ old_act : aliased struct_sigaction;
+ Tmp_Set : aliased sigset_t;
+ Result : Interfaces.C.int;
+
+ function State
+ (Int : System.Interrupt_Management.Interrupt_ID) return Character;
+ pragma Import (C, State, "__gnat_get_interrupt_state");
+ -- Get interrupt state. Defined in a-init.c
+ -- The input argument is the interrupt number,
+ -- and the result is one of the following:
+
+ Default : constant Character := 's';
+ -- 'n' this interrupt not set by any Interrupt_State pragma
+ -- 'u' Interrupt_State pragma set state to User
+ -- 'r' Interrupt_State pragma set state to Runtime
+ -- 's' Interrupt_State pragma set state to System (use "default"
+ -- system handler)
+
+ begin
+ Environment_Task_Id := Environment_Task;
+
+ Interrupt_Management.Initialize;
+
+ -- Prepare the set of signals that should unblocked in all tasks
+
+ Result := sigemptyset (Unblocked_Signal_Mask'Access);
+ pragma Assert (Result = 0);
+
+ for J in Interrupt_Management.Interrupt_ID loop
+ if System.Interrupt_Management.Keep_Unmasked (J) then
+ Result := sigaddset (Unblocked_Signal_Mask'Access, Signal (J));
+ pragma Assert (Result = 0);
+ end if;
+ end loop;
+
+ -- Initialize the lock used to synchronize chain of all ATCBs
+
+ Initialize_Lock (Single_RTS_Lock'Access, RTS_Lock_Level);
+
+ Specific.Initialize (Environment_Task);
+
+ if Use_Alternate_Stack then
+ Environment_Task.Common.Task_Alternate_Stack :=
+ Alternate_Stack'Address;
+ end if;
+
+ -- Make environment task known here because it doesn't go through
+ -- Activate_Tasks, which does it for all other tasks.
+
+ Known_Tasks (Known_Tasks'First) := Environment_Task;
+ Environment_Task.Known_Tasks_Index := Known_Tasks'First;
+
+ Enter_Task (Environment_Task);
+
+ if State
+ (System.Interrupt_Management.Abort_Task_Interrupt) /= Default
+ then
+ act.sa_flags := 0;
+ act.sa_handler := Abort_Handler'Address;
+
+ Result := sigemptyset (Tmp_Set'Access);
+ pragma Assert (Result = 0);
+ act.sa_mask := Tmp_Set;
+
+ Result :=
+ sigaction
+ (Signal (System.Interrupt_Management.Abort_Task_Interrupt),
+ act'Unchecked_Access,
+ old_act'Unchecked_Access);
+ pragma Assert (Result = 0);
+ Abort_Handler_Installed := True;
+ end if;
+ end Initialize;
+
+ -----------------------
+ -- Set_Task_Affinity --
+ -----------------------
+
+ procedure Set_Task_Affinity (T : ST.Task_Id) is
+ pragma Unreferenced (T);
+
+ begin
+ -- Setting task affinity is not supported by the underlying system
+
+ null;
+ end Set_Task_Affinity;
+
+end System.Task_Primitives.Operations;
diff --git a/gcc/ada/libgnarl/s-tasini.adb b/gcc/ada/libgnarl/s-tasini.adb
index 21404d0cd52..d83ed3cda14 100644
--- a/gcc/ada/libgnarl/s-tasini.adb
+++ b/gcc/ada/libgnarl/s-tasini.adb
@@ -325,8 +325,8 @@ package body System.Tasking.Initialization is
-- of the environment task.
Self_Id := Environment_Task;
- Self_Id.Master_of_Task := Environment_Task_Level;
- Self_Id.Master_Within := Self_Id.Master_of_Task + 1;
+ Self_Id.Master_Of_Task := Environment_Task_Level;
+ Self_Id.Master_Within := Self_Id.Master_Of_Task + 1;
for L in Self_Id.Entry_Calls'Range loop
Self_Id.Entry_Calls (L).Self := Self_Id;
diff --git a/gcc/ada/libgnarl/s-taskin.ads b/gcc/ada/libgnarl/s-taskin.ads
index 7c8b44b952c..fe725b8d731 100644
--- a/gcc/ada/libgnarl/s-taskin.ads
+++ b/gcc/ada/libgnarl/s-taskin.ads
@@ -982,7 +982,7 @@ package System.Tasking is
-- updated it itself using information from a suspended Caller, or
-- after Caller has updated it and awakened Self.
- Master_of_Task : Master_Level;
+ Master_Of_Task : Master_Level;
-- The task executing the master of this task, and the ID of this task's
-- master (unique only among masters currently active within Parent).
--
diff --git a/gcc/ada/libgnarl/s-tasren.adb b/gcc/ada/libgnarl/s-tasren.adb
index c1b35482c41..f180631d4f8 100644
--- a/gcc/ada/libgnarl/s-tasren.adb
+++ b/gcc/ada/libgnarl/s-tasren.adb
@@ -1138,7 +1138,7 @@ package body System.Tasking.Rendezvous is
Parent.Awake_Count := Parent.Awake_Count + 1;
if Parent.Common.State = Master_Completion_Sleep
- and then Acceptor.Master_of_Task = Parent.Master_Within
+ and then Acceptor.Master_Of_Task = Parent.Master_Within
then
Parent.Common.Wait_Count :=
Parent.Common.Wait_Count + 1;
diff --git a/gcc/ada/libgnarl/s-tassta.adb b/gcc/ada/libgnarl/s-tassta.adb
index 518a02c8b48..f0f1df4d8e7 100644
--- a/gcc/ada/libgnarl/s-tassta.adb
+++ b/gcc/ada/libgnarl/s-tassta.adb
@@ -151,7 +151,7 @@ package body System.Tasking.Stages is
-- duplicate master ids. For example, suppose we have three nested
-- task bodies T1,T2,T3. And suppose T1 also calls P which calls Q (and
-- both P and Q are task masters). Q will have the same master id as
- -- Master_of_Task of T3. Previous versions of this would abort T3 when
+ -- Master_Of_Task of T3. Previous versions of this would abort T3 when
-- Q calls Complete_Master, which was completely wrong.
begin
@@ -160,7 +160,7 @@ package body System.Tasking.Stages is
P := C.Common.Parent;
if P = Self_ID then
- if C.Master_of_Task = Self_ID.Master_Within then
+ if C.Master_Of_Task = Self_ID.Master_Within then
pragma Debug
(Debug.Trace (Self_ID, "Aborting", 'X', C));
Utilities.Abort_One_Task (Self_ID, C);
@@ -304,7 +304,7 @@ package body System.Tasking.Stages is
P.Alive_Count := P.Alive_Count + 1;
if P.Common.State = Master_Completion_Sleep and then
- C.Master_of_Task = P.Master_Within
+ C.Master_Of_Task = P.Master_Within
then
pragma Assert (Self_ID /= P);
P.Common.Wait_Count := P.Common.Wait_Count + 1;
@@ -498,7 +498,7 @@ package body System.Tasking.Stages is
-- has already awaited its dependent tasks. This raises Program_Error,
-- by 4.8(10.3/2). See AI-280. Ignore this check for foreign threads.
- if Self_ID.Master_of_Task /= Foreign_Task_Level
+ if Self_ID.Master_Of_Task /= Foreign_Task_Level
and then Master > Self_ID.Master_Within
then
raise Program_Error with
@@ -559,10 +559,10 @@ package body System.Tasking.Stages is
P := Self_ID;
- if P.Master_of_Task <= Independent_Task_Level then
+ if P.Master_Of_Task <= Independent_Task_Level then
P := Environment_Task;
else
- while P /= null and then P.Master_of_Task >= Master loop
+ while P /= null and then P.Master_Of_Task >= Master loop
P := P.Common.Parent;
end loop;
end if;
@@ -621,13 +621,13 @@ package body System.Tasking.Stages is
-- a regular library level task, otherwise the run-time will get
-- confused when waiting for these tasks to terminate.
- T.Master_of_Task := Library_Task_Level;
+ T.Master_Of_Task := Library_Task_Level;
else
- T.Master_of_Task := Master;
+ T.Master_Of_Task := Master;
end if;
- T.Master_Within := T.Master_of_Task + 1;
+ T.Master_Within := T.Master_Of_Task + 1;
for L in T.Entry_Calls'Range loop
T.Entry_Calls (L).Self := T;
@@ -710,7 +710,7 @@ package body System.Tasking.Stages is
pragma Debug
(Debug.Trace
- (Self_ID, "Created task in " & T.Master_of_Task'Img, 'C', T));
+ (Self_ID, "Created task in " & T.Master_Of_Task'Img, 'C', T));
end Create_Task;
--------------------
@@ -988,11 +988,11 @@ package body System.Tasking.Stages is
Initialization.Defer_Abort_Nestable (Self_ID);
- -- Loop through the From chain, changing their Master_of_Task fields,
+ -- Loop through the From chain, changing their Master_Of_Task fields,
-- and to find the end of the chain.
loop
- C.Master_of_Task := New_Master;
+ C.Master_Of_Task := New_Master;
exit when C.Common.Activation_Link = null;
C := C.Common.Activation_Link;
end loop;
@@ -1094,7 +1094,7 @@ package body System.Tasking.Stages is
pragma Assert (Self_ID.Deferral_Level = 1);
Debug.Master_Hook
- (Self_ID, Self_ID.Common.Parent, Self_ID.Master_of_Task);
+ (Self_ID, Self_ID.Common.Parent, Self_ID.Master_Of_Task);
if Use_Alternate_Stack then
Self_ID.Common.Task_Alternate_Stack := Task_Alternate_Stack'Address;
@@ -1307,7 +1307,7 @@ package body System.Tasking.Stages is
-- environment task), because they are implementation artifacts that
-- should be invisible to Ada programs.
- elsif Self_ID.Master_of_Task /= Independent_Task_Level then
+ elsif Self_ID.Master_Of_Task /= Independent_Task_Level then
-- Look for a fall-back handler following the master relationship
-- for the task. As specified in ARM C.7.3 par. 9/2, "the fall-back
@@ -1377,7 +1377,7 @@ package body System.Tasking.Stages is
procedure Terminate_Task (Self_ID : Task_Id) is
Environment_Task : constant Task_Id := STPO.Environment_Task;
- Master_of_Task : Integer;
+ Master_Of_Task : Integer;
Deallocate : Boolean;
begin
@@ -1397,12 +1397,12 @@ package body System.Tasking.Stages is
Lock_RTS;
end if;
- Master_of_Task := Self_ID.Master_of_Task;
+ Master_Of_Task := Self_ID.Master_Of_Task;
-- Check if the current task is an independent task If so, decrement
-- the Independent_Task_Count value.
- if Master_of_Task = Independent_Task_Level then
+ if Master_Of_Task = Independent_Task_Level then
if Single_Lock then
Utilities.Independent_Task_Count :=
Utilities.Independent_Task_Count - 1;
@@ -1439,7 +1439,7 @@ package body System.Tasking.Stages is
Free_Task (Self_ID);
end if;
- if Master_of_Task > 0 then
+ if Master_Of_Task > 0 then
STPO.Exit_Task;
end if;
end Terminate_Task;
@@ -1606,11 +1606,11 @@ package body System.Tasking.Stages is
C := All_Tasks_List;
while C /= null loop
- if C.Common.Activator = Self_ID and then C.Master_of_Task = CM then
+ if C.Common.Activator = Self_ID and then C.Master_Of_Task = CM then
return False;
end if;
- if C.Common.Parent = Self_ID and then C.Master_of_Task = CM then
+ if C.Common.Parent = Self_ID and then C.Master_Of_Task = CM then
Write_Lock (C);
if C.Common.State = Unactivated then
@@ -1662,9 +1662,9 @@ package body System.Tasking.Stages is
-- Terminate unactivated (never-to-be activated) tasks
- if C.Common.Activator = Self_ID and then C.Master_of_Task = CM then
+ if C.Common.Activator = Self_ID and then C.Master_Of_Task = CM then
- -- Usually, C.Common.Activator = Self_ID implies C.Master_of_Task
+ -- Usually, C.Common.Activator = Self_ID implies C.Master_Of_Task
-- = CM. The only case where C is pending activation by this
-- task, but the master of C is not CM is in Ada 2005, when C is
-- part of a return object of a build-in-place function.
@@ -1681,7 +1681,7 @@ package body System.Tasking.Stages is
-- Count it if directly dependent on this master
- if C.Common.Parent = Self_ID and then C.Master_of_Task = CM then
+ if C.Common.Parent = Self_ID and then C.Master_Of_Task = CM then
Write_Lock (C);
if C.Awake_Count /= 0 then
@@ -1781,7 +1781,7 @@ package body System.Tasking.Stages is
C := All_Tasks_List;
while C /= null loop
- if C.Common.Parent = Self_ID and then C.Master_of_Task = CM then
+ if C.Common.Parent = Self_ID and then C.Master_Of_Task = CM then
Write_Lock (C);
pragma Assert (C.Awake_Count = 0);
@@ -1840,7 +1840,7 @@ package body System.Tasking.Stages is
-- while the task calls Free_Task itself, in Terminate_Task.
if C.Common.Parent = Self_ID
- and then C.Master_of_Task >= CM
+ and then C.Master_Of_Task >= CM
and then not C.Free_On_Termination
then
if P /= null then
@@ -1912,7 +1912,7 @@ package body System.Tasking.Stages is
if (T.Common.Parent /= null
and then T.Common.Parent.Common.Parent /= null)
- or else T.Master_of_Task > Library_Task_Level
+ or else T.Master_Of_Task > Library_Task_Level
then
Initialization.Task_Lock (Self_ID);
@@ -1977,7 +1977,7 @@ package body System.Tasking.Stages is
pragma Assert (Self_ID = Self);
pragma Assert
(Self_ID.Master_Within in
- Self_ID.Master_of_Task + 1 .. Self_ID.Master_of_Task + 3);
+ Self_ID.Master_Of_Task .. Self_ID.Master_Of_Task + 3);
pragma Assert (Self_ID.Common.Wait_Count = 0);
pragma Assert (Self_ID.Open_Accepts = null);
pragma Assert (Self_ID.ATC_Nesting_Level = 1);
@@ -2007,10 +2007,10 @@ package body System.Tasking.Stages is
Unlock_RTS;
end if;
- -- If Self_ID.Master_Within = Self_ID.Master_of_Task + 2 we may have
+ -- If Self_ID.Master_Within = Self_ID.Master_Of_Task + 2 we may have
-- dependent tasks for which we need to wait. Otherwise we just exit.
- if Self_ID.Master_Within = Self_ID.Master_of_Task + 2 then
+ if Self_ID.Master_Within = Self_ID.Master_Of_Task + 2 then
Vulnerable_Complete_Master (Self_ID);
end if;
end Vulnerable_Complete_Task;
diff --git a/gcc/ada/libgnarl/s-tassta.ads b/gcc/ada/libgnarl/s-tassta.ads
index a1129a1085a..10803823c85 100644
--- a/gcc/ada/libgnarl/s-tassta.ads
+++ b/gcc/ada/libgnarl/s-tassta.ads
@@ -285,7 +285,7 @@ package System.Tasking.Stages is
(From, To : Activation_Chain_Access;
New_Master : Master_ID);
-- Compiler interface only. Do not call from within the RTS.
- -- Move all tasks on From list to To list, and change their Master_of_Task
+ -- Move all tasks on From list to To list, and change their Master_Of_Task
-- to be New_Master. This is used to implement build-in-place function
-- returns. Tasks that are part of the return object are initially placed
-- on an activation chain local to the return statement, and their master
diff --git a/gcc/ada/libgnarl/s-tasuti.adb b/gcc/ada/libgnarl/s-tasuti.adb
index 1a7e8cf9f10..d95bfa861e6 100644
--- a/gcc/ada/libgnarl/s-tasuti.adb
+++ b/gcc/ada/libgnarl/s-tasuti.adb
@@ -258,7 +258,7 @@ package body System.Tasking.Utilities is
pragma Assert (Parent = Environment_Task);
- Self_Id.Master_of_Task := Independent_Task_Level;
+ Self_Id.Master_Of_Task := Independent_Task_Level;
-- Update Independent_Task_Count that is needed for the GLADE
-- termination rule. See also pending update in
@@ -396,7 +396,7 @@ package body System.Tasking.Utilities is
end loop;
if P.Common.State = Master_Phase_2_Sleep
- and then C.Master_of_Task = P.Master_Within
+ and then C.Master_Of_Task = P.Master_Within
then
pragma Assert (P.Common.Wait_Count > 0);
P.Common.Wait_Count := P.Common.Wait_Count - 1;
@@ -462,7 +462,7 @@ package body System.Tasking.Utilities is
-- P has non-passive dependents
if P.Common.State = Master_Completion_Sleep
- and then C.Master_of_Task = P.Master_Within
+ and then C.Master_Of_Task = P.Master_Within
then
pragma Debug
(Debug.Trace
diff --git a/gcc/ada/libgnarl/s-tasuti.ads b/gcc/ada/libgnarl/s-tasuti.ads
index 351666645fb..81351d37d67 100644
--- a/gcc/ada/libgnarl/s-tasuti.ads
+++ b/gcc/ada/libgnarl/s-tasuti.ads
@@ -54,9 +54,9 @@ package System.Tasking.Utilities is
--
-- This is a dangerous operation, and should never be used on nested tasks
-- or tasks that depend on any objects that might be finalized earlier than
- -- the termination of the environment task. It is for internal use by the
- -- GNARL, to prevent such internal server tasks from preventing a partition
- -- from terminating.
+ -- the termination of the environment task. It is primarily for internal
+ -- use by the GNARL, to prevent such internal server tasks from preventing
+ -- a partition from terminating.
--
-- Also note that the run time assumes that the parent of an independent
-- task is the environment task. If this is not the case, Make_Independent
diff --git a/gcc/ada/libgnarl/s-tporft.adb b/gcc/ada/libgnarl/s-tporft.adb
index 56eda26e6a1..a1c68b33719 100644
--- a/gcc/ada/libgnarl/s-tporft.adb
+++ b/gcc/ada/libgnarl/s-tporft.adb
@@ -70,8 +70,8 @@ begin
Unlock_RTS;
pragma Assert (Succeeded);
- Self_Id.Master_of_Task := 0;
- Self_Id.Master_Within := Self_Id.Master_of_Task + 1;
+ Self_Id.Master_Of_Task := 0;
+ Self_Id.Master_Within := Self_Id.Master_Of_Task + 1;
for L in Self_Id.Entry_Calls'Range loop
Self_Id.Entry_Calls (L).Self := Self_Id;
diff --git a/gcc/ada/libgnat/a-tags.adb b/gcc/ada/libgnat/a-tags.adb
index f3c2c0e969c..40dd11e68b5 100644
--- a/gcc/ada/libgnat/a-tags.adb
+++ b/gcc/ada/libgnat/a-tags.adb
@@ -332,7 +332,7 @@ package body Ada.Tags is
function Base_Address (This : System.Address) return System.Address is
begin
- return This - Offset_To_Top (This);
+ return This + Offset_To_Top (This);
end Base_Address;
---------------
@@ -412,14 +412,14 @@ package body Ada.Tags is
-- Case of Static value of Offset_To_Top
if Iface_Table.Ifaces_Table (Id).Static_Offset_To_Top then
- Obj_Base := Obj_Base +
+ Obj_Base := Obj_Base -
Iface_Table.Ifaces_Table (Id).Offset_To_Top_Value;
-- Otherwise call the function generated by the expander to
-- provide the value.
else
- Obj_Base := Obj_Base +
+ Obj_Base := Obj_Base -
Iface_Table.Ifaces_Table (Id).Offset_To_Top_Func.all
(Obj_Base);
end if;
@@ -1046,7 +1046,7 @@ package body Ada.Tags is
-- Save the offset to top field in the secondary dispatch table
if Offset_Value /= 0 then
- Sec_Base := This + Offset_Value;
+ Sec_Base := This - Offset_Value;
Sec_DT := DT (To_Tag_Ptr (Sec_Base).all);
Sec_DT.Offset_To_Top := SSE.Storage_Offset'Last;
end if;
diff --git a/gcc/ada/libgnat/g-altive.ads b/gcc/ada/libgnat/g-altive.ads
index 1e247b30f5c..cc62ca8a540 100644
--- a/gcc/ada/libgnat/g-altive.ads
+++ b/gcc/ada/libgnat/g-altive.ads
@@ -668,18 +668,18 @@ end GNAT.Altivec;
-- type of A. The quad-word operations are only implemented by one
-- Altivec primitive operation. That means that, if QW_Operation is a
-- quad-word operation, we should have:
--- QW_Operation (To_Type_of_A (B)) = QW_Operation (A)
+-- QW_Operation (To_Type_Of_A (B)) = QW_Operation (A)
-- That is true iff:
--- To_Quad_Word (To_Type_of_A (B)) = To_Quad_Word (A)
+-- To_Quad_Word (To_Type_Of_A (B)) = To_Quad_Word (A)
-- As To_Quad_Word is a bijection. we have:
--- To_Type_of_A (B) = A
+-- To_Type_Of_A (B) = A
-- resp. any combination of A, B, C:
--- To_Type_of_A (C) = A
--- To_Type_of_B (A) = B
--- To_Type_of_C (B) = C
+-- To_Type_Of_A (C) = A
+-- To_Type_Of_B (A) = B
+-- To_Type_Of_C (B) = C
-- ...
-- Making sure that the properties described above are verified by the
diff --git a/gcc/ada/libgnat/s-rident.ads b/gcc/ada/libgnat/s-rident.ads
index cd88593656b..cde036aa89d 100644
--- a/gcc/ada/libgnat/s-rident.ads
+++ b/gcc/ada/libgnat/s-rident.ads
@@ -183,6 +183,7 @@ package System.Rident is
No_Elaboration_Code, -- GNAT
No_Obsolescent_Features, -- Ada 2005 AI-368
No_Wide_Characters, -- GNAT
+ Static_Dispatch_Tables, -- GNAT
SPARK_05, -- GNAT
-- The following cases require a parameter value
diff --git a/gcc/ada/libgnat/s-spsufi.adb b/gcc/ada/libgnat/s-spsufi.adb
index 11846c996f2..f1faab268d8 100644
--- a/gcc/ada/libgnat/s-spsufi.adb
+++ b/gcc/ada/libgnat/s-spsufi.adb
@@ -71,9 +71,9 @@ package body System.Storage_Pools.Subpools.Finalization is
-- requires that "The subpool no longer belongs to any pool" BEFORE
-- calling Deallocate_Subpool. The actual dispatching call required is:
--
- -- Deallocate_Subpool(Pool_of_Subpool(Subpool).all, Subpool);
+ -- Deallocate_Subpool(Pool_Of_Subpool(Subpool).all, Subpool);
--
- -- but that can't be taken literally, because Pool_of_Subpool will
+ -- but that can't be taken literally, because Pool_Of_Subpool will
-- return null.
declare
diff --git a/gcc/ada/libgnat/system-qnx-aarch64.ads b/gcc/ada/libgnat/system-qnx-aarch64.ads
new file mode 100644
index 00000000000..4cb1c1be091
--- /dev/null
+++ b/gcc/ada/libgnat/system-qnx-aarch64.ads
@@ -0,0 +1,157 @@
+------------------------------------------------------------------------------
+-- --
+-- GNAT RUN-TIME COMPONENTS --
+-- --
+-- S Y S T E M --
+-- --
+-- S p e c --
+-- (QNX/Aarch64 Version) --
+-- --
+-- Copyright (C) 1992-2017, Free Software Foundation, Inc. --
+-- --
+-- This specification is derived from the Ada Reference Manual for use with --
+-- GNAT. The copyright notice above, and the license provisions that follow --
+-- apply solely to the contents of the part following the private keyword. --
+-- --
+-- GNAT is free software; you can redistribute it and/or modify it under --
+-- terms of the GNU General Public License as published by the Free Soft- --
+-- ware Foundation; either version 3, or (at your option) any later ver- --
+-- sion. GNAT is distributed in the hope that it will be useful, but WITH- --
+-- OUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY --
+-- or FITNESS FOR A PARTICULAR PURPOSE. --
+-- --
+-- As a special exception under Section 7 of GPL version 3, you are granted --
+-- additional permissions described in the GCC Runtime Library Exception, --
+-- version 3.1, as published by the Free Software Foundation. --
+-- --
+-- You should have received a copy of the GNU General Public License and --
+-- a copy of the GCC Runtime Library Exception along with this program; --
+-- see the files COPYING3 and COPYING.RUNTIME respectively. If not, see --
+-- <http://www.gnu.org/licenses/>. --
+-- --
+-- GNAT was originally developed by the GNAT team at New York University. --
+-- Extensive contributions were provided by Ada Core Technologies Inc. --
+-- --
+------------------------------------------------------------------------------
+
+package System is
+ pragma Pure;
+ -- Note that we take advantage of the implementation permission to make
+ -- this unit Pure instead of Preelaborable; see RM 13.7.1(15). In Ada
+ -- 2005, this is Pure in any case (AI-362).
+
+ pragma No_Elaboration_Code_All;
+ -- Allow the use of that restriction in units that WITH this unit
+
+ type Name is (SYSTEM_NAME_GNAT);
+ System_Name : constant Name := SYSTEM_NAME_GNAT;
+
+ -- System-Dependent Named Numbers
+
+ Min_Int : constant := Long_Long_Integer'First;
+ Max_Int : constant := Long_Long_Integer'Last;
+
+ Max_Binary_Modulus : constant := 2 ** Long_Long_Integer'Size;
+ Max_Nonbinary_Modulus : constant := 2 ** Integer'Size - 1;
+
+ Max_Base_Digits : constant := Long_Long_Float'Digits;
+ Max_Digits : constant := Long_Long_Float'Digits;
+
+ Max_Mantissa : constant := 63;
+ Fine_Delta : constant := 2.0 ** (-Max_Mantissa);
+
+ Tick : constant := 0.000_001;
+
+ -- Storage-related Declarations
+
+ type Address is private;
+ pragma Preelaborable_Initialization (Address);
+ Null_Address : constant Address;
+
+ Storage_Unit : constant := 8;
+ Word_Size : constant := Standard'Word_Size;
+ Memory_Size : constant := 2 ** Long_Integer'Size;
+
+ -- Address comparison
+
+ function "<" (Left, Right : Address) return Boolean;
+ function "<=" (Left, Right : Address) return Boolean;
+ function ">" (Left, Right : Address) return Boolean;
+ function ">=" (Left, Right : Address) return Boolean;
+ function "=" (Left, Right : Address) return Boolean;
+
+ pragma Import (Intrinsic, "<");
+ pragma Import (Intrinsic, "<=");
+ pragma Import (Intrinsic, ">");
+ pragma Import (Intrinsic, ">=");
+ pragma Import (Intrinsic, "=");
+
+ -- Other System-Dependent Declarations
+
+ type Bit_Order is (High_Order_First, Low_Order_First);
+ Default_Bit_Order : constant Bit_Order :=
+ Bit_Order'Val (Standard'Default_Bit_Order);
+ pragma Warnings (Off, Default_Bit_Order); -- kill constant condition warning
+
+ -- Priority-related Declarations (RM D.1)
+
+ -- System priority is Ada priority + 1, so lies in the range 1 .. 63.
+ --
+ -- If the scheduling policy is SCHED_FIFO or SCHED_RR the runtime makes use
+ -- of the entire range provided by the system.
+ --
+ -- If the scheduling policy is SCHED_OTHER the only valid system priority
+ -- is 1 and other values are simply ignored.
+
+ Max_Priority : constant Positive := 61;
+ Max_Interrupt_Priority : constant Positive := 62;
+
+ subtype Any_Priority is Integer range 0 .. 62;
+ subtype Priority is Any_Priority range 0 .. 61;
+ subtype Interrupt_Priority is Any_Priority range 62 .. 62;
+
+ Default_Priority : constant Priority := 30;
+
+private
+
+ type Address is mod Memory_Size;
+ Null_Address : constant Address := 0;
+
+ --------------------------------------
+ -- System Implementation Parameters --
+ --------------------------------------
+
+ -- These parameters provide information about the target that is used
+ -- by the compiler. They are in the private part of System, where they
+ -- can be accessed using the special circuitry in the Targparm unit
+ -- whose source should be consulted for more detailed descriptions
+ -- of the individual switch values.
+
+ Backend_Divide_Checks : constant Boolean := False;
+ Backend_Overflow_Checks : constant Boolean := True;
+ Command_Line_Args : constant Boolean := True;
+ Configurable_Run_Time : constant Boolean := False;
+ Denorm : constant Boolean := True;
+ Duration_32_Bits : constant Boolean := False;
+ Exit_Status_Supported : constant Boolean := True;
+ Fractional_Fixed_Ops : constant Boolean := False;
+ Frontend_Layout : constant Boolean := False;
+ Machine_Overflows : constant Boolean := False;
+ Machine_Rounds : constant Boolean := True;
+ Preallocated_Stacks : constant Boolean := False;
+ Signed_Zeros : constant Boolean := True;
+ Stack_Check_Default : constant Boolean := False;
+ Stack_Check_Probes : constant Boolean := True;
+ Stack_Check_Limits : constant Boolean := False;
+ Support_Aggregates : constant Boolean := True;
+ Support_Atomic_Primitives : constant Boolean := True;
+ Support_Composite_Assign : constant Boolean := True;
+ Support_Composite_Compare : constant Boolean := True;
+ Support_Long_Shifts : constant Boolean := True;
+ Always_Compatible_Rep : constant Boolean := False;
+ Suppress_Standard_Library : constant Boolean := False;
+ Use_Ada_Main_Program_Name : constant Boolean := False;
+ Frontend_Exceptions : constant Boolean := False;
+ ZCX_By_Default : constant Boolean := True;
+
+end System;
diff --git a/gcc/ada/link.c b/gcc/ada/link.c
index ac3c372f611..99fa000e73b 100644
--- a/gcc/ada/link.c
+++ b/gcc/ada/link.c
@@ -105,6 +105,7 @@ const char *__gnat_default_libgcc_subdir = "lib";
#elif defined (__FreeBSD__) || defined (__DragonFly__) \
|| defined (__NetBSD__) || defined (__OpenBSD__)
+ || defined (__QNX__)
const char *__gnat_object_file_option = "-Wl,@";
const char *__gnat_run_path_option = "-Wl,-rpath,";
char __gnat_shared_libgnat_default = STATIC;
diff --git a/gcc/ada/namet.adb b/gcc/ada/namet.adb
index 2dcbe1a677c..04e92dab55c 100644
--- a/gcc/ada/namet.adb
+++ b/gcc/ada/namet.adb
@@ -159,8 +159,8 @@ package body Namet is
Append (Buf, Buf2.Chars (1 .. Buf2.Length));
end Append;
- procedure Append (Buf : in out Bounded_String; Id : Name_Id) is
- pragma Assert (Id in Name_Entries.First .. Name_Entries.Last);
+ procedure Append (Buf : in out Bounded_String; Id : Valid_Name_Id) is
+ pragma Assert (Is_Valid_Name (Id));
Index : constant Int := Name_Entries.Table (Id).Name_Chars_Index;
Len : constant Short := Name_Entries.Table (Id).Name_Len;
@@ -174,7 +174,10 @@ package body Namet is
-- Append_Decoded --
--------------------
- procedure Append_Decoded (Buf : in out Bounded_String; Id : Name_Id) is
+ procedure Append_Decoded
+ (Buf : in out Bounded_String;
+ Id : Valid_Name_Id)
+ is
C : Character;
P : Natural;
Temp : Bounded_String;
@@ -255,7 +258,15 @@ package body Namet is
-- simply use their normal representation.
else
- Insert_Character (Character'Val (Hex (2)));
+ declare
+ W2 : constant Word := Hex (2);
+ begin
+ pragma Assert (W2 <= 255);
+ -- Add assumption to facilitate static analysis. Note
+ -- that we cannot use pragma Assume for bootstrap
+ -- reasons.
+ Insert_Character (Character'Val (W2));
+ end;
end if;
-- WW (wide wide character insertion)
@@ -449,7 +460,7 @@ package body Namet is
procedure Append_Decoded_With_Brackets
(Buf : in out Bounded_String;
- Id : Name_Id)
+ Id : Valid_Name_Id)
is
P : Natural;
@@ -596,7 +607,10 @@ package body Namet is
-- Append_Unqualified --
------------------------
- procedure Append_Unqualified (Buf : in out Bounded_String; Id : Name_Id) is
+ procedure Append_Unqualified
+ (Buf : in out Bounded_String;
+ Id : Valid_Name_Id)
+ is
Temp : Bounded_String;
begin
Append (Temp, Id);
@@ -610,7 +624,7 @@ package body Namet is
procedure Append_Unqualified_Decoded
(Buf : in out Bounded_String;
- Id : Name_Id)
+ Id : Valid_Name_Id)
is
Temp : Bounded_String;
begin
@@ -747,6 +761,9 @@ package body Namet is
Write_Eol;
Write_Str ("Average number of probes for lookup = ");
+ pragma Assert (Nsyms /= 0);
+ -- Add assumption to facilitate static analysis. Here Nsyms cannot be
+ -- zero because many symbols are added to the table by default.
Probes := Probes / Nsyms;
Write_Int (Probes / 200);
Write_Char ('.');
@@ -773,7 +790,7 @@ package body Namet is
-- Get_Decoded_Name_String --
-----------------------------
- procedure Get_Decoded_Name_String (Id : Name_Id) is
+ procedure Get_Decoded_Name_String (Id : Valid_Name_Id) is
begin
Global_Name_Buffer.Length := 0;
Append_Decoded (Global_Name_Buffer, Id);
@@ -783,7 +800,7 @@ package body Namet is
-- Get_Decoded_Name_String_With_Brackets --
-------------------------------------------
- procedure Get_Decoded_Name_String_With_Brackets (Id : Name_Id) is
+ procedure Get_Decoded_Name_String_With_Brackets (Id : Valid_Name_Id) is
begin
Global_Name_Buffer.Length := 0;
Append_Decoded_With_Brackets (Global_Name_Buffer, Id);
@@ -794,7 +811,7 @@ package body Namet is
------------------------
procedure Get_Last_Two_Chars
- (N : Name_Id;
+ (N : Valid_Name_Id;
C1 : out Character;
C2 : out Character)
is
@@ -815,13 +832,13 @@ package body Namet is
-- Get_Name_String --
---------------------
- procedure Get_Name_String (Id : Name_Id) is
+ procedure Get_Name_String (Id : Valid_Name_Id) is
begin
Global_Name_Buffer.Length := 0;
Append (Global_Name_Buffer, Id);
end Get_Name_String;
- function Get_Name_String (Id : Name_Id) return String is
+ function Get_Name_String (Id : Valid_Name_Id) return String is
Buf : Bounded_String (Max_Length => Natural (Length_Of_Name (Id)));
begin
Append (Buf, Id);
@@ -832,7 +849,7 @@ package body Namet is
-- Get_Name_String_And_Append --
--------------------------------
- procedure Get_Name_String_And_Append (Id : Name_Id) is
+ procedure Get_Name_String_And_Append (Id : Valid_Name_Id) is
begin
Append (Global_Name_Buffer, Id);
end Get_Name_String_And_Append;
@@ -841,9 +858,9 @@ package body Namet is
-- Get_Name_Table_Boolean1 --
-----------------------------
- function Get_Name_Table_Boolean1 (Id : Name_Id) return Boolean is
+ function Get_Name_Table_Boolean1 (Id : Valid_Name_Id) return Boolean is
begin
- pragma Assert (Id in Name_Entries.First .. Name_Entries.Last);
+ pragma Assert (Is_Valid_Name (Id));
return Name_Entries.Table (Id).Boolean1_Info;
end Get_Name_Table_Boolean1;
@@ -851,9 +868,9 @@ package body Namet is
-- Get_Name_Table_Boolean2 --
-----------------------------
- function Get_Name_Table_Boolean2 (Id : Name_Id) return Boolean is
+ function Get_Name_Table_Boolean2 (Id : Valid_Name_Id) return Boolean is
begin
- pragma Assert (Id in Name_Entries.First .. Name_Entries.Last);
+ pragma Assert (Is_Valid_Name (Id));
return Name_Entries.Table (Id).Boolean2_Info;
end Get_Name_Table_Boolean2;
@@ -861,9 +878,9 @@ package body Namet is
-- Get_Name_Table_Boolean3 --
-----------------------------
- function Get_Name_Table_Boolean3 (Id : Name_Id) return Boolean is
+ function Get_Name_Table_Boolean3 (Id : Valid_Name_Id) return Boolean is
begin
- pragma Assert (Id in Name_Entries.First .. Name_Entries.Last);
+ pragma Assert (Is_Valid_Name (Id));
return Name_Entries.Table (Id).Boolean3_Info;
end Get_Name_Table_Boolean3;
@@ -871,9 +888,9 @@ package body Namet is
-- Get_Name_Table_Byte --
-------------------------
- function Get_Name_Table_Byte (Id : Name_Id) return Byte is
+ function Get_Name_Table_Byte (Id : Valid_Name_Id) return Byte is
begin
- pragma Assert (Id in Name_Entries.First .. Name_Entries.Last);
+ pragma Assert (Is_Valid_Name (Id));
return Name_Entries.Table (Id).Byte_Info;
end Get_Name_Table_Byte;
@@ -881,9 +898,9 @@ package body Namet is
-- Get_Name_Table_Int --
-------------------------
- function Get_Name_Table_Int (Id : Name_Id) return Int is
+ function Get_Name_Table_Int (Id : Valid_Name_Id) return Int is
begin
- pragma Assert (Id in Name_Entries.First .. Name_Entries.Last);
+ pragma Assert (Is_Valid_Name (Id));
return Name_Entries.Table (Id).Int_Info;
end Get_Name_Table_Int;
@@ -891,7 +908,7 @@ package body Namet is
-- Get_Unqualified_Decoded_Name_String --
-----------------------------------------
- procedure Get_Unqualified_Decoded_Name_String (Id : Name_Id) is
+ procedure Get_Unqualified_Decoded_Name_String (Id : Valid_Name_Id) is
begin
Global_Name_Buffer.Length := 0;
Append_Unqualified_Decoded (Global_Name_Buffer, Id);
@@ -901,7 +918,7 @@ package body Namet is
-- Get_Unqualified_Name_String --
---------------------------------
- procedure Get_Unqualified_Name_String (Id : Name_Id) is
+ procedure Get_Unqualified_Name_String (Id : Valid_Name_Id) is
begin
Global_Name_Buffer.Length := 0;
Append_Unqualified (Global_Name_Buffer, Id);
@@ -1032,15 +1049,11 @@ package body Namet is
return False;
end Is_Internal_Name;
- function Is_Internal_Name (Id : Name_Id) return Boolean is
+ function Is_Internal_Name (Id : Valid_Name_Id) return Boolean is
Buf : Bounded_String (Max_Length => Natural (Length_Of_Name (Id)));
begin
- if Id in Error_Name_Or_No_Name then
- return False;
- else
- Append (Buf, Id);
- return Is_Internal_Name (Buf);
- end if;
+ Append (Buf, Id);
+ return Is_Internal_Name (Buf);
end Is_Internal_Name;
function Is_Internal_Name return Boolean is
@@ -1066,10 +1079,10 @@ package body Namet is
-- Is_Operator_Name --
----------------------
- function Is_Operator_Name (Id : Name_Id) return Boolean is
+ function Is_Operator_Name (Id : Valid_Name_Id) return Boolean is
S : Int;
begin
- pragma Assert (Id in Name_Entries.First .. Name_Entries.Last);
+ pragma Assert (Is_Valid_Name (Id));
S := Name_Entries.Table (Id).Name_Chars_Index;
return Name_Chars.Table (S + 1) = 'O';
end Is_Operator_Name;
@@ -1087,7 +1100,7 @@ package body Namet is
-- Length_Of_Name --
--------------------
- function Length_Of_Name (Id : Name_Id) return Nat is
+ function Length_Of_Name (Id : Valid_Name_Id) return Nat is
begin
return Int (Name_Entries.Table (Id).Name_Len);
end Length_Of_Name;
@@ -1111,7 +1124,7 @@ package body Namet is
----------------
function Name_Enter
- (Buf : Bounded_String := Global_Name_Buffer) return Name_Id
+ (Buf : Bounded_String := Global_Name_Buffer) return Valid_Name_Id
is
begin
Name_Entries.Append
@@ -1136,7 +1149,7 @@ package body Namet is
return Name_Entries.Last;
end Name_Enter;
- function Name_Enter (S : String) return Name_Id is
+ function Name_Enter (S : String) return Valid_Name_Id is
Buf : Bounded_String (Max_Length => S'Length);
begin
Append (Buf, S);
@@ -1157,7 +1170,7 @@ package body Namet is
---------------
function Name_Find
- (Buf : Bounded_String := Global_Name_Buffer) return Name_Id
+ (Buf : Bounded_String := Global_Name_Buffer) return Valid_Name_Id
is
New_Id : Name_Id;
-- Id of entry in hash search, and value to be returned
@@ -1172,7 +1185,7 @@ package body Namet is
-- Quick handling for one character names
if Buf.Length = 1 then
- return Name_Id (First_Name_Id + Character'Pos (Buf.Chars (1)));
+ return Valid_Name_Id (First_Name_Id + Character'Pos (Buf.Chars (1)));
-- Otherwise search hash table for existing matching entry
@@ -1241,7 +1254,7 @@ package body Namet is
end if;
end Name_Find;
- function Name_Find (S : String) return Name_Id is
+ function Name_Find (S : String) return Valid_Name_Id is
Buf : Bounded_String (Max_Length => S'Length);
begin
Append (Buf, S);
@@ -1476,7 +1489,10 @@ package body Namet is
-- Name_Equals --
-----------------
- function Name_Equals (N1 : Name_Id; N2 : Name_Id) return Boolean is
+ function Name_Equals
+ (N1 : Valid_Name_Id;
+ N2 : Valid_Name_Id) return Boolean
+ is
begin
return N1 = N2 or else Get_Name_String (N1) = Get_Name_String (N2);
end Name_Equals;
@@ -1550,9 +1566,9 @@ package body Namet is
-- Set_Name_Table_Boolean1 --
-----------------------------
- procedure Set_Name_Table_Boolean1 (Id : Name_Id; Val : Boolean) is
+ procedure Set_Name_Table_Boolean1 (Id : Valid_Name_Id; Val : Boolean) is
begin
- pragma Assert (Id in Name_Entries.First .. Name_Entries.Last);
+ pragma Assert (Is_Valid_Name (Id));
Name_Entries.Table (Id).Boolean1_Info := Val;
end Set_Name_Table_Boolean1;
@@ -1560,9 +1576,9 @@ package body Namet is
-- Set_Name_Table_Boolean2 --
-----------------------------
- procedure Set_Name_Table_Boolean2 (Id : Name_Id; Val : Boolean) is
+ procedure Set_Name_Table_Boolean2 (Id : Valid_Name_Id; Val : Boolean) is
begin
- pragma Assert (Id in Name_Entries.First .. Name_Entries.Last);
+ pragma Assert (Is_Valid_Name (Id));
Name_Entries.Table (Id).Boolean2_Info := Val;
end Set_Name_Table_Boolean2;
@@ -1570,9 +1586,9 @@ package body Namet is
-- Set_Name_Table_Boolean3 --
-----------------------------
- procedure Set_Name_Table_Boolean3 (Id : Name_Id; Val : Boolean) is
+ procedure Set_Name_Table_Boolean3 (Id : Valid_Name_Id; Val : Boolean) is
begin
- pragma Assert (Id in Name_Entries.First .. Name_Entries.Last);
+ pragma Assert (Is_Valid_Name (Id));
Name_Entries.Table (Id).Boolean3_Info := Val;
end Set_Name_Table_Boolean3;
@@ -1580,9 +1596,9 @@ package body Namet is
-- Set_Name_Table_Byte --
-------------------------
- procedure Set_Name_Table_Byte (Id : Name_Id; Val : Byte) is
+ procedure Set_Name_Table_Byte (Id : Valid_Name_Id; Val : Byte) is
begin
- pragma Assert (Id in Name_Entries.First .. Name_Entries.Last);
+ pragma Assert (Is_Valid_Name (Id));
Name_Entries.Table (Id).Byte_Info := Val;
end Set_Name_Table_Byte;
@@ -1590,9 +1606,9 @@ package body Namet is
-- Set_Name_Table_Int --
-------------------------
- procedure Set_Name_Table_Int (Id : Name_Id; Val : Int) is
+ procedure Set_Name_Table_Int (Id : Valid_Name_Id; Val : Int) is
begin
- pragma Assert (Id in Name_Entries.First .. Name_Entries.Last);
+ pragma Assert (Is_Valid_Name (Id));
Name_Entries.Table (Id).Int_Info := Val;
end Set_Name_Table_Int;
@@ -1734,8 +1750,13 @@ package body Namet is
procedure wn (Id : Name_Id) is
begin
- if Id not in Name_Entries.First .. Name_Entries.Last then
- Write_Str ("<invalid name_id>");
+ if Is_Valid_Name (Id) then
+ declare
+ Buf : Bounded_String (Max_Length => Natural (Length_Of_Name (Id)));
+ begin
+ Append (Buf, Id);
+ Write_Str (Buf.Chars (1 .. Buf.Length));
+ end;
elsif Id = No_Name then
Write_Str ("<No_Name>");
@@ -1744,12 +1765,8 @@ package body Namet is
Write_Str ("<Error_Name>");
else
- declare
- Buf : Bounded_String (Max_Length => Natural (Length_Of_Name (Id)));
- begin
- Append (Buf, Id);
- Write_Str (Buf.Chars (1 .. Buf.Length));
- end;
+ Write_Str ("<invalid name_id>");
+ Write_Int (Int (Id));
end if;
Write_Eol;
@@ -1759,26 +1776,22 @@ package body Namet is
-- Write_Name --
----------------
- procedure Write_Name (Id : Name_Id) is
+ procedure Write_Name (Id : Valid_Name_Id) is
Buf : Bounded_String (Max_Length => Natural (Length_Of_Name (Id)));
begin
- if Id >= First_Name_Id then
- Append (Buf, Id);
- Write_Str (Buf.Chars (1 .. Buf.Length));
- end if;
+ Append (Buf, Id);
+ Write_Str (Buf.Chars (1 .. Buf.Length));
end Write_Name;
------------------------
-- Write_Name_Decoded --
------------------------
- procedure Write_Name_Decoded (Id : Name_Id) is
+ procedure Write_Name_Decoded (Id : Valid_Name_Id) is
Buf : Bounded_String;
begin
- if Id >= First_Name_Id then
- Append_Decoded (Buf, Id);
- Write_Str (Buf.Chars (1 .. Buf.Length));
- end if;
+ Append_Decoded (Buf, Id);
+ Write_Str (Buf.Chars (1 .. Buf.Length));
end Write_Name_Decoded;
-- Package initialization, initialize tables
diff --git a/gcc/ada/namet.ads b/gcc/ada/namet.ads
index 72ac8fabf30..b55d3361744 100644
--- a/gcc/ada/namet.ads
+++ b/gcc/ada/namet.ads
@@ -198,12 +198,12 @@ package Namet is
-- indicate that some kind of error was encountered in scanning out
-- the relevant name, so it does not have a representable label.
- subtype Error_Name_Or_No_Name is Name_Id range No_Name .. Error_Name;
- -- Used to test for either error name or no name
-
First_Name_Id : constant Name_Id := Names_Low_Bound + 2;
-- Subscript of first entry in names table
+ subtype Valid_Name_Id is Name_Id range First_Name_Id .. Name_Id'Last;
+ -- All but No_Name and Error_Name
+
------------------------------
-- Name_Id Membership Tests --
------------------------------
@@ -337,8 +337,8 @@ package Namet is
function "+" (Buf : Bounded_String) return String renames To_String;
function Name_Find
- (Buf : Bounded_String := Global_Name_Buffer) return Name_Id;
- function Name_Find (S : String) return Name_Id;
+ (Buf : Bounded_String := Global_Name_Buffer) return Valid_Name_Id;
+ function Name_Find (S : String) return Valid_Name_Id;
-- Name_Find searches the names table to see if the string has already been
-- stored. If so, the Id of the existing entry is returned. Otherwise a new
-- entry is created with its Name_Table_Int fields set to zero/false. Note
@@ -346,8 +346,8 @@ package Namet is
-- name string.
function Name_Enter
- (Buf : Bounded_String := Global_Name_Buffer) return Name_Id;
- function Name_Enter (S : String) return Name_Id;
+ (Buf : Bounded_String := Global_Name_Buffer) return Valid_Name_Id;
+ function Name_Enter (S : String) return Valid_Name_Id;
-- Name_Enter is similar to Name_Find. The difference is that it does not
-- search the table for an existing match, and also subsequent Name_Find
-- calls using the same name will not locate the entry created by this
@@ -358,10 +358,12 @@ package Namet is
-- names, since these are efficiently located without hashing by Name_Find
-- in any case.
- function Name_Equals (N1 : Name_Id; N2 : Name_Id) return Boolean;
+ function Name_Equals
+ (N1 : Valid_Name_Id;
+ N2 : Valid_Name_Id) return Boolean;
-- Return whether N1 and N2 denote the same character sequence
- function Get_Name_String (Id : Name_Id) return String;
+ function Get_Name_String (Id : Valid_Name_Id) return String;
-- Returns the characters of Id as a String. The lower bound is 1.
-- The following Append procedures ignore any characters that don't fit in
@@ -380,11 +382,11 @@ package Namet is
procedure Append (Buf : in out Bounded_String; Buf2 : Bounded_String);
-- Append Buf2 onto Buf
- procedure Append (Buf : in out Bounded_String; Id : Name_Id);
+ procedure Append (Buf : in out Bounded_String; Id : Valid_Name_Id);
-- Append the characters of Id onto Buf. It is an error to call this with
-- one of the special name Id values (No_Name or Error_Name).
- procedure Append_Decoded (Buf : in out Bounded_String; Id : Name_Id);
+ procedure Append_Decoded (Buf : in out Bounded_String; Id : Valid_Name_Id);
-- Same as Append, except that the result is decoded, so that upper half
-- characters and wide characters appear as originally found in the source
-- program text, operators have their source forms (special characters and
@@ -393,7 +395,7 @@ package Namet is
procedure Append_Decoded_With_Brackets
(Buf : in out Bounded_String;
- Id : Name_Id);
+ Id : Valid_Name_Id);
-- Same as Append_Decoded, except that the brackets notation (Uhh
-- replaced by ["hh"], Whhhh replaced by ["hhhh"], WWhhhhhhhh replaced by
-- ["hhhhhhhh"]) is used for all non-lower half characters, regardless of
@@ -403,7 +405,8 @@ package Namet is
-- requirement for a canonical representation not affected by the
-- character set options (e.g. in the binder generation of symbols).
- procedure Append_Unqualified (Buf : in out Bounded_String; Id : Name_Id);
+ procedure Append_Unqualified
+ (Buf : in out Bounded_String; Id : Valid_Name_Id);
-- Same as Append, except that qualification (as defined in unit
-- Exp_Dbug) is removed (including both preceding __ delimited names, and
-- also the suffixes used to indicate package body entities and to
@@ -415,7 +418,7 @@ package Namet is
procedure Append_Unqualified_Decoded
(Buf : in out Bounded_String;
- Id : Name_Id);
+ Id : Valid_Name_Id);
-- Same as Append_Unqualified, but decoded as for Append_Decoded
procedure Append_Encoded (Buf : in out Bounded_String; C : Char_Code);
@@ -443,40 +446,40 @@ package Namet is
function Is_Internal_Name (Buf : Bounded_String) return Boolean;
procedure Get_Last_Two_Chars
- (N : Name_Id;
+ (N : Valid_Name_Id;
C1 : out Character;
C2 : out Character);
-- Obtains last two characters of a name. C1 is last but one character and
-- C2 is last character. If name is less than two characters long then both
-- C1 and C2 are set to ASCII.NUL on return.
- function Get_Name_Table_Boolean1 (Id : Name_Id) return Boolean;
- function Get_Name_Table_Boolean2 (Id : Name_Id) return Boolean;
- function Get_Name_Table_Boolean3 (Id : Name_Id) return Boolean;
+ function Get_Name_Table_Boolean1 (Id : Valid_Name_Id) return Boolean;
+ function Get_Name_Table_Boolean2 (Id : Valid_Name_Id) return Boolean;
+ function Get_Name_Table_Boolean3 (Id : Valid_Name_Id) return Boolean;
-- Fetches the Boolean values associated with the given name
- function Get_Name_Table_Byte (Id : Name_Id) return Byte;
+ function Get_Name_Table_Byte (Id : Valid_Name_Id) return Byte;
pragma Inline (Get_Name_Table_Byte);
-- Fetches the Byte value associated with the given name
- function Get_Name_Table_Int (Id : Name_Id) return Int;
+ function Get_Name_Table_Int (Id : Valid_Name_Id) return Int;
pragma Inline (Get_Name_Table_Int);
-- Fetches the Int value associated with the given name
- procedure Set_Name_Table_Boolean1 (Id : Name_Id; Val : Boolean);
- procedure Set_Name_Table_Boolean2 (Id : Name_Id; Val : Boolean);
- procedure Set_Name_Table_Boolean3 (Id : Name_Id; Val : Boolean);
+ procedure Set_Name_Table_Boolean1 (Id : Valid_Name_Id; Val : Boolean);
+ procedure Set_Name_Table_Boolean2 (Id : Valid_Name_Id; Val : Boolean);
+ procedure Set_Name_Table_Boolean3 (Id : Valid_Name_Id; Val : Boolean);
-- Sets the Boolean value associated with the given name
- procedure Set_Name_Table_Byte (Id : Name_Id; Val : Byte);
+ procedure Set_Name_Table_Byte (Id : Valid_Name_Id; Val : Byte);
pragma Inline (Set_Name_Table_Byte);
-- Sets the Byte value associated with the given name
- procedure Set_Name_Table_Int (Id : Name_Id; Val : Int);
+ procedure Set_Name_Table_Int (Id : Valid_Name_Id; Val : Int);
pragma Inline (Set_Name_Table_Int);
-- Sets the Int value associated with the given name
- function Is_Internal_Name (Id : Name_Id) return Boolean;
+ function Is_Internal_Name (Id : Valid_Name_Id) return Boolean;
-- Returns True if the name is an internal name, i.e. contains a character
-- for which Is_OK_Internal_Letter is true, or if the name starts or ends
-- with an underscore.
@@ -500,7 +503,7 @@ package Namet is
-- set of reserved letters is O, Q, U, W) and also returns False for the
-- letter X, which is reserved for debug output (see Exp_Dbug).
- function Is_Operator_Name (Id : Name_Id) return Boolean;
+ function Is_Operator_Name (Id : Valid_Name_Id) return Boolean;
-- Returns True if name given is of the form of an operator (that is, it
-- starts with an upper case O).
@@ -508,7 +511,7 @@ package Namet is
-- True if Id is a valid name - points to a valid entry in the Name_Entries
-- table.
- function Length_Of_Name (Id : Name_Id) return Nat;
+ function Length_Of_Name (Id : Valid_Name_Id) return Nat;
pragma Inline (Length_Of_Name);
-- Returns length of given name in characters. This is the length of the
-- encoded name, as stored in the names table.
@@ -553,13 +556,13 @@ package Namet is
-- Writes out internal tables to current tree file using the relevant
-- Table.Tree_Write routines.
- procedure Write_Name (Id : Name_Id);
+ procedure Write_Name (Id : Valid_Name_Id);
-- Write_Name writes the characters of the specified name using the
-- standard output procedures in package Output. The name is written
-- in encoded form (i.e. including Uhh, Whhh, Qx, _op as they appear in
-- the name table). If Id is Error_Name, or No_Name, no text is output.
- procedure Write_Name_Decoded (Id : Name_Id);
+ procedure Write_Name_Decoded (Id : Valid_Name_Id);
-- Like Write_Name, except that the name written is the decoded name, as
-- described for Append_Decoded.
@@ -586,17 +589,17 @@ package Namet is
procedure Add_Str_To_Name_Buffer (S : String);
- procedure Get_Decoded_Name_String (Id : Name_Id);
+ procedure Get_Decoded_Name_String (Id : Valid_Name_Id);
- procedure Get_Decoded_Name_String_With_Brackets (Id : Name_Id);
+ procedure Get_Decoded_Name_String_With_Brackets (Id : Valid_Name_Id);
- procedure Get_Name_String (Id : Name_Id);
+ procedure Get_Name_String (Id : Valid_Name_Id);
- procedure Get_Name_String_And_Append (Id : Name_Id);
+ procedure Get_Name_String_And_Append (Id : Valid_Name_Id);
- procedure Get_Unqualified_Decoded_Name_String (Id : Name_Id);
+ procedure Get_Unqualified_Decoded_Name_String (Id : Valid_Name_Id);
- procedure Get_Unqualified_Name_String (Id : Name_Id);
+ procedure Get_Unqualified_Name_String (Id : Valid_Name_Id);
procedure Insert_Str_In_Name_Buffer (S : String; Index : Positive);
@@ -739,12 +742,12 @@ private
for Name_Entry'Size use 16 * 8;
-- This ensures that we did not leave out any fields
- -- This is the table that is referenced by Name_Id entries.
+ -- This is the table that is referenced by Valid_Name_Id entries.
-- It contains one entry for each unique name in the table.
package Name_Entries is new Table.Table (
Table_Component_Type => Name_Entry,
- Table_Index_Type => Name_Id'Base,
+ Table_Index_Type => Valid_Name_Id'Base,
Table_Low_Bound => First_Name_Id,
Table_Initial => Alloc.Names_Initial,
Table_Increment => Alloc.Names_Increment,
diff --git a/gcc/ada/opt.ads b/gcc/ada/opt.ads
index 96e2f3e2f92..94ed9533ac2 100644
--- a/gcc/ada/opt.ads
+++ b/gcc/ada/opt.ads
@@ -2148,17 +2148,7 @@ package Opt is
-- Other Global Flags --
------------------------
- Expander_Active : Boolean := False;
- -- A flag that indicates if expansion is active (True) or deactivated
- -- (False). When expansion is deactivated all calls to expander routines
- -- have no effect. Note that the initial setting of False is merely to
- -- prevent saving of an undefined value for an initial call to the
- -- Expander_Mode_Save_And_Set procedure. For more information on the use of
- -- this flag, see package Expander. Indeed this flag might more logically
- -- be in the spec of Expander, but it is referenced by Errout, and it
- -- really seems wrong for Errout to depend on Expander.
-
- Static_Dispatch_Tables : Boolean := True;
+ Building_Static_Dispatch_Tables : Boolean := True;
-- This flag indicates if the backend supports generation of statically
-- allocated dispatch tables. If it is True, then the front end will
-- generate static aggregates for dispatch tables that contain forward
@@ -2170,6 +2160,16 @@ package Opt is
-- behavior can be disabled using switch -gnatd.t which will set this flag
-- to False and revert to the previous dynamic behavior.
+ Expander_Active : Boolean := False;
+ -- A flag that indicates if expansion is active (True) or deactivated
+ -- (False). When expansion is deactivated all calls to expander routines
+ -- have no effect. Note that the initial setting of False is merely to
+ -- prevent saving of an undefined value for an initial call to the
+ -- Expander_Mode_Save_And_Set procedure. For more information on the use of
+ -- this flag, see package Expander. Indeed this flag might more logically
+ -- be in the spec of Expander, but it is referenced by Errout, and it
+ -- really seems wrong for Errout to depend on Expander.
+
-----------------------
-- Tree I/O Routines --
-----------------------
diff --git a/gcc/ada/par-ch3.adb b/gcc/ada/par-ch3.adb
index 54dd5621fd8..ddbf716ea6d 100644
--- a/gcc/ada/par-ch3.adb
+++ b/gcc/ada/par-ch3.adb
@@ -4314,6 +4314,8 @@ package body Ch3 is
Scan_State : Saved_Scan_State;
begin
+ Done := False;
+
if Style_Check then
Style.Check_Indentation;
end if;
@@ -4326,7 +4328,6 @@ package body Ch3 is
=>
Check_Bad_Layout;
Append (P_Subprogram (Pf_Decl_Gins_Pbod_Rnam_Stub_Pexp), Decls);
- Done := False;
when Tok_For =>
Check_Bad_Layout;
@@ -4350,12 +4351,10 @@ package body Ch3 is
Restore_Scan_State (Scan_State);
Append (P_Representation_Clause, Decls);
- Done := False;
when Tok_Generic =>
Check_Bad_Layout;
Append (P_Generic, Decls);
- Done := False;
when Tok_Identifier =>
Check_Bad_Layout;
@@ -4370,7 +4369,6 @@ package body Ch3 is
Token := Tok_Overriding;
Append (P_Subprogram (Pf_Decl_Gins_Pbod_Rnam_Stub_Pexp), Decls);
- Done := False;
-- Normal case, no overriding, or overriding followed by colon
@@ -4381,38 +4379,31 @@ package body Ch3 is
when Tok_Package =>
Check_Bad_Layout;
Append (P_Package (Pf_Decl_Gins_Pbod_Rnam_Stub_Pexp), Decls);
- Done := False;
when Tok_Pragma =>
Append (P_Pragma, Decls);
- Done := False;
when Tok_Protected =>
Check_Bad_Layout;
Scan; -- past PROTECTED
Append (P_Protected, Decls);
- Done := False;
when Tok_Subtype =>
Check_Bad_Layout;
Append (P_Subtype_Declaration, Decls);
- Done := False;
when Tok_Task =>
Check_Bad_Layout;
Scan; -- past TASK
Append (P_Task, Decls);
- Done := False;
when Tok_Type =>
Check_Bad_Layout;
Append (P_Type_Declaration, Decls);
- Done := False;
when Tok_Use =>
Check_Bad_Layout;
P_Use_Clause (Decls);
- Done := False;
when Tok_With =>
Check_Bad_Layout;
@@ -4439,8 +4430,6 @@ package body Ch3 is
-- a declarative list. After discarding the misplaced aspects
-- we can continue the scan.
- Done := False;
-
declare
Dummy_Node : constant Node_Id :=
New_Node (N_Package_Specification, Token_Ptr);
@@ -4533,8 +4522,6 @@ package body Ch3 is
End_Statements (Handled_Statement_Sequence (Body_Node));
end;
- Done := False;
-
else
Done := True;
end if;
@@ -4556,7 +4543,6 @@ package body Ch3 is
-- After discarding the misplaced aspects we can continue the
-- scan.
- Done := False;
else
Restore_Scan_State (Scan_State); -- to END
Done := True;
@@ -4671,7 +4657,6 @@ package body Ch3 is
exception
when Error_Resync =>
Resync_Past_Semicolon;
- Done := False;
end P_Declarative_Items;
----------------------------------
diff --git a/gcc/ada/par-ch6.adb b/gcc/ada/par-ch6.adb
index 83bb25118a4..ddcedcae130 100644
--- a/gcc/ada/par-ch6.adb
+++ b/gcc/ada/par-ch6.adb
@@ -336,6 +336,7 @@ package body Ch6 is
end if;
Scope.Table (Scope.Last).Labl := Name_Node;
+ Current_Node := Name_Node;
Ignore (Tok_Colon);
-- Deal with generic instantiation, the one case in which we do not
diff --git a/gcc/ada/par-ch7.adb b/gcc/ada/par-ch7.adb
index dd4bdb4b329..7ea2d0675d8 100644
--- a/gcc/ada/par-ch7.adb
+++ b/gcc/ada/par-ch7.adb
@@ -6,7 +6,7 @@
-- --
-- B o d y --
-- --
--- Copyright (C) 1992-2014, Free Software Foundation, Inc. --
+-- Copyright (C) 1992-2017, Free Software Foundation, Inc. --
-- --
-- GNAT is free software; you can redistribute it and/or modify it under --
-- terms of the GNU General Public License as published by the Free Soft- --
@@ -146,6 +146,7 @@ package body Ch7 is
Scope.Table (Scope.Last).Sloc := Token_Ptr;
Name_Node := P_Defining_Program_Unit_Name;
Scope.Table (Scope.Last).Labl := Name_Node;
+ Current_Node := Name_Node;
if Aspect_Specifications_Present then
Aspect_Sloc := Token_Ptr;
@@ -211,6 +212,7 @@ package body Ch7 is
Scope.Table (Scope.Last).Sloc := Token_Ptr;
Name_Node := P_Defining_Program_Unit_Name;
Scope.Table (Scope.Last).Labl := Name_Node;
+ Current_Node := Name_Node;
-- Case of renaming declaration
diff --git a/gcc/ada/par-ch9.adb b/gcc/ada/par-ch9.adb
index 9e4ac07426f..b5d6d2036a3 100644
--- a/gcc/ada/par-ch9.adb
+++ b/gcc/ada/par-ch9.adb
@@ -101,6 +101,7 @@ package body Ch9 is
Scan; -- past BODY
Name_Node := P_Defining_Identifier (C_Is);
Scope.Table (Scope.Last).Labl := Name_Node;
+ Current_Node := Name_Node;
if Token = Tok_Left_Paren then
Error_Msg_SC ("discriminant part not allowed in task body");
@@ -168,6 +169,7 @@ package body Ch9 is
Name_Node := P_Defining_Identifier;
Set_Defining_Identifier (Task_Node, Name_Node);
Scope.Table (Scope.Last).Labl := Name_Node;
+ Current_Node := Name_Node;
Set_Discriminant_Specifications
(Task_Node, P_Known_Discriminant_Part_Opt);
@@ -176,6 +178,7 @@ package body Ch9 is
Name_Node := P_Defining_Identifier (C_Is);
Set_Defining_Identifier (Task_Node, Name_Node);
Scope.Table (Scope.Last).Labl := Name_Node;
+ Current_Node := Name_Node;
if Token = Tok_Left_Paren then
Error_Msg_SC ("discriminant part not allowed for single task");
@@ -447,6 +450,7 @@ package body Ch9 is
Scan; -- past BODY
Name_Node := P_Defining_Identifier (C_Is);
Scope.Table (Scope.Last).Labl := Name_Node;
+ Current_Node := Name_Node;
if Token = Tok_Left_Paren then
Error_Msg_SC ("discriminant part not allowed in protected body");
@@ -501,6 +505,7 @@ package body Ch9 is
Name_Node := P_Defining_Identifier (C_Is);
Set_Defining_Identifier (Protected_Node, Name_Node);
Scope.Table (Scope.Last).Labl := Name_Node;
+ Current_Node := Name_Node;
Set_Discriminant_Specifications
(Protected_Node, P_Known_Discriminant_Part_Opt);
@@ -517,6 +522,7 @@ package body Ch9 is
end if;
Scope.Table (Scope.Last).Labl := Name_Node;
+ Current_Node := Name_Node;
end if;
P_Aspect_Specifications (Protected_Node, Semicolon => False);
@@ -1049,6 +1055,7 @@ package body Ch9 is
Accept_Node := New_Node (N_Accept_Statement, Token_Ptr);
Scan; -- past ACCEPT
Scope.Table (Scope.Last).Labl := Token_Node;
+ Current_Node := Token_Node;
Set_Entry_Direct_Name (Accept_Node, P_Identifier (C_Do));
@@ -1197,6 +1204,7 @@ package body Ch9 is
Name_Node := P_Defining_Identifier;
Set_Defining_Identifier (Entry_Node, Name_Node);
Scope.Table (Scope.Last).Labl := Name_Node;
+ Current_Node := Name_Node;
Formal_Part_Node := P_Entry_Body_Formal_Part;
Set_Entry_Body_Formal_Part (Entry_Node, Formal_Part_Node);
diff --git a/gcc/ada/par-endh.adb b/gcc/ada/par-endh.adb
index bbcbff92c13..c9f81d07fd3 100644
--- a/gcc/ada/par-endh.adb
+++ b/gcc/ada/par-endh.adb
@@ -6,7 +6,7 @@
-- --
-- B o d y --
-- --
--- Copyright (C) 1992-2016, Free Software Foundation, Inc. --
+-- Copyright (C) 1992-2017, Free Software Foundation, Inc. --
-- --
-- GNAT is free software; you can redistribute it and/or modify it under --
-- terms of the GNU General Public License as published by the Free Soft- --
diff --git a/gcc/ada/par-util.adb b/gcc/ada/par-util.adb
index ec9a916be0b..01b4670458b 100644
--- a/gcc/ada/par-util.adb
+++ b/gcc/ada/par-util.adb
@@ -6,7 +6,7 @@
-- --
-- B o d y --
-- --
--- Copyright (C) 1992-2013, Free Software Foundation, Inc. --
+-- Copyright (C) 1992-2017, Free Software Foundation, Inc. --
-- --
-- GNAT is free software; you can redistribute it and/or modify it under --
-- terms of the GNU General Public License as published by the Free Soft- --
@@ -667,6 +667,12 @@ package body Util is
pragma Assert (Scope.Last > 0);
Scope.Decrement_Last;
+ if Include_Subprogram_In_Messages
+ and then Scope.Table (Scope.Last).Labl /= Error
+ then
+ Current_Node := Scope.Table (Scope.Last).Labl;
+ end if;
+
if Debug_Flag_P then
Error_Msg_Uint_1 := UI_From_Int (Scope.Last);
Error_Msg_SC ("decrement scope stack ptr, new value = ^!");
diff --git a/gcc/ada/put_spark_xrefs.adb b/gcc/ada/put_spark_xrefs.adb
deleted file mode 100644
index a65fa8a9290..00000000000
--- a/gcc/ada/put_spark_xrefs.adb
+++ /dev/null
@@ -1,194 +0,0 @@
-------------------------------------------------------------------------------
--- --
--- GNAT COMPILER COMPONENTS --
--- --
--- P U T _ S P A R K _ X R E F S --
--- --
--- B o d y --
--- --
--- Copyright (C) 2011-2016, Free Software Foundation, Inc. --
--- --
--- GNAT is free software; you can redistribute it and/or modify it under --
--- terms of the GNU General Public License as published by the Free Soft- --
--- ware Foundation; either version 3, or (at your option) any later ver- --
--- sion. GNAT is distributed in the hope that it will be useful, but WITH- --
--- OUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY --
--- or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License --
--- for more details. You should have received a copy of the GNU General --
--- Public License distributed with GNAT; see file COPYING3. If not, go to --
--- http://www.gnu.org/licenses for a complete copy of the license. --
--- --
--- GNAT was originally developed by the GNAT team at New York University. --
--- Extensive contributions were provided by Ada Core Technologies Inc. --
--- --
-------------------------------------------------------------------------------
-
-with SPARK_Xrefs; use SPARK_Xrefs;
-
-procedure Put_SPARK_Xrefs is
-begin
- -- Loop through entries in SPARK_File_Table
-
- for J in 1 .. SPARK_File_Table.Last loop
- declare
- F : SPARK_File_Record renames SPARK_File_Table.Table (J);
-
- begin
- Write_Info_Initiate ('F');
- Write_Info_Char ('D');
- Write_Info_Char (' ');
- Write_Info_Nat (F.File_Num);
- Write_Info_Char (' ');
-
- Write_Info_Str (F.File_Name.all);
-
- -- If file is a subunit, print the file name for the unit
-
- if F.Unit_File_Name /= null then
- Write_Info_Str (" -> " & F.Unit_File_Name.all);
- end if;
-
- Write_Info_Terminate;
-
- -- Loop through scope entries for this file
-
- for J in F.From_Scope .. F.To_Scope loop
- declare
- S : SPARK_Scope_Record renames SPARK_Scope_Table.Table (J);
-
- begin
- Write_Info_Initiate ('F');
- Write_Info_Char ('S');
- Write_Info_Char (' ');
- Write_Info_Char ('.');
- Write_Info_Nat (S.Scope_Num);
- Write_Info_Char (' ');
- Write_Info_Nat (S.Line);
- Write_Info_Char (S.Stype);
- Write_Info_Nat (S.Col);
- Write_Info_Char (' ');
-
- pragma Assert (S.Scope_Name.all /= "");
-
- Write_Info_Str (S.Scope_Name.all);
-
- if S.Spec_File_Num /= 0 then
- Write_Info_Str (" -> ");
- Write_Info_Nat (S.Spec_File_Num);
- Write_Info_Char ('.');
- Write_Info_Nat (S.Spec_Scope_Num);
- end if;
-
- Write_Info_Terminate;
- end;
- end loop;
- end;
- end loop;
-
- -- Loop through entries in SPARK_File_Table
-
- for J in 1 .. SPARK_File_Table.Last loop
- declare
- F : SPARK_File_Record renames SPARK_File_Table.Table (J);
- File : Nat;
- Scope : Nat;
- Entity_Line : Nat;
- Entity_Col : Nat;
-
- begin
- -- Loop through scope entries for this file
-
- for K in F.From_Scope .. F.To_Scope loop
- Output_One_Scope : declare
- S : SPARK_Scope_Record renames SPARK_Scope_Table.Table (K);
-
- begin
- -- Write only non-empty tables
-
- if S.From_Xref <= S.To_Xref then
-
- Write_Info_Initiate ('F');
- Write_Info_Char ('X');
- Write_Info_Char (' ');
- Write_Info_Nat (F.File_Num);
- Write_Info_Char (' ');
-
- Write_Info_Str (F.File_Name.all);
-
- Write_Info_Char (' ');
- Write_Info_Char ('.');
- Write_Info_Nat (S.Scope_Num);
- Write_Info_Char (' ');
-
- Write_Info_Str (S.Scope_Name.all);
-
- -- Default value of (0,0) is used for the special __HEAP
- -- variable so use another default value.
-
- Entity_Line := 0;
- Entity_Col := 1;
-
- -- Loop through cross reference entries for this scope
-
- for X in S.From_Xref .. S.To_Xref loop
-
- Output_One_Xref : declare
- R : SPARK_Xref_Record renames
- SPARK_Xref_Table.Table (X);
-
- begin
- if R.Entity_Line /= Entity_Line
- or else R.Entity_Col /= Entity_Col
- then
- Write_Info_Terminate;
-
- Write_Info_Initiate ('F');
- Write_Info_Char (' ');
- Write_Info_Nat (R.Entity_Line);
- Write_Info_Char (R.Etype);
- Write_Info_Nat (R.Entity_Col);
- Write_Info_Char (' ');
-
- Write_Info_Str (R.Entity_Name.all);
-
- Entity_Line := R.Entity_Line;
- Entity_Col := R.Entity_Col;
- File := F.File_Num;
- Scope := S.Scope_Num;
- end if;
-
- if Write_Info_Col > 72 then
- Write_Info_Terminate;
- Write_Info_Initiate ('.');
- end if;
-
- Write_Info_Char (' ');
-
- if R.File_Num /= File then
- Write_Info_Nat (R.File_Num);
- Write_Info_Char ('|');
- File := R.File_Num;
- Scope := 0;
- end if;
-
- if R.Scope_Num /= Scope then
- Write_Info_Char ('.');
- Write_Info_Nat (R.Scope_Num);
- Write_Info_Char (':');
- Scope := R.Scope_Num;
- end if;
-
- Write_Info_Nat (R.Line);
- Write_Info_Char (R.Rtype);
- Write_Info_Nat (R.Col);
- end Output_One_Xref;
-
- end loop;
-
- Write_Info_Terminate;
- end if;
- end Output_One_Scope;
- end loop;
- end;
- end loop;
-end Put_SPARK_Xrefs;
diff --git a/gcc/ada/put_spark_xrefs.ads b/gcc/ada/put_spark_xrefs.ads
deleted file mode 100644
index fa4a4bc04e0..00000000000
--- a/gcc/ada/put_spark_xrefs.ads
+++ /dev/null
@@ -1,62 +0,0 @@
-------------------------------------------------------------------------------
--- --
--- GNAT COMPILER COMPONENTS --
--- --
--- P U T _ S P A R K _ X R E F S --
--- --
--- S p e c --
--- --
--- Copyright (C) 2011-2016, Free Software Foundation, Inc. --
--- --
--- GNAT is free software; you can redistribute it and/or modify it under --
--- terms of the GNU General Public License as published by the Free Soft- --
--- ware Foundation; either version 3, or (at your option) any later ver- --
--- sion. GNAT is distributed in the hope that it will be useful, but WITH- --
--- OUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY --
--- or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License --
--- for more details. You should have received a copy of the GNU General --
--- Public License distributed with GNAT; see file COPYING3. If not, go to --
--- http://www.gnu.org/licenses for a complete copy of the license. --
--- --
--- GNAT was originally developed by the GNAT team at New York University. --
--- Extensive contributions were provided by Ada Core Technologies Inc. --
--- --
-------------------------------------------------------------------------------
-
--- This package contains the function used to read SPARK cross-reference
--- information from the internal tables defined in package SPARK_Xrefs, and
--- output text information for the ALI file. The interface allows control over
--- the destination of the output, so that this routine can also be used for
--- debugging purposes.
-
-with Types; use Types;
-
-generic
- -- The following procedures are used to output text information. The
- -- destination of the text information is thus under control of the
- -- particular instantiation. In particular, this procedure is used to
- -- write output to the ALI file, and also for debugging output.
-
- with function Write_Info_Col return Positive is <>;
- -- Return the column in which the next character will be written
-
- with procedure Write_Info_Char (C : Character) is <>;
- -- Output one character
-
- with procedure Write_Info_Str (Val : String) is <>;
- -- Output string stored in string pointer
-
- with procedure Write_Info_Initiate (Key : Character) is <>;
- -- Initiate write of new line to output file, the parameter is the
- -- keyword character for the line.
-
- with procedure Write_Info_Nat (N : Nat) is <>;
- -- Write image of N to output file with no leading or trailing blanks
-
- with procedure Write_Info_Terminate is <>;
- -- Terminate current info line and output lines built in Info_Buffer
-
-procedure Put_SPARK_Xrefs;
--- Read information from SPARK tables (SPARK_Xrefs.SPARK_Xref_Table,
--- SPARK_Xrefs.SPARK_Scope_Table and SPARK_Xrefs.SPARK_File_Table) and output
--- corresponding information in ALI format using the Write_Info procedures.
diff --git a/gcc/ada/rtsfind.ads b/gcc/ada/rtsfind.ads
index c4d7d3c80c6..57b8897f2da 100644
--- a/gcc/ada/rtsfind.ads
+++ b/gcc/ada/rtsfind.ads
@@ -631,6 +631,7 @@ package Rtsfind is
RE_Get_Offset_Index, -- Ada.Tags
RE_Get_Prim_Op_Kind, -- Ada.Tags
RE_Get_Tagged_Kind, -- Ada.Tags
+ RE_HT_Link, -- Ada.Tags
RE_Idepth, -- Ada.Tags
RE_Interfaces_Array, -- Ada.Tags
RE_Interfaces_Table, -- Ada.Tags
@@ -1866,6 +1867,7 @@ package Rtsfind is
RE_Get_Offset_Index => Ada_Tags,
RE_Get_Prim_Op_Kind => Ada_Tags,
RE_Get_Tagged_Kind => Ada_Tags,
+ RE_HT_Link => Ada_Tags,
RE_Idepth => Ada_Tags,
RE_Interfaces_Array => Ada_Tags,
RE_Interfaces_Table => Ada_Tags,
diff --git a/gcc/ada/s-oscons-tmplt.c b/gcc/ada/s-oscons-tmplt.c
index 444ad6072d4..95eadfc8854 100644
--- a/gcc/ada/s-oscons-tmplt.c
+++ b/gcc/ada/s-oscons-tmplt.c
@@ -157,7 +157,8 @@ pragma Style_Checks ("M32766");
# include <_types.h>
#endif
-#if defined (__linux__) || defined (__ANDROID__) || defined (__rtems__)
+#if defined (__linux__) || defined (__ANDROID__) || defined (__QNX__) \
+ || defined (__rtems__)
# include <pthread.h>
# include <signal.h>
#endif
@@ -1191,7 +1192,7 @@ CND(MSG_WAITALL, "Wait for full reception")
#endif
CND(MSG_NOSIGNAL, "No SIGPIPE on send")
-#if defined (__linux__) || defined (__ANDROID__)
+#if defined (__linux__) || defined (__ANDROID__) || defined (__QNX__)
# define MSG_Forced_Flags "MSG_NOSIGNAL"
#else
# define MSG_Forced_Flags "0"
@@ -1361,7 +1362,7 @@ CND(SIZEOF_struct_hostent, "struct hostent")
#define SIZEOF_struct_servent (sizeof (struct servent))
CND(SIZEOF_struct_servent, "struct servent")
-#if defined (__linux__) || defined (__ANDROID__)
+#if defined (__linux__) || defined (__ANDROID__) || defined (__QNX__)
#define SIZEOF_sigset (sizeof (sigset_t))
CND(SIZEOF_sigset, "sigset")
#endif
@@ -1464,7 +1465,7 @@ CNS(CLOCK_RT_Ada, "")
#endif
#if defined (__APPLE__) || defined (__linux__) || defined (__ANDROID__) \
- || defined (__rtems__) || defined (DUMMY)
+ || defined (__QNX__) || defined (__rtems__) || defined (DUMMY)
/*
-- Sizes of pthread data types
diff --git a/gcc/ada/sem.adb b/gcc/ada/sem.adb
index aaa3ccb2e40..02c8fa244ed 100644
--- a/gcc/ada/sem.adb
+++ b/gcc/ada/sem.adb
@@ -612,10 +612,12 @@ package body Sem is
when N_With_Clause =>
Analyze_With_Clause (N);
- -- A call to analyze a call marker is ignored because the node does
- -- not have any static and run-time semantics.
+ -- A call to analyze a marker is ignored because the node does not
+ -- have any static and run-time semantics.
- when N_Call_Marker =>
+ when N_Call_Marker
+ | N_Variable_Reference_Marker
+ =>
null;
-- A call to analyze the Empty node is an error, but most likely it
diff --git a/gcc/ada/sem_aggr.adb b/gcc/ada/sem_aggr.adb
index 6c29b38b93a..7d6ae41c49e 100644
--- a/gcc/ada/sem_aggr.adb
+++ b/gcc/ada/sem_aggr.adb
@@ -418,6 +418,13 @@ package body Sem_Aggr is
-- array of characters is expected. This procedure simply rewrites the
-- string as an aggregate, prior to resolution.
+ ---------------------------------
+ -- Delta aggregate processing --
+ ---------------------------------
+
+ procedure Resolve_Delta_Array_Aggregate (N : Node_Id; Typ : Entity_Id);
+ procedure Resolve_Delta_Record_Aggregate (N : Node_Id; Typ : Entity_Id);
+
------------------------
-- Array_Aggr_Subtype --
------------------------
@@ -2758,10 +2765,196 @@ package body Sem_Aggr is
-----------------------------
procedure Resolve_Delta_Aggregate (N : Node_Id; Typ : Entity_Id) is
- Base : constant Node_Id := Expression (N);
+ Base : constant Node_Id := Expression (N);
+
+ begin
+ if not Is_Composite_Type (Typ) then
+ Error_Msg_N ("not a composite type", N);
+ end if;
+
+ Analyze_And_Resolve (Base, Typ);
+
+ if Is_Array_Type (Typ) then
+ Resolve_Delta_Array_Aggregate (N, Typ);
+ else
+ Resolve_Delta_Record_Aggregate (N, Typ);
+ end if;
+
+ Set_Etype (N, Typ);
+ end Resolve_Delta_Aggregate;
+
+ -----------------------------------
+ -- Resolve_Delta_Array_Aggregate --
+ -----------------------------------
+
+ procedure Resolve_Delta_Array_Aggregate (N : Node_Id; Typ : Entity_Id) is
Deltas : constant List_Id := Component_Associations (N);
+ Assoc : Node_Id;
+ Choice : Node_Id;
+ Index_Type : Entity_Id;
+
+ begin
+ Index_Type := Etype (First_Index (Typ));
+
+ Assoc := First (Deltas);
+ while Present (Assoc) loop
+ if Nkind (Assoc) = N_Iterated_Component_Association then
+ Choice := First (Choice_List (Assoc));
+ while Present (Choice) loop
+ if Nkind (Choice) = N_Others_Choice then
+ Error_Msg_N
+ ("others not allowed in delta aggregate", Choice);
+
+ else
+ Analyze_And_Resolve (Choice, Index_Type);
+ end if;
+
+ Next (Choice);
+ end loop;
+
+ declare
+ Id : constant Entity_Id := Defining_Identifier (Assoc);
+ Ent : constant Entity_Id :=
+ New_Internal_Entity
+ (E_Loop, Current_Scope, Sloc (Assoc), 'L');
+
+ begin
+ Set_Etype (Ent, Standard_Void_Type);
+ Set_Parent (Ent, Assoc);
+
+ if No (Scope (Id)) then
+ Enter_Name (Id);
+ Set_Etype (Id, Index_Type);
+ Set_Ekind (Id, E_Variable);
+ Set_Scope (Id, Ent);
+ end if;
+
+ Push_Scope (Ent);
+ Analyze_And_Resolve
+ (New_Copy_Tree (Expression (Assoc)), Component_Type (Typ));
+ End_Scope;
+ end;
+
+ else
+ Choice := First (Choice_List (Assoc));
+ while Present (Choice) loop
+ if Nkind (Choice) = N_Others_Choice then
+ Error_Msg_N
+ ("others not allowed in delta aggregate", Choice);
+
+ else
+ Analyze (Choice);
+
+ if Is_Entity_Name (Choice)
+ and then Is_Type (Entity (Choice))
+ then
+ -- Choice covers a range of values
+
+ if Base_Type (Entity (Choice)) /=
+ Base_Type (Index_Type)
+ then
+ Error_Msg_NE
+ ("choice does mat match index type of",
+ Choice, Typ);
+ end if;
+ else
+ Resolve (Choice, Index_Type);
+ end if;
+ end if;
+
+ Next (Choice);
+ end loop;
+
+ Analyze_And_Resolve (Expression (Assoc), Component_Type (Typ));
+ end if;
+
+ Next (Assoc);
+ end loop;
+ end Resolve_Delta_Array_Aggregate;
+
+ ------------------------------------
+ -- Resolve_Delta_Record_Aggregate --
+ ------------------------------------
+
+ procedure Resolve_Delta_Record_Aggregate (N : Node_Id; Typ : Entity_Id) is
+
+ -- Variables used to verify that discriminant-dependent components
+ -- appear in the same variant.
+
+ Comp_Ref : Entity_Id := Empty; -- init to avoid warning
+ Variant : Node_Id;
+
+ procedure Check_Variant (Id : Entity_Id);
+ -- If a given component of the delta aggregate appears in a variant
+ -- part, verify that it is within the same variant as that of previous
+ -- specified variant components of the delta.
+
function Get_Component_Type (Nam : Node_Id) return Entity_Id;
+ -- Locate component with a given name and return its type. If none found
+ -- report error.
+
+ function Nested_In (V1 : Node_Id; V2 : Node_Id) return Boolean;
+ -- Determine whether variant V1 is within variant V2
+
+ function Variant_Depth (N : Node_Id) return Integer;
+ -- Determine the distance of a variant to the enclosing type
+ -- declaration.
+
+ --------------------
+ -- Check_Variant --
+ --------------------
+
+ procedure Check_Variant (Id : Entity_Id) is
+ Comp : Entity_Id;
+ Comp_Variant : Node_Id;
+
+ begin
+ if not Has_Discriminants (Typ) then
+ return;
+ end if;
+
+ Comp := First_Entity (Typ);
+ while Present (Comp) loop
+ exit when Chars (Comp) = Chars (Id);
+ Next_Component (Comp);
+ end loop;
+
+ -- Find the variant, if any, whose component list includes the
+ -- component declaration.
+
+ Comp_Variant := Parent (Parent (List_Containing (Parent (Comp))));
+ if Nkind (Comp_Variant) = N_Variant then
+ if No (Variant) then
+ Variant := Comp_Variant;
+ Comp_Ref := Comp;
+
+ elsif Variant /= Comp_Variant then
+ declare
+ D1 : constant Integer := Variant_Depth (Variant);
+ D2 : constant Integer := Variant_Depth (Comp_Variant);
+
+ begin
+ if D1 = D2
+ or else
+ (D1 > D2 and then not Nested_In (Variant, Comp_Variant))
+ or else
+ (D2 > D1 and then not Nested_In (Comp_Variant, Variant))
+ then
+ pragma Assert (Present (Comp_Ref));
+ Error_Msg_Node_2 := Comp_Ref;
+ Error_Msg_NE
+ ("& and & appear in different variants", Id, Comp);
+
+ -- Otherwise retain the deeper variant for subsequent tests
+
+ elsif D2 > D1 then
+ Variant := Comp_Variant;
+ end if;
+ end;
+ end if;
+ end if;
+ end Check_Variant;
------------------------
-- Get_Component_Type --
@@ -2772,7 +2965,6 @@ package body Sem_Aggr is
begin
Comp := First_Entity (Typ);
-
while Present (Comp) loop
if Chars (Comp) = Chars (Nam) then
if Ekind (Comp) = E_Discriminant then
@@ -2789,113 +2981,76 @@ package body Sem_Aggr is
return Any_Type;
end Get_Component_Type;
- -- Local variables
-
- Assoc : Node_Id;
- Choice : Node_Id;
- Comp_Type : Entity_Id;
- Index_Type : Entity_Id;
+ ---------------
+ -- Nested_In --
+ ---------------
- -- Start of processing for Resolve_Delta_Aggregate
+ function Nested_In (V1, V2 : Node_Id) return Boolean is
+ Par : Node_Id;
- begin
- if not Is_Composite_Type (Typ) then
- Error_Msg_N ("not a composite type", N);
- end if;
+ begin
+ Par := Parent (V1);
+ while Nkind (Par) /= N_Full_Type_Declaration loop
+ if Par = V2 then
+ return True;
+ end if;
- Analyze_And_Resolve (Base, Typ);
+ Par := Parent (Par);
+ end loop;
- if Is_Array_Type (Typ) then
- Index_Type := Etype (First_Index (Typ));
- Assoc := First (Deltas);
- while Present (Assoc) loop
- if Nkind (Assoc) = N_Iterated_Component_Association then
- Choice := First (Choice_List (Assoc));
- while Present (Choice) loop
- if Nkind (Choice) = N_Others_Choice then
- Error_Msg_N
- ("others not allowed in delta aggregate", Choice);
+ return False;
+ end Nested_In;
- else
- Analyze_And_Resolve (Choice, Index_Type);
- end if;
+ -------------------
+ -- Variant_Depth --
+ -------------------
- Next (Choice);
- end loop;
+ function Variant_Depth (N : Node_Id) return Integer is
+ Depth : Integer;
+ Par : Node_Id;
- declare
- Id : constant Entity_Id := Defining_Identifier (Assoc);
- Ent : constant Entity_Id :=
- New_Internal_Entity
- (E_Loop, Current_Scope, Sloc (Assoc), 'L');
+ begin
+ Depth := 0;
+ Par := Parent (N);
+ while Nkind (Par) /= N_Full_Type_Declaration loop
+ Depth := Depth + 1;
+ Par := Parent (Par);
+ end loop;
- begin
- Set_Etype (Ent, Standard_Void_Type);
- Set_Parent (Ent, Assoc);
-
- if No (Scope (Id)) then
- Enter_Name (Id);
- Set_Etype (Id, Index_Type);
- Set_Ekind (Id, E_Variable);
- Set_Scope (Id, Ent);
- end if;
+ return Depth;
+ end Variant_Depth;
- Push_Scope (Ent);
- Analyze_And_Resolve
- (New_Copy_Tree (Expression (Assoc)), Component_Type (Typ));
- End_Scope;
- end;
+ -- Local variables
- else
- Choice := First (Choice_List (Assoc));
- while Present (Choice) loop
- if Nkind (Choice) = N_Others_Choice then
- Error_Msg_N
- ("others not allowed in delta aggregate", Choice);
+ Deltas : constant List_Id := Component_Associations (N);
- else
- Analyze (Choice);
- if Is_Entity_Name (Choice)
- and then Is_Type (Entity (Choice))
- then
- -- Choice covers a range of values.
- if Base_Type (Entity (Choice)) /=
- Base_Type (Index_Type)
- then
- Error_Msg_NE
- ("choice does mat match index type of",
- Choice, Typ);
- end if;
- else
- Resolve (Choice, Index_Type);
- end if;
- end if;
+ Assoc : Node_Id;
+ Choice : Node_Id;
+ Comp_Type : Entity_Id := Empty; -- init to avoid warning
- Next (Choice);
- end loop;
+ -- Start of processing for Resolve_Delta_Record_Aggregate
- Analyze_And_Resolve (Expression (Assoc), Component_Type (Typ));
- end if;
+ begin
+ Variant := Empty;
- Next (Assoc);
- end loop;
+ Assoc := First (Deltas);
+ while Present (Assoc) loop
+ Choice := First (Choice_List (Assoc));
+ while Present (Choice) loop
+ Comp_Type := Get_Component_Type (Choice);
- else
- Assoc := First (Deltas);
- while Present (Assoc) loop
- Choice := First (Choice_List (Assoc));
- while Present (Choice) loop
- Comp_Type := Get_Component_Type (Choice);
- Next (Choice);
- end loop;
+ if Comp_Type /= Any_Type then
+ Check_Variant (Choice);
+ end if;
- Analyze_And_Resolve (Expression (Assoc), Comp_Type);
- Next (Assoc);
+ Next (Choice);
end loop;
- end if;
- Set_Etype (N, Typ);
- end Resolve_Delta_Aggregate;
+ pragma Assert (Present (Comp_Type));
+ Analyze_And_Resolve (Expression (Assoc), Comp_Type);
+ Next (Assoc);
+ end loop;
+ end Resolve_Delta_Record_Aggregate;
---------------------------------
-- Resolve_Extension_Aggregate --
diff --git a/gcc/ada/sem_attr.adb b/gcc/ada/sem_attr.adb
index 5aef17df8ec..cc4e39c50d8 100644
--- a/gcc/ada/sem_attr.adb
+++ b/gcc/ada/sem_attr.adb
@@ -231,10 +231,10 @@ package body Sem_Attr is
E1 : Node_Id;
E2 : Node_Id;
- P_Type : Entity_Id;
+ P_Type : Entity_Id := Empty;
-- Type of prefix after analysis
- P_Base_Type : Entity_Id;
+ P_Base_Type : Entity_Id := Empty;
-- Base type of prefix after analysis
-----------------------
@@ -419,7 +419,7 @@ package body Sem_Attr is
-- required error messages.
procedure Error_Attr_P (Msg : String);
- pragma No_Return (Error_Attr);
+ pragma No_Return (Error_Attr_P);
-- Like Error_Attr, but error is posted at the start of the prefix
procedure Legal_Formal_Attribute;
@@ -446,7 +446,9 @@ package body Sem_Attr is
-- node in the aspect case).
procedure Unexpected_Argument (En : Node_Id);
- -- Signal unexpected attribute argument (En is the argument)
+ pragma No_Return (Unexpected_Argument);
+ -- Signal unexpected attribute argument (En is the argument), and then
+ -- raises Bad_Attribute to avoid any further semantic processing.
procedure Validate_Non_Static_Attribute_Function_Call;
-- Called when processing an attribute that is a function call to a
@@ -1108,8 +1110,10 @@ package body Sem_Attr is
-- node Nod is within enclosing node Encl_Nod.
procedure Placement_Error;
+ pragma No_Return (Placement_Error);
-- Emit a general error when the attributes does not appear in a
- -- postcondition-like aspect or pragma.
+ -- postcondition-like aspect or pragma, and then raises Bad_Attribute
+ -- to avoid any further semantic processing.
------------------------------
-- Check_Placement_In_Check --
diff --git a/gcc/ada/sem_ch12.adb b/gcc/ada/sem_ch12.adb
index ac5035fd1bc..23f9ca7c223 100644
--- a/gcc/ada/sem_ch12.adb
+++ b/gcc/ada/sem_ch12.adb
@@ -3466,9 +3466,9 @@ package body Sem_Ch12 is
------------------------------------------
procedure Analyze_Generic_Package_Declaration (N : Node_Id) is
- Loc : constant Source_Ptr := Sloc (N);
- Decls : constant List_Id :=
- Visible_Declarations (Specification (N));
+ Decls : constant List_Id := Visible_Declarations (Specification (N));
+ Loc : constant Source_Ptr := Sloc (N);
+
Decl : Node_Id;
Id : Entity_Id;
New_N : Node_Id;
@@ -3492,9 +3492,20 @@ package body Sem_Ch12 is
Name =>
Make_Identifier (Loc, Chars (Defining_Entity (N))));
+ -- The declaration is inserted before other declarations, but before
+ -- pragmas that may be library-unit pragmas and must appear before other
+ -- declarations. The pragma Compile_Time_Error is not in this class, and
+ -- may contain an expression that includes such a qualified name, so the
+ -- renaming declaration must appear before it.
+
+ -- Are there other pragmas that require this special handling ???
+
if Present (Decls) then
Decl := First (Decls);
- while Present (Decl) and then Nkind (Decl) = N_Pragma loop
+ while Present (Decl)
+ and then Nkind (Decl) = N_Pragma
+ and then Get_Pragma_Id (Decl) /= Pragma_Compile_Time_Error
+ loop
Next (Decl);
end loop;
@@ -4761,7 +4772,7 @@ package body Sem_Ch12 is
Use_Clauses : array (1 .. Scope_Stack_Depth) of Node_Id;
Curr_Scope : Entity_Id := Empty;
- List : Elist_Id;
+ List : Elist_Id := No_Elist; -- init to avoid warning
N_Instances : Nat := 0;
Num_Inner : Nat := 0;
Num_Scopes : Nat := 0;
@@ -5130,13 +5141,14 @@ package body Sem_Ch12 is
is
Loc : constant Source_Ptr := Sloc (N);
Gen_Id : constant Node_Id := Name (N);
+ Errs : constant Nat := Serious_Errors_Detected;
Anon_Id : constant Entity_Id :=
Make_Defining_Identifier (Sloc (Defining_Entity (N)),
Chars => New_External_Name
(Chars (Defining_Entity (N)), 'R'));
- Act_Decl_Id : Entity_Id;
+ Act_Decl_Id : Entity_Id := Empty; -- init to avoid warning
Act_Decl : Node_Id;
Act_Spec : Node_Id;
Act_Tree : Node_Id;
@@ -5723,7 +5735,9 @@ package body Sem_Ch12 is
end if;
<<Leave>>
- if Has_Aspects (N) then
+ -- Analyze aspects in declaration if no errors appear in the instance.
+
+ if Has_Aspects (N) and then Serious_Errors_Detected = Errs then
Analyze_Aspect_Specifications (N, Act_Decl_Id);
end if;
@@ -5895,8 +5909,7 @@ package body Sem_Ch12 is
Present (Next_Formal (First_Formal (Formal_Subp)));
Decl : Node_Id;
- Expr : Node_Id;
- pragma Warnings (Off, Expr);
+ Expr : Node_Id := Empty;
F1, F2 : Entity_Id;
Func : Entity_Id;
Op_Name : Name_Id;
diff --git a/gcc/ada/sem_ch13.adb b/gcc/ada/sem_ch13.adb
index 564ff0dfc0a..83d31081fac 100644
--- a/gcc/ada/sem_ch13.adb
+++ b/gcc/ada/sem_ch13.adb
@@ -1360,6 +1360,8 @@ package body Sem_Ch13 is
-----------------------------------
procedure Analyze_Aspect_Specifications (N : Node_Id; E : Entity_Id) is
+ pragma Assert (Present (E));
+
procedure Decorate (Asp : Node_Id; Prag : Node_Id);
-- Establish linkages between an aspect and its corresponding pragma
@@ -1578,6 +1580,7 @@ package body Sem_Ch13 is
Ent : Node_Id;
L : constant List_Id := Aspect_Specifications (N);
+ pragma Assert (Present (L));
Ins_Node : Node_Id := N;
-- Insert pragmas/attribute definition clause after this node when no
@@ -1605,8 +1608,6 @@ package body Sem_Ch13 is
-- of visibility for the expression analysis. Thus, we just insert
-- the pragma after the node N.
- pragma Assert (Present (L));
-
-- Loop through aspects
Aspect := First (L);
@@ -1906,9 +1907,6 @@ package body Sem_Ch13 is
-----------------------------------------
procedure Analyze_Aspect_Implicit_Dereference is
- Disc : Entity_Id;
- Parent_Disc : Entity_Id;
-
begin
if not Is_Type (E) or else not Has_Discriminants (E) then
Error_Msg_N
@@ -1924,45 +1922,56 @@ package body Sem_Ch13 is
-- Missing synchronized types???
- Disc := First_Discriminant (E);
- while Present (Disc) loop
- if Chars (Expr) = Chars (Disc)
- and then Ekind_In (Etype (Disc),
- E_Anonymous_Access_Subprogram_Type,
- E_Anonymous_Access_Type)
- then
- Set_Has_Implicit_Dereference (E);
- Set_Has_Implicit_Dereference (Disc);
- exit;
- end if;
+ declare
+ Disc : Entity_Id := First_Discriminant (E);
+ begin
+ while Present (Disc) loop
+ if Chars (Expr) = Chars (Disc)
+ and then Ekind_In
+ (Etype (Disc),
+ E_Anonymous_Access_Subprogram_Type,
+ E_Anonymous_Access_Type)
+ then
+ Set_Has_Implicit_Dereference (E);
+ Set_Has_Implicit_Dereference (Disc);
+ exit;
+ end if;
- Next_Discriminant (Disc);
- end loop;
+ Next_Discriminant (Disc);
+ end loop;
- -- Error if no proper access discriminant
+ -- Error if no proper access discriminant
- if No (Disc) then
- Error_Msg_NE ("not an access discriminant of&", Expr, E);
- return;
- end if;
- end if;
+ if Present (Disc) then
+ -- For a type extension, check whether parent has
+ -- a reference discriminant, to verify that use is
+ -- proper.
- -- For a type extension, check whether parent has a
- -- reference discriminant, to verify that use is proper.
-
- if Is_Derived_Type (E)
- and then Has_Discriminants (Etype (E))
- then
- Parent_Disc := Get_Reference_Discriminant (Etype (E));
+ if Is_Derived_Type (E)
+ and then Has_Discriminants (Etype (E))
+ then
+ declare
+ Parent_Disc : constant Entity_Id :=
+ Get_Reference_Discriminant (Etype (E));
+ begin
+ if Present (Parent_Disc)
+ and then Corresponding_Discriminant (Disc) /=
+ Parent_Disc
+ then
+ Error_Msg_N
+ ("reference discriminant does not match "
+ & "discriminant of parent type", Expr);
+ end if;
+ end;
+ end if;
- if Present (Parent_Disc)
- and then Corresponding_Discriminant (Disc) /= Parent_Disc
- then
- Error_Msg_N
- ("reference discriminant does not match discriminant "
- & "of parent type", Expr);
- end if;
+ else
+ Error_Msg_NE
+ ("not an access discriminant of&", Expr, E);
+ end if;
+ end;
end if;
+
end Analyze_Aspect_Implicit_Dereference;
-----------------------
@@ -6529,7 +6538,7 @@ package body Sem_Ch13 is
Max : Uint;
-- Minimum and maximum values of entries
- Max_Node : Node_Id;
+ Max_Node : Node_Id := Empty; -- init to avoid warning
-- Pointer to node for literal providing max value
begin
@@ -8384,7 +8393,7 @@ package body Sem_Ch13 is
-- This is the expression for the result of the function. It is
-- is build by connecting the component predicates with AND THEN.
- Expr_M : Node_Id;
+ Expr_M : Node_Id := Empty; -- init to avoid warning
-- This is the corresponding return expression for the Predicate_M
-- function. It differs in that raise expressions are marked for
-- special expansion (see Process_REs).
@@ -9925,7 +9934,7 @@ package body Sem_Ch13 is
-- this tagged type and the parent component. Tagged_Parent will point
-- to this parent type. For all other cases, Tagged_Parent is Empty.
- Parent_Last_Bit : Uint;
+ Parent_Last_Bit : Uint := No_Uint; -- init to avoid warning
-- Relevant only if Tagged_Parent is set, Parent_Last_Bit indicates the
-- last bit position for any field in the parent type. We only need to
-- check overlap for fields starting below this point.
@@ -14317,7 +14326,7 @@ package body Sem_Ch13 is
if Source_Siz /= Target_Siz then
Error_Msg
("?z?types for unchecked conversion have different sizes!",
- Eloc);
+ Eloc, Act_Unit);
if All_Errors_Mode then
Error_Msg_Name_1 := Chars (Source);
@@ -14353,17 +14362,17 @@ package body Sem_Ch13 is
if Bytes_Big_Endian then
Error_Msg
("\?z?target value will include ^ undefined "
- & "low order bits!", Eloc);
+ & "low order bits!", Eloc, Act_Unit);
else
Error_Msg
("\?z?target value will include ^ undefined "
- & "high order bits!", Eloc);
+ & "high order bits!", Eloc, Act_Unit);
end if;
else
Error_Msg
("\?z?^ trailing bits of target value will be "
- & "undefined!", Eloc);
+ & "undefined!", Eloc, Act_Unit);
end if;
else pragma Assert (Source_Siz > Target_Siz);
@@ -14371,17 +14380,17 @@ package body Sem_Ch13 is
if Bytes_Big_Endian then
Error_Msg
("\?z?^ low order bits of source will be "
- & "ignored!", Eloc);
+ & "ignored!", Eloc, Act_Unit);
else
Error_Msg
("\?z?^ high order bits of source will be "
- & "ignored!", Eloc);
+ & "ignored!", Eloc, Act_Unit);
end if;
else
Error_Msg
("\?z?^ trailing bits of source will be "
- & "ignored!", Eloc);
+ & "ignored!", Eloc, Act_Unit);
end if;
end if;
end if;
@@ -14435,10 +14444,10 @@ package body Sem_Ch13 is
Error_Msg_Node_2 := D_Source;
Error_Msg
("?z?alignment of & (^) is stricter than "
- & "alignment of & (^)!", Eloc);
+ & "alignment of & (^)!", Eloc, Act_Unit);
Error_Msg
("\?z?resulting access value may have invalid "
- & "alignment!", Eloc);
+ & "alignment!", Eloc, Act_Unit);
end if;
end;
end if;
diff --git a/gcc/ada/sem_ch2.adb b/gcc/ada/sem_ch2.adb
index f20a518d4d2..904a8f0f74f 100644
--- a/gcc/ada/sem_ch2.adb
+++ b/gcc/ada/sem_ch2.adb
@@ -6,7 +6,7 @@
-- --
-- B o d y --
-- --
--- Copyright (C) 1992-2012, Free Software Foundation, Inc. --
+-- Copyright (C) 1992-2017, Free Software Foundation, Inc. --
-- --
-- GNAT is free software; you can redistribute it and/or modify it under --
-- terms of the GNU General Public License as published by the Free Soft- --
@@ -68,9 +68,7 @@ package body Sem_Ch2 is
-- this is the result of some kind of previous error generating a
-- junk identifier.
- if Chars (N) in Error_Name_Or_No_Name
- and then Total_Errors_Detected /= 0
- then
+ if not Is_Valid_Name (Chars (N)) and then Total_Errors_Detected /= 0 then
return;
else
Find_Direct_Name (N);
diff --git a/gcc/ada/sem_ch3.adb b/gcc/ada/sem_ch3.adb
index 1e3b78ccf2f..9dc39028033 100644
--- a/gcc/ada/sem_ch3.adb
+++ b/gcc/ada/sem_ch3.adb
@@ -2818,19 +2818,23 @@ package body Sem_Ch3 is
if Present (L) then
Context := Parent (L);
- -- Analyze the contracts of packages and their bodies
+ -- Certain contract annocations have forward visibility semantics and
+ -- must be analyzed after all declarative items have been processed.
+ -- This timing ensures that entities referenced by such contracts are
+ -- visible.
- if Nkind (Context) = N_Package_Specification
- and then L = Visible_Declarations (Context)
- then
- Analyze_Package_Contract (Defining_Entity (Context));
+ -- Analyze the contract of an immediately enclosing package spec or
+ -- body first because other contracts may depend on its information.
- elsif Nkind (Context) = N_Package_Body then
+ if Nkind (Context) = N_Package_Body then
Analyze_Package_Body_Contract (Defining_Entity (Context));
+
+ elsif Nkind (Context) = N_Package_Specification then
+ Analyze_Package_Contract (Defining_Entity (Context));
end if;
- -- Analyze the contracts of various constructs now due to the delayed
- -- visibility needs of their aspects and pragmas.
+ -- Analyze the contracts of various constructs in the declarative
+ -- list.
Analyze_Contracts (L);
@@ -2848,13 +2852,13 @@ package body Sem_Ch3 is
Remove_Visible_Refinements (Corresponding_Spec (Context));
Remove_Partial_Visible_Refinements (Corresponding_Spec (Context));
- elsif Nkind (Context) = N_Package_Declaration then
+ elsif Nkind (Context) = N_Package_Specification then
-- Partial state refinements are visible up to the end of the
-- package spec declarations. Hide the partial state refinements
-- from visibility to restore the original state conditions.
- Remove_Partial_Visible_Refinements (Corresponding_Spec (Context));
+ Remove_Partial_Visible_Refinements (Defining_Entity (Context));
end if;
-- Verify that all abstract states found in any package declared in
@@ -6639,7 +6643,7 @@ package body Sem_Ch3 is
Tdef : constant Node_Id := Type_Definition (N);
Indic : constant Node_Id := Subtype_Indication (Tdef);
Parent_Base : constant Entity_Id := Base_Type (Parent_Type);
- Implicit_Base : Entity_Id;
+ Implicit_Base : Entity_Id := Empty;
New_Indic : Node_Id;
procedure Make_Implicit_Base;
@@ -6751,7 +6755,7 @@ package body Sem_Ch3 is
N_Subtype_Indication;
D_Constraint : Node_Id;
- New_Constraint : Elist_Id;
+ New_Constraint : Elist_Id := No_Elist;
Old_Disc : Entity_Id;
New_Disc : Entity_Id;
New_N : Node_Id;
diff --git a/gcc/ada/sem_ch4.adb b/gcc/ada/sem_ch4.adb
index 538023524e3..d13140fb135 100644
--- a/gcc/ada/sem_ch4.adb
+++ b/gcc/ada/sem_ch4.adb
@@ -339,9 +339,8 @@ package body Sem_Ch4 is
--------------------------
procedure List_Operand_Interps (Opnd : Node_Id) is
- Nam : Node_Id;
- pragma Warnings (Off, Nam);
- Err : Node_Id := N;
+ Nam : Node_Id := Empty;
+ Err : Node_Id := N;
begin
if Is_Overloaded (Opnd) then
@@ -413,13 +412,46 @@ package body Sem_Ch4 is
-- Analyze_Aggregate --
-----------------------
- -- Most of the analysis of Aggregates requires that the type be known,
- -- and is therefore put off until resolution.
+ -- Most of the analysis of Aggregates requires that the type be known, and
+ -- is therefore put off until resolution of the context. Delta aggregates
+ -- have a base component that determines the enclosing aggregate type so
+ -- its type can be ascertained earlier. This also allows delta aggregates
+ -- to appear in the context of a record type with a private extension, as
+ -- per the latest update of AI12-0127.
procedure Analyze_Aggregate (N : Node_Id) is
begin
if No (Etype (N)) then
- Set_Etype (N, Any_Composite);
+ if Nkind (N) = N_Delta_Aggregate then
+ declare
+ Base : constant Node_Id := Expression (N);
+
+ I : Interp_Index;
+ It : Interp;
+
+ begin
+ Analyze (Base);
+
+ -- If the base is overloaded, propagate interpretations to the
+ -- enclosing aggregate.
+
+ if Is_Overloaded (Base) then
+ Get_First_Interp (Base, I, It);
+ Set_Etype (N, Any_Type);
+
+ while Present (It.Nam) loop
+ Add_One_Interp (N, It.Typ, It.Typ);
+ Get_Next_Interp (I, It);
+ end loop;
+
+ else
+ Set_Etype (N, Etype (Base));
+ end if;
+ end;
+
+ else
+ Set_Etype (N, Any_Composite);
+ end if;
end if;
end Analyze_Aggregate;
@@ -1043,12 +1075,11 @@ package body Sem_Ch4 is
else
declare
- Outermost : Node_Id;
+ Outermost : Node_Id := Empty; -- init to avoid warning
P : Node_Id := N;
begin
while Present (P) loop
-
-- For object declarations we can climb to the node from
-- its object definition branch or from its initializing
-- expression. We prefer to mark the child node as the
@@ -1063,7 +1094,7 @@ package body Sem_Ch4 is
Outermost := P;
end if;
- -- Avoid climbing more than needed!
+ -- Avoid climbing more than needed
exit when Stop_Subtree_Climbing (Nkind (P))
or else (Nkind (P) = N_Range
@@ -1488,6 +1519,30 @@ package body Sem_Ch4 is
and then Present (Non_Limited_View (Etype (N)))
then
Set_Etype (N, Non_Limited_View (Etype (N)));
+
+ -- If there is no completion for the type, this may be because
+ -- there is only a limited view of it and there is nothing in
+ -- the context of the current unit that has required a regular
+ -- compilation of the unit containing the type. We recognize
+ -- this unusual case by the fact that that unit is not analyzed.
+ -- Note that the call being analyzed is in a different unit from
+ -- the function declaration, and nothing indicates that the type
+ -- is a limited view.
+
+ elsif Ekind (Scope (Etype (N))) = E_Package
+ and then Present (Limited_View (Scope (Etype (N))))
+ and then not Analyzed (Unit_Declaration_Node (Scope (Etype (N))))
+ then
+ Error_Msg_NE
+ ("cannot call function that returns limited view of}",
+ N, Etype (N));
+
+ Error_Msg_NE
+ ("\there must be a regular with_clause for package & in the "
+ & "current unit, or in some unit in its context",
+ N, Scope (Etype (N)));
+
+ Set_Etype (N, Any_Type);
end if;
end if;
end if;
@@ -1667,11 +1722,11 @@ package body Sem_Ch4 is
else
Analyze_Choices (Alternatives (N), Exp_Type);
Check_Choices (N, Alternatives (N), Exp_Type, Others_Present);
- end if;
- if Exp_Type = Universal_Integer and then not Others_Present then
- Error_Msg_N
- ("case on universal integer requires OTHERS choice", Expr);
+ if Exp_Type = Universal_Integer and then not Others_Present then
+ Error_Msg_N
+ ("case on universal integer requires OTHERS choice", Expr);
+ end if;
end if;
end Analyze_Case_Expression;
@@ -4988,10 +5043,13 @@ package body Sem_Ch4 is
end if;
end if;
- Next_Entity (Comp);
+ -- Do not examine private operations if not within scope of
+ -- the synchronized type.
+
exit when not In_Scope
and then
Comp = First_Private_Entity (Base_Type (Prefix_Type));
+ Next_Entity (Comp);
end loop;
-- If the scope is a current instance, the prefix cannot be an
@@ -8649,7 +8707,8 @@ package body Sem_Ch4 is
else
-- The type of the subprogram may be a limited view obtained
-- transitively from another unit. If full view is available,
- -- use it to analyze call.
+ -- use it to analyze call. If there is no nonlimited view, then
+ -- this is diagnosed when analyzing the rewritten call.
declare
T : constant Entity_Id := Etype (Subprog);
@@ -9094,9 +9153,8 @@ package body Sem_Ch4 is
declare
Dup_Call_Node : constant Node_Id := New_Copy (New_Call_Node);
- CW_Result : Boolean;
- Prim_Result : Boolean;
- pragma Unreferenced (CW_Result);
+ Ignore : Boolean;
+ Prim_Result : Boolean := False;
begin
if not CW_Test_Only then
@@ -9111,7 +9169,7 @@ package body Sem_Ch4 is
-- was found in order to report ambiguous calls.
if not Prim_Result then
- CW_Result :=
+ Ignore :=
Try_Class_Wide_Operation
(Call_Node => New_Call_Node,
Node_To_Replace => Node_To_Replace);
@@ -9121,7 +9179,7 @@ package body Sem_Ch4 is
-- decoration if there is no ambiguity).
else
- CW_Result :=
+ Ignore :=
Try_Class_Wide_Operation
(Call_Node => Dup_Call_Node,
Node_To_Replace => Node_To_Replace);
diff --git a/gcc/ada/sem_ch5.adb b/gcc/ada/sem_ch5.adb
index 10002ea08c2..14cf2e5a732 100644
--- a/gcc/ada/sem_ch5.adb
+++ b/gcc/ada/sem_ch5.adb
@@ -391,7 +391,8 @@ package body Sem_Ch5 is
T1 : Entity_Id;
T2 : Entity_Id;
- Save_Full_Analysis : Boolean;
+ Save_Full_Analysis : Boolean := False;
+ -- Force initialization to facilitate static analysis
Saved_GM : constant Ghost_Mode_Type := Ghost_Mode;
-- Save the Ghost mode to restore on exit
diff --git a/gcc/ada/sem_ch6.adb b/gcc/ada/sem_ch6.adb
index 4f719e9b81c..a6d70e5b597 100644
--- a/gcc/ada/sem_ch6.adb
+++ b/gcc/ada/sem_ch6.adb
@@ -1039,7 +1039,7 @@ package body Sem_Ch6 is
---------------------
Expr : Node_Id;
- Obj_Decl : Node_Id;
+ Obj_Decl : Node_Id := Empty;
-- Start of processing for Analyze_Function_Return
@@ -1190,13 +1190,16 @@ package body Sem_Ch6 is
-- Case of Expr present
- if Present (Expr)
+ if Present (Expr) then
- -- Defend against previous errors
+ -- Defend against previous errors
+
+ if Nkind (Expr) = N_Empty
+ or else No (Etype (Expr))
+ then
+ return;
+ end if;
- and then Nkind (Expr) /= N_Empty
- and then Present (Etype (Expr))
- then
-- Apply constraint check. Note that this is done before the implicit
-- conversion of the expression done for anonymous access types to
-- ensure correct generation of the null-excluding check associated
@@ -1510,6 +1513,7 @@ package body Sem_Ch6 is
Process_End_Label (Handled_Statement_Sequence (N), 't', Current_Scope);
Update_Use_Clause_Chain;
+ Validate_Categorization_Dependency (N, Gen_Id);
End_Scope;
Check_Subprogram_Order (N);
@@ -3456,7 +3460,7 @@ package body Sem_Ch6 is
-- Start of processing for Analyze_Subprogram_Body_Helper
begin
- -- A [generic] subprogram body "freezes" the contract of the nearest
+ -- A [generic] subprogram body freezes the contract of the nearest
-- enclosing package body and all other contracts encountered in the
-- same declarative part up to and excluding the subprogram body:
@@ -3469,17 +3473,17 @@ package body Sem_Ch6 is
-- with Refined_Depends => (Input => Constit) ...
-- This ensures that any annotations referenced by the contract of the
- -- [generic] subprogram body are available. This form of "freezing" is
+ -- [generic] subprogram body are available. This form of freezing is
-- decoupled from the usual Freeze_xxx mechanism because it must also
-- work in the context of generics where normal freezing is disabled.
- -- Only bodies coming from source should cause this type of "freezing".
+ -- Only bodies coming from source should cause this type of freezing.
-- Expression functions that act as bodies and complete an initial
-- declaration must be included in this category, hence the use of
-- Original_Node.
if Comes_From_Source (Original_Node (N)) then
- Analyze_Previous_Contracts (N);
+ Freeze_Previous_Contracts (N);
end if;
-- Generic subprograms are handled separately. They always have a
@@ -4354,7 +4358,7 @@ package body Sem_Ch6 is
end if;
end if;
- -- A subprogram body "freezes" its own contract. Analyze the contract
+ -- A subprogram body freezes its own contract. Analyze the contract
-- after the declarations of the body have been processed as pragmas
-- are now chained on the contract of the subprogram body.
@@ -10118,7 +10122,6 @@ package body Sem_Ch6 is
function Visible_Part_Type (T : Entity_Id) return Boolean is
P : constant Node_Id := Unit_Declaration_Node (Scope (T));
- N : Node_Id;
begin
-- If the entity is a private type, then it must be declared in a
@@ -10126,34 +10129,19 @@ package body Sem_Ch6 is
if Ekind (T) in Private_Kind then
return True;
- end if;
-
- -- Otherwise, we traverse the visible part looking for its
- -- corresponding declaration. We cannot use the declaration
- -- node directly because in the private part the entity of a
- -- private type is the one in the full view, which does not
- -- indicate that it is the completion of something visible.
-
- N := First (Visible_Declarations (Specification (P)));
- while Present (N) loop
- if Nkind (N) = N_Full_Type_Declaration
- and then Present (Defining_Identifier (N))
- and then T = Defining_Identifier (N)
- then
- return True;
- elsif Nkind_In (N, N_Private_Type_Declaration,
- N_Private_Extension_Declaration)
- and then Present (Defining_Identifier (N))
- and then T = Full_View (Defining_Identifier (N))
- then
- return True;
- end if;
+ elsif Is_Type (T) and then Has_Private_Declaration (T) then
+ return True;
- Next (N);
- end loop;
+ elsif Is_List_Member (Declaration_Node (T))
+ and then List_Containing (Declaration_Node (T)) =
+ Visible_Declarations (Specification (P))
+ then
+ return True;
- return False;
+ else
+ return False;
+ end if;
end Visible_Part_Type;
-- Start of processing for Check_For_Primitive_Subprogram
diff --git a/gcc/ada/sem_ch7.adb b/gcc/ada/sem_ch7.adb
index dc00cf9f249..f50b8669529 100644
--- a/gcc/ada/sem_ch7.adb
+++ b/gcc/ada/sem_ch7.adb
@@ -707,9 +707,9 @@ package body Sem_Ch7 is
end if;
end if;
- -- A [generic] package body "freezes" the contract of the nearest
- -- enclosing package body and all other contracts encountered in the
- -- same declarative part up to and excluding the package body:
+ -- A [generic] package body freezes the contract of the nearest
+ -- enclosing package body and all other contracts encountered in
+ -- the same declarative part up to and excluding the package body:
-- package body Nearest_Enclosing_Package
-- with Refined_State => (State => Constit)
@@ -726,21 +726,21 @@ package body Sem_Ch7 is
-- This ensures that any annotations referenced by the contract of a
-- [generic] subprogram body declared within the current package body
- -- are available. This form of "freezing" is decoupled from the usual
+ -- are available. This form of freezing is decoupled from the usual
-- Freeze_xxx mechanism because it must also work in the context of
-- generics where normal freezing is disabled.
- -- Only bodies coming from source should cause this type of "freezing".
+ -- Only bodies coming from source should cause this type of freezing.
-- Instantiated generic bodies are excluded because their processing is
-- performed in a separate compilation pass which lacks enough semantic
-- information with respect to contract analysis. It is safe to suppress
- -- the "freezing" of contracts in this case because this action already
+ -- the freezing of contracts in this case because this action already
-- took place at the end of the enclosing declarative part.
if Comes_From_Source (N)
and then not Is_Generic_Instance (Spec_Id)
then
- Analyze_Previous_Contracts (N);
+ Freeze_Previous_Contracts (N);
end if;
-- A package body is Ghost when the corresponding spec is Ghost. Set
@@ -876,10 +876,6 @@ package body Sem_Ch7 is
Declare_Inherited_Private_Subprograms (Spec_Id);
end if;
- -- A package body "freezes" the contract of its initial declaration.
- -- This analysis depends on attribute Corresponding_Spec being set. Only
- -- bodies coming from source shuld cause this type of "freezing".
-
if Present (Declarations (N)) then
Analyze_Declarations (Declarations (N));
Inspect_Deferred_Constant_Completion (Declarations (N));
diff --git a/gcc/ada/sem_ch8.adb b/gcc/ada/sem_ch8.adb
index bdc8aba1e1f..d8d5b7b5c04 100644
--- a/gcc/ada/sem_ch8.adb
+++ b/gcc/ada/sem_ch8.adb
@@ -5358,6 +5358,8 @@ package body Sem_Ch8 is
-- Local variables
+ Is_Assignment_LHS : constant Boolean := Is_LHS (N) = Yes;
+
Nested_Inst : Entity_Id := Empty;
-- The entity of a nested instance which appears within Inst (if any)
@@ -5895,9 +5897,20 @@ package body Sem_Ch8 is
<<Done>>
Check_Restriction_No_Use_Of_Entity (N);
- -- Save the scenario for later examination by the ABE Processing phase
+ -- Annotate the tree by creating a variable reference marker in case the
+ -- original variable reference is folded or optimized away. The variable
+ -- reference marker is automatically saved for later examination by the
+ -- ABE Processing phase. Variable references which act as actuals in a
+ -- call require special processing and are left to Resolve_Actuals. The
+ -- reference is a write when it appears on the left hand side of an
+ -- assignment.
- Record_Elaboration_Scenario (N);
+ if not Within_Subprogram_Call (N) then
+ Build_Variable_Reference_Marker
+ (N => N,
+ Read => not Is_Assignment_LHS,
+ Write => Is_Assignment_LHS);
+ end if;
end Find_Direct_Name;
------------------------
@@ -5969,8 +5982,10 @@ package body Sem_Ch8 is
-- Local variables
- Selector : constant Node_Id := Selector_Name (N);
- Candidate : Entity_Id := Empty;
+ Is_Assignment_LHS : constant Boolean := Is_LHS (N) = Yes;
+ Selector : constant Node_Id := Selector_Name (N);
+
+ Candidate : Entity_Id := Empty;
P_Name : Entity_Id;
Id : Entity_Id;
@@ -6529,9 +6544,20 @@ package body Sem_Ch8 is
Check_Restriction_No_Use_Of_Entity (N);
- -- Save the scenario for later examination by the ABE Processing phase
+ -- Annotate the tree by creating a variable reference marker in case the
+ -- original variable reference is folded or optimized away. The variable
+ -- reference marker is automatically saved for later examination by the
+ -- ABE Processing phase. Variable references which act as actuals in a
+ -- call require special processing and are left to Resolve_Actuals. The
+ -- reference is a write when it appears on the left hand side of an
+ -- assignment.
- Record_Elaboration_Scenario (N);
+ if not Within_Subprogram_Call (N) then
+ Build_Variable_Reference_Marker
+ (N => N,
+ Read => not Is_Assignment_LHS,
+ Write => Is_Assignment_LHS);
+ end if;
end Find_Expanded_Name;
--------------------
@@ -8294,6 +8320,7 @@ package body Sem_Ch8 is
procedure Mark_Use_Type (E : Entity_Id) is
Curr : Node_Id;
+ Base : Entity_Id;
begin
-- Ignore void types and unresolved string literals and primitives
@@ -8305,12 +8332,22 @@ package body Sem_Ch8 is
return;
end if;
+ -- Primitives with class-wide operands might additionally render
+ -- their base type's use_clauses effective - so do a recursive check
+ -- here.
+
+ Base := Base_Type (Etype (E));
+
+ if Ekind (Base) = E_Class_Wide_Type then
+ Mark_Use_Type (Base);
+ end if;
+
-- The package containing the type or operator function being used
-- may be in use as well, so mark any use_package_clauses for it as
-- effective. There are also additional sanity checks performed here
-- for ignoring previous errors.
- Mark_Use_Package (Scope (Base_Type (Etype (E))));
+ Mark_Use_Package (Scope (Base));
if Nkind (E) in N_Op
and then Present (Entity (E))
@@ -8319,7 +8356,7 @@ package body Sem_Ch8 is
Mark_Use_Package (Scope (Entity (E)));
end if;
- Curr := Current_Use_Clause (Base_Type (Etype (E)));
+ Curr := Current_Use_Clause (Base);
while Present (Curr)
and then not Is_Effective_Use_Clause (Curr)
loop
@@ -8371,7 +8408,9 @@ package body Sem_Ch8 is
or else Ekind_In (Id, E_Generic_Function,
E_Generic_Procedure))
and then (Is_Potentially_Use_Visible (Id)
- or else Is_Intrinsic_Subprogram (Id))
+ or else Is_Intrinsic_Subprogram (Id)
+ or else (Ekind_In (Id, E_Function, E_Procedure)
+ and then Is_Generic_Actual_Subprogram (Id)))
then
Mark_Parameters (Id);
end if;
@@ -9057,6 +9096,7 @@ package body Sem_Ch8 is
and then Comes_From_Source (Curr)
and then not Is_Effective_Use_Clause (Curr)
and then not In_Instance
+ and then not In_Inlined_Body
then
-- We are dealing with a potentially unused use_package_clause
@@ -9400,7 +9440,10 @@ package body Sem_Ch8 is
-- Warn about detected redundant clauses
- elsif In_Open_Scopes (P) and not Force then
+ elsif not Force
+ and then In_Open_Scopes (P)
+ and then not Is_Hidden_Open_Scope (P)
+ then
if Warn_On_Redundant_Constructs and then P = Current_Scope then
Error_Msg_NE -- CODEFIX
("& is already use-visible within itself?r?",
@@ -9865,6 +9908,7 @@ package body Sem_Ch8 is
and then not Spec_Reloaded_For_Body
and then not In_Instance
+ and then not In_Inlined_Body
then
-- The type already has a use clause
diff --git a/gcc/ada/sem_ch9.adb b/gcc/ada/sem_ch9.adb
index 199cd8a8c7a..766742297fa 100644
--- a/gcc/ada/sem_ch9.adb
+++ b/gcc/ada/sem_ch9.adb
@@ -1210,13 +1210,13 @@ package body Sem_Ch9 is
Entry_Name : Entity_Id;
begin
- -- An entry body "freezes" the contract of the nearest enclosing package
+ -- An entry body freezes the contract of the nearest enclosing package
-- body and all other contracts encountered in the same declarative part
-- up to and excluding the entry body. This ensures that any annotations
-- referenced by the contract of an entry or subprogram body declared
-- within the current protected body are available.
- Analyze_Previous_Contracts (N);
+ Freeze_Previous_Contracts (N);
Tasking_Used := True;
@@ -1794,14 +1794,14 @@ package body Sem_Ch9 is
-- Start of processing for Analyze_Protected_Body
begin
- -- A protected body "freezes" the contract of the nearest enclosing
+ -- A protected body freezes the contract of the nearest enclosing
-- package body and all other contracts encountered in the same
- -- declarative part up to and excluding the protected body. This ensures
- -- that any annotations referenced by the contract of an entry or
- -- subprogram body declared within the current protected body are
- -- available.
+ -- declarative part up to and excluding the protected body. This
+ -- ensures that any annotations referenced by the contract of an
+ -- entry or subprogram body declared within the current protected
+ -- body are available.
- Analyze_Previous_Contracts (N);
+ Freeze_Previous_Contracts (N);
Tasking_Used := True;
Set_Ekind (Body_Id, E_Protected_Body);
@@ -2287,7 +2287,7 @@ package body Sem_Ch9 is
Target_Obj : Node_Id := Empty;
Req_Scope : Entity_Id;
Outer_Ent : Entity_Id;
- Synch_Type : Entity_Id;
+ Synch_Type : Entity_Id := Empty;
begin
-- Preserve relevant elaboration-related attributes of the context which
@@ -2900,13 +2900,13 @@ package body Sem_Ch9 is
-- a single task, since Spec_Id is set to the task type).
begin
- -- A task body "freezes" the contract of the nearest enclosing package
+ -- A task body freezes the contract of the nearest enclosing package
-- body and all other contracts encountered in the same declarative part
-- up to and excluding the task body. This ensures that annotations
-- referenced by the contract of an entry or subprogram body declared
-- within the current protected body are available.
- Analyze_Previous_Contracts (N);
+ Freeze_Previous_Contracts (N);
Tasking_Used := True;
Set_Scope (Body_Id, Current_Scope);
@@ -3513,10 +3513,10 @@ package body Sem_Ch9 is
-- declarations. Search for the private type declaration.
declare
- Full_T_Ifaces : Elist_Id;
+ Full_T_Ifaces : Elist_Id := No_Elist;
Iface : Node_Id;
Priv_T : Entity_Id;
- Priv_T_Ifaces : Elist_Id;
+ Priv_T_Ifaces : Elist_Id := No_Elist;
begin
Priv_T := First_Entity (Scope (T));
diff --git a/gcc/ada/sem_dim.adb b/gcc/ada/sem_dim.adb
index a271ca55960..44166002ee9 100644
--- a/gcc/ada/sem_dim.adb
+++ b/gcc/ada/sem_dim.adb
@@ -903,13 +903,13 @@ package body Sem_Dim is
Choice : Node_Id;
Dim_Aggr : Node_Id;
Dim_Symbol : Node_Id;
- Dim_Symbols : Symbol_Array := No_Symbols;
- Dim_System : System_Type := Null_System;
- Position : Nat := 0;
+ Dim_Symbols : Symbol_Array := No_Symbols;
+ Dim_System : System_Type := Null_System;
+ Position : Dimension_Position := Invalid_Position;
Unit_Name : Node_Id;
- Unit_Names : Name_Array := No_Names;
+ Unit_Names : Name_Array := No_Names;
Unit_Symbol : Node_Id;
- Unit_Symbols : Symbol_Array := No_Symbols;
+ Unit_Symbols : Symbol_Array := No_Symbols;
Errors_Count : Nat;
-- Errors_Count is a count of errors detected by the compiler so far
@@ -949,13 +949,13 @@ package body Sem_Dim is
Dim_Aggr := First (Expressions (Aggr));
Errors_Count := Serious_Errors_Detected;
while Present (Dim_Aggr) loop
- Position := Position + 1;
-
- if Position > High_Position_Bound then
+ if Position = High_Position_Bound then
Error_Msg_N ("too many dimensions in system", Aggr);
exit;
end if;
+ Position := Position + 1;
+
if Nkind (Dim_Aggr) /= N_Aggregate then
Error_Msg_N ("aggregate expected", Dim_Aggr);
diff --git a/gcc/ada/sem_disp.adb b/gcc/ada/sem_disp.adb
index 974edd35679..4cc41e3acaa 100644
--- a/gcc/ada/sem_disp.adb
+++ b/gcc/ada/sem_disp.adb
@@ -404,7 +404,7 @@ package body Sem_Disp is
Func : Entity_Id;
Subp_Entity : Entity_Id;
Indeterm_Ancestor_Call : Boolean := False;
- Indeterm_Ctrl_Type : Entity_Id;
+ Indeterm_Ctrl_Type : Entity_Id := Empty; -- init to avoid warning
Static_Tag : Node_Id := Empty;
-- If a controlling formal has a statically tagged actual, the tag of
@@ -2371,16 +2371,26 @@ package body Sem_Disp is
-----------------------------------
function Is_Inherited_Public_Operation (Op : Entity_Id) return Boolean is
- Prim : constant Entity_Id := Alias (Op);
- Scop : constant Entity_Id := Scope (Prim);
Pack_Decl : Node_Id;
+ Prim : Entity_Id := Op;
+ Scop : Entity_Id := Prim;
begin
+ -- Locate the ultimate non-hidden alias entity
+
+ while Present (Alias (Prim)) and then not Is_Hidden (Alias (Prim)) loop
+ pragma Assert (Alias (Prim) /= Prim);
+ Prim := Alias (Prim);
+ Scop := Scope (Prim);
+ end loop;
+
if Comes_From_Source (Prim) and then Ekind (Scop) = E_Package then
Pack_Decl := Unit_Declaration_Node (Scop);
- return Nkind (Pack_Decl) = N_Package_Declaration
- and then List_Containing (Unit_Declaration_Node (Prim)) =
- Visible_Declarations (Specification (Pack_Decl));
+
+ return
+ Nkind (Pack_Decl) = N_Package_Declaration
+ and then List_Containing (Unit_Declaration_Node (Prim)) =
+ Visible_Declarations (Specification (Pack_Decl));
else
return False;
diff --git a/gcc/ada/sem_elab.adb b/gcc/ada/sem_elab.adb
index 8dec4280eb3..b3077adfbf8 100644
--- a/gcc/ada/sem_elab.adb
+++ b/gcc/ada/sem_elab.adb
@@ -26,6 +26,7 @@
with Atree; use Atree;
with Debug; use Debug;
with Einfo; use Einfo;
+with Elists; use Elists;
with Errout; use Errout;
with Exp_Ch11; use Exp_Ch11;
with Exp_Tss; use Exp_Tss;
@@ -67,7 +68,7 @@ package body Sem_Elab is
-- * Diagnose at compile-time or install run-time checks to prevent ABE
-- access to data and behaviour.
--
- -- The high level idea is to accurately diagnose ABE issues within a
+ -- The high-level idea is to accurately diagnose ABE issues within a
-- single unit because the ABE mechanism can inspect the whole unit.
-- As soon as the elaboration graph extends to an external unit, the
-- diagnostics stop because the body of the unit may not be available.
@@ -127,7 +128,7 @@ package body Sem_Elab is
-- * Declaration level - A type of enclosing level. A scenario or target is
-- at the declaration level when it appears within the declarations of a
-- block statement, entry body, subprogram body, or task body, ignoring
- -- enclosing packges.
+ -- enclosing packages.
--
-- * Generic library level - A type of enclosing level. A scenario or
-- target is at the generic library level if it appears in a generic
@@ -145,8 +146,8 @@ package body Sem_Elab is
-- the library level if it appears in a package library unit, ignoring
-- enclosng packages.
--
- -- * Non-library level encapsulator - A construct that cannot be elaborated
- -- on its own and requires elaboration by a top level scenario.
+ -- * Non-library-level encapsulator - A construct that cannot be elaborated
+ -- on its own and requires elaboration by a top-level scenario.
--
-- * Scenario - A construct or context which may be elaborated or executed
-- by elaboration code. The scenarios recognized by the ABE mechanism are
@@ -180,7 +181,7 @@ package body Sem_Elab is
--
-- - For task activation, the target is the task body
--
- -- * Top level scenario - A scenario which appears in a non-generic main
+ -- * Top-level scenario - A scenario which appears in a non-generic main
-- unit. Depending on the elaboration model is in effect, the following
-- addotional restrictions apply:
--
@@ -197,7 +198,7 @@ package body Sem_Elab is
-- The Recording phase coincides with the analysis/resolution phase of the
-- compiler. It has the following objectives:
--
- -- * Record all top level scenarios for examination by the Processing
+ -- * Record all top-level scenarios for examination by the Processing
-- phase.
--
-- Saving only a certain number of nodes improves the performance of
@@ -230,9 +231,9 @@ package body Sem_Elab is
-- and/or inlining of bodies, but before the removal of Ghost code. It has
-- the following objectives:
--
- -- * Examine all top level scenarios saved during the Recording phase
+ -- * Examine all top-level scenarios saved during the Recording phase
--
- -- The top level scenarios act as roots for depth-first traversal of
+ -- The top-level scenarios act as roots for depth-first traversal of
-- the call/instantiation/task activation graph. The traversal stops
-- when an outgoing edge leaves the main unit.
--
@@ -293,7 +294,7 @@ package body Sem_Elab is
-- | | |
-- | +--> Process_Variable_Assignment |
-- | | |
- -- | +--> Process_Variable_Read |
+ -- | +--> Process_Variable_Reference |
-- | |
-- +------------------------- Processing phase -------------------------+
@@ -419,8 +420,7 @@ package body Sem_Elab is
-- The following steps describe how to add a new elaboration scenario and
-- preserve the existing architecture.
--
- -- 1) If necessary, update predicates Is_Check_Emitting_Scenario and
- -- Is_Scenario.
+ -- 1) If necessary, update predicate Is_Scenario
--
-- 2) Add predicate Is_Suitable_xxx. Include a call to it in predicate
-- Is_Suitable_Scenario.
@@ -683,10 +683,6 @@ package body Sem_Elab is
-- variable.
type Variable_Attributes is record
- SPARK_Mode_On : Boolean;
- -- This flag is set when the variable appears in a region subject to
- -- pragma SPARK_Mode with value On, or starts one such region.
-
Unit_Id : Entity_Id;
-- This attribute denotes the entity of the compilation unit where the
-- variable resides.
@@ -715,8 +711,28 @@ package body Sem_Elab is
Hash => Elaboration_Context_Hash,
Equal => "=");
+ -- The following table stores a status flag for each top-level scenario
+ -- recorded in table Top_Level_Scenarios.
+
+ Recorded_Top_Level_Scenarios_Max : constant := 503;
+
+ type Recorded_Top_Level_Scenarios_Index is
+ range 0 .. Recorded_Top_Level_Scenarios_Max - 1;
+
+ function Recorded_Top_Level_Scenarios_Hash
+ (Key : Node_Id) return Recorded_Top_Level_Scenarios_Index;
+ -- Obtain the hash value of entity Key
+
+ package Recorded_Top_Level_Scenarios is new Simple_HTable
+ (Header_Num => Recorded_Top_Level_Scenarios_Index,
+ Element => Boolean,
+ No_Element => False,
+ Key => Node_Id,
+ Hash => Recorded_Top_Level_Scenarios_Hash,
+ Equal => "=");
+
-- The following table stores all active scenarios in a recursive traversal
- -- starting from a top level scenario. This table must be maintained in a
+ -- starting from a top-level scenario. This table must be maintained in a
-- FIFO fashion.
package Scenario_Stack is new Table.Table
@@ -727,7 +743,7 @@ package body Sem_Elab is
Table_Increment => 100,
Table_Name => "Scenario_Stack");
- -- The following table stores all top level scenario saved during the
+ -- The following table stores all top-level scenario saved during the
-- Recording phase. The contents of this table act as traversal roots
-- later in the Processing phase. This table must be maintained in a
-- LIFO fashion.
@@ -741,7 +757,7 @@ package body Sem_Elab is
Table_Name => "Top_Level_Scenarios");
-- The following table stores the bodies of all eligible scenarios visited
- -- during a traversal starting from a top level scenario. The contents of
+ -- during a traversal starting from a top-level scenario. The contents of
-- this table must be reset upon each new traversal.
Visited_Bodies_Max : constant := 511;
@@ -785,12 +801,15 @@ package body Sem_Elab is
-- string " in SPARK" is added to the end of the message.
procedure Ensure_Prior_Elaboration
- (N : Node_Id;
- Unit_Id : Entity_Id;
- In_Task_Body : Boolean);
+ (N : Node_Id;
+ Unit_Id : Entity_Id;
+ In_Partial_Fin : Boolean;
+ In_Task_Body : Boolean);
-- Guarantee the elaboration of unit Unit_Id with respect to the main unit.
- -- N denotes the related scenario. Flag In_Task_Body should be set when the
- -- need for elaboration is initiated from a task body.
+ -- N denotes the related scenario. Flag In_Partial_Fin should be set when
+ -- the need for elaboration is initiated by a partial finalization routine.
+ -- Flag In_Task_Body should be set when the need for prior elaboration is
+ -- initiated from a task body.
procedure Ensure_Prior_Elaboration_Dynamic
(N : Node_Id;
@@ -867,7 +886,7 @@ package body Sem_Elab is
-- Return the code unit which contains arbitrary node or entity N. This
-- is the unit of the file which physically contains the related construct
-- denoted by N except when N is within an instantiation. In that case the
- -- unit is that of the top level instantiation.
+ -- unit is that of the top-level instantiation.
procedure Find_Elaborated_Units;
-- Populate table Elaboration_Context with all units which have prior
@@ -962,16 +981,16 @@ package body Sem_Elab is
-- information message, otherwise it emits an error. If flag In_SPARK
-- is set, then string " in SPARK" is added to the end of the message.
- procedure Info_Variable_Read
+ procedure Info_Variable_Reference
(Ref : Node_Id;
Var_Id : Entity_Id;
Info_Msg : Boolean;
In_SPARK : Boolean);
- pragma Inline (Info_Variable_Read);
- -- Output information concerning reference Ref which reads variable Var_Id.
- -- If flag Info_Msg is set, the routine emits an information message,
- -- otherwise it emits an error. If flag In_SPARK is set, then string " in
- -- SPARK" is added to the end of the message.
+ pragma Inline (Info_Variable_Reference);
+ -- Output information concerning reference Ref which mentions variable
+ -- Var_Id. If flag Info_Msg is set, the routine emits an information
+ -- message, otherwise it emits an error. If flag In_SPARK is set, then
+ -- string " in SPARK" is added to the end of the message.
function Insertion_Node (N : Node_Id; Ins_Nod : Node_Id) return Node_Id;
pragma Inline (Insertion_Node);
@@ -1019,11 +1038,6 @@ package body Sem_Elab is
pragma Inline (Is_Bodiless_Subprogram);
-- Determine whether subprogram Subp_Id will never have a body
- function Is_Check_Emitting_Scenario (N : Node_Id) return Boolean;
- pragma Inline (Is_Check_Emitting_Scenario);
- -- Determine whether arbitrary node N denotes a scenario which may emit a
- -- conditional ABE check.
-
function Is_Controlled_Proc
(Subp_Id : Entity_Id;
Subp_Nam : Name_Id) return Boolean;
@@ -1101,6 +1115,11 @@ package body Sem_Elab is
-- Determine whether entity Id denotes the protected or unprotected version
-- of a protected subprogram.
+ function Is_Recorded_Top_Level_Scenario (N : Node_Id) return Boolean;
+ pragma Inline (Is_Recorded_Top_Level_Scenario);
+ -- Determine whether arbitrary node is a recorded top-level scenario which
+ -- appears in table Top_Level_Scenarios.
+
function Is_Safe_Activation
(Call : Node_Id;
Task_Decl : Node_Id) return Boolean;
@@ -1163,10 +1182,10 @@ package body Sem_Elab is
-- Determine whether arbitrary node N denotes a suitable assignment for ABE
-- processing.
- function Is_Suitable_Variable_Read (N : Node_Id) return Boolean;
- pragma Inline (Is_Suitable_Variable_Read);
- -- Determine whether arbitrary node N is a suitable variable read for ABE
- -- processing.
+ function Is_Suitable_Variable_Reference (N : Node_Id) return Boolean;
+ pragma Inline (Is_Suitable_Variable_Reference);
+ -- Determine whether arbitrary node N is a suitable variable reference for
+ -- ABE processing.
function Is_Task_Entry (Id : Entity_Id) return Boolean;
pragma Inline (Is_Task_Entry);
@@ -1202,86 +1221,111 @@ package body Sem_Elab is
-- Pop the top of the scenario stack. A check is made to ensure that the
-- scenario being removed is the same as N.
- procedure Process_Access (Attr : Node_Id; In_Task_Body : Boolean);
+ procedure Process_Access
+ (Attr : Node_Id;
+ In_Partial_Fin : Boolean;
+ In_Task_Body : Boolean);
-- Perform ABE checks and diagnostics for 'Access to entry, operator, or
- -- subprogram denoted by Attr. Flag In_Task_Body should be set when the
- -- processing is initiated from a task body.
+ -- subprogram denoted by Attr. Flag In_Partial_Fin shoud be set when the
+ -- processing is initiated by a partial finalization routine. Flag
+ -- In_Task_Body should be set when the processing is initiated from a task
+ -- body.
generic
with procedure Process_Single_Activation
- (Call : Node_Id;
- Call_Attrs : Call_Attributes;
- Obj_Id : Entity_Id;
- Task_Attrs : Task_Attributes;
- In_Task_Body : Boolean);
+ (Call : Node_Id;
+ Call_Attrs : Call_Attributes;
+ Obj_Id : Entity_Id;
+ Task_Attrs : Task_Attributes;
+ In_Partial_Fin : Boolean;
+ In_Task_Body : Boolean);
-- Perform ABE checks and diagnostics for task activation call Call
-- which activates task Obj_Id. Call_Attrs are the attributes of the
-- activation call. Task_Attrs are the attributes of the task type.
- -- Flag In_Task_Body should be set when the processing is initiated
- -- from a task body.
+ -- Flag In_Partial_Fin shoud be set when the processing is initiated
+ -- by a partial finalization routine. Flag In_Task_Body should be set
+ -- when the processing is initiated from a task body.
procedure Process_Activation_Call
- (Call : Node_Id;
- Call_Attrs : Call_Attributes;
- In_Task_Body : Boolean);
+ (Call : Node_Id;
+ Call_Attrs : Call_Attributes;
+ In_Partial_Fin : Boolean;
+ In_Task_Body : Boolean);
-- Perform ABE checks and diagnostics for activation call Call by invoking
-- routine Process_Single_Activation on each task object being activated.
- -- Call_Attrs are the attributes of the activation call. Flag In_Task_Body
- -- should be set when the processing is initiated from a task body.
+ -- Call_Attrs are the attributes of the activation call. In_Partial_Fin
+ -- shoud be set when the processing is initiated by a partial finalization
+ -- routine. Flag In_Task_Body should be set when the processing is started
+ -- from a task body.
procedure Process_Activation_Conditional_ABE_Impl
- (Call : Node_Id;
- Call_Attrs : Call_Attributes;
- Obj_Id : Entity_Id;
- Task_Attrs : Task_Attributes;
- In_Task_Body : Boolean);
+ (Call : Node_Id;
+ Call_Attrs : Call_Attributes;
+ Obj_Id : Entity_Id;
+ Task_Attrs : Task_Attributes;
+ In_Partial_Fin : Boolean;
+ In_Task_Body : Boolean);
-- Perform common conditional ABE checks and diagnostics for call Call
-- which activates task Obj_Id ignoring the Ada or SPARK rules. CAll_Attrs
-- are the attributes of the activation call. Task_Attrs are the attributes
- -- of the task type. Flag In_Task_Body should be set when the processing is
- -- initiated from a task body.
+ -- of the task type. Flag In_Partial_Fin shoud be set when the processing
+ -- is initiated by a partial finalization routine. Flag In_Task_Body should
+ -- be set when the processing is initiated from a task body.
procedure Process_Activation_Guaranteed_ABE_Impl
- (Call : Node_Id;
- Call_Attrs : Call_Attributes;
- Obj_Id : Entity_Id;
- Task_Attrs : Task_Attributes;
- In_Task_Body : Boolean);
- -- Perform common guaranteed ABE checks and diagnostics for call Call
- -- which activates task Obj_Id ignoring the Ada or SPARK rules. CAll_Attrs
- -- are the attributes of the activation call. Task_Attrs are the attributes
- -- of the task type. Flag In_Task_Body should be set when the processing is
- -- initiated from a task body.
+ (Call : Node_Id;
+ Call_Attrs : Call_Attributes;
+ Obj_Id : Entity_Id;
+ Task_Attrs : Task_Attributes;
+ In_Partial_Fin : Boolean;
+ In_Task_Body : Boolean);
+ -- Perform common guaranteed ABE checks and diagnostics for call Call which
+ -- activates task Obj_Id ignoring the Ada or SPARK rules. Task_Attrs are
+ -- the attributes of the task type. The following parameters are provided
+ -- for compatibility and are unused.
+ --
+ -- Call_Attrs
+ -- In_Partial_Fin
+ -- In_Task_Body
procedure Process_Call
- (Call : Node_Id;
- Call_Attrs : Call_Attributes;
- Target_Id : Entity_Id;
- In_Task_Body : Boolean);
+ (Call : Node_Id;
+ Call_Attrs : Call_Attributes;
+ Target_Id : Entity_Id;
+ In_Partial_Fin : Boolean;
+ In_Task_Body : Boolean);
-- Top-level dispatcher for processing of calls. Perform ABE checks and
-- diagnostics for call Call which invokes target Target_Id. Call_Attrs
- -- are the attributes of the call. Flag In_Task_Body should be set when
- -- the processing is initiated from a task body.
+ -- are the attributes of the call. Flag In_Partial_Fin shoud be set when
+ -- the processing is initiated by a partial finalization routine. Flag
+ -- In_Task_Body should be set when the processing is started from a task
+ -- body.
procedure Process_Call_Ada
- (Call : Node_Id;
- Call_Attrs : Call_Attributes;
- Target_Id : Entity_Id;
- Target_Attrs : Target_Attributes;
- In_Task_Body : Boolean);
+ (Call : Node_Id;
+ Call_Attrs : Call_Attributes;
+ Target_Id : Entity_Id;
+ Target_Attrs : Target_Attributes;
+ In_Partial_Fin : Boolean;
+ In_Task_Body : Boolean);
-- Perform ABE checks and diagnostics for call Call which invokes target
-- Target_Id using the Ada rules. Call_Attrs are the attributes of the
- -- call. Target_Attrs are attributes of the target. Flag In_Task_Body
- -- should be set when the processing is initiated from a task body.
+ -- call. Target_Attrs are attributes of the target. Flag In_Partial_Fin
+ -- shoud be set when the processing is initiated by a partial finalization
+ -- routine. Flag In_Task_Body should be set when the processing is started
+ -- from a task body.
procedure Process_Call_Conditional_ABE
- (Call : Node_Id;
- Call_Attrs : Call_Attributes;
- Target_Id : Entity_Id;
- Target_Attrs : Target_Attributes);
+ (Call : Node_Id;
+ Call_Attrs : Call_Attributes;
+ Target_Id : Entity_Id;
+ Target_Attrs : Target_Attributes;
+ In_Partial_Fin : Boolean);
-- Perform common conditional ABE checks and diagnostics for call Call that
-- invokes target Target_Id ignoring the Ada or SPARK rules. Call_Attrs are
-- the attributes of the call. Target_Attrs are attributes of the target.
+ -- Flag In_Partial_Fin shoud be set when the processing is initiated by a
+ -- partial finalization routine.
procedure Process_Call_Guaranteed_ABE
(Call : Node_Id;
@@ -1292,49 +1336,59 @@ package body Sem_Elab is
-- the attributes of the call.
procedure Process_Call_SPARK
- (Call : Node_Id;
- Call_Attrs : Call_Attributes;
- Target_Id : Entity_Id;
- Target_Attrs : Target_Attributes);
+ (Call : Node_Id;
+ Call_Attrs : Call_Attributes;
+ Target_Id : Entity_Id;
+ Target_Attrs : Target_Attributes;
+ In_Partial_Fin : Boolean);
-- Perform ABE checks and diagnostics for call Call which invokes target
-- Target_Id using the SPARK rules. Call_Attrs are the attributes of the
- -- call. Target_Attrs are attributes of the target.
+ -- call. Target_Attrs are attributes of the target. Flag In_Partial_Fin
+ -- shoud be set when the processing is initiated by a partial finalization
+ -- routine.
procedure Process_Guaranteed_ABE (N : Node_Id);
- -- Top level dispatcher for processing of scenarios which result in a
+ -- Top-level dispatcher for processing of scenarios which result in a
-- guaranteed ABE.
procedure Process_Instantiation
- (Exp_Inst : Node_Id;
- In_Task_Body : Boolean);
- -- Top level dispatcher for processing of instantiations. Perform ABE
+ (Exp_Inst : Node_Id;
+ In_Partial_Fin : Boolean;
+ In_Task_Body : Boolean);
+ -- Top-level dispatcher for processing of instantiations. Perform ABE
-- checks and diagnostics for expanded instantiation Exp_Inst. Flag
- -- In_Task_Body should be set when the processing is initiated from a
- -- task body.
+ -- In_Partial_Fin shoud be set when the processing is initiated by a
+ -- partial finalization routine. Flag In_Task_Body should be set when
+ -- the processing is initiated from a task body.
procedure Process_Instantiation_Ada
- (Exp_Inst : Node_Id;
- Inst : Node_Id;
- Inst_Attrs : Instantiation_Attributes;
- Gen_Id : Entity_Id;
- Gen_Attrs : Target_Attributes;
- In_Task_Body : Boolean);
+ (Exp_Inst : Node_Id;
+ Inst : Node_Id;
+ Inst_Attrs : Instantiation_Attributes;
+ Gen_Id : Entity_Id;
+ Gen_Attrs : Target_Attributes;
+ In_Partial_Fin : Boolean;
+ In_Task_Body : Boolean);
-- Perform ABE checks and diagnostics for expanded instantiation Exp_Inst
-- of generic Gen_Id using the Ada rules. Inst is the instantiation node.
- -- Inst_Attrs are the attributes of the instance. Gen_Attrs are the
- -- attributes of the generic. Flag In_Task_Body should be set when the
- -- processing is initiated from a task body.
+ -- Inst_Attrs are the attributes of the instance. Gen_Attrs denotes the
+ -- attributes of the generic. Flag In_Partial_Fin shoud be set when the
+ -- processing is initiated by a partial finalization routine. In_Task_Body
+ -- should be set when the processing is initiated from a task body.
procedure Process_Instantiation_Conditional_ABE
- (Exp_Inst : Node_Id;
- Inst : Node_Id;
- Inst_Attrs : Instantiation_Attributes;
- Gen_Id : Entity_Id;
- Gen_Attrs : Target_Attributes);
+ (Exp_Inst : Node_Id;
+ Inst : Node_Id;
+ Inst_Attrs : Instantiation_Attributes;
+ Gen_Id : Entity_Id;
+ Gen_Attrs : Target_Attributes;
+ In_Partial_Fin : Boolean);
-- Perform common conditional ABE checks and diagnostics for expanded
-- instantiation Exp_Inst of generic Gen_Id ignoring the Ada or SPARK
-- rules. Inst is the instantiation node. Inst_Attrs are the attributes
- -- of the instance. Gen_Attrs are the attributes of the generic.
+ -- of the instance. Gen_Attrs are the attributes of the generic. Flag
+ -- In_Partial_Fin shoud be set when the processing is initiated by a
+ -- partial finalization routine.
procedure Process_Instantiation_Guaranteed_ABE (Exp_Inst : Node_Id);
-- Perform common guaranteed ABE checks and diagnostics for expanded
@@ -1342,23 +1396,30 @@ package body Sem_Elab is
-- rules.
procedure Process_Instantiation_SPARK
- (Exp_Inst : Node_Id;
- Inst : Node_Id;
- Inst_Attrs : Instantiation_Attributes;
- Gen_Id : Entity_Id;
- Gen_Attrs : Target_Attributes);
+ (Exp_Inst : Node_Id;
+ Inst : Node_Id;
+ Inst_Attrs : Instantiation_Attributes;
+ Gen_Id : Entity_Id;
+ Gen_Attrs : Target_Attributes;
+ In_Partial_Fin : Boolean);
-- Perform ABE checks and diagnostics for expanded instantiation Exp_Inst
-- of generic Gen_Id using the SPARK rules. Inst is the instantiation node.
- -- Inst_Attrs are the attributes of the instance. Gen_Attrs are the
- -- attributes of the generic.
-
- procedure Process_Scenario (N : Node_Id; In_Task_Body : Boolean := False);
- -- Top level dispatcher for processing of various elaboration scenarios.
- -- Perform ABE checks and diagnostics for scenario N. Flag In_Task_Body
- -- should be set when the processing is initiated from a task body.
+ -- Inst_Attrs are the attributes of the instance. Gen_Attrs denotes the
+ -- attributes of the generic. Flag In_Partial_Fin shoud be set when the
+ -- processing is initiated by a partial finalization routine.
+
+ procedure Process_Scenario
+ (N : Node_Id;
+ In_Partial_Fin : Boolean := False;
+ In_Task_Body : Boolean := False);
+ -- Top-level dispatcher for processing of various elaboration scenarios.
+ -- Perform ABE checks and diagnostics for scenario N. Flag In_Partial_Fin
+ -- shoud be set when the processing is initiated by a partial finalization
+ -- routine. Flag In_Task_Body should be set when the processing is started
+ -- from a task body.
procedure Process_Variable_Assignment (Asmt : Node_Id);
- -- Top level dispatcher for processing of variable assignments. Perform ABE
+ -- Top-level dispatcher for processing of variable assignments. Perform ABE
-- checks and diagnostics for assignment statement Asmt.
procedure Process_Variable_Assignment_Ada
@@ -1373,9 +1434,16 @@ package body Sem_Elab is
-- Perform ABE checks and diagnostics for assignment statement Asmt that
-- updates the value of variable Var_Id using the SPARK rules.
- procedure Process_Variable_Read (Ref : Node_Id);
- -- Perform ABE checks and diagnostics for reference Ref that reads a
- -- variable.
+ procedure Process_Variable_Reference (Ref : Node_Id);
+ -- Top-level dispatcher for processing of variable references. Perform ABE
+ -- checks and diagnostics for variable reference Ref.
+
+ procedure Process_Variable_Reference_Read
+ (Ref : Node_Id;
+ Var_Id : Entity_Id;
+ Attrs : Variable_Attributes);
+ -- Perform ABE checks and diagnostics for reference Ref described by its
+ -- attributes Attrs, that reads variable Var_Id.
procedure Push_Active_Scenario (N : Node_Id);
pragma Inline (Push_Active_Scenario);
@@ -1383,18 +1451,29 @@ package body Sem_Elab is
function Root_Scenario return Node_Id;
pragma Inline (Root_Scenario);
- -- Return the top level scenario which started a recursive search for other
- -- scenarios. It is assumed that there is a valid top level scenario on the
+ -- Return the top-level scenario which started a recursive search for other
+ -- scenarios. It is assumed that there is a valid top-level scenario on the
-- active scenario stack.
+ procedure Set_Is_Recorded_Top_Level_Scenario
+ (N : Node_Id;
+ Val : Boolean := True);
+ pragma Inline (Set_Is_Recorded_Top_Level_Scenario);
+ -- Mark scenario N as being recorded in table Top_Level_Scenarios
+
function Static_Elaboration_Checks return Boolean;
pragma Inline (Static_Elaboration_Checks);
-- Determine whether the static model is in effect
- procedure Traverse_Body (N : Node_Id; In_Task_Body : Boolean);
+ procedure Traverse_Body
+ (N : Node_Id;
+ In_Partial_Fin : Boolean;
+ In_Task_Body : Boolean);
-- Inspect the declarations and statements of subprogram body N for
- -- suitable elaboration scenarios and process them. Flag In_Task_Body
- -- should be set when the traversal is initiated from a task body.
+ -- suitable elaboration scenarios and process them. Flag In_Partial_Fin
+ -- shoud be set when the processing is initiated by a partial finalization
+ -- routine. Flag In_Task_Body should be set when the traversal is initiated
+ -- from a task body.
procedure Update_Elaboration_Scenario (New_N : Node_Id; Old_N : Node_Id);
pragma Inline (Update_Elaboration_Scenario);
@@ -1597,6 +1676,12 @@ package body Sem_Elab is
if ASIS_Mode then
return;
+ -- Nothing to do when the call is being preanalyzed as the marker will
+ -- be inserted in the wrong place.
+
+ elsif Preanalysis_Active then
+ return;
+
-- Nothing to do when the input does not denote a call or a requeue
elsif not Nkind_In (N, N_Entry_Call_Statement,
@@ -1606,12 +1691,6 @@ package body Sem_Elab is
then
return;
- -- Nothing to do when the call is being preanalyzed as the marker will
- -- be inserted in the wrong place.
-
- elsif Preanalysis_Active then
- return;
-
-- Nothing to do when the call is analyzed/resolved too early within an
-- intermediate context.
@@ -1758,6 +1837,146 @@ package body Sem_Elab is
Record_Elaboration_Scenario (Marker);
end Build_Call_Marker;
+ -------------------------------------
+ -- Build_Variable_Reference_Marker --
+ -------------------------------------
+
+ procedure Build_Variable_Reference_Marker
+ (N : Node_Id;
+ Read : Boolean;
+ Write : Boolean)
+ is
+ function In_Pragma (Nod : Node_Id) return Boolean;
+ -- Determine whether arbitrary node Nod appears within a pragma
+
+ ---------------
+ -- In_Pragma --
+ ---------------
+
+ function In_Pragma (Nod : Node_Id) return Boolean is
+ Par : Node_Id;
+
+ begin
+ Par := Nod;
+ while Present (Par) loop
+ if Nkind (Par) = N_Pragma then
+ return True;
+
+ -- Prevent the search from going too far
+
+ elsif Is_Body_Or_Package_Declaration (Par) then
+ exit;
+ end if;
+
+ Par := Parent (Par);
+ end loop;
+
+ return False;
+ end In_Pragma;
+
+ -- Local variables
+
+ Marker : Node_Id;
+ Prag : Node_Id;
+ Var_Attrs : Variable_Attributes;
+ Var_Id : Entity_Id;
+
+ -- Start of processing for Build_Variable_Reference_Marker
+
+ begin
+ -- Nothing to do for ASIS. As a result, ABE checks and diagnostics are
+ -- not performed in this mode.
+
+ if ASIS_Mode then
+ return;
+
+ -- Nothing to do when the reference is being preanalyzed as the marker
+ -- will be inserted in the wrong place.
+
+ elsif Preanalysis_Active then
+ return;
+
+ -- Nothing to do when the input does not denote a reference
+
+ elsif not Nkind_In (N, N_Expanded_Name, N_Identifier) then
+ return;
+
+ -- Nothing to do for internally-generated references
+
+ elsif not Comes_From_Source (N) then
+ return;
+
+ -- Nothing to do when the reference is erroneous, left in a bad state,
+ -- or does not denote a variable.
+
+ elsif not (Present (Entity (N))
+ and then Ekind (Entity (N)) = E_Variable
+ and then Entity (N) /= Any_Id)
+ then
+ return;
+ end if;
+
+ Extract_Variable_Reference_Attributes
+ (Ref => N,
+ Var_Id => Var_Id,
+ Attrs => Var_Attrs);
+
+ Prag := SPARK_Pragma (Var_Id);
+
+ if Comes_From_Source (Var_Id)
+
+ -- Both the variable and the reference must appear in SPARK_Mode On
+ -- regions because this scenario falls under the SPARK rules.
+
+ and then Present (Prag)
+ and then Get_SPARK_Mode_From_Annotation (Prag) = On
+ and then Is_SPARK_Mode_On_Node (N)
+
+ -- The reference must not be considered when it appears in a pragma.
+ -- If the pragma has run-time semantics, then the reference will be
+ -- reconsidered once the pragma is expanded.
+
+ -- Performance note: parent traversal
+
+ and then not In_Pragma (N)
+ then
+ null;
+
+ -- Otherwise the reference is not suitable for ABE processing. This
+ -- prevents the generation of variable markers which will never play
+ -- a role in ABE diagnostics.
+
+ else
+ return;
+ end if;
+
+ -- At this point it is known that the variable reference will play some
+ -- role in ABE checks and diagnostics. Create a corresponding variable
+ -- marker in case the original variable reference is folded or optimized
+ -- away.
+
+ Marker := Make_Variable_Reference_Marker (Sloc (N));
+
+ -- Inherit the attributes of the original variable reference
+
+ Set_Target (Marker, Var_Id);
+ Set_Is_Read (Marker, Read);
+ Set_Is_Write (Marker, Write);
+
+ -- The marker is inserted prior to the original variable reference. The
+ -- insertion must take place even when the reference does not occur in
+ -- the main unit to keep the tree symmetric. This ensures that internal
+ -- name serialization is consistent in case the variable marker causes
+ -- the tree to transform in some way.
+
+ Insert_Action (N, Marker);
+
+ -- The marker becomes the "corresponding" scenario for the reference.
+ -- Save the marker for later processing for the ABE phase.
+
+ Record_Elaboration_Scenario (Marker);
+ end Build_Variable_Reference_Marker;
+
---------------------------------
-- Check_Elaboration_Scenarios --
---------------------------------
@@ -1776,12 +1995,12 @@ package body Sem_Elab is
Find_Elaborated_Units;
- -- Examine each top level scenario saved during the Recording phase and
+ -- Examine each top-level scenario saved during the Recording phase and
-- perform various actions depending on the elaboration model in effect.
for Index in Top_Level_Scenarios.First .. Top_Level_Scenarios.Last loop
- -- Clear the table of visited scenario bodies for each new top level
+ -- Clear the table of visited scenario bodies for each new top-level
-- scenario.
Visited_Bodies.Reset;
@@ -1852,7 +2071,7 @@ package body Sem_Elab is
Level := Find_Enclosing_Level (Call);
- -- Library level calls are always considered because they are part of
+ -- Library-level calls are always considered because they are part of
-- the associated unit's elaboration actions.
if Level in Library_Level then
@@ -1996,9 +2215,10 @@ package body Sem_Elab is
------------------------------
procedure Ensure_Prior_Elaboration
- (N : Node_Id;
- Unit_Id : Entity_Id;
- In_Task_Body : Boolean)
+ (N : Node_Id;
+ Unit_Id : Entity_Id;
+ In_Partial_Fin : Boolean;
+ In_Task_Body : Boolean)
is
Prag_Nam : Name_Id;
@@ -2035,11 +2255,18 @@ package body Sem_Elab is
Prag_Nam := Name_Elaborate_All;
end if;
+ -- Nothing to do when the need for prior elaboration came from a partial
+ -- finalization routine which occurs in an initialization context. This
+ -- behaviour parallels that of the old ABE mechanism.
+
+ if In_Partial_Fin then
+ return;
+
-- Nothing to do when the need for prior elaboration came from a task
-- body and switch -gnatd.y (disable implicit pragma Elaborate_All on
-- task bodies) is in effect.
- if Debug_Flag_Dot_Y and then In_Task_Body then
+ elsif Debug_Flag_Dot_Y and then In_Task_Body then
return;
-- Nothing to do when the unit is elaborated prior to the main unit.
@@ -2932,14 +3159,45 @@ package body Sem_Elab is
Var_Id : out Entity_Id;
Attrs : out Variable_Attributes)
is
+ function Get_Renamed_Variable (Id : Entity_Id) return Entity_Id;
+ -- Obtain the ultimate renamed variable of variable Id
+
+ --------------------------
+ -- Get_Renamed_Variable --
+ --------------------------
+
+ function Get_Renamed_Variable (Id : Entity_Id) return Entity_Id is
+ Ren_Id : Entity_Id;
+
+ begin
+ Ren_Id := Id;
+ while Present (Renamed_Entity (Ren_Id))
+ and then Nkind (Renamed_Entity (Ren_Id)) in N_Entity
+ loop
+ Ren_Id := Renamed_Entity (Ren_Id);
+ end loop;
+
+ return Ren_Id;
+ end Get_Renamed_Variable;
+
+ -- Start of processing for Extract_Variable_Reference_Attributes
+
begin
- -- Traverse a possible chain of renamings to obtain the original
- -- variable being referenced.
+ -- Extraction for variable reference markers
+
+ if Nkind (Ref) = N_Variable_Reference_Marker then
+ Var_Id := Target (Ref);
+
+ -- Extraction for expanded names and identifiers
- Var_Id := Get_Renamed_Entity (Entity (Ref));
+ else
+ Var_Id := Entity (Ref);
+ end if;
- Attrs.SPARK_Mode_On := Is_SPARK_Mode_On_Node (Ref);
- Attrs.Unit_Id := Find_Top_Unit (Var_Id);
+ -- Obtain the original variable which the reference mentions
+
+ Var_Id := Get_Renamed_Variable (Var_Id);
+ Attrs.Unit_Id := Find_Top_Unit (Var_Id);
-- At this point certain attributes should always be available
@@ -3356,7 +3614,7 @@ package body Sem_Elab is
return Declaration_Level;
end if;
- -- The current construct is a declaration level encapsulator
+ -- The current construct is a declaration-level encapsulator
elsif Nkind_In (Curr, N_Entry_Body,
N_Subprogram_Body,
@@ -3379,9 +3637,9 @@ package body Sem_Elab is
return Declaration_Level;
end if;
- -- The current construct is a non-library level encapsulator which
+ -- The current construct is a non-library-level encapsulator which
-- indicates that the node cannot possibly appear at any level.
- -- Note that this check must come after the declaration level check
+ -- Note that this check must come after the declaration-level check
-- because both predicates share certain nodes.
elsif Is_Non_Library_Level_Encapsulator (Curr) then
@@ -3870,7 +4128,7 @@ package body Sem_Elab is
Nested_OK : Boolean := False) return Boolean
is
function Find_Enclosing_Context (N : Node_Id) return Node_Id;
- -- Return the nearest enclosing non-library level or compilation unit
+ -- Return the nearest enclosing non-library-level or compilation unit
-- node which which encapsulates arbitrary node N. Return Empty is no
-- such context is available.
@@ -3916,7 +4174,7 @@ package body Sem_Elab is
return Par;
end if;
- -- Reaching a compilation unit node without hitting a non-library
+ -- Reaching a compilation unit node without hitting a non-library-
-- level encapsulator indicates that N is at the library level in
-- which case the compilation unit is the context.
@@ -3998,7 +4256,7 @@ package body Sem_Elab is
procedure Initialize is
begin
- -- Set the soft link which enables Atree.Rewrite to update a top level
+ -- Set the soft link which enables Atree.Rewrite to update a top-level
-- scenario each time it is transformed into another node.
Set_Rewriting_Proc (Update_Elaboration_Scenario'Access);
@@ -4226,24 +4484,26 @@ package body Sem_Elab is
In_SPARK => In_SPARK);
end Info_Instantiation;
- ------------------------
- -- Info_Variable_Read --
- ------------------------
+ -----------------------------
+ -- Info_Variable_Reference --
+ -----------------------------
- procedure Info_Variable_Read
+ procedure Info_Variable_Reference
(Ref : Node_Id;
Var_Id : Entity_Id;
Info_Msg : Boolean;
In_SPARK : Boolean)
is
begin
- Elab_Msg_NE
- (Msg => "read of variable & during elaboration",
- N => Ref,
- Id => Var_Id,
- Info_Msg => Info_Msg,
- In_SPARK => In_SPARK);
- end Info_Variable_Read;
+ if Is_Read (Ref) then
+ Elab_Msg_NE
+ (Msg => "read of variable & during elaboration",
+ N => Ref,
+ Id => Var_Id,
+ Info_Msg => Info_Msg,
+ In_SPARK => In_SPARK);
+ end if;
+ end Info_Variable_Reference;
--------------------
-- Insertion_Node --
@@ -4602,19 +4862,6 @@ package body Sem_Elab is
return False;
end Is_Bodiless_Subprogram;
- --------------------------------
- -- Is_Check_Emitting_Scenario --
- --------------------------------
-
- function Is_Check_Emitting_Scenario (N : Node_Id) return Boolean is
- begin
- return
- Nkind_In (N, N_Call_Marker,
- N_Function_Instantiation,
- N_Package_Instantiation,
- N_Procedure_Instantiation);
- end Is_Check_Emitting_Scenario;
-
------------------------
-- Is_Controlled_Proc --
------------------------
@@ -4870,6 +5117,15 @@ package body Sem_Elab is
and then Present (Protected_Subprogram (Id));
end Is_Protected_Body_Subp;
+ ------------------------------------
+ -- Is_Recorded_Top_Level_Scenario --
+ ------------------------------------
+
+ function Is_Recorded_Top_Level_Scenario (N : Node_Id) return Boolean is
+ begin
+ return Recorded_Top_Level_Scenarios.Get (N);
+ end Is_Recorded_Top_Level_Scenario;
+
------------------------
-- Is_Safe_Activation --
------------------------
@@ -5200,7 +5456,7 @@ package body Sem_Elab is
or else Is_Suitable_Call (N)
or else Is_Suitable_Instantiation (N)
or else Is_Suitable_Variable_Assignment (N)
- or else Is_Suitable_Variable_Read (N);
+ or else Is_Suitable_Variable_Reference (N);
end Is_Suitable_Scenario;
-------------------------------------
@@ -5297,187 +5553,19 @@ package body Sem_Elab is
and then Corresponding_Body (Var_Unit) = N_Unit_Id;
end Is_Suitable_Variable_Assignment;
- -------------------------------
- -- Is_Suitable_Variable_Read --
- -------------------------------
-
- function Is_Suitable_Variable_Read (N : Node_Id) return Boolean is
- function In_Pragma (Nod : Node_Id) return Boolean;
- -- Determine whether arbitrary node Nod appears within a pragma
-
- function Is_Variable_Read (Ref : Node_Id) return Boolean;
- -- Determine whether variable reference Ref constitutes a read
-
- ---------------
- -- In_Pragma --
- ---------------
-
- function In_Pragma (Nod : Node_Id) return Boolean is
- Par : Node_Id;
-
- begin
- Par := Nod;
- while Present (Par) loop
- if Nkind (Par) = N_Pragma then
- return True;
-
- -- Prevent the search from going too far
-
- elsif Is_Body_Or_Package_Declaration (Par) then
- exit;
- end if;
-
- Par := Parent (Par);
- end loop;
-
- return False;
- end In_Pragma;
-
- ----------------------
- -- Is_Variable_Read --
- ----------------------
-
- function Is_Variable_Read (Ref : Node_Id) return Boolean is
- function Is_Out_Actual (Call : Node_Id) return Boolean;
- -- Determine whether the corresponding formal of actual Ref which
- -- appears in call Call has mode OUT.
-
- -------------------
- -- Is_Out_Actual --
- -------------------
-
- function Is_Out_Actual (Call : Node_Id) return Boolean is
- Actual : Node_Id;
- Call_Attrs : Call_Attributes;
- Formal : Entity_Id;
- Target_Id : Entity_Id;
-
- begin
- Extract_Call_Attributes
- (Call => Call,
- Target_Id => Target_Id,
- Attrs => Call_Attrs);
-
- -- Inspect the actual and formal parameters, trying to find the
- -- corresponding formal for Ref.
-
- Actual := First_Actual (Call);
- Formal := First_Formal (Target_Id);
- while Present (Actual) and then Present (Formal) loop
- if Actual = Ref then
- return Ekind (Formal) = E_Out_Parameter;
- end if;
-
- Next_Actual (Actual);
- Next_Formal (Formal);
- end loop;
-
- return False;
- end Is_Out_Actual;
-
- -- Local variables
-
- Context : constant Node_Id := Parent (Ref);
-
- -- Start of processing for Is_Variable_Read
-
- begin
- -- The majority of variable references are reads, and they can appear
- -- in a great number of contexts. To determine whether a reference is
- -- a read, it is more practical to find out whether it is a write.
-
- -- A reference is a write when it appears immediately on the left-
- -- hand side of an assignment.
-
- if Nkind (Context) = N_Assignment_Statement
- and then Name (Context) = Ref
- then
- return False;
-
- -- A reference is a write when it acts as an actual in a subprogram
- -- call and the corresponding formal has mode OUT.
-
- elsif Nkind_In (Context, N_Function_Call,
- N_Procedure_Call_Statement)
- and then Is_Out_Actual (Context)
- then
- return False;
- end if;
-
- -- Any other reference is a read
-
- return True;
- end Is_Variable_Read;
-
- -- Local variables
-
- Prag : Node_Id;
- Var_Id : Entity_Id;
-
- -- Start of processing for Is_Suitable_Variable_Read
+ ------------------------------------
+ -- Is_Suitable_Variable_Reference --
+ ------------------------------------
+ function Is_Suitable_Variable_Reference (N : Node_Id) return Boolean is
begin
- -- This scenario is relevant only when the static model is in effect
- -- because it is graph-dependent and does not involve any run-time
- -- checks. Allowing it in the dynamic model would create confusing
- -- noise.
-
- if not Static_Elaboration_Checks then
- return False;
-
- -- Attributes and operator sumbols are not considered to be suitable
- -- references even though they are part of predicate Is_Entity_Name.
+ -- Expanded names and identifiers are intentionally ignored because they
+ -- be folded, optimized away, etc. Variable references markers play the
+ -- role of variable references and provide a uniform foundation for ABE
+ -- processing.
- elsif not Nkind_In (N, N_Expanded_Name, N_Identifier) then
- return False;
-
- -- Nothing to do for internally-generated references because they are
- -- assumed to be ABE safe.
-
- elsif not Comes_From_Source (N) then
- return False;
- end if;
-
- -- Sanitize the reference
-
- Var_Id := Entity (N);
-
- if No (Var_Id) then
- return False;
-
- elsif Var_Id = Any_Id then
- return False;
-
- elsif Ekind (Var_Id) /= E_Variable then
- return False;
- end if;
-
- Prag := SPARK_Pragma (Var_Id);
-
- -- To qualify, the reference must meet the following prerequisites:
-
- return
- Comes_From_Source (Var_Id)
-
- -- Both the variable and the reference must appear in SPARK_Mode On
- -- regions because this scenario falls under the SPARK rules.
-
- and then Present (Prag)
- and then Get_SPARK_Mode_From_Annotation (Prag) = On
- and then Is_SPARK_Mode_On_Node (N)
-
- -- The reference must denote a variable read
-
- and then Is_Variable_Read (N)
-
- -- The reference must not be considered when it appears in a pragma.
- -- If the pragma has run-time semantics, then the reference will be
- -- reconsidered once the pragma is expanded.
-
- -- Performance note: parent traversal
-
- and then not In_Pragma (N);
- end Is_Suitable_Variable_Read;
+ return Nkind (N) = N_Variable_Reference_Marker;
+ end Is_Suitable_Variable_Reference;
-------------------
-- Is_Task_Entry --
@@ -5501,7 +5589,7 @@ package body Sem_Elab is
begin
-- The root appears within the declaratons of a block statement, entry
-- body, subprogram body, or task body ignoring enclosing packages. The
- -- root is always within the main unit. An up level target is a notion
+ -- root is always within the main unit. An up-level target is a notion
-- applicable only to the static model because scenarios are reached by
-- means of graph traversal started from a fixed declarative or library
-- level.
@@ -5511,7 +5599,7 @@ package body Sem_Elab is
if Static_Elaboration_Checks
and then Find_Enclosing_Level (Root) = Declaration_Level
then
- -- The target is within the main unit. It acts as an up level target
+ -- The target is within the main unit. It acts as an up-level target
-- when it appears within a context which encloses the root.
-- package body Main_Unit is
@@ -5527,7 +5615,7 @@ package body Sem_Elab is
return not In_Same_Context (Root, Target_Decl, Nested_OK => True);
-- Otherwise the target is external to the main unit which makes it
- -- an up level target.
+ -- an up-level target.
else
return True;
@@ -5542,14 +5630,32 @@ package body Sem_Elab is
-------------------------------
procedure Kill_Elaboration_Scenario (N : Node_Id) is
+ package Scenarios renames Top_Level_Scenarios;
+
begin
- -- Eliminate the scenario by suppressing the generation of conditional
- -- ABE checks or guaranteed ABE failures. Note that other diagnostics
- -- must be carried out ignoring the fact that the scenario is within
- -- dead code.
+ -- Eliminate a recorded top-level scenario when it appears within dead
+ -- code because it will not be executed at elaboration time.
+
+ if Is_Scenario (N)
+ and then Is_Recorded_Top_Level_Scenario (N)
+ then
+ -- Performance node: list traversal
+
+ for Index in Scenarios.First .. Scenarios.Last loop
+ if Scenarios.Table (Index) = N then
+ Scenarios.Table (Index) := Empty;
- if Is_Scenario (N) then
- Set_Is_Elaboration_Checks_OK_Node (N, False);
+ -- The top-level scenario is no longer recorded
+
+ Set_Is_Recorded_Top_Level_Scenario (N, False);
+ return;
+ end if;
+ end loop;
+
+ -- A recorded top-level scenario must be in the table of recorded
+ -- top-level scenarios.
+
+ pragma Assert (False);
end if;
end Kill_Elaboration_Scenario;
@@ -5652,8 +5758,8 @@ package body Sem_Elab is
Info_Msg => False,
In_SPARK => True);
- elsif Is_Suitable_Variable_Read (N) then
- Info_Variable_Read
+ elsif Is_Suitable_Variable_Reference (N) then
+ Info_Variable_Reference
(Ref => N,
Var_Id => Target_Id,
Info_Msg => False,
@@ -5817,8 +5923,8 @@ package body Sem_Elab is
procedure Output_Variable_Assignment (N : Node_Id);
-- Emit a specific diagnostic message for assignment statement N
- procedure Output_Variable_Read (N : Node_Id);
- -- Emit a specific diagnostic message for reference N which reads a
+ procedure Output_Variable_Reference (N : Node_Id);
+ -- Emit a specific diagnostic message for reference N which mentions a
-- variable.
-------------------
@@ -6148,11 +6254,11 @@ package body Sem_Elab is
Error_Msg_NE ("\\ variable & assigned #", Error_Nod, Var_Id);
end Output_Variable_Assignment;
- --------------------------
- -- Output_Variable_Read --
- --------------------------
+ -------------------------------
+ -- Output_Variable_Reference --
+ -------------------------------
- procedure Output_Variable_Read (N : Node_Id) is
+ procedure Output_Variable_Reference (N : Node_Id) is
Dummy : Variable_Attributes;
Var_Id : Entity_Id;
@@ -6163,8 +6269,11 @@ package body Sem_Elab is
Attrs => Dummy);
Error_Msg_Sloc := Sloc (N);
- Error_Msg_NE ("\\ variable & read #", Error_Nod, Var_Id);
- end Output_Variable_Read;
+
+ if Is_Read (N) then
+ Error_Msg_NE ("\\ variable & read #", Error_Nod, Var_Id);
+ end if;
+ end Output_Variable_Reference;
-- Local variables
@@ -6225,10 +6334,10 @@ package body Sem_Elab is
elsif Nkind (N) = N_Assignment_Statement then
Output_Variable_Assignment (N);
- -- Variable read
+ -- Variable references
- elsif Is_Suitable_Variable_Read (N) then
- Output_Variable_Read (N);
+ elsif Is_Suitable_Variable_Reference (N) then
+ Output_Variable_Reference (N);
else
pragma Assert (False);
@@ -6253,7 +6362,11 @@ package body Sem_Elab is
-- Process_Access --
--------------------
- procedure Process_Access (Attr : Node_Id; In_Task_Body : Boolean) is
+ procedure Process_Access
+ (Attr : Node_Id;
+ In_Partial_Fin : Boolean;
+ In_Task_Body : Boolean)
+ is
function Build_Access_Marker (Target_Id : Entity_Id) return Node_Id;
pragma Inline (Build_Access_Marker);
-- Create a suitable call marker which invokes target Target_Id
@@ -6340,17 +6453,19 @@ package body Sem_Elab is
if Debug_Flag_Dot_O then
Process_Scenario
- (N => Build_Access_Marker (Target_Id),
- In_Task_Body => In_Task_Body);
+ (N => Build_Access_Marker (Target_Id),
+ In_Partial_Fin => In_Partial_Fin,
+ In_Task_Body => In_Task_Body);
-- Otherwise ensure that the unit with the corresponding body is
-- elaborated prior to the main unit.
else
Ensure_Prior_Elaboration
- (N => Attr,
- Unit_Id => Target_Attrs.Unit_Id,
- In_Task_Body => In_Task_Body);
+ (N => Attr,
+ Unit_Id => Target_Attrs.Unit_Id,
+ In_Partial_Fin => In_Partial_Fin,
+ In_Task_Body => In_Task_Body);
end if;
end Process_Access;
@@ -6359,9 +6474,10 @@ package body Sem_Elab is
-----------------------------
procedure Process_Activation_Call
- (Call : Node_Id;
- Call_Attrs : Call_Attributes;
- In_Task_Body : Boolean)
+ (Call : Node_Id;
+ Call_Attrs : Call_Attributes;
+ In_Partial_Fin : Boolean;
+ In_Task_Body : Boolean)
is
procedure Process_Task_Object (Obj_Id : Entity_Id; Typ : Entity_Id);
-- Perform ABE checks and diagnostics for object Obj_Id with type Typ.
@@ -6389,11 +6505,12 @@ package body Sem_Elab is
Attrs => Task_Attrs);
Process_Single_Activation
- (Call => Call,
- Call_Attrs => Call_Attrs,
- Obj_Id => Obj_Id,
- Task_Attrs => Task_Attrs,
- In_Task_Body => In_Task_Body);
+ (Call => Call,
+ Call_Attrs => Call_Attrs,
+ Obj_Id => Obj_Id,
+ Task_Attrs => Task_Attrs,
+ In_Partial_Fin => In_Partial_Fin,
+ In_Task_Body => In_Task_Body);
-- Examine the component type when the object is an array
@@ -6507,11 +6624,12 @@ package body Sem_Elab is
---------------------------------------------
procedure Process_Activation_Conditional_ABE_Impl
- (Call : Node_Id;
- Call_Attrs : Call_Attributes;
- Obj_Id : Entity_Id;
- Task_Attrs : Task_Attributes;
- In_Task_Body : Boolean)
+ (Call : Node_Id;
+ Call_Attrs : Call_Attributes;
+ Obj_Id : Entity_Id;
+ Task_Attrs : Task_Attributes;
+ In_Partial_Fin : Boolean;
+ In_Task_Body : Boolean)
is
Check_OK : constant Boolean :=
not Is_Ignored_Ghost_Entity (Obj_Id)
@@ -6650,12 +6768,19 @@ package body Sem_Elab is
if Earlier_In_Extended_Unit (Root, Task_Attrs.Body_Decl) then
+ -- Do not emit any ABE diagnostics when the activation occurs in
+ -- a partial finalization context because this leads to confusing
+ -- noise.
+
+ if In_Partial_Fin then
+ null;
+
-- ABE diagnostics are emitted only in the static model because
-- there is a well-defined order to visiting scenarios. Without
-- this order diagnostics appear jumbled and result in unwanted
-- noise.
- if Static_Elaboration_Checks then
+ elsif Static_Elaboration_Checks then
Error_Msg_Sloc := Sloc (Call);
Error_Msg_N
("??task & will be activated # before elaboration of its "
@@ -6707,12 +6832,16 @@ package body Sem_Elab is
else
Ensure_Prior_Elaboration
- (N => Call,
- Unit_Id => Task_Attrs.Unit_Id,
- In_Task_Body => In_Task_Body);
+ (N => Call,
+ Unit_Id => Task_Attrs.Unit_Id,
+ In_Partial_Fin => In_Partial_Fin,
+ In_Task_Body => In_Task_Body);
end if;
- Traverse_Body (Task_Attrs.Body_Decl, In_Task_Body => True);
+ Traverse_Body
+ (N => Task_Attrs.Body_Decl,
+ In_Partial_Fin => In_Partial_Fin,
+ In_Task_Body => True);
end Process_Activation_Conditional_ABE_Impl;
procedure Process_Activation_Conditional_ABE is
@@ -6723,13 +6852,15 @@ package body Sem_Elab is
--------------------------------------------
procedure Process_Activation_Guaranteed_ABE_Impl
- (Call : Node_Id;
- Call_Attrs : Call_Attributes;
- Obj_Id : Entity_Id;
- Task_Attrs : Task_Attributes;
- In_Task_Body : Boolean)
+ (Call : Node_Id;
+ Call_Attrs : Call_Attributes;
+ Obj_Id : Entity_Id;
+ Task_Attrs : Task_Attributes;
+ In_Partial_Fin : Boolean;
+ In_Task_Body : Boolean)
is
pragma Unreferenced (Call_Attrs);
+ pragma Unreferenced (In_Partial_Fin);
pragma Unreferenced (In_Task_Body);
Check_OK : constant Boolean :=
@@ -6868,19 +6999,108 @@ package body Sem_Elab is
------------------
procedure Process_Call
- (Call : Node_Id;
- Call_Attrs : Call_Attributes;
- Target_Id : Entity_Id;
- In_Task_Body : Boolean)
+ (Call : Node_Id;
+ Call_Attrs : Call_Attributes;
+ Target_Id : Entity_Id;
+ In_Partial_Fin : Boolean;
+ In_Task_Body : Boolean)
is
+ function In_Initialization_Context (N : Node_Id) return Boolean;
+ -- Determine whether arbitrary node N appears within a type init proc,
+ -- primitive [Deep_]Initialize, or a block created for initialization
+ -- purposes.
+
+ function Is_Partial_Finalization_Proc return Boolean;
+ pragma Inline (Is_Partial_Finalization_Proc);
+ -- Determine whether call Call with target Target_Id invokes a partial
+ -- finalization procedure.
+
+ -------------------------------
+ -- In_Initialization_Context --
+ -------------------------------
+
+ function In_Initialization_Context (N : Node_Id) return Boolean is
+ Par : Node_Id;
+ Spec_Id : Entity_Id;
+
+ begin
+ -- Climb the parent chain looking for initialization actions
+
+ Par := Parent (N);
+ while Present (Par) loop
+
+ -- A block may be part of the initialization actions of a default
+ -- initialized object.
+
+ if Nkind (Par) = N_Block_Statement
+ and then Is_Initialization_Block (Par)
+ then
+ return True;
+
+ -- A subprogram body may denote an initialization routine
+
+ elsif Nkind (Par) = N_Subprogram_Body then
+ Spec_Id := Unique_Defining_Entity (Par);
+
+ -- The current subprogram body denotes a type init proc or
+ -- primitive [Deep_]Initialize.
+
+ if Is_Init_Proc (Spec_Id)
+ or else Is_Controlled_Proc (Spec_Id, Name_Initialize)
+ or else Is_TSS (Spec_Id, TSS_Deep_Initialize)
+ then
+ return True;
+ end if;
+
+ -- Prevent the search from going too far
+
+ elsif Is_Body_Or_Package_Declaration (Par) then
+ exit;
+ end if;
+
+ Par := Parent (Par);
+ end loop;
+
+ return False;
+ end In_Initialization_Context;
+
+ ----------------------------------
+ -- Is_Partial_Finalization_Proc --
+ ----------------------------------
+
+ function Is_Partial_Finalization_Proc return Boolean is
+ begin
+ -- To qualify, the target must denote primitive [Deep_]Finalize or a
+ -- finalizer procedure, and the call must appear in an initialization
+ -- context.
+
+ return
+ (Is_Controlled_Proc (Target_Id, Name_Finalize)
+ or else Is_Finalizer_Proc (Target_Id)
+ or else Is_TSS (Target_Id, TSS_Deep_Finalize))
+ and then In_Initialization_Context (Call);
+ end Is_Partial_Finalization_Proc;
+
+ -- Local variables
+
+ Partial_Fin_On : Boolean;
SPARK_Rules_On : Boolean;
Target_Attrs : Target_Attributes;
+ -- Start of processing for Process_Call
+
begin
Extract_Target_Attributes
(Target_Id => Target_Id,
Attrs => Target_Attrs);
+ -- The call occurs in a partial finalization context when a prior
+ -- scenario is already in that mode, or when the target denotes a
+ -- [Deep_]Finalize primitive or a finalizer within an initialization
+ -- context.
+
+ Partial_Fin_On := In_Partial_Fin or else Is_Partial_Finalization_Proc;
+
-- The SPARK rules are in effect when both the call and target are
-- subject to SPARK_Mode On.
@@ -6954,28 +7174,30 @@ package body Sem_Elab is
elsif SPARK_Rules_On and Debug_Flag_Dot_V then
Process_Call_SPARK
- (Call => Call,
- Call_Attrs => Call_Attrs,
- Target_Id => Target_Id,
- Target_Attrs => Target_Attrs);
+ (Call => Call,
+ Call_Attrs => Call_Attrs,
+ Target_Id => Target_Id,
+ Target_Attrs => Target_Attrs,
+ In_Partial_Fin => In_Partial_Fin);
-- Otherwise the Ada rules are in effect, or SPARK code is allowed to
-- violate the SPARK rules.
else
Process_Call_Ada
- (Call => Call,
- Call_Attrs => Call_Attrs,
- Target_Id => Target_Id,
- Target_Attrs => Target_Attrs,
- In_Task_Body => In_Task_Body);
+ (Call => Call,
+ Call_Attrs => Call_Attrs,
+ Target_Id => Target_Id,
+ Target_Attrs => Target_Attrs,
+ In_Partial_Fin => Partial_Fin_On,
+ In_Task_Body => In_Task_Body);
end if;
-- Inspect the target body (and barried function) for other suitable
-- elaboration scenarios.
- Traverse_Body (Target_Attrs.Body_Barf, In_Task_Body);
- Traverse_Body (Target_Attrs.Body_Decl, In_Task_Body);
+ Traverse_Body (Target_Attrs.Body_Barf, Partial_Fin_On, In_Task_Body);
+ Traverse_Body (Target_Attrs.Body_Decl, Partial_Fin_On, In_Task_Body);
end Process_Call;
----------------------
@@ -6983,67 +7205,13 @@ package body Sem_Elab is
----------------------
procedure Process_Call_Ada
- (Call : Node_Id;
- Call_Attrs : Call_Attributes;
- Target_Id : Entity_Id;
- Target_Attrs : Target_Attributes;
- In_Task_Body : Boolean)
+ (Call : Node_Id;
+ Call_Attrs : Call_Attributes;
+ Target_Id : Entity_Id;
+ Target_Attrs : Target_Attributes;
+ In_Partial_Fin : Boolean;
+ In_Task_Body : Boolean)
is
- function In_Initialization_Context (N : Node_Id) return Boolean;
- -- Determine whether arbitrary node N appears within a type init proc or
- -- primitive [Deep_]Initialize.
-
- -------------------------------
- -- In_Initialization_Context --
- -------------------------------
-
- function In_Initialization_Context (N : Node_Id) return Boolean is
- Par : Node_Id;
- Spec_Id : Entity_Id;
-
- begin
- -- Climb the parent chain looking for initialization actions
-
- Par := Parent (N);
- while Present (Par) loop
-
- -- A block may be part of the initialization actions of a default
- -- initialized object.
-
- if Nkind (Par) = N_Block_Statement
- and then Is_Initialization_Block (Par)
- then
- return True;
-
- -- A subprogram body may denote an initialization routine
-
- elsif Nkind (Par) = N_Subprogram_Body then
- Spec_Id := Unique_Defining_Entity (Par);
-
- -- The current subprogram body denotes a type init proc or
- -- primitive [Deep_]Initialize.
-
- if Is_Init_Proc (Spec_Id)
- or else Is_Controlled_Proc (Spec_Id, Name_Initialize)
- or else Is_TSS (Spec_Id, TSS_Deep_Initialize)
- then
- return True;
- end if;
-
- -- Prevent the search from going too far
-
- elsif Is_Body_Or_Package_Declaration (Par) then
- exit;
- end if;
-
- Par := Parent (Par);
- end loop;
-
- return False;
- end In_Initialization_Context;
-
- -- Local variables
-
Check_OK : constant Boolean :=
not Call_Attrs.Ghost_Mode_Ignore
and then not Target_Attrs.Ghost_Mode_Ignore
@@ -7053,8 +7221,6 @@ package body Sem_Elab is
-- target have active elaboration checks, and both are not ignored Ghost
-- constructs.
- -- Start of processing for Process_Call_Ada
-
begin
-- Nothing to do for an Ada dispatching call because there are no ABE
-- diagnostics for either models. ABE checks for the dynamic model are
@@ -7088,10 +7254,11 @@ package body Sem_Elab is
and then In_Extended_Main_Code_Unit (Target_Attrs.Body_Decl)
then
Process_Call_Conditional_ABE
- (Call => Call,
- Call_Attrs => Call_Attrs,
- Target_Id => Target_Id,
- Target_Attrs => Target_Attrs);
+ (Call => Call,
+ Call_Attrs => Call_Attrs,
+ Target_Id => Target_Id,
+ Target_Attrs => Target_Attrs,
+ In_Partial_Fin => In_Partial_Fin);
-- Otherwise the target body is not available in this compilation or it
-- resides in an external unit. Install a run-time ABE check to verify
@@ -7105,35 +7272,17 @@ package body Sem_Elab is
Id => Target_Attrs.Unit_Id);
end if;
- -- No implicit pragma Elaborate[_All] is generated when the call has
- -- elaboration checks suppressed. This behaviour parallels that of the
- -- old ABE mechanism.
-
- if not Call_Attrs.Elab_Checks_OK then
- null;
-
- -- No implicit pragma Elaborate[_All] is generated for finalization
- -- actions when primitive [Deep_]Finalize is not defined in the main
- -- unit and the call appears within some initialization actions. This
- -- behaviour parallels that of the old ABE mechanism.
+ -- Ensure that the unit with the target body is elaborated prior to the
+ -- main unit. The implicit Elaborate[_All] is generated only when the
+ -- call has elaboration checks enabled. This behaviour parallels that of
+ -- the old ABE mechanism.
- -- Performance note: parent traversal
-
- elsif (Is_Controlled_Proc (Target_Id, Name_Finalize)
- or else Is_TSS (Target_Id, TSS_Deep_Finalize))
- and then not In_Extended_Main_Code_Unit (Target_Attrs.Spec_Decl)
- and then In_Initialization_Context (Call)
- then
- null;
-
- -- Otherwise ensure that the unit with the target body is elaborated
- -- prior to the main unit.
-
- else
+ if Call_Attrs.Elab_Checks_OK then
Ensure_Prior_Elaboration
- (N => Call,
- Unit_Id => Target_Attrs.Unit_Id,
- In_Task_Body => In_Task_Body);
+ (N => Call,
+ Unit_Id => Target_Attrs.Unit_Id,
+ In_Partial_Fin => In_Partial_Fin,
+ In_Task_Body => In_Task_Body);
end if;
end Process_Call_Ada;
@@ -7142,10 +7291,11 @@ package body Sem_Elab is
----------------------------------
procedure Process_Call_Conditional_ABE
- (Call : Node_Id;
- Call_Attrs : Call_Attributes;
- Target_Id : Entity_Id;
- Target_Attrs : Target_Attributes)
+ (Call : Node_Id;
+ Call_Attrs : Call_Attributes;
+ Target_Id : Entity_Id;
+ Target_Attrs : Target_Attributes;
+ In_Partial_Fin : Boolean)
is
Check_OK : constant Boolean :=
not Call_Attrs.Ghost_Mode_Ignore
@@ -7186,11 +7336,17 @@ package body Sem_Elab is
if Earlier_In_Extended_Unit (Root, Target_Attrs.Body_Decl) then
+ -- Do not emit any ABE diagnostics when the call occurs in a partial
+ -- finalization context because this leads to confusing noise.
+
+ if In_Partial_Fin then
+ null;
+
-- ABE diagnostics are emitted only in the static model because there
-- is a well-defined order to visiting scenarios. Without this order
-- diagnostics appear jumbled and result in unwanted noise.
- if Static_Elaboration_Checks then
+ elsif Static_Elaboration_Checks then
Error_Msg_NE ("??cannot call & before body seen", Call, Target_Id);
Error_Msg_N ("\Program_Error may be raised at run time", Call);
@@ -7329,10 +7485,11 @@ package body Sem_Elab is
------------------------
procedure Process_Call_SPARK
- (Call : Node_Id;
- Call_Attrs : Call_Attributes;
- Target_Id : Entity_Id;
- Target_Attrs : Target_Attributes)
+ (Call : Node_Id;
+ Call_Attrs : Call_Attributes;
+ Target_Id : Entity_Id;
+ Target_Attrs : Target_Attributes;
+ In_Partial_Fin : Boolean)
is
begin
-- A call to a source target or to a target which emulates Ada or SPARK
@@ -7376,10 +7533,11 @@ package body Sem_Elab is
and then In_Extended_Main_Code_Unit (Target_Attrs.Body_Decl)
then
Process_Call_Conditional_ABE
- (Call => Call,
- Call_Attrs => Call_Attrs,
- Target_Id => Target_Id,
- Target_Attrs => Target_Attrs);
+ (Call => Call,
+ Call_Attrs => Call_Attrs,
+ Target_Id => Target_Id,
+ Target_Attrs => Target_Attrs,
+ In_Partial_Fin => In_Partial_Fin);
-- Otherwise the target body is not available in this compilation or it
-- resides in an external unit. There is no need to guarantee the prior
@@ -7416,9 +7574,10 @@ package body Sem_Elab is
if Is_Activation_Proc (Target_Id) then
Process_Activation_Guaranteed_ABE
- (Call => N,
- Call_Attrs => Call_Attrs,
- In_Task_Body => False);
+ (Call => N,
+ Call_Attrs => Call_Attrs,
+ In_Partial_Fin => False,
+ In_Task_Body => False);
else
Process_Call_Guaranteed_ABE
@@ -7442,8 +7601,9 @@ package body Sem_Elab is
---------------------------
procedure Process_Instantiation
- (Exp_Inst : Node_Id;
- In_Task_Body : Boolean)
+ (Exp_Inst : Node_Id;
+ In_Partial_Fin : Boolean;
+ In_Task_Body : Boolean)
is
Gen_Attrs : Target_Attributes;
Gen_Id : Entity_Id;
@@ -7524,23 +7684,25 @@ package body Sem_Elab is
elsif SPARK_Rules_On and Debug_Flag_Dot_V then
Process_Instantiation_SPARK
- (Exp_Inst => Exp_Inst,
- Inst => Inst,
- Inst_Attrs => Inst_Attrs,
- Gen_Id => Gen_Id,
- Gen_Attrs => Gen_Attrs);
+ (Exp_Inst => Exp_Inst,
+ Inst => Inst,
+ Inst_Attrs => Inst_Attrs,
+ Gen_Id => Gen_Id,
+ Gen_Attrs => Gen_Attrs,
+ In_Partial_Fin => In_Partial_Fin);
-- Otherwise the Ada rules are in effect, or SPARK code is allowed to
-- violate the SPARK rules.
else
Process_Instantiation_Ada
- (Exp_Inst => Exp_Inst,
- Inst => Inst,
- Inst_Attrs => Inst_Attrs,
- Gen_Id => Gen_Id,
- Gen_Attrs => Gen_Attrs,
- In_Task_Body => In_Task_Body);
+ (Exp_Inst => Exp_Inst,
+ Inst => Inst,
+ Inst_Attrs => Inst_Attrs,
+ Gen_Id => Gen_Id,
+ Gen_Attrs => Gen_Attrs,
+ In_Partial_Fin => In_Partial_Fin,
+ In_Task_Body => In_Task_Body);
end if;
end Process_Instantiation;
@@ -7549,12 +7711,13 @@ package body Sem_Elab is
-------------------------------
procedure Process_Instantiation_Ada
- (Exp_Inst : Node_Id;
- Inst : Node_Id;
- Inst_Attrs : Instantiation_Attributes;
- Gen_Id : Entity_Id;
- Gen_Attrs : Target_Attributes;
- In_Task_Body : Boolean)
+ (Exp_Inst : Node_Id;
+ Inst : Node_Id;
+ Inst_Attrs : Instantiation_Attributes;
+ Gen_Id : Entity_Id;
+ Gen_Attrs : Target_Attributes;
+ In_Partial_Fin : Boolean;
+ In_Task_Body : Boolean)
is
Check_OK : constant Boolean :=
not Inst_Attrs.Ghost_Mode_Ignore
@@ -7591,11 +7754,12 @@ package body Sem_Elab is
and then In_Extended_Main_Code_Unit (Gen_Attrs.Body_Decl)
then
Process_Instantiation_Conditional_ABE
- (Exp_Inst => Exp_Inst,
- Inst => Inst,
- Inst_Attrs => Inst_Attrs,
- Gen_Id => Gen_Id,
- Gen_Attrs => Gen_Attrs);
+ (Exp_Inst => Exp_Inst,
+ Inst => Inst,
+ Inst_Attrs => Inst_Attrs,
+ Gen_Id => Gen_Id,
+ Gen_Attrs => Gen_Attrs,
+ In_Partial_Fin => In_Partial_Fin);
-- Otherwise the generic body is not available in this compilation or it
-- resides in an external unit. Install a run-time ABE check to verify
@@ -7616,9 +7780,10 @@ package body Sem_Elab is
if Inst_Attrs.Elab_Checks_OK then
Ensure_Prior_Elaboration
- (N => Inst,
- Unit_Id => Gen_Attrs.Unit_Id,
- In_Task_Body => In_Task_Body);
+ (N => Inst,
+ Unit_Id => Gen_Attrs.Unit_Id,
+ In_Partial_Fin => In_Partial_Fin,
+ In_Task_Body => In_Task_Body);
end if;
end Process_Instantiation_Ada;
@@ -7627,11 +7792,12 @@ package body Sem_Elab is
-------------------------------------------
procedure Process_Instantiation_Conditional_ABE
- (Exp_Inst : Node_Id;
- Inst : Node_Id;
- Inst_Attrs : Instantiation_Attributes;
- Gen_Id : Entity_Id;
- Gen_Attrs : Target_Attributes)
+ (Exp_Inst : Node_Id;
+ Inst : Node_Id;
+ Inst_Attrs : Instantiation_Attributes;
+ Gen_Id : Entity_Id;
+ Gen_Attrs : Target_Attributes;
+ In_Partial_Fin : Boolean)
is
Check_OK : constant Boolean :=
not Inst_Attrs.Ghost_Mode_Ignore
@@ -7676,11 +7842,17 @@ package body Sem_Elab is
if Earlier_In_Extended_Unit (Root, Gen_Attrs.Body_Decl) then
+ -- Do not emit any ABE diagnostics when the instantiation occurs in a
+ -- partial finalization context because this leads to unwanted noise.
+
+ if In_Partial_Fin then
+ null;
+
-- ABE diagnostics are emitted only in the static model because there
-- is a well-defined order to visiting scenarios. Without this order
-- diagnostics appear jumbled and result in unwanted noise.
- if Static_Elaboration_Checks then
+ elsif Static_Elaboration_Checks then
Error_Msg_NE
("??cannot instantiate & before body seen", Inst, Gen_Id);
Error_Msg_N ("\Program_Error may be raised at run time", Inst);
@@ -7832,11 +8004,12 @@ package body Sem_Elab is
---------------------------------
procedure Process_Instantiation_SPARK
- (Exp_Inst : Node_Id;
- Inst : Node_Id;
- Inst_Attrs : Instantiation_Attributes;
- Gen_Id : Entity_Id;
- Gen_Attrs : Target_Attributes)
+ (Exp_Inst : Node_Id;
+ Inst : Node_Id;
+ Inst_Attrs : Instantiation_Attributes;
+ Gen_Id : Entity_Id;
+ Gen_Attrs : Target_Attributes;
+ In_Partial_Fin : Boolean)
is
Req_Nam : Name_Id;
@@ -7882,11 +8055,12 @@ package body Sem_Elab is
and then In_Extended_Main_Code_Unit (Gen_Attrs.Body_Decl)
then
Process_Instantiation_Conditional_ABE
- (Exp_Inst => Exp_Inst,
- Inst => Inst,
- Inst_Attrs => Inst_Attrs,
- Gen_Id => Gen_Id,
- Gen_Attrs => Gen_Attrs);
+ (Exp_Inst => Exp_Inst,
+ Inst => Inst,
+ Inst_Attrs => Inst_Attrs,
+ Gen_Id => Gen_Id,
+ Gen_Attrs => Gen_Attrs,
+ In_Partial_Fin => In_Partial_Fin);
-- Otherwise the generic body is not available in this compilation or
-- it resides in an external unit. There is no need to guarantee the
@@ -8017,11 +8191,11 @@ package body Sem_Elab is
end if;
end Process_Variable_Assignment_SPARK;
- ---------------------------
- -- Process_Variable_Read --
- ---------------------------
+ --------------------------------
+ -- Process_Variable_Reference --
+ --------------------------------
- procedure Process_Variable_Read (Ref : Node_Id) is
+ procedure Process_Variable_Reference (Ref : Node_Id) is
Var_Attrs : Variable_Attributes;
Var_Id : Entity_Id;
@@ -8031,6 +8205,24 @@ package body Sem_Elab is
Var_Id => Var_Id,
Attrs => Var_Attrs);
+ if Is_Read (Ref) then
+ Process_Variable_Reference_Read
+ (Ref => Ref,
+ Var_Id => Var_Id,
+ Attrs => Var_Attrs);
+ end if;
+ end Process_Variable_Reference;
+
+ -------------------------------------
+ -- Process_Variable_Reference_Read --
+ -------------------------------------
+
+ procedure Process_Variable_Reference_Read
+ (Ref : Node_Id;
+ Var_Id : Entity_Id;
+ Attrs : Variable_Attributes)
+ is
+ begin
-- Output relevant information when switch -gnatel (info messages on
-- implicit Elaborate[_All] pragmas) is in effect.
@@ -8046,7 +8238,7 @@ package body Sem_Elab is
-- Nothing to do when the variable appears within the main unit because
-- diagnostics on reads are relevant only for external variables.
- if Is_Same_Unit (Var_Attrs.Unit_Id, Cunit_Entity (Main_Unit)) then
+ if Is_Same_Unit (Attrs.Unit_Id, Cunit_Entity (Main_Unit)) then
null;
-- Nothing to do when the variable is already initialized. Note that the
@@ -8058,7 +8250,7 @@ package body Sem_Elab is
-- Nothing to do when the external unit guarantees the initialization of
-- the variable by means of pragma Elaborate_Body.
- elsif Has_Pragma_Elaborate_Body (Var_Attrs.Unit_Id) then
+ elsif Has_Pragma_Elaborate_Body (Attrs.Unit_Id) then
null;
-- A variable read imposes an Elaborate requirement on the context of
@@ -8071,7 +8263,7 @@ package body Sem_Elab is
Target_Id => Var_Id,
Req_Nam => Name_Elaborate);
end if;
- end Process_Variable_Read;
+ end Process_Variable_Reference_Read;
--------------------------
-- Push_Active_Scenario --
@@ -8086,7 +8278,11 @@ package body Sem_Elab is
-- Process_Scenario --
----------------------
- procedure Process_Scenario (N : Node_Id; In_Task_Body : Boolean := False) is
+ procedure Process_Scenario
+ (N : Node_Id;
+ In_Partial_Fin : Boolean := False;
+ In_Task_Body : Boolean := False)
+ is
Call_Attrs : Call_Attributes;
Target_Id : Entity_Id;
@@ -8098,7 +8294,7 @@ package body Sem_Elab is
-- 'Access
if Is_Suitable_Access (N) then
- Process_Access (N, In_Task_Body);
+ Process_Access (N, In_Partial_Fin, In_Task_Body);
-- Calls
@@ -8119,33 +8315,46 @@ package body Sem_Elab is
if Is_Activation_Proc (Target_Id) then
Process_Activation_Conditional_ABE
- (Call => N,
- Call_Attrs => Call_Attrs,
- In_Task_Body => In_Task_Body);
+ (Call => N,
+ Call_Attrs => Call_Attrs,
+ In_Partial_Fin => In_Partial_Fin,
+ In_Task_Body => In_Task_Body);
else
Process_Call
- (Call => N,
- Call_Attrs => Call_Attrs,
- Target_Id => Target_Id,
- In_Task_Body => In_Task_Body);
+ (Call => N,
+ Call_Attrs => Call_Attrs,
+ Target_Id => Target_Id,
+ In_Partial_Fin => In_Partial_Fin,
+ In_Task_Body => In_Task_Body);
end if;
end if;
-- Instantiations
elsif Is_Suitable_Instantiation (N) then
- Process_Instantiation (N, In_Task_Body);
+ Process_Instantiation (N, In_Partial_Fin, In_Task_Body);
-- Variable assignments
elsif Is_Suitable_Variable_Assignment (N) then
Process_Variable_Assignment (N);
- -- Variable read
+ -- Variable references
+
+ elsif Is_Suitable_Variable_Reference (N) then
- elsif Is_Suitable_Variable_Read (N) then
- Process_Variable_Read (N);
+ -- In general, only variable references found within the main unit
+ -- are processed because the ALI information supplied to binde is for
+ -- the main unit only. However, to preserve the consistency of the
+ -- tree and ensure proper serialization of internal names, external
+ -- variable references also receive corresponding variable reference
+ -- markers (see Build_Varaible_Reference_Marker). Regardless of the
+ -- reason, external variable references must not be processed.
+
+ if In_Main_Context (N) then
+ Process_Variable_Reference (N);
+ end if;
end if;
-- Remove the current scenario from the stack of active scenarios once
@@ -8182,7 +8391,7 @@ package body Sem_Elab is
return;
end if;
- -- Ensure that a library level call does not appear in a preelaborated
+ -- Ensure that a library-level call does not appear in a preelaborated
-- unit. The check must come before ignoring scenarios within external
-- units or inside generics because calls in those context must also be
-- verified.
@@ -8236,7 +8445,7 @@ package body Sem_Elab is
Possible_Local_Raise (N, Standard_Program_Error);
elsif Is_Suitable_Variable_Assignment (N)
- or else Is_Suitable_Variable_Read (N)
+ or else Is_Suitable_Variable_Reference (N)
then
null;
@@ -8256,23 +8465,23 @@ package body Sem_Elab is
Level := Find_Enclosing_Level (N);
- -- Declaration level scenario
+ -- Declaration-level scenario
if Declaration_Level_OK and then Level = Declaration_Level then
null;
- -- Library level scenario
+ -- Library-level scenario
elsif Level in Library_Level then
null;
- -- Instantiation library level scenario
+ -- Instantiation library-level scenario
elsif Level = Instantiation then
null;
-- Otherwise the scenario does not appear at the proper level and
- -- cannot possibly act as a top level scenario.
+ -- cannot possibly act as a top-level scenario.
else
return;
@@ -8289,16 +8498,21 @@ package body Sem_Elab is
-- later processing by the ABE phase.
Top_Level_Scenarios.Append (N);
+ Set_Is_Recorded_Top_Level_Scenario (N);
+ end Record_Elaboration_Scenario;
- -- Mark a scenario which may produce run-time conditional ABE checks or
- -- guaranteed ABE failures as recorded. The flag ensures that scenario
- -- rewriting performed by Atree.Rewrite will be properly reflected in
- -- all relevant internal data structures.
+ ---------------------------------------
+ -- Recorded_Top_Level_Scenarios_Hash --
+ ---------------------------------------
- if Is_Check_Emitting_Scenario (N) then
- Set_Is_Recorded_Scenario (N);
- end if;
- end Record_Elaboration_Scenario;
+ function Recorded_Top_Level_Scenarios_Hash
+ (Key : Node_Id) return Recorded_Top_Level_Scenarios_Index
+ is
+ begin
+ return
+ Recorded_Top_Level_Scenarios_Index
+ (Key mod Recorded_Top_Level_Scenarios_Max);
+ end Recorded_Top_Level_Scenarios_Hash;
-------------------
-- Root_Scenario --
@@ -8315,6 +8529,18 @@ package body Sem_Elab is
return Stack.Table (Stack.First);
end Root_Scenario;
+ ----------------------------------------
+ -- Set_Is_Recorded_Top_Level_Scenario --
+ ----------------------------------------
+
+ procedure Set_Is_Recorded_Top_Level_Scenario
+ (N : Node_Id;
+ Val : Boolean := True)
+ is
+ begin
+ Recorded_Top_Level_Scenarios.Set (N, Val);
+ end Set_Is_Recorded_Top_Level_Scenario;
+
-------------------------------
-- Static_Elaboration_Checks --
-------------------------------
@@ -8328,85 +8554,177 @@ package body Sem_Elab is
-- Traverse_Body --
-------------------
- procedure Traverse_Body (N : Node_Id; In_Task_Body : Boolean) is
- function Is_Potential_Scenario (Nod : Node_Id) return Traverse_Result;
- -- Determine whether arbitrary node Nod denotes a suitable scenario and
- -- if so, process it.
+ procedure Traverse_Body
+ (N : Node_Id;
+ In_Partial_Fin : Boolean;
+ In_Task_Body : Boolean)
+ is
+ procedure Find_And_Process_Nested_Scenarios;
+ pragma Inline (Find_And_Process_Nested_Scenarios);
+ -- Examine the declarations and statements of subprogram body N for
+ -- suitable scenarios. Save each discovered scenario and process it
+ -- accordingly.
+
+ procedure Process_Nested_Scenarios (Nested : Elist_Id);
+ pragma Inline (Process_Nested_Scenarios);
+ -- Invoke Process_Scenario on each individual scenario whith appears in
+ -- list Nested.
+
+ ---------------------------------------
+ -- Find_And_Process_Nested_Scenarios --
+ ---------------------------------------
+
+ procedure Find_And_Process_Nested_Scenarios is
+ Body_Id : constant Entity_Id := Defining_Entity (N);
+
+ function Is_Potential_Scenario
+ (Nod : Node_Id) return Traverse_Result;
+ -- Determine whether arbitrary node Nod denotes a suitable scenario.
+ -- If it does, save it in the Nested_Scenarios list of the subprogram
+ -- body, and process it.
+
+ procedure Save_Scenario (Nod : Node_Id);
+ pragma Inline (Save_Scenario);
+ -- Save scenario Nod in the Nested_Scenarios list of the subprogram
+ -- body.
- procedure Traverse_Potential_Scenarios is
- new Traverse_Proc (Is_Potential_Scenario);
+ procedure Traverse_List (List : List_Id);
+ pragma Inline (Traverse_List);
+ -- Invoke Traverse_Potential_Scenarios on each node in list List
- procedure Traverse_List (List : List_Id);
- -- Inspect list List for suitable elaboration scenarios and process them
+ procedure Traverse_Potential_Scenarios is
+ new Traverse_Proc (Is_Potential_Scenario);
- ---------------------------
- -- Is_Potential_Scenario --
- ---------------------------
+ ---------------------------
+ -- Is_Potential_Scenario --
+ ---------------------------
- function Is_Potential_Scenario (Nod : Node_Id) return Traverse_Result is
- begin
- -- Special cases
+ function Is_Potential_Scenario
+ (Nod : Node_Id) return Traverse_Result
+ is
+ begin
+ -- Special cases
- -- Skip constructs which do not have elaboration of their own and
- -- need to be elaborated by other means such as invocation, task
- -- activation, etc.
+ -- Skip constructs which do not have elaboration of their own and
+ -- need to be elaborated by other means such as invocation, task
+ -- activation, etc.
- if Is_Non_Library_Level_Encapsulator (Nod) then
- return Skip;
+ if Is_Non_Library_Level_Encapsulator (Nod) then
+ return Skip;
- -- Terminate the traversal of a task body with an accept statement
- -- when no entry calls in elaboration are allowed because the task
- -- will block at run-time and none of the remaining statements will
- -- be executed.
+ -- Terminate the traversal of a task body with an accept statement
+ -- when no entry calls in elaboration are allowed because the task
+ -- will block at run-time and the remaining statements will not be
+ -- executed.
- elsif Nkind_In (Original_Node (Nod), N_Accept_Statement,
- N_Selective_Accept)
- and then Restriction_Active (No_Entry_Calls_In_Elaboration_Code)
- then
- return Abandon;
+ elsif Nkind_In (Original_Node (Nod), N_Accept_Statement,
+ N_Selective_Accept)
+ and then Restriction_Active (No_Entry_Calls_In_Elaboration_Code)
+ then
+ return Abandon;
- -- Certain nodes carry semantic lists which act as repositories until
- -- expansion transforms the node and relocates the contents. Examine
- -- these lists in case expansion is disabled.
+ -- Certain nodes carry semantic lists which act as repositories
+ -- until expansion transforms the node and relocates the contents.
+ -- Examine these lists in case expansion is disabled.
- elsif Nkind_In (Nod, N_And_Then, N_Or_Else) then
- Traverse_List (Actions (Nod));
+ elsif Nkind_In (Nod, N_And_Then, N_Or_Else) then
+ Traverse_List (Actions (Nod));
- elsif Nkind_In (Nod, N_Elsif_Part, N_Iteration_Scheme) then
- Traverse_List (Condition_Actions (Nod));
+ elsif Nkind_In (Nod, N_Elsif_Part, N_Iteration_Scheme) then
+ Traverse_List (Condition_Actions (Nod));
- elsif Nkind (Nod) = N_If_Expression then
- Traverse_List (Then_Actions (Nod));
- Traverse_List (Else_Actions (Nod));
+ elsif Nkind (Nod) = N_If_Expression then
+ Traverse_List (Then_Actions (Nod));
+ Traverse_List (Else_Actions (Nod));
- elsif Nkind_In (Nod, N_Component_Association,
- N_Iterated_Component_Association)
- then
- Traverse_List (Loop_Actions (Nod));
+ elsif Nkind_In (Nod, N_Component_Association,
+ N_Iterated_Component_Association)
+ then
+ Traverse_List (Loop_Actions (Nod));
- -- General case
+ -- General case
- elsif Is_Suitable_Scenario (Nod) then
- Process_Scenario (Nod, In_Task_Body);
- end if;
+ -- Save a suitable scenario in the Nested_Scenarios list of the
+ -- subprogram body. As a result any subsequent traversals of the
+ -- subprogram body started from a different top-level scenario no
+ -- longer need to reexamine the tree.
- return OK;
- end Is_Potential_Scenario;
+ elsif Is_Suitable_Scenario (Nod) then
+ Save_Scenario (Nod);
+ Process_Scenario (Nod, In_Partial_Fin, In_Task_Body);
+ end if;
- -------------------
- -- Traverse_List --
- -------------------
+ return OK;
+ end Is_Potential_Scenario;
- procedure Traverse_List (List : List_Id) is
- Item : Node_Id;
+ -------------------
+ -- Save_Scenario --
+ -------------------
+
+ procedure Save_Scenario (Nod : Node_Id) is
+ Nested : Elist_Id;
+
+ begin
+ Nested := Nested_Scenarios (Body_Id);
+
+ if No (Nested) then
+ Nested := New_Elmt_List;
+ Set_Nested_Scenarios (Body_Id, Nested);
+ end if;
+
+ Append_Elmt (Nod, Nested);
+ end Save_Scenario;
+
+ -------------------
+ -- Traverse_List --
+ -------------------
+
+ procedure Traverse_List (List : List_Id) is
+ Item : Node_Id;
+
+ begin
+ Item := First (List);
+ while Present (Item) loop
+ Traverse_Potential_Scenarios (Item);
+ Next (Item);
+ end loop;
+ end Traverse_List;
+
+ -- Start of processing for Find_And_Process_Nested_Scenarios
begin
- Item := First (List);
- while Present (Item) loop
- Traverse_Potential_Scenarios (Item);
- Next (Item);
+ -- Examine the declarations for suitable scenarios
+
+ Traverse_List (Declarations (N));
+
+ -- Examine the handled sequence of statements. This also includes any
+ -- exceptions handlers.
+
+ Traverse_Potential_Scenarios (Handled_Statement_Sequence (N));
+ end Find_And_Process_Nested_Scenarios;
+
+ ------------------------------
+ -- Process_Nested_Scenarios --
+ ------------------------------
+
+ procedure Process_Nested_Scenarios (Nested : Elist_Id) is
+ Nested_Elmt : Elmt_Id;
+
+ begin
+ Nested_Elmt := First_Elmt (Nested);
+ while Present (Nested_Elmt) loop
+ Process_Scenario
+ (N => Node (Nested_Elmt),
+ In_Partial_Fin => In_Partial_Fin,
+ In_Task_Body => In_Task_Body);
+
+ Next_Elmt (Nested_Elmt);
end loop;
- end Traverse_List;
+ end Process_Nested_Scenarios;
+
+ -- Local variables
+
+ Nested : Elist_Id;
-- Start of processing for Traverse_Body
@@ -8421,7 +8739,7 @@ package body Sem_Elab is
end if;
-- Nothing to do if the body was already traversed during the processing
- -- of the same top level scenario.
+ -- of the same top-level scenario.
if Visited_Bodies.Get (N) then
return;
@@ -8432,14 +8750,23 @@ package body Sem_Elab is
Visited_Bodies.Set (N, True);
end if;
- -- Examine the declarations for suitable scenarios
+ Nested := Nested_Scenarios (Defining_Entity (N));
+
+ -- The subprogram body was already examined as part of the elaboration
+ -- graph starting from a different top-level scenario. There is no need
+ -- to traverse the declarations and statements again because this will
+ -- yield the exact same scenarios. Use the nested scenarios collected
+ -- during the first inspection of the body.
- Traverse_List (Declarations (N));
+ if Present (Nested) then
+ Process_Nested_Scenarios (Nested);
- -- Examine the handled sequence of statements. This also includes any
- -- exceptions handlers.
+ -- Otherwise examine the declarations and statements of the subprogram
+ -- body for suitable scenarios, save and process them accordingly.
- Traverse_Potential_Scenarios (Handled_Statement_Sequence (N));
+ else
+ Find_And_Process_Nested_Scenarios;
+ end if;
end Traverse_Body;
---------------------------------
@@ -8450,14 +8777,18 @@ package body Sem_Elab is
package Scenarios renames Top_Level_Scenarios;
begin
+ -- Nothing to do when the old and new scenarios are one and the same
+
+ if Old_N = New_N then
+ return;
+
-- A scenario is being transformed by Atree.Rewrite. Update all relevant
-- internal data structures to reflect this change. This ensures that a
-- potential run-time conditional ABE check or a guaranteed ABE failure
-- is inserted at the proper place in the tree.
- if Is_Check_Emitting_Scenario (Old_N)
- and then Is_Recorded_Scenario (Old_N)
- and then Old_N /= New_N
+ elsif Is_Scenario (Old_N)
+ and then Is_Recorded_Top_Level_Scenario (Old_N)
then
-- Performance note: list traversal
@@ -8465,13 +8796,17 @@ package body Sem_Elab is
if Scenarios.Table (Index) = Old_N then
Scenarios.Table (Index) := New_N;
- Set_Is_Recorded_Scenario (Old_N, False);
- Set_Is_Recorded_Scenario (New_N);
+ -- The old top-level scenario is no longer recorded, but the
+ -- new one is.
+
+ Set_Is_Recorded_Top_Level_Scenario (Old_N, False);
+ Set_Is_Recorded_Top_Level_Scenario (New_N);
return;
end if;
end loop;
- -- A recorded scenario must be in the table of recorded scenarios
+ -- A recorded top-level scenario must be in the table of recorded
+ -- top-level scenarios.
pragma Assert (False);
end if;
diff --git a/gcc/ada/sem_elab.ads b/gcc/ada/sem_elab.ads
index ddcd43306b0..69d65d8cd69 100644
--- a/gcc/ada/sem_elab.ads
+++ b/gcc/ada/sem_elab.ads
@@ -34,6 +34,15 @@ package Sem_Elab is
-- Create a call marker for call or requeue statement N and record it for
-- later processing by the ABE mechanism.
+ procedure Build_Variable_Reference_Marker
+ (N : Node_Id;
+ Read : Boolean;
+ Write : Boolean);
+ -- Create a variable reference marker for arbitrary node N if it mentions a
+ -- variable, and record it for later processing by the ABE mechanism. Flag
+ -- Read should be set when the reference denotes a read. Flag Write should
+ -- be set when the reference denotes a write.
+
procedure Check_Elaboration_Scenarios;
-- Examine each scenario recorded during analysis/resolution and apply the
-- Ada or SPARK elaboration rules taking into account the model in effect.
diff --git a/gcc/ada/sem_eval.adb b/gcc/ada/sem_eval.adb
index 0c6c2ea7472..01eb8144e68 100644
--- a/gcc/ada/sem_eval.adb
+++ b/gcc/ada/sem_eval.adb
@@ -2301,7 +2301,7 @@ package body Sem_Eval is
Left_Str : constant Node_Id := Get_String_Val (Left);
Left_Len : Nat;
Right_Str : constant Node_Id := Get_String_Val (Right);
- Folded_Val : String_Id;
+ Folded_Val : String_Id := No_String;
begin
-- Establish new string literal, and store left operand. We make
diff --git a/gcc/ada/sem_intr.adb b/gcc/ada/sem_intr.adb
index ad8c388c616..886c2b4f432 100644
--- a/gcc/ada/sem_intr.adb
+++ b/gcc/ada/sem_intr.adb
@@ -101,7 +101,7 @@ package body Sem_Intr is
Nam : constant Entity_Id := Entity (Name (N));
Arg1 : constant Node_Id := First_Actual (N);
Typ : Entity_Id;
- Rtyp : Entity_Id;
+ Rtyp : Entity_Id := Empty;
Cnam : Name_Id;
Unam : Node_Id;
diff --git a/gcc/ada/sem_prag.adb b/gcc/ada/sem_prag.adb
index eae149805fa..b071aa8c892 100644
--- a/gcc/ada/sem_prag.adb
+++ b/gcc/ada/sem_prag.adb
@@ -217,7 +217,7 @@ package body Sem_Prag is
Freeze_Id : Entity_Id);
-- Subsidiary to the analysis of pragmas Contract_Cases, Part_Of, Post, and
-- Pre. Emit a freezing-related error message where Freeze_Id is the entity
- -- of a body which caused contract "freezing" and Contract_Id denotes the
+ -- of a body which caused contract freezing and Contract_Id denotes the
-- entity of the affected contstruct.
procedure Duplication_Error (Prag : Node_Id; Prev : Node_Id);
@@ -432,7 +432,7 @@ package body Sem_Prag is
-- Emit a clarification message when the case guard contains
-- at least one undefined reference, possibly due to contract
- -- "freezing".
+ -- freezing.
if Errors /= Serious_Errors_Detected
and then Present (Freeze_Id)
@@ -447,7 +447,7 @@ package body Sem_Prag is
-- Emit a clarification message when the consequence contains
-- at least one undefined reference, possibly due to contract
- -- "freezing".
+ -- freezing.
if Errors /= Serious_Errors_Detected
and then Present (Freeze_Id)
@@ -3287,8 +3287,8 @@ package body Sem_Prag is
if not Is_Child_Or_Sibling (Pack_Id, Scope (Encap_Id)) then
SPARK_Msg_NE
- ("indicator Part_Of must denote abstract state or public "
- & "descendant of & (SPARK RM 7.2.6(3))",
+ ("indicator Part_Of must denote abstract state of & "
+ & "or of its public descendant (SPARK RM 7.2.6(3))",
Indic, Parent_Unit);
return;
@@ -3301,8 +3301,8 @@ package body Sem_Prag is
else
SPARK_Msg_NE
- ("indicator Part_Of must denote abstract state or public "
- & "descendant of & (SPARK RM 7.2.6(3))",
+ ("indicator Part_Of must denote abstract state of & "
+ & "or of its public descendant (SPARK RM 7.2.6(3))",
Indic, Parent_Unit);
return;
end if;
@@ -3327,7 +3327,7 @@ package body Sem_Prag is
elsif Placement = Private_State_Space then
if Scope (Encap_Id) /= Pack_Id then
SPARK_Msg_NE
- ("indicator Part_Of must designate an abstract state of "
+ ("indicator Part_Of must denote an abstract state of "
& "package & (SPARK RM 7.2.6(2))", Indic, Pack_Id);
Error_Msg_Name_1 := Chars (Pack_Id);
SPARK_Msg_NE
@@ -3510,7 +3510,7 @@ package body Sem_Prag is
end if;
-- Emit a clarification message when the encapsulator is undefined,
- -- possibly due to contract "freezing".
+ -- possibly due to contract freezing.
if Errors /= Serious_Errors_Detected
and then Present (Freeze_Id)
@@ -5817,8 +5817,8 @@ package body Sem_Prag is
procedure Check_Grouping (L : List_Id) is
HSS : Node_Id;
- Prag : Node_Id;
Stmt : Node_Id;
+ Prag : Node_Id := Empty; -- init to avoid warning
begin
-- Inspect the list of declarations or statements looking for
@@ -5872,16 +5872,15 @@ package body Sem_Prag is
else
while Present (Stmt) loop
-
-- The current pragma is either the first pragma
- -- of the group or is a member of the group. Stop
- -- the search as the placement is legal.
+ -- of the group or is a member of the group.
+ -- Stop the search as the placement is legal.
if Stmt = N then
raise Stop_Search;
- -- Skip group members, but keep track of the last
- -- pragma in the group.
+ -- Skip group members, but keep track of the
+ -- last pragma in the group.
elsif Is_Loop_Pragma (Stmt) then
Prag := Stmt;
@@ -11390,6 +11389,7 @@ package body Sem_Prag is
SPARK_Msg_N
("expression of external state property must be "
& "static", Expr);
+ return;
end if;
-- The lack of expression defaults the property to True
@@ -16474,6 +16474,20 @@ package body Sem_Prag is
return;
end if;
+ -- Ada 2012 (AI05-0030): Cannot apply the implementation_kind
+ -- By_Protected_Procedure to the primitive procedure of a task
+ -- interface.
+
+ if Chars (Arg2) = Name_By_Protected_Procedure
+ and then Is_Interface (Typ)
+ and then Is_Task_Interface (Typ)
+ then
+ Error_Pragma_Arg
+ ("implementation kind By_Protected_Procedure cannot be "
+ & "applied to a task interface primitive", Arg2);
+ return;
+ end if;
+
-- Procedures declared inside a protected type must be accepted
elsif Ekind (Proc_Id) = E_Procedure
@@ -16489,20 +16503,6 @@ package body Sem_Prag is
return;
end if;
- -- Ada 2012 (AI05-0030): Cannot apply the implementation_kind
- -- By_Protected_Procedure to the primitive procedure of a task
- -- interface.
-
- if Chars (Arg2) = Name_By_Protected_Procedure
- and then Is_Interface (Typ)
- and then Is_Task_Interface (Typ)
- then
- Error_Pragma_Arg
- ("implementation kind By_Protected_Procedure cannot be "
- & "applied to a task interface primitive", Arg2);
- return;
- end if;
-
Record_Rep_Item (Proc_Id, N);
end Implemented;
@@ -24253,11 +24253,16 @@ package body Sem_Prag is
else
OK := Set_Warning_Switch (Chr);
end if;
- end if;
- if not OK then
+ if not OK then
+ Error_Pragma_Arg
+ ("invalid warning switch character " & Chr,
+ Arg1);
+ end if;
+
+ else
Error_Pragma_Arg
- ("invalid warning switch character " & Chr,
+ ("invalid wide character in warning switch ",
Arg1);
end if;
@@ -24608,7 +24613,7 @@ package body Sem_Prag is
Preanalyze_Assert_Expression (Expr, Standard_Boolean);
-- Emit a clarification message when the expression contains at least
- -- one undefined reference, possibly due to contract "freezing".
+ -- one undefined reference, possibly due to contract freezing.
if Errors /= Serious_Errors_Detected
and then Present (Freeze_Id)
@@ -27358,7 +27363,7 @@ package body Sem_Prag is
Constit_Id := Entity_Of (Constit);
-- When a constituent is declared after a subprogram body
- -- that caused "freezing" of the related contract where
+ -- that caused freezing of the related contract where
-- pragma Refined_State resides, the constituent appears
-- undefined and carries Any_Id as its entity.
@@ -28398,8 +28403,8 @@ package body Sem_Prag is
end if;
end if;
- -- When the item appears in the private state space of a packge, it must
- -- be a part of some state declared by the said package.
+ -- When the item appears in the private state space of a package, it
+ -- must be a part of some state declared by the said package.
else pragma Assert (Placement = Private_State_Space);
@@ -28747,7 +28752,7 @@ package body Sem_Prag is
Depends : Node_Id;
Formal : Entity_Id;
Global : Node_Id;
- Spec_Id : Entity_Id;
+ Spec_Id : Entity_Id := Empty;
Subp_Decl : Node_Id;
Typ : Entity_Id;
@@ -29290,7 +29295,7 @@ package body Sem_Prag is
elsif Present (Corresponding_Aspect (Prag)) then
return Parent (Corresponding_Aspect (Prag));
- -- No candidate packge [body] found
+ -- No candidate package [body] found
else
return Empty;
@@ -29364,10 +29369,11 @@ package body Sem_Prag is
elsif N = Name_Off then
return Off;
- -- Any other argument is illegal
+ -- Any other argument is illegal. Assume that no SPARK mode applies to
+ -- avoid potential cascaded errors.
else
- raise Program_Error;
+ return None;
end if;
end Get_SPARK_Mode_Type;
diff --git a/gcc/ada/sem_prag.ads b/gcc/ada/sem_prag.ads
index 33dbe488ae1..57fb8e57af9 100644
--- a/gcc/ada/sem_prag.ads
+++ b/gcc/ada/sem_prag.ads
@@ -191,6 +191,8 @@ package Sem_Prag is
Pragma_Remote_Types => False,
Pragma_Shared_Passive => False,
Pragma_Task_Dispatching_Policy => False,
+ Pragma_Unmodified => False,
+ Pragma_Unreferenced => False,
Pragma_Warnings => False,
others => True);
diff --git a/gcc/ada/sem_res.adb b/gcc/ada/sem_res.adb
index f5c5f9e96dc..024b879fd14 100644
--- a/gcc/ada/sem_res.adb
+++ b/gcc/ada/sem_res.adb
@@ -1030,7 +1030,7 @@ package body Sem_Res is
if Nkind (N) in N_Has_Etype and then Etype (N) = Any_Type then
return;
elsif Nkind (N) in N_Has_Chars
- and then Chars (N) in Error_Name_Or_No_Name
+ and then not Is_Valid_Name (Chars (N))
then
return;
end if;
@@ -1212,7 +1212,7 @@ package body Sem_Res is
Func : constant Entity_Id := Entity (Name (N));
Is_Binary : constant Boolean := Present (Act2);
Op_Node : Node_Id;
- Opnd_Type : Entity_Id;
+ Opnd_Type : Entity_Id := Empty;
Orig_Type : Entity_Id := Empty;
Pack : Entity_Id;
@@ -1523,6 +1523,7 @@ package body Sem_Res is
-- Operator may be defined in an extension of System
elsif Present (System_Aux_Id)
+ and then Present (Opnd_Type)
and then Scope (Opnd_Type) = System_Aux_Id
then
null;
@@ -2439,22 +2440,27 @@ package body Sem_Res is
Set_Entity (N, Seen);
Generate_Reference (Seen, N);
- elsif Nkind (N) = N_Case_Expression then
- Set_Etype (N, Expr_Type);
-
- elsif Nkind (N) = N_Character_Literal then
- Set_Etype (N, Expr_Type);
-
- elsif Nkind (N) = N_If_Expression then
+ elsif Nkind_In (N, N_Case_Expression,
+ N_Character_Literal,
+ N_Delta_Aggregate,
+ N_If_Expression)
+ then
Set_Etype (N, Expr_Type);
-- AI05-0139-2: Expression is overloaded because type has
-- implicit dereference. If type matches context, no implicit
- -- dereference is involved.
+ -- dereference is involved. If the expression is an entity,
+ -- generate a reference to it, as this is not done for an
+ -- overloaded construct during analysis.
elsif Has_Implicit_Dereference (Expr_Type) then
Set_Etype (N, Expr_Type);
Set_Is_Overloaded (N, False);
+
+ if Is_Entity_Name (N) then
+ Generate_Reference (Entity (N), N);
+ end if;
+
exit Interp_Loop;
elsif Is_Overloaded (N)
@@ -3138,12 +3144,12 @@ package body Sem_Res is
Loc : constant Source_Ptr := Sloc (N);
A : Node_Id;
A_Id : Entity_Id;
- A_Typ : Entity_Id;
+ A_Typ : Entity_Id := Empty; -- init to avoid warning
F : Entity_Id;
F_Typ : Entity_Id;
Prev : Node_Id := Empty;
Orig_A : Node_Id;
- Real_F : Entity_Id;
+ Real_F : Entity_Id := Empty; -- init to avoid warning
Real_Subp : Entity_Id;
-- If the subprogram being called is an inherited operation for
@@ -3744,6 +3750,21 @@ package body Sem_Res is
and then Is_Entity_Name (A)
and then Comes_From_Source (A)
then
+ -- Annotate the tree by creating a variable reference marker when
+ -- the actual denotes a variable reference, in case the reference
+ -- is folded or optimized away. The variable reference marker is
+ -- automatically saved for later examination by the ABE Processing
+ -- phase. The status of the reference is set as follows:
+
+ -- status mode
+ -- read IN, IN OUT
+ -- write IN OUT, OUT
+
+ Build_Variable_Reference_Marker
+ (N => A,
+ Read => Ekind (F) /= E_Out_Parameter,
+ Write => Ekind (F) /= E_In_Parameter);
+
Orig_A := Entity (A);
if Present (Orig_A) then
@@ -5130,6 +5151,38 @@ package body Sem_Res is
if not Is_Static_Coextension (N) then
Set_Is_Dynamic_Coextension (N);
+
+ -- ??? We currently do not handle finalization and deallocation
+ -- of coextensions properly so let's at least warn the user
+ -- about it.
+
+ if Is_Controlled_Active (Desig_T) then
+ if Is_Controlled_Active
+ (Defining_Identifier
+ (Parent (Associated_Node_For_Itype (Typ))))
+ then
+ Error_Msg_N
+ ("??coextension will not be finalized when its "
+ & "associated owner is finalized", N);
+ else
+ Error_Msg_N
+ ("??coextension will not be finalized when its "
+ & "associated owner is deallocated", N);
+ end if;
+ else
+ if Is_Controlled_Active
+ (Defining_Identifier
+ (Parent (Associated_Node_For_Itype (Typ))))
+ then
+ Error_Msg_N
+ ("??coextension will not be deallocated when "
+ & "its associated owner is finalized", N);
+ else
+ Error_Msg_N
+ ("??coextension will not be deallocated when "
+ & "its associated owner is deallocated", N);
+ end if;
+ end if;
end if;
-- Cleanup for potential static coextensions
@@ -5137,6 +5190,19 @@ package body Sem_Res is
else
Set_Is_Dynamic_Coextension (N, False);
Set_Is_Static_Coextension (N, False);
+
+ -- ??? It seems we also do not properly finalize anonymous
+ -- access-to-controlled objects within their declared scope and
+ -- instead finalize them with their associated unit. Warn the
+ -- user about it here.
+
+ if Ekind (Typ) = E_Anonymous_Access_Type
+ and then Is_Controlled_Active (Desig_T)
+ then
+ Error_Msg_N
+ ("??anonymous access-to-controlled object will be finalized "
+ & "when its enclosing unit goes out of scope", N);
+ end if;
end if;
end if;
@@ -7210,9 +7276,13 @@ package body Sem_Res is
elsif Ekind (E) = E_Generic_Function then
Error_Msg_N ("illegal use of generic function", N);
- -- In Ada 83 an OUT parameter cannot be read
+ -- In Ada 83 an OUT parameter cannot be read, but attributes of
+ -- array types (i.e. bounds and length) are legal.
elsif Ekind (E) = E_Out_Parameter
+ and then (Nkind (Parent (N)) /= N_Attribute_Reference
+ or else Is_Scalar_Type (Etype (E)))
+
and then (Nkind (Parent (N)) in N_Op
or else Nkind (Parent (N)) = N_Explicit_Dereference
or else Is_Assignment_Or_Object_Expression
diff --git a/gcc/ada/sem_spark.adb b/gcc/ada/sem_spark.adb
index 5107d3bc5f4..42517ea0829 100644
--- a/gcc/ada/sem_spark.adb
+++ b/gcc/ada/sem_spark.adb
@@ -2349,6 +2349,7 @@ package body Sem_SPARK is
| N_With_Clause
| N_Use_Type_Clause
| N_Validate_Unchecked_Conversion
+ | N_Variable_Reference_Marker
=>
null;
diff --git a/gcc/ada/sem_spark.ads b/gcc/ada/sem_spark.ads
index d7abd8ad74a..d6977880d47 100644
--- a/gcc/ada/sem_spark.ads
+++ b/gcc/ada/sem_spark.ads
@@ -27,10 +27,10 @@
-- rules that are enforced are defined in the anti-aliasing section of the
-- SPARK RM 6.4.2
--
--- Analyze_SPARK is called by Gnat1drv, when GNATprove mode is activated. It
--- does an analysis of the source code, looking for code that is considered
--- as SPARK and launches another function called Analyze_Node that will do
--- the whole analysis.
+-- Check_Safe_Pointers is called by Gnat1drv, when GNATprove mode is
+-- activated. It does an analysis of the source code, looking for code that is
+-- considered as SPARK and launches another function called Analyze_Node that
+-- will do the whole analysis.
--
-- A path is an abstraction of a name, of which all indices, slices (for
-- indexed components) and function calls have been abstracted and all
diff --git a/gcc/ada/sem_util.adb b/gcc/ada/sem_util.adb
index 3698bbf16bd..102da89e9ca 100644
--- a/gcc/ada/sem_util.adb
+++ b/gcc/ada/sem_util.adb
@@ -141,7 +141,9 @@ package body Sem_Util is
function Subprogram_Name (N : Node_Id) return String;
-- Return the fully qualified name of the enclosing subprogram for the
- -- given node N.
+ -- given node N, with file:line:col information appended, e.g.
+ -- "subp:file:line:col", corresponding to the source location of the
+ -- body of the subprogram.
------------------------------
-- Abstract_Interface_List --
@@ -594,6 +596,8 @@ package body Sem_Util is
-----------
procedure Inner (E : Entity_Id) is
+ Scop : Node_Id;
+
begin
-- If entity has an internal name, skip by it, and print its scope.
-- Note that we strip a final R from the name before the test; this
@@ -615,21 +619,23 @@ package body Sem_Util is
end if;
end;
+ Scop := Scope (E);
+
-- Just print entity name if its scope is at the outer level
- if Scope (E) = Standard_Standard then
+ if Scop = Standard_Standard then
null;
-- If scope comes from source, write scope and entity
- elsif Comes_From_Source (Scope (E)) then
- Append_Entity_Name (Temp, Scope (E));
+ elsif Comes_From_Source (Scop) then
+ Append_Entity_Name (Temp, Scop);
Append (Temp, '.');
-- If in wrapper package skip past it
- elsif Is_Wrapper_Package (Scope (E)) then
- Append_Entity_Name (Temp, Scope (Scope (E)));
+ elsif Present (Scop) and then Is_Wrapper_Package (Scop) then
+ Append_Entity_Name (Temp, Scope (Scop));
Append (Temp, '.');
-- Otherwise nothing to output (happens in unnamed block statements)
@@ -4025,7 +4031,7 @@ package body Sem_Util is
if SPARK_Mode_Is_Off (Pack) then
null;
- -- State refinement can only occur in a completing packge body. Do
+ -- State refinement can only occur in a completing package body. Do
-- not verify proper state refinement when the body is subject to
-- pragma SPARK_Mode Off because this disables the requirement for
-- state refinement.
@@ -7835,6 +7841,66 @@ package body Sem_Util is
raise Program_Error;
end Find_Corresponding_Discriminant;
+ -------------------
+ -- Find_DIC_Type --
+ -------------------
+
+ function Find_DIC_Type (Typ : Entity_Id) return Entity_Id is
+ Curr_Typ : Entity_Id;
+ -- The current type being examined in the parent hierarchy traversal
+
+ DIC_Typ : Entity_Id;
+ -- The type which carries the DIC pragma. This variable denotes the
+ -- partial view when private types are involved.
+
+ Par_Typ : Entity_Id;
+ -- The parent type of the current type. This variable denotes the full
+ -- view when private types are involved.
+
+ begin
+ -- The input type defines its own DIC pragma, therefore it is the owner
+
+ if Has_Own_DIC (Typ) then
+ DIC_Typ := Typ;
+
+ -- Otherwise the DIC pragma is inherited from a parent type
+
+ else
+ pragma Assert (Has_Inherited_DIC (Typ));
+
+ -- Climb the parent chain
+
+ Curr_Typ := Typ;
+ loop
+ -- Inspect the parent type. Do not consider subtypes as they
+ -- inherit the DIC attributes from their base types.
+
+ DIC_Typ := Base_Type (Etype (Curr_Typ));
+
+ -- Look at the full view of a private type because the type may
+ -- have a hidden parent introduced in the full view.
+
+ Par_Typ := DIC_Typ;
+
+ if Is_Private_Type (Par_Typ)
+ and then Present (Full_View (Par_Typ))
+ then
+ Par_Typ := Full_View (Par_Typ);
+ end if;
+
+ -- Stop the climb once the nearest parent type which defines a DIC
+ -- pragma of its own is encountered or when the root of the parent
+ -- chain is reached.
+
+ exit when Has_Own_DIC (DIC_Typ) or else Curr_Typ = Par_Typ;
+
+ Curr_Typ := Par_Typ;
+ end loop;
+ end if;
+
+ return DIC_Typ;
+ end Find_DIC_Type;
+
----------------------------------
-- Find_Enclosing_Iterator_Loop --
----------------------------------
@@ -13193,14 +13259,14 @@ package body Sem_Util is
if Ekind (Proc_Nam) = E_Procedure
and then Present (Parameter_Specifications (Parent (Proc_Nam)))
then
- Param := Parameter_Type (First (
- Parameter_Specifications (Parent (Proc_Nam))));
+ Param :=
+ Parameter_Type
+ (First (Parameter_Specifications (Parent (Proc_Nam))));
- -- The formal may be an anonymous access type.
+ -- The formal may be an anonymous access type
if Nkind (Param) = N_Access_Definition then
Param_Typ := Entity (Subtype_Mark (Param));
-
else
Param_Typ := Etype (Param);
end if;
@@ -14860,10 +14926,6 @@ package body Sem_Util is
function Within_Check (Nod : Node_Id) return Boolean;
-- Determine whether an arbitrary node appears in a check node
- function Within_Subprogram_Call (Nod : Node_Id) return Boolean;
- -- Determine whether an arbitrary node appears in an entry, function, or
- -- procedure call.
-
function Within_Volatile_Function (Id : Entity_Id) return Boolean;
-- Determine whether an arbitrary entity appears in a volatile function
@@ -14926,36 +14988,6 @@ package body Sem_Util is
return False;
end Within_Check;
- ----------------------------
- -- Within_Subprogram_Call --
- ----------------------------
-
- function Within_Subprogram_Call (Nod : Node_Id) return Boolean is
- Par : Node_Id;
-
- begin
- -- Climb the parent chain looking for a function or procedure call
-
- Par := Nod;
- while Present (Par) loop
- if Nkind_In (Par, N_Entry_Call_Statement,
- N_Function_Call,
- N_Procedure_Call_Statement)
- then
- return True;
-
- -- Prevent the search from going too far
-
- elsif Is_Body_Or_Package_Declaration (Par) then
- exit;
- end if;
-
- Par := Parent (Par);
- end loop;
-
- return False;
- end Within_Subprogram_Call;
-
------------------------------
-- Within_Volatile_Function --
------------------------------
@@ -15416,7 +15448,7 @@ package body Sem_Util is
Anc_Part : Node_Id;
Assoc : Node_Id;
Choice : Node_Id;
- Comp_Typ : Entity_Id;
+ Comp_Typ : Entity_Id := Empty; -- init to avoid warning
Expr : Node_Id;
begin
@@ -15492,6 +15524,7 @@ package body Sem_Util is
-- The type of the choice must have preelaborable initialization if
-- the association carries a <>.
+ pragma Assert (Present (Comp_Typ));
if Box_Present (Assoc) then
if not Has_Preelaborable_Initialization (Comp_Typ) then
return False;
@@ -17526,8 +17559,8 @@ package body Sem_Util is
L_Ndims : constant Nat := Number_Dimensions (L_Typ);
R_Ndims : constant Nat := Number_Dimensions (R_Typ);
- L_Index : Node_Id;
- R_Index : Node_Id;
+ L_Index : Node_Id := Empty; -- init to ...
+ R_Index : Node_Id := Empty; -- ...avoid warnings
L_Low : Node_Id;
L_High : Node_Id;
L_Len : Uint;
@@ -19504,9 +19537,9 @@ package body Sem_Util is
N : constant Entity_Id := Make_Temporary (Sloc_Value, Id_Char);
begin
- Set_Ekind (N, Kind);
- Set_Is_Internal (N, True);
- Append_Entity (N, Scope_Id);
+ Set_Ekind (N, Kind);
+ Set_Is_Internal (N, True);
+ Append_Entity (N, Scope_Id);
if Kind in Type_Kind then
Init_Size_Align (N);
@@ -23295,6 +23328,7 @@ package body Sem_Util is
function Subprogram_Name (N : Node_Id) return String is
Buf : Bounded_String;
Ent : Node_Id := N;
+ Nod : Node_Id;
begin
while Present (Ent) loop
@@ -23303,17 +23337,32 @@ package body Sem_Util is
Ent := Defining_Unit_Name (Specification (Ent));
exit;
- when N_Package_Body
+ when N_Subprogram_Declaration =>
+ Nod := Corresponding_Body (Ent);
+
+ if Present (Nod) then
+ Ent := Nod;
+ else
+ Ent := Defining_Unit_Name (Specification (Ent));
+ end if;
+
+ exit;
+
+ when N_Subprogram_Instantiation
+ | N_Package_Body
| N_Package_Specification
- | N_Subprogram_Specification
=>
Ent := Defining_Unit_Name (Ent);
exit;
+ when N_Protected_Type_Declaration =>
+ Ent := Corresponding_Body (Ent);
+ exit;
+
when N_Protected_Body
- | N_Protected_Type_Declaration
| N_Task_Body
=>
+ Ent := Defining_Identifier (Ent);
exit;
when others =>
@@ -23324,18 +23373,55 @@ package body Sem_Util is
end loop;
if No (Ent) then
- return "unknown subprogram";
+ return "unknown subprogram:unknown file:0:0";
end if;
-- If the subprogram is a child unit, use its simple name to start the
-- construction of the fully qualified name.
if Nkind (Ent) = N_Defining_Program_Unit_Name then
- Append_Entity_Name (Buf, Defining_Identifier (Ent));
- else
- Append_Entity_Name (Buf, Ent);
+ Ent := Defining_Identifier (Ent);
+ end if;
+
+ Append_Entity_Name (Buf, Ent);
+
+ -- Append homonym number if needed
+
+ if Nkind (N) in N_Entity and then Has_Homonym (N) then
+ declare
+ H : Entity_Id := Homonym (N);
+ Nr : Nat := 1;
+
+ begin
+ while Present (H) loop
+ if Scope (H) = Scope (N) then
+ Nr := Nr + 1;
+ end if;
+
+ H := Homonym (H);
+ end loop;
+
+ if Nr > 1 then
+ Append (Buf, '#');
+ Append (Buf, Nr);
+ end if;
+ end;
end if;
+ -- Append source location of Ent to Buf so that the string will
+ -- look like "subp:file:line:col".
+
+ declare
+ Loc : constant Source_Ptr := Sloc (Ent);
+ begin
+ Append (Buf, ':');
+ Append (Buf, Reference_Name (Get_Source_File_Index (Loc)));
+ Append (Buf, ':');
+ Append (Buf, Nat (Get_Logical_Line_Number (Loc)));
+ Append (Buf, ':');
+ Append (Buf, Nat (Get_Column_Number (Loc)));
+ end;
+
return +Buf;
end Subprogram_Name;
@@ -24184,6 +24270,36 @@ package body Sem_Util is
return Scope_Within_Or_Same (Scope (E), S);
end Within_Scope;
+ ----------------------------
+ -- Within_Subprogram_Call --
+ ----------------------------
+
+ function Within_Subprogram_Call (N : Node_Id) return Boolean is
+ Par : Node_Id;
+
+ begin
+ -- Climb the parent chain looking for a function or procedure call
+
+ Par := N;
+ while Present (Par) loop
+ if Nkind_In (Par, N_Entry_Call_Statement,
+ N_Function_Call,
+ N_Procedure_Call_Statement)
+ then
+ return True;
+
+ -- Prevent the search from going too far
+
+ elsif Is_Body_Or_Package_Declaration (Par) then
+ exit;
+ end if;
+
+ Par := Parent (Par);
+ end loop;
+
+ return False;
+ end Within_Subprogram_Call;
+
----------------
-- Wrong_Type --
----------------
diff --git a/gcc/ada/sem_util.ads b/gcc/ada/sem_util.ads
index c6958cb1aaa..9aaa1160ed7 100644
--- a/gcc/ada/sem_util.ads
+++ b/gcc/ada/sem_util.ads
@@ -769,6 +769,11 @@ package Sem_Util is
-- analyzed. Subsequent uses of this id on a different type denotes the
-- discriminant at the same position in this new type.
+ function Find_DIC_Type (Typ : Entity_Id) return Entity_Id;
+ -- Subsidiary to all Build_DIC_Procedure_xxx routines. Find the type which
+ -- defines the Default_Initial_Condition pragma of type Typ. This is either
+ -- Typ itself or a parent type when the pragma is inherited.
+
function Find_Enclosing_Iterator_Loop (Id : Entity_Id) return Entity_Id;
-- Find the nearest iterator loop which encloses arbitrary entity Id. If
-- such a loop exists, return the entity of its identifier (E_Loop scope),
@@ -2735,6 +2740,10 @@ package Sem_Util is
function Within_Scope (E : Entity_Id; S : Entity_Id) return Boolean;
-- Returns True if entity E is declared within scope S
+ function Within_Subprogram_Call (N : Node_Id) return Boolean;
+ -- Determine whether arbitrary node N appears in an entry, function, or
+ -- procedure call.
+
procedure Wrong_Type (Expr : Node_Id; Expected_Type : Entity_Id);
-- Output error message for incorrectly typed expression. Expr is the node
-- for the incorrectly typed construct (Etype (Expr) is the type found),
diff --git a/gcc/ada/sem_warn.adb b/gcc/ada/sem_warn.adb
index 0e498d3e6cb..ff94cf84e41 100644
--- a/gcc/ada/sem_warn.adb
+++ b/gcc/ada/sem_warn.adb
@@ -3344,7 +3344,8 @@ package body Sem_Warn is
-----------------------------
procedure Warn_On_Known_Condition (C : Node_Id) is
- Test_Result : Boolean;
+ Test_Result : Boolean := False;
+ -- Force initialization to facilitate static analysis
function Is_Known_Branch return Boolean;
-- If the type of the condition is Boolean, the constant value of the
diff --git a/gcc/ada/set_targ.adb b/gcc/ada/set_targ.adb
index f25c9f84f81..93b71018d89 100755
--- a/gcc/ada/set_targ.adb
+++ b/gcc/ada/set_targ.adb
@@ -6,7 +6,7 @@
-- --
-- B o d y --
-- --
--- Copyright (C) 2013-2016, Free Software Foundation, Inc. --
+-- Copyright (C) 2013-2017, Free Software Foundation, Inc. --
-- --
-- GNAT is free software; you can redistribute it and/or modify it under --
-- terms of the GNU General Public License as published by the Free Soft- --
@@ -580,6 +580,7 @@ package body Set_Targ is
-- Checks that we have one or more spaces and skips them
procedure FailN (S : String);
+ pragma No_Return (FailN);
-- Calls Fail adding " name in file xxx", where name is the currently
-- gathered name in Nam_Buf, surrounded by quotes, and xxx is the
-- name of the file.
diff --git a/gcc/ada/sigtramp-qnx.c b/gcc/ada/sigtramp-qnx.c
new file mode 100644
index 00000000000..6e70534c08c
--- /dev/null
+++ b/gcc/ada/sigtramp-qnx.c
@@ -0,0 +1,273 @@
+/****************************************************************************
+ * *
+ * GNAT COMPILER COMPONENTS *
+ * *
+ * S I G T R A M P *
+ * *
+ * Asm Implementation File *
+ * *
+ * Copyright (C) 2017, Free Software Foundation, Inc. *
+ * *
+ * GNAT is free software; you can redistribute it and/or modify it under *
+ * terms of the GNU General Public License as published by the Free Soft- *
+ * ware Foundation; either version 3, or (at your option) any later ver- *
+ * sion. GNAT is distributed in the hope that it will be useful, but WITH- *
+ * OUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY *
+ * or FITNESS FOR A PARTICULAR PURPOSE. *
+ * *
+ * As a special exception under Section 7 of GPL version 3, you are granted *
+ * additional permissions described in the GCC Runtime Library Exception, *
+ * version 3.1, as published by the Free Software Foundation. *
+ * *
+ * In particular, you can freely distribute your programs built with the *
+ * GNAT Pro compiler, including any required library run-time units, using *
+ * any licensing terms of your choosing. See the AdaCore Software License *
+ * for full details. *
+ * *
+ * GNAT was originally developed by the GNAT team at New York University. *
+ * Extensive contributions were provided by Ada Core Technologies Inc. *
+ * *
+ ****************************************************************************/
+
+/**********************************************
+ * QNX version of the __gnat_sigtramp service *
+ **********************************************/
+
+#include <ucontext.h>
+
+#include "sigtramp.h"
+/* See sigtramp.h for a general explanation of functionality. */
+
+extern void __gnat_sigtramp_common
+ (int signo, void *siginfo, void *sigcontext,
+ __sigtramphandler_t * handler);
+
+void __gnat_sigtramp (int signo, void *si, void *sc,
+ __sigtramphandler_t * handler)
+ __attribute__((optimize(2)));
+
+void __gnat_sigtramp (int signo, void *si, void *ucontext,
+ __sigtramphandler_t * handler)
+{
+ struct sigcontext *mcontext = &((ucontext_t *) ucontext)->uc_mcontext;
+
+ __gnat_sigtramp_common (signo, si, mcontext, handler);
+}
+
+/* asm string construction helpers. */
+
+#define STR(TEXT) #TEXT
+/* stringify expanded TEXT, surrounding it with double quotes. */
+
+#define S(E) STR(E)
+/* stringify E, which will resolve as text but may contain macros
+ still to be expanded. */
+
+/* asm (TEXT) outputs <tab>TEXT. These facilitate the output of
+ multiline contents: */
+#define TAB(S) "\t" S
+#define CR(S) S "\n"
+
+#undef TCR
+#define TCR(S) TAB(CR(S))
+
+/* Trampoline body block
+ --------------------- */
+
+#define COMMON_CFI(REG) \
+ ".cfi_offset " S(REGNO_##REG) "," S(REG_OFFSET_##REG)
+
+#ifdef __x86_64__
+/*****************************************
+ * x86-64 *
+ *****************************************/
+
+// CFI register numbers
+#define REGNO_RAX 0
+#define REGNO_RDX 1
+#define REGNO_RCX 2
+#define REGNO_RBX 3
+#define REGNO_RSI 4
+#define REGNO_RDI 5
+#define REGNO_RBP 6
+#define REGNO_RSP 7
+#define REGNO_R8 8
+#define REGNO_R9 9
+#define REGNO_R10 10
+#define REGNO_R11 11
+#define REGNO_R12 12
+#define REGNO_R13 13
+#define REGNO_R14 14
+#define REGNO_R15 15 /* Used as CFA */
+#define REGNO_RPC 16 /* aka %rip */
+
+// Registers offset from the regset structure
+#define REG_OFFSET_RDI 0x00
+#define REG_OFFSET_RSI 0x08
+#define REG_OFFSET_RDX 0x10
+#define REG_OFFSET_R10 0x18
+#define REG_OFFSET_R8 0x20
+#define REG_OFFSET_R9 0x28
+#define REG_OFFSET_RAX 0x30
+#define REG_OFFSET_RBX 0x38
+#define REG_OFFSET_RBP 0x40
+#define REG_OFFSET_RCX 0x48
+#define REG_OFFSET_R11 0x50
+#define REG_OFFSET_R12 0x58
+#define REG_OFFSET_R13 0x60
+#define REG_OFFSET_R14 0x68
+#define REG_OFFSET_R15 0x70
+#define REG_OFFSET_RPC 0x78 /* RIP */
+#define REG_OFFSET_RSP 0x90
+
+#define CFI_COMMON_REGS \
+CR("# CFI for common registers\n") \
+TCR(COMMON_CFI(RSP)) \
+TCR(COMMON_CFI(R15)) \
+TCR(COMMON_CFI(R14)) \
+TCR(COMMON_CFI(R13)) \
+TCR(COMMON_CFI(R12)) \
+TCR(COMMON_CFI(R11)) \
+TCR(COMMON_CFI(RCX)) \
+TCR(COMMON_CFI(RBP)) \
+TCR(COMMON_CFI(RBX)) \
+TCR(COMMON_CFI(RAX)) \
+TCR(COMMON_CFI(R9)) \
+TCR(COMMON_CFI(R8)) \
+TCR(COMMON_CFI(R10)) \
+TCR(COMMON_CFI(RSI)) \
+TCR(COMMON_CFI(RDI)) \
+TCR(COMMON_CFI(RDX)) \
+TCR(COMMON_CFI(RPC)) \
+TCR(".cfi_return_column " S(REGNO_RPC))
+
+#define SIGTRAMP_BODY \
+TCR(".cfi_def_cfa 15, 0") \
+CFI_COMMON_REGS \
+CR("") \
+TCR("# Allocate frame and save the non-volatile") \
+TCR("# registers we're going to modify") \
+TCR("subq $8, %rsp") \
+TCR("# Setup CFA_REG = context, which we'll retrieve as our CFA value") \
+TCR("movq %rdx, %r15") \
+TCR("# Call the real handler. The signo, siginfo and sigcontext") \
+TCR("# arguments are the same as those we received") \
+TCR("call *%rcx") \
+TCR("# This part should never be executed") \
+TCR("addq $8, %rsp") \
+TCR("ret")
+#endif
+
+#ifdef __aarch64__
+/*****************************************
+ * Aarch64 *
+ *****************************************/
+
+/* CFA reg: any callee saved register will do */
+#define CFA_REG 19
+
+/* General purpose registers */
+#define REG_OFFSET_GR(n) (n * 8)
+#define REGNO_GR(n) n
+
+/* point to the ELR value of the mcontext registers list */
+#define REG_OFFSET_ELR (32 * 8)
+#define REGNO_PC 30
+
+#define CFI_DEF_CFA \
+ TCR(".cfi_def_cfa " S(CFA_REG) ", 0")
+
+#define CFI_COMMON_REGS \
+ CR("# CFI for common registers\n") \
+ TCR(COMMON_CFI(GR(0))) \
+ TCR(COMMON_CFI(GR(1))) \
+ TCR(COMMON_CFI(GR(2))) \
+ TCR(COMMON_CFI(GR(3))) \
+ TCR(COMMON_CFI(GR(4))) \
+ TCR(COMMON_CFI(GR(5))) \
+ TCR(COMMON_CFI(GR(6))) \
+ TCR(COMMON_CFI(GR(7))) \
+ TCR(COMMON_CFI(GR(8))) \
+ TCR(COMMON_CFI(GR(9))) \
+ TCR(COMMON_CFI(GR(10))) \
+ TCR(COMMON_CFI(GR(11))) \
+ TCR(COMMON_CFI(GR(12))) \
+ TCR(COMMON_CFI(GR(13))) \
+ TCR(COMMON_CFI(GR(14))) \
+ TCR(COMMON_CFI(GR(15))) \
+ TCR(COMMON_CFI(GR(16))) \
+ TCR(COMMON_CFI(GR(17))) \
+ TCR(COMMON_CFI(GR(18))) \
+ TCR(COMMON_CFI(GR(19))) \
+ TCR(COMMON_CFI(GR(20))) \
+ TCR(COMMON_CFI(GR(21))) \
+ TCR(COMMON_CFI(GR(22))) \
+ TCR(COMMON_CFI(GR(23))) \
+ TCR(COMMON_CFI(GR(24))) \
+ TCR(COMMON_CFI(GR(25))) \
+ TCR(COMMON_CFI(GR(26))) \
+ TCR(COMMON_CFI(GR(27))) \
+ TCR(COMMON_CFI(GR(28))) \
+ TCR(COMMON_CFI(GR(29))) \
+ TCR(".cfi_offset " S(REGNO_PC) "," S(REG_OFFSET_ELR)) \
+ TCR(".cfi_return_column " S(REGNO_PC))
+
+#define SIGTRAMP_BODY \
+ CFI_DEF_CFA \
+ CFI_COMMON_REGS \
+ TCR("# Push FP and LR on stack") \
+ TCR("stp x29, x30, [sp, #-16]!") \
+ TCR("# Push register used to hold the CFA on stack") \
+ TCR("str x" S(CFA_REG) ", [sp, #-8]!") \
+ TCR("# Set the CFA: x2 value") \
+ TCR("mov x" S(CFA_REG) ", x2") \
+ TCR("# Call the handler") \
+ TCR("blr x3") \
+ TCR("# Release our frame and return (should never get here!).") \
+ TCR("ldr x" S(CFA_REG) " , [sp], 8") \
+ TCR("ldp x29, x30, [sp], 16") \
+ TCR("ret")
+
+#endif /* AARCH64 */
+
+/* Symbol definition block
+ ----------------------- */
+
+#if defined (__x86_64__) || defined (__aarch64__)
+#define FUNC_ALIGN TCR(".p2align 4,,15")
+#else
+#define FUNC_ALIGN
+#endif
+
+#define SIGTRAMP_START(SYM) \
+CR("# " S(SYM) " cfi trampoline") \
+TCR(".type " S(SYM) ", @function") \
+CR("") \
+FUNC_ALIGN \
+CR(S(SYM) ":") \
+TCR(".cfi_startproc") \
+TCR(".cfi_signal_frame")
+
+/* Symbol termination block
+ ------------------------ */
+
+#define SIGTRAMP_END(SYM) \
+CR(".cfi_endproc") \
+TCR(".size " S(SYM) ", .-" S(SYM))
+
+/*----------------------------
+ -- And now, the real code --
+ ---------------------------- */
+
+/* Text section start. The compiler isn't aware of that switch. */
+
+asm (".text\n"
+ TCR(".align 2"));
+
+/* sigtramp stub for common registers. */
+
+#define TRAMP_COMMON __gnat_sigtramp_common
+
+asm (SIGTRAMP_START(TRAMP_COMMON));
+asm (SIGTRAMP_BODY);
+asm (SIGTRAMP_END(TRAMP_COMMON));
diff --git a/gcc/ada/sinfo.adb b/gcc/ada/sinfo.adb
index dc4e8fb2c1a..20ff3b26557 100644
--- a/gcc/ada/sinfo.adb
+++ b/gcc/ada/sinfo.adb
@@ -2090,16 +2090,13 @@ package body Sinfo is
return Flag4 (N);
end Is_Qualified_Universal_Literal;
- function Is_Recorded_Scenario
+ function Is_Read
(N : Node_Id) return Boolean is
begin
pragma Assert (False
- or else NT (N).Nkind = N_Call_Marker
- or else NT (N).Nkind = N_Function_Instantiation
- or else NT (N).Nkind = N_Package_Instantiation
- or else NT (N).Nkind = N_Procedure_Instantiation);
- return Flag6 (N);
- end Is_Recorded_Scenario;
+ or else NT (N).Nkind = N_Variable_Reference_Marker);
+ return Flag1 (N);
+ end Is_Read;
function Is_Source_Call
(N : Node_Id) return Boolean is
@@ -2179,6 +2176,14 @@ package body Sinfo is
return Flag5 (N);
end Is_Task_Master;
+ function Is_Write
+ (N : Node_Id) return Boolean is
+ begin
+ pragma Assert (False
+ or else NT (N).Nkind = N_Variable_Reference_Marker);
+ return Flag2 (N);
+ end Is_Write;
+
function Iteration_Scheme
(N : Node_Id) return Node_Id is
begin
@@ -3277,7 +3282,8 @@ package body Sinfo is
(N : Node_Id) return Entity_Id is
begin
pragma Assert (False
- or else NT (N).Nkind = N_Call_Marker);
+ or else NT (N).Nkind = N_Call_Marker
+ or else NT (N).Nkind = N_Variable_Reference_Marker);
return Node1 (N);
end Target;
@@ -5512,16 +5518,13 @@ package body Sinfo is
Set_Flag4 (N, Val);
end Set_Is_Qualified_Universal_Literal;
- procedure Set_Is_Recorded_Scenario
+ procedure Set_Is_Read
(N : Node_Id; Val : Boolean := True) is
begin
pragma Assert (False
- or else NT (N).Nkind = N_Call_Marker
- or else NT (N).Nkind = N_Function_Instantiation
- or else NT (N).Nkind = N_Package_Instantiation
- or else NT (N).Nkind = N_Procedure_Instantiation);
- Set_Flag6 (N, Val);
- end Set_Is_Recorded_Scenario;
+ or else NT (N).Nkind = N_Variable_Reference_Marker);
+ Set_Flag1 (N, Val);
+ end Set_Is_Read;
procedure Set_Is_Source_Call
(N : Node_Id; Val : Boolean := True) is
@@ -5601,6 +5604,14 @@ package body Sinfo is
Set_Flag5 (N, Val);
end Set_Is_Task_Master;
+ procedure Set_Is_Write
+ (N : Node_Id; Val : Boolean := True) is
+ begin
+ pragma Assert (False
+ or else NT (N).Nkind = N_Variable_Reference_Marker);
+ Set_Flag2 (N, Val);
+ end Set_Is_Write;
+
procedure Set_Iteration_Scheme
(N : Node_Id; Val : Node_Id) is
begin
@@ -6699,7 +6710,8 @@ package body Sinfo is
(N : Node_Id; Val : Entity_Id) is
begin
pragma Assert (False
- or else NT (N).Nkind = N_Call_Marker);
+ or else NT (N).Nkind = N_Call_Marker
+ or else NT (N).Nkind = N_Variable_Reference_Marker);
Set_Node1 (N, Val); -- semantic field, no parent set
end Set_Target;
diff --git a/gcc/ada/sinfo.ads b/gcc/ada/sinfo.ads
index cf220e4e563..f9f84ac416b 100644
--- a/gcc/ada/sinfo.ads
+++ b/gcc/ada/sinfo.ads
@@ -38,7 +38,7 @@
-- The tree contains not only the full syntactic representation of the
-- program, but also the results of semantic analysis. In particular, the
--- nodes for defining identifiers, defining character literals and defining
+-- nodes for defining identifiers, defining character literals, and defining
-- operator symbols, collectively referred to as entities, represent what
-- would normally be regarded as the symbol table information. In addition a
-- number of the tree nodes contain semantic information.
@@ -213,7 +213,7 @@ package Sinfo is
-- The Present function tests for Empty, which in this case signals the end
-- of the list. First returns Empty immediately if the list is empty.
- -- Present is defined in Atree, First and Next are defined in Nlists.
+ -- Present is defined in Atree; First and Next are defined in Nlists.
-- The exceptions to this rule occur with {DEFINING_IDENTIFIERS} in all
-- contexts, which is handled as described in the previous section, and
@@ -389,7 +389,7 @@ package Sinfo is
-- In the following node definitions, all fields, both syntactic and
-- semantic, are documented. The one exception is in the case of entities
- -- (defining identifiers, character literals and operator symbols), where
+ -- (defining identifiers, character literals, and operator symbols), where
-- the usage of the fields depends on the entity kind. Entity fields are
-- fully documented in the separate package Einfo.
@@ -1116,7 +1116,7 @@ package Sinfo is
-- complete a subprogram declaration.
-- Corresponding_Spec_Of_Stub (Node2-Sem)
- -- This field is present in subprogram, package, task and protected body
+ -- This field is present in subprogram, package, task, and protected body
-- stubs where it points to the corresponding spec of the stub. Due to
-- clashes in the structure of nodes, we cannot use Corresponding_Spec.
@@ -1754,7 +1754,7 @@ package Sinfo is
-- Is_Generic_Contract_Pragma (Flag2-Sem)
-- This flag is present in N_Pragma nodes. It is set when the pragma is
- -- a source construct, applies to a generic unit or its body and denotes
+ -- a source construct, applies to a generic unit or its body, and denotes
-- one of the following contract-related annotations:
-- Abstract_State
-- Contract_Cases
@@ -1863,11 +1863,9 @@ package Sinfo is
-- the resolution of accidental overloading of binary or unary operators
-- which may occur in instances.
- -- Is_Recorded_Scenario (Flag6-Sem)
- -- Present in call marker and instantiation nodes. Set when the scenario
- -- was saved by the ABE Recording phase. This flag aids the ABE machinery
- -- to keep its internal data up-to-date in case the node is transformed
- -- by Atree.Rewrite.
+ -- Is_Read (Flag1-Sem)
+ -- Present in variable reference markers. Set when the original variable
+ -- reference constitues a read of the variable.
-- Is_Source_Call (Flag4-Sem)
-- Present in call marker nodes. Set when the related call came from
@@ -1912,10 +1910,14 @@ package Sinfo is
-- nodes which emulate the body of a task unit.
-- Is_Task_Master (Flag5-Sem)
- -- A flag set in a Subprogram_Body, Block_Statement or Task_Body node to
+ -- A flag set in a Subprogram_Body, Block_Statement, or Task_Body node to
-- indicate that the construct is a task master (i.e. has declared tasks
-- or declares an access to a task type).
+ -- Is_Write (Flag2-Sem)
+ -- Present in variable reference markers. Set when the original variable
+ -- reference constitues a write of the variable.
+
-- Itype (Node1-Sem)
-- Used in N_Itype_Reference node to reference an itype for which it is
-- important to ensure that it is defined. See description of this node
@@ -2017,7 +2019,7 @@ package Sinfo is
-- calls to Freeze_Expression.
-- Next_Entity (Node2-Sem)
- -- Present in defining identifiers, defining character literals and
+ -- Present in defining identifiers, defining character literals, and
-- defining operator symbols (i.e. in all entities). The entities of a
-- scope are chained, and this field is used as the forward pointer for
-- this list. See Einfo for further details.
@@ -2234,7 +2236,7 @@ package Sinfo is
-- because Analyze wants to insert extra actions on this list.
-- Rounded_Result (Flag18-Sem)
- -- Present in N_Type_Conversion, N_Op_Divide and N_Op_Multiply nodes.
+ -- Present in N_Type_Conversion, N_Op_Divide, and N_Op_Multiply nodes.
-- Used in the fixed-point cases to indicate that the result must be
-- rounded as a result of the use of the 'Round attribute. Also used for
-- integer N_Op_Divide nodes to indicate that the result should be
@@ -2267,7 +2269,7 @@ package Sinfo is
-- operation named (statically) in a dispatching call.
-- Scope (Node3-Sem)
- -- Present in defining identifiers, defining character literals and
+ -- Present in defining identifiers, defining character literals, and
-- defining operator symbols (i.e. in all entities). The entities of a
-- scope all use this field to reference the corresponding scope entity.
-- See Einfo for further details.
@@ -2318,8 +2320,9 @@ package Sinfo is
-- only execute if invalid values are present).
-- Target (Node1-Sem)
- -- Present in call marker nodes. References the entity of the entry,
- -- operator, or subprogram invoked by the related call or requeue.
+ -- Present in call and variable reference marker nodes. References the
+ -- entity of the original entity, operator, or subprogram being invoked,
+ -- or the original variable being read or written.
-- Target_Type (Node2-Sem)
-- Used in an N_Validate_Unchecked_Conversion node to point to the target
@@ -2338,7 +2341,7 @@ package Sinfo is
-- always set to No_List.
-- Treat_Fixed_As_Integer (Flag14-Sem)
- -- This flag appears in operator nodes for divide, multiply, mod and rem
+ -- This flag appears in operator nodes for divide, multiply, mod, and rem
-- on fixed-point operands. It indicates that the operands are to be
-- treated as integer values, ignoring small values. This flag is only
-- set as a result of expansion of fixed-point operations. Typically a
@@ -2728,7 +2731,7 @@ package Sinfo is
-- pain to allow these aspects to pervade the pragma syntax, and the
-- representation of pragma nodes internally. So what we do is to
-- replace these ASPECT_MARK forms with identifiers whose name is one
- -- of the special internal names _Pre, _Post or _Type_Invariant.
+ -- of the special internal names _Pre, _Post, or _Type_Invariant.
-- We do a similar replacement of these Aspect_Mark forms in the
-- Expression of a pragma argument association for the cases of
@@ -3025,8 +3028,8 @@ package Sinfo is
-- [abstract] [limited] new [NULL_EXCLUSION] parent_SUBTYPE_INDICATION
-- [[and INTERFACE_LIST] RECORD_EXTENSION_PART]
- -- Note: ABSTRACT, LIMITED and record extension part are not permitted
- -- in Ada 83 mode
+ -- Note: ABSTRACT, LIMITED, and record extension part are not permitted
+ -- in Ada 83 mode.
-- Note: a record extension part is required if ABSTRACT is present
@@ -3337,7 +3340,7 @@ package Sinfo is
-- Subtype_Indication field or else the Access_Definition field.
-- N_Component_Definition
- -- Sloc points to ALIASED, ACCESS or to first token of subtype mark
+ -- Sloc points to ALIASED, ACCESS, or to first token of subtype mark
-- Aliased_Present (Flag4)
-- Null_Exclusion_Present (Flag11)
-- Subtype_Indication (Node5) (set to Empty if not present)
@@ -3485,7 +3488,7 @@ package Sinfo is
-- end record
-- | null record
- -- Note: the Abstract_Present, Tagged_Present and Limited_Present
+ -- Note: the Abstract_Present, Tagged_Present, and Limited_Present
-- flags appear only for a record definition appearing in a record
-- type definition.
@@ -4013,7 +4016,7 @@ package Sinfo is
-- Instead the Attribute_Name and Expressions fields of the parent
-- node (N_Attribute_Reference node) hold the information.
- -- Note: if ACCESS, DELTA or DIGITS appears in an attribute
+ -- Note: if ACCESS, DELTA, or DIGITS appears in an attribute
-- designator, then they are treated as identifiers internally
-- rather than the keywords of the same name.
@@ -7036,7 +7039,6 @@ package Sinfo is
-- Is_Elaboration_Checks_OK_Node (Flag1-Sem)
-- Is_SPARK_Mode_On_Node (Flag2-Sem)
-- Is_Declaration_Level_Node (Flag5-Sem)
- -- Is_Recorded_Scenario (Flag6-Sem)
-- Is_Known_Guaranteed_ABE (Flag18-Sem)
-- N_Procedure_Instantiation
@@ -7050,7 +7052,6 @@ package Sinfo is
-- Is_Elaboration_Checks_OK_Node (Flag1-Sem)
-- Is_SPARK_Mode_On_Node (Flag2-Sem)
-- Is_Declaration_Level_Node (Flag5-Sem)
- -- Is_Recorded_Scenario (Flag6-Sem)
-- Must_Override (Flag14) set if overriding indicator present
-- Must_Not_Override (Flag15) set if not_overriding indicator present
-- Is_Known_Guaranteed_ABE (Flag18-Sem)
@@ -7066,7 +7067,6 @@ package Sinfo is
-- Is_Elaboration_Checks_OK_Node (Flag1-Sem)
-- Is_SPARK_Mode_On_Node (Flag2-Sem)
-- Is_Declaration_Level_Node (Flag5-Sem)
- -- Is_Recorded_Scenario (Flag6-Sem)
-- Must_Override (Flag14) set if overriding indicator present
-- Must_Not_Override (Flag15) set if not_overriding indicator present
-- Is_Known_Guaranteed_ABE (Flag18-Sem)
@@ -7824,7 +7824,6 @@ package Sinfo is
-- Is_Dispatching_Call (Flag3-Sem)
-- Is_Source_Call (Flag4-Sem)
-- Is_Declaration_Level_Node (Flag5-Sem)
- -- Is_Recorded_Scenario (Flag6-Sem)
-- Is_Known_Guaranteed_ABE (Flag18-Sem)
------------------------
@@ -7911,7 +7910,7 @@ package Sinfo is
-- to aspects/pragmas Contract_Cases and Test_Case. The ordering in the
-- list is in LIFO fashion.
- -- Classifications contains pragmas that either declare, categorize or
+ -- Classifications contains pragmas that either declare, categorize, or
-- establish dependencies between subprogram or package inputs and
-- outputs. Currently the following pragmas appear in this list:
-- Abstract_States
@@ -8455,6 +8454,37 @@ package Sinfo is
-- Note: in the case where a debug source file is generated, the Sloc
-- for this node points to the VALIDATE keyword in the file output.
+ -------------------------------
+ -- Variable_Reference_Marker --
+ -------------------------------
+
+ -- This node is created during the analysis of direct or expanded names,
+ -- and the resolution of entry and subprogram calls. It performs several
+ -- functions:
+
+ -- * Variable reference markers provide a uniform model for handling
+ -- variable references by the ABE mechanism, regardless of whether
+ -- expansion took place.
+
+ -- * The variable reference marker captures the entity of the variable
+ -- being read or written.
+
+ -- * The variable reference markers aid the ABE Processing phase by
+ -- signaling the presence of a call in case the original variable
+ -- reference was transformed by expansion.
+
+ -- Sprint syntax: r#target# -- for a read
+ -- rw#target# -- for a read/write
+ -- w#target# -- for a write
+
+ -- The Sprint syntax shown above is not enabled by default
+
+ -- N_Variable_Reference_Marker
+ -- Sloc points to Sloc of original variable reference
+ -- Target (Node1-Sem)
+ -- Is_Read (Flag1-Sem)
+ -- Is_Write (Flag2-Sem)
+
-----------
-- Empty --
-----------
@@ -8877,6 +8907,7 @@ package Sinfo is
N_Triggering_Alternative,
N_Use_Type_Clause,
N_Validate_Unchecked_Conversion,
+ N_Variable_Reference_Marker,
N_Variant,
N_Variant_Part,
N_With_Clause,
@@ -9733,8 +9764,8 @@ package Sinfo is
function Is_Qualified_Universal_Literal
(N : Node_Id) return Boolean; -- Flag4
- function Is_Recorded_Scenario
- (N : Node_Id) return Boolean; -- Flag6
+ function Is_Read
+ (N : Node_Id) return Boolean; -- Flag1
function Is_Source_Call
(N : Node_Id) return Boolean; -- Flag4
@@ -9760,6 +9791,9 @@ package Sinfo is
function Is_Task_Master
(N : Node_Id) return Boolean; -- Flag5
+ function Is_Write
+ (N : Node_Id) return Boolean; -- Flag2
+
function Iteration_Scheme
(N : Node_Id) return Node_Id; -- Node2
@@ -10822,8 +10856,8 @@ package Sinfo is
procedure Set_Is_Qualified_Universal_Literal
(N : Node_Id; Val : Boolean := True); -- Flag4
- procedure Set_Is_Recorded_Scenario
- (N : Node_Id; Val : Boolean := True); -- Flag6
+ procedure Set_Is_Read
+ (N : Node_Id; Val : Boolean := True); -- Flag1
procedure Set_Is_Source_Call
(N : Node_Id; Val : Boolean := True); -- Flag4
@@ -10849,6 +10883,9 @@ package Sinfo is
procedure Set_Is_Task_Master
(N : Node_Id; Val : Boolean := True); -- Flag5
+ procedure Set_Is_Write
+ (N : Node_Id; Val : Boolean := True); -- Flag2
+
procedure Set_Iteration_Scheme
(N : Node_Id; Val : Node_Id); -- Node2
@@ -13023,7 +13060,14 @@ package Sinfo is
4 => False, -- unused
5 => False), -- unused
- -- Entries for Empty, Error and Unused. Even thought these have a Chars
+ N_Variable_Reference_Marker =>
+ (1 => False, -- Target (Node1-Sem)
+ 2 => False, -- unused
+ 3 => False, -- unused
+ 4 => False, -- unused
+ 5 => False), -- unused
+
+ -- Entries for Empty, Error, and Unused. Even though these have a Chars
-- field for debugging purposes, they are not really syntactic fields, so
-- we mark all fields as unused.
@@ -13276,7 +13320,7 @@ package Sinfo is
pragma Inline (Is_Prefixed_Call);
pragma Inline (Is_Protected_Subprogram_Body);
pragma Inline (Is_Qualified_Universal_Literal);
- pragma Inline (Is_Recorded_Scenario);
+ pragma Inline (Is_Read);
pragma Inline (Is_Source_Call);
pragma Inline (Is_SPARK_Mode_On_Node);
pragma Inline (Is_Static_Coextension);
@@ -13285,6 +13329,7 @@ package Sinfo is
pragma Inline (Is_Task_Allocation_Block);
pragma Inline (Is_Task_Body_Procedure);
pragma Inline (Is_Task_Master);
+ pragma Inline (Is_Write);
pragma Inline (Iteration_Scheme);
pragma Inline (Itype);
pragma Inline (Kill_Range_Check);
@@ -13634,7 +13679,7 @@ package Sinfo is
pragma Inline (Set_Is_Prefixed_Call);
pragma Inline (Set_Is_Protected_Subprogram_Body);
pragma Inline (Set_Is_Qualified_Universal_Literal);
- pragma Inline (Set_Is_Recorded_Scenario);
+ pragma Inline (Set_Is_Read);
pragma Inline (Set_Is_Source_Call);
pragma Inline (Set_Is_SPARK_Mode_On_Node);
pragma Inline (Set_Is_Static_Coextension);
@@ -13643,6 +13688,7 @@ package Sinfo is
pragma Inline (Set_Is_Task_Allocation_Block);
pragma Inline (Set_Is_Task_Body_Procedure);
pragma Inline (Set_Is_Task_Master);
+ pragma Inline (Set_Is_Write);
pragma Inline (Set_Iteration_Scheme);
pragma Inline (Set_Iterator_Specification);
pragma Inline (Set_Itype);
diff --git a/gcc/ada/spark_xrefs.adb b/gcc/ada/spark_xrefs.adb
index 8fab555ac20..e59114d48c7 100644
--- a/gcc/ada/spark_xrefs.adb
+++ b/gcc/ada/spark_xrefs.adb
@@ -6,7 +6,7 @@
-- --
-- B o d y --
-- --
--- Copyright (C) 2011-2016, Free Software Foundation, Inc. --
+-- Copyright (C) 2011-2017, Free Software Foundation, Inc. --
-- --
-- GNAT is free software; you can redistribute it and/or modify it under --
-- terms of the GNU General Public License as published by the Free Soft- --
@@ -23,8 +23,9 @@
-- --
------------------------------------------------------------------------------
-with Output; use Output;
-with Put_SPARK_Xrefs;
+with Lib.Xref;
+with Output; use Output;
+with Sem_Util; use Sem_Util;
package body SPARK_Xrefs is
@@ -33,174 +34,48 @@ package body SPARK_Xrefs is
------------
procedure dspark is
- begin
- -- Dump SPARK cross-reference file table
- Write_Line ("SPARK Xrefs File Table");
- Write_Line ("----------------------");
+ procedure Dump (Index : Nat; AXR : SPARK_Xref_Record);
+
+ procedure Dump_SPARK_Xrefs is new
+ Lib.Xref.SPARK_Specific.Iterate_SPARK_Xrefs (Dump);
- for Index in 1 .. SPARK_File_Table.Last loop
- declare
- AFR : SPARK_File_Record renames SPARK_File_Table.Table (Index);
+ ----------
+ -- Dump --
+ ----------
- begin
- Write_Str (" ");
- Write_Int (Int (Index));
- Write_Str (". File_Num = ");
- Write_Int (Int (AFR.File_Num));
- Write_Str (" File_Name = """);
+ procedure Dump (Index : Nat; AXR : SPARK_Xref_Record) is
+ begin
+ Write_Str (" ");
+ Write_Int (Index);
+ Write_Char ('.');
- if AFR.File_Name /= null then
- Write_Str (AFR.File_Name.all);
- end if;
+ Write_Str (" Entity = " & Unique_Name (AXR.Entity));
+ Write_Str (" (");
+ Write_Int (Nat (AXR.Entity));
+ Write_Str (")");
- Write_Char ('"');
- Write_Str (" From = ");
- Write_Int (Int (AFR.From_Scope));
- Write_Str (" To = ");
- Write_Int (Int (AFR.To_Scope));
- Write_Eol;
- end;
- end loop;
+ Write_Str (" Scope = " & Unique_Name (AXR.Ref_Scope));
+ Write_Str (" (");
+ Write_Int (Nat (AXR.Ref_Scope));
+ Write_Str (")");
- -- Dump SPARK cross-reference scope table
+ Write_Str (" Ref_Type = '" & AXR.Rtype & "'");
- Write_Eol;
- Write_Line ("SPARK Xrefs Scope Table");
- Write_Line ("-----------------------");
-
- for Index in 1 .. SPARK_Scope_Table.Last loop
- declare
- ASR : SPARK_Scope_Record renames SPARK_Scope_Table.Table (Index);
-
- begin
- Write_Str (" ");
- Write_Int (Int (Index));
- Write_Str (". File_Num = ");
- Write_Int (Int (ASR.File_Num));
- Write_Str (" Scope_Num = ");
- Write_Int (Int (ASR.Scope_Num));
- Write_Str (" Scope_Name = """);
-
- if ASR.Scope_Name /= null then
- Write_Str (ASR.Scope_Name.all);
- end if;
-
- Write_Char ('"');
- Write_Str (" Line = ");
- Write_Int (Int (ASR.Line));
- Write_Str (" Col = ");
- Write_Int (Int (ASR.Col));
- Write_Str (" Type = ");
- Write_Char (ASR.Stype);
- Write_Str (" From = ");
- Write_Int (Int (ASR.From_Xref));
- Write_Str (" To = ");
- Write_Int (Int (ASR.To_Xref));
- Write_Str (" Scope_Entity = ");
- Write_Int (Int (ASR.Scope_Entity));
- Write_Eol;
- end;
- end loop;
+ Write_Eol;
+ end Dump;
+ -- Start of processing for dspark
+
+ begin
-- Dump SPARK cross-reference table
Write_Eol;
Write_Line ("SPARK Xref Table");
Write_Line ("----------------");
- for Index in 1 .. SPARK_Xref_Table.Last loop
- declare
- AXR : SPARK_Xref_Record renames SPARK_Xref_Table.Table (Index);
-
- begin
- Write_Str (" ");
- Write_Int (Int (Index));
- Write_Str (". Entity_Name = """);
-
- if AXR.Entity_Name /= null then
- Write_Str (AXR.Entity_Name.all);
- end if;
-
- Write_Char ('"');
- Write_Str (" Entity_Line = ");
- Write_Int (Int (AXR.Entity_Line));
- Write_Str (" Entity_Col = ");
- Write_Int (Int (AXR.Entity_Col));
- Write_Str (" File_Num = ");
- Write_Int (Int (AXR.File_Num));
- Write_Str (" Scope_Num = ");
- Write_Int (Int (AXR.Scope_Num));
- Write_Str (" Line = ");
- Write_Int (Int (AXR.Line));
- Write_Str (" Col = ");
- Write_Int (Int (AXR.Col));
- Write_Str (" Type = ");
- Write_Char (AXR.Rtype);
- Write_Eol;
- end;
- end loop;
- end dspark;
-
- ----------------
- -- Initialize --
- ----------------
-
- procedure Initialize_SPARK_Tables is
- begin
- SPARK_File_Table.Init;
- SPARK_Scope_Table.Init;
- SPARK_Xref_Table.Init;
- end Initialize_SPARK_Tables;
-
- ------------
- -- pspark --
- ------------
-
- procedure pspark is
-
- procedure Write_Info_Char (C : Character) renames Write_Char;
- -- Write one character
+ Dump_SPARK_Xrefs;
- procedure Write_Info_Str (Val : String) renames Write_Str;
- -- Write string
-
- function Write_Info_Col return Positive;
- -- Return next column for writing
-
- procedure Write_Info_Initiate (Key : Character) renames Write_Char;
- -- Start new one and write one character;
-
- procedure Write_Info_Nat (N : Nat);
- -- Write value of N
-
- procedure Write_Info_Terminate renames Write_Eol;
- -- Terminate current line
-
- --------------------
- -- Write_Info_Col --
- --------------------
-
- function Write_Info_Col return Positive is
- begin
- return Positive (Column);
- end Write_Info_Col;
-
- --------------------
- -- Write_Info_Nat --
- --------------------
-
- procedure Write_Info_Nat (N : Nat) is
- begin
- Write_Int (N);
- end Write_Info_Nat;
-
- procedure Debug_Put_SPARK_Xrefs is new Put_SPARK_Xrefs;
-
- -- Start of processing for pspark
-
- begin
- Debug_Put_SPARK_Xrefs;
- end pspark;
+ end dspark;
end SPARK_Xrefs;
diff --git a/gcc/ada/spark_xrefs.ads b/gcc/ada/spark_xrefs.ads
index fd5b76d4a66..25af9024d51 100644
--- a/gcc/ada/spark_xrefs.ads
+++ b/gcc/ada/spark_xrefs.ads
@@ -23,352 +23,28 @@
-- --
------------------------------------------------------------------------------
--- This package defines tables used to store information needed for the SPARK
--- mode. It is used by procedures in Lib.Xref.SPARK_Specific to build the
--- SPARK-specific cross-reference information before writing it to the ALI
--- file, and by Get_SPARK_Xrefs/Put_SPARK_Xrefs to read/write the textual
--- representation that is stored in the ALI file.
+-- This package defines data structures used to expose frontend
+-- cross-references to the SPARK backend.
-with Table;
-with Types; use Types;
+with Types; use Types;
package SPARK_Xrefs is
- -- SPARK cross-reference information can exist in one of two forms. In
- -- the ALI file, it is represented using a text format that is described
- -- in this specification. Internally it is stored using three tables:
- -- SPARK_Xref_Table, SPARK_Scope_Table and SPARK_File_Table, which are
- -- also defined in this unit.
-
- -- Lib.Xref.SPARK_Specific is part of the compiler. It extracts SPARK
- -- cross-reference information from the complete set of cross-references
- -- generated during compilation.
-
- -- Get_SPARK_Xrefs reads the text lines in ALI format and populates the
- -- internal tables with corresponding information.
-
- -- Put_SPARK_Xrefs reads the internal tables and generates text lines in
- -- the ALI format.
-
- ----------------------------
- -- SPARK Xrefs ALI Format --
- ----------------------------
-
- -- SPARK cross-reference information is generated on a unit-by-unit basis
- -- in the ALI file, using lines that start with the identifying character F
- -- ("Formal"). These lines are generated if GNATprove_Mode is True.
-
- -- The SPARK cross-reference information comes after the shared
- -- cross-reference information, so it can be ignored by tools like
- -- gnatbind, gnatmake, etc.
-
- -- -------------------
- -- -- Scope Section --
- -- -------------------
-
- -- A first section defines the scopes in which entities are defined and
- -- referenced. A scope is a package/subprogram/protected_type/task_type
- -- declaration/body. Note that a package declaration and body define two
- -- different scopes. Similarly, a subprogram, protected type and task type
- -- declaration and body, when both present, define two different scopes.
-
- -- FD dependency-number filename (-> unit-filename)?
-
- -- This header precedes scope information for the unit identified by
- -- dependency number and file name. The dependency number is the index
- -- into the generated D lines and is ones-origin (e.g. 2 = reference to
- -- second generated D line).
-
- -- The list of FD lines should match the list of D lines defined in the
- -- ALI file, in the same order.
-
- -- Note that the filename here will reflect the original name if a
- -- Source_Reference pragma was encountered (since all line number
- -- references will be with respect to the original file).
-
- -- Note: the filename is redundant in that it could be deduced from the
- -- corresponding D line, but it is convenient at least for human
- -- reading of the SPARK cross-reference information, and means that
- -- the SPARK cross-reference information can stand on its own without
- -- needing other parts of the ALI file.
-
- -- The optional unit filename is given only for subunits.
-
- -- FS . scope line type col entity (-> spec-file . spec-scope)?
-
- -- (The ? mark stands for an optional entry in the syntax)
-
- -- scope is the ones-origin scope number for the current file (e.g. 2 =
- -- reference to the second FS line in this FD block).
-
- -- line is the line number of the scope entity. The name of the entity
- -- starts in column col. Columns are numbered from one, and if
- -- horizontal tab characters are present, the column number is computed
- -- assuming standard 1,9,17,.. tab stops. For example, if the entity is
- -- the first token on the line, and is preceded by space-HT-space, then
- -- the column would be column 10.
-
- -- type is a single letter identifying the type of the entity, using
- -- the same code as in cross-references:
-
- -- K = package (k = generic package)
- -- V = function (v = generic function)
- -- U = procedure (u = generic procedure)
- -- Y = entry
-
- -- col is the column number of the scope entity
-
- -- entity is the name of the scope entity, with casing in the canonical
- -- casing for the source file where it is defined.
-
- -- spec-file and spec-scope are respectively the file and scope for the
- -- spec corresponding to the current body scope, when they differ.
-
- -- ------------------
- -- -- Xref Section --
- -- ------------------
-
- -- A second section defines cross-references useful for computing global
- -- variables read/written in each subprogram/package/protected_type/
- -- task_type.
-
- -- FX dependency-number filename . entity-number entity
-
- -- dependency-number and filename identify a file in FD lines
-
- -- entity-number and entity identify a scope in FS lines
- -- for the previously identified file.
-
- -- (filename and entity are just a textual representations of
- -- dependency-number and entity-number)
-
- -- F line typ col entity ref*
-
- -- line is the line number of the referenced entity
-
- -- typ is the type of the referenced entity, using a code similar to
- -- the one used for cross-references:
-
- -- > = IN parameter
- -- < = OUT parameter
- -- = = IN OUT parameter
- -- * = all other cases
-
- -- col is the column number of the referenced entity
-
- -- entity is the name of the referenced entity as written in the source
- -- file where it is defined.
-
- -- There may be zero or more ref entries on each line
-
- -- (file |)? ((. scope :)? line type col)*
-
- -- file is the dependency number of the file with the reference. It and
- -- the following vertical bar are omitted if the file is the same as
- -- the previous ref, and the refs for the current file are first (and
- -- do not need a bar).
-
- -- scope is the scope number of the scope with the reference. It and
- -- the following colon are omitted if the scope is the same as the
- -- previous ref, and the refs for the current scope are first (and do
- -- not need a colon).
-
- -- line is the line number of the reference
-
- -- col is the column number of the reference
-
- -- type is one of the following, using the same code as in
- -- cross-references:
-
- -- m = modification
- -- r = reference
- -- c = reference to constant object
- -- s = subprogram reference in a static call
-
- -- Special entries for reads and writes to memory reference a special
- -- variable called "__HEAP". These special entries are present in every
- -- scope where reads and writes to memory are present. Line and column for
- -- this special variable are always 0.
-
- -- Examples: ??? add examples here
-
- -- -------------------------------
- -- -- Generated Globals Section --
- -- -------------------------------
-
- -- The Generated Globals section is located at the end of the ALI file
-
- -- All lines with information related to the Generated Globals begin with
- -- string "GG". This string should therefore not be used in the beginning
- -- of any line not related to Generated Globals.
-
- -- The processing (reading and writing) of this section happens in package
- -- Flow_Generated_Globals (from the SPARK 2014 sources), for further
- -- information please refer there.
-
- ----------------
- -- Xref Table --
- ----------------
-
- -- The following table records SPARK cross-references
-
- type Xref_Index is new Nat;
- -- Used to index values in this table. Values start at 1 and are assigned
- -- sequentially as entries are constructed; value 0 is used temporarily
- -- until a proper value is determined.
-
type SPARK_Xref_Record is record
- Entity_Name : String_Ptr;
- -- Pointer to entity name in ALI file
-
- Entity_Line : Nat;
- -- Line number for the entity referenced
-
- Etype : Character;
- -- Indicates type of entity, using code used in ALI file:
- -- > = IN parameter
- -- < = OUT parameter
- -- = = IN OUT parameter
- -- * = all other cases
-
- Entity_Col : Nat;
- -- Column number for the entity referenced
-
- File_Num : Nat;
- -- File dependency number for the cross-reference. Note that if no file
- -- entry is present explicitly, this is just a copy of the reference for
- -- the current cross-reference section.
-
- Scope_Num : Nat;
- -- Scope number for the cross-reference. Note that if no scope entry is
- -- present explicitly, this is just a copy of the reference for the
- -- current cross-reference section.
+ Entity : Entity_Id;
+ -- Referenced entity
- Line : Nat;
- -- Line number for the reference
+ Ref_Scope : Entity_Id;
+ -- Scope where the reference occurs
Rtype : Character;
-- Indicates type of the reference, using code used in ALI file:
-- r = reference
- -- c = reference to constant object
-- m = modification
-- s = call
-
- Col : Nat;
- -- Column number for the reference
- end record;
-
- package SPARK_Xref_Table is new Table.Table (
- Table_Component_Type => SPARK_Xref_Record,
- Table_Index_Type => Xref_Index,
- Table_Low_Bound => 1,
- Table_Initial => 2000,
- Table_Increment => 300,
- Table_Name => "Xref_Table");
-
- -----------------
- -- Scope Table --
- -----------------
-
- -- This table keeps track of the scopes and the corresponding starting and
- -- ending indexes (From, To) in the Xref table.
-
- type Scope_Index is new Nat;
- -- Used to index values in this table. Values start at 1 and are assigned
- -- sequentially as entries are constructed; value 0 indicates that no
- -- entries have been constructed and is also used until a proper value is
- -- determined.
-
- type SPARK_Scope_Record is record
- Scope_Name : String_Ptr;
- -- Pointer to scope name in ALI file
-
- File_Num : Nat;
- -- Set to the file dependency number for the scope
-
- Scope_Num : Pos;
- -- Set to the scope number for the scope
-
- Spec_File_Num : Nat;
- -- Set to the file dependency number for the scope corresponding to the
- -- spec of the current scope entity, if different, or else 0.
-
- Spec_Scope_Num : Nat;
- -- Set to the scope number for the scope corresponding to the spec of
- -- the current scope entity, if different, or else 0.
-
- Line : Nat;
- -- Line number for the scope
-
- Stype : Character;
- -- Indicates type of scope, using code used in ALI file:
- -- K = package
- -- T = task
- -- U = procedure
- -- V = function
- -- Y = entry
-
- Col : Nat;
- -- Column number for the scope
-
- From_Xref : Xref_Index;
- -- Starting index in Xref table for this scope
-
- To_Xref : Xref_Index;
- -- Ending index in Xref table for this scope
-
- -- The following component is only used in-memory, not printed out in
- -- ALI file.
-
- Scope_Entity : Entity_Id := Empty;
- -- Entity (subprogram or package) for the scope
end record;
-
- package SPARK_Scope_Table is new Table.Table (
- Table_Component_Type => SPARK_Scope_Record,
- Table_Index_Type => Scope_Index,
- Table_Low_Bound => 1,
- Table_Initial => 200,
- Table_Increment => 300,
- Table_Name => "Scope_Table");
-
- ----------------
- -- File Table --
- ----------------
-
- -- This table keeps track of the units and the corresponding starting and
- -- ending indexes (From, To) in the Scope table.
-
- type File_Index is new Nat;
- -- Used to index values in this table. Values start at 1 and are assigned
- -- sequentially as entries are constructed; value 0 indicates that no
- -- entries have been constructed.
-
- type SPARK_File_Record is record
- File_Name : String_Ptr;
- -- Pointer to file name in ALI file
-
- Unit_File_Name : String_Ptr;
- -- Pointer to file name for unit in ALI file, when File_Name refers to a
- -- subunit; otherwise null.
-
- File_Num : Nat;
- -- Dependency number in ALI file
-
- From_Scope : Scope_Index;
- -- Starting index in Scope table for this unit
-
- To_Scope : Scope_Index;
- -- Ending index in Scope table for this unit
- end record;
-
- package SPARK_File_Table is new Table.Table (
- Table_Component_Type => SPARK_File_Record,
- Table_Index_Type => File_Index,
- Table_Low_Bound => 1,
- Table_Initial => 20,
- Table_Increment => 200,
- Table_Name => "File_Table");
+ -- This type holds a subset of the frontend xref entry that is needed by
+ -- the SPARK backend.
---------------
-- Constants --
@@ -378,19 +54,17 @@ package SPARK_Xrefs is
-- Name of special variable used in effects to denote reads and writes
-- through explicit dereference.
+ Heap : Entity_Id := Empty;
+ -- A special entity which denotes the heap object; it should be considered
+ -- constant, but needs to be variable, because it can only be initialized
+ -- after the node tables are created.
+
-----------------
-- Subprograms --
-----------------
- procedure Initialize_SPARK_Tables;
- -- Reset tables for a new compilation
-
procedure dspark;
-- Debug routine to dump internal SPARK cross-reference tables. This is a
-- raw format dump showing exactly what the tables contain.
- procedure pspark;
- -- Debugging procedure to output contents of SPARK cross-reference binary
- -- tables in the format in which they appear in an ALI file.
-
end SPARK_Xrefs;
diff --git a/gcc/ada/spark_xrefs_test.adb b/gcc/ada/spark_xrefs_test.adb
deleted file mode 100644
index 6ad4de2c158..00000000000
--- a/gcc/ada/spark_xrefs_test.adb
+++ /dev/null
@@ -1,321 +0,0 @@
-------------------------------------------------------------------------------
--- --
--- GNAT SYSTEM UTILITIES --
--- --
--- S P A R K _ X R E F S _ T E S T --
--- --
--- B o d y --
--- --
--- Copyright (C) 2011-2013, Free Software Foundation, Inc. --
--- --
--- GNAT is free software; you can redistribute it and/or modify it under --
--- terms of the GNU General Public License as published by the Free Soft- --
--- ware Foundation; either version 3, or (at your option) any later ver- --
--- sion. GNAT is distributed in the hope that it will be useful, but WITH- --
--- OUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY --
--- or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License --
--- for more details. You should have received a copy of the GNU General --
--- Public License distributed with GNAT; see file COPYING3. If not, go to --
--- http://www.gnu.org/licenses for a complete copy of the license. --
--- --
--- GNAT was originally developed by the GNAT team at New York University. --
--- Extensive contributions were provided by Ada Core Technologies Inc. --
--- --
-------------------------------------------------------------------------------
-
--- This utility program is used to test proper operation of the
--- Get_SPARK_Xrefs and Put_SPARK_Xrefs units. To run it, compile any source
--- file with switch -gnatd.E or -gnatd.F to get an ALI file file.ALI
--- containing SPARK information. Then run this utility using:
-
--- spark_xrefs_test file.ali
-
--- This test will read the SPARK cross-reference information from the ALI
--- file, and use Get_SPARK_Xrefs to store this in binary form in the internal
--- tables in SPARK_Xrefs. Then Put_SPARK_Xrefs is used to write the
--- information from these tables back into text form. This output is compared
--- with the original SPARK cross-reference information in the ALI file and the
--- two should be identical. If not an error message is output.
-
-with Get_SPARK_Xrefs;
-with Put_SPARK_Xrefs;
-
-with SPARK_Xrefs; use SPARK_Xrefs;
-with Types; use Types;
-
-with Ada.Command_Line; use Ada.Command_Line;
-with Ada.Streams; use Ada.Streams;
-with Ada.Streams.Stream_IO; use Ada.Streams.Stream_IO;
-with Ada.Text_IO;
-
-with GNAT.OS_Lib; use GNAT.OS_Lib;
-
-procedure SPARK_Xrefs_Test is
- Infile : File_Type;
- Name1 : String_Access;
- Outfile_1 : File_Type;
- Name2 : String_Access;
- Outfile_2 : File_Type;
- C : Character;
-
- Stop : exception;
- -- Terminate execution
-
- Diff_Exec : constant String_Access := Locate_Exec_On_Path ("diff");
- Diff_Result : Integer;
-
- use ASCII;
-
-begin
- if Argument_Count /= 1 then
- Ada.Text_IO.Put_Line ("Usage: spark_xrefs_test FILE.ali");
- raise Stop;
- end if;
-
- Name1 := new String'(Argument (1) & ".1");
- Name2 := new String'(Argument (1) & ".2");
-
- Open (Infile, In_File, Argument (1));
- Create (Outfile_1, Out_File, Name1.all);
- Create (Outfile_2, Out_File, Name2.all);
-
- -- Read input file till we get to first 'F' line
-
- Process : declare
- Output_Col : Positive := 1;
-
- function Get_Char (F : File_Type) return Character;
- -- Read one character from specified file
-
- procedure Put_Char (F : File_Type; C : Character);
- -- Write one character to specified file
-
- function Get_Output_Col return Positive;
- -- Return current column in output file, where each line starts at
- -- column 1 and terminate with LF, and HT is at columns 1, 9, etc.
- -- All output is supposed to be carried through Put_Char.
-
- --------------
- -- Get_Char --
- --------------
-
- function Get_Char (F : File_Type) return Character is
- Item : Stream_Element_Array (1 .. 1);
- Last : Stream_Element_Offset;
-
- begin
- Read (F, Item, Last);
-
- if Last /= 1 then
- return Types.EOF;
- else
- return Character'Val (Item (1));
- end if;
- end Get_Char;
-
- --------------------
- -- Get_Output_Col --
- --------------------
-
- function Get_Output_Col return Positive is
- begin
- return Output_Col;
- end Get_Output_Col;
-
- --------------
- -- Put_Char --
- --------------
-
- procedure Put_Char (F : File_Type; C : Character) is
- Item : Stream_Element_Array (1 .. 1);
-
- begin
- if C /= CR and then C /= EOF then
- if C = LF then
- Output_Col := 1;
- elsif C = HT then
- Output_Col := ((Output_Col + 6) / 8) * 8 + 1;
- else
- Output_Col := Output_Col + 1;
- end if;
-
- Item (1) := Character'Pos (C);
- Write (F, Item);
- end if;
- end Put_Char;
-
- -- Subprograms used by Get_SPARK_Xrefs (these also copy the output to
- -- Outfile_1 for later comparison with the output generated by
- -- Put_SPARK_Xrefs).
-
- function Getc return Character;
- function Nextc return Character;
- procedure Skipc;
-
- ----------
- -- Getc --
- ----------
-
- function Getc return Character is
- C : Character;
- begin
- C := Get_Char (Infile);
- Put_Char (Outfile_1, C);
- return C;
- end Getc;
-
- -----------
- -- Nextc --
- -----------
-
- function Nextc return Character is
- C : Character;
-
- begin
- C := Get_Char (Infile);
-
- if C /= EOF then
- Set_Index (Infile, Index (Infile) - 1);
- end if;
-
- return C;
- end Nextc;
-
- -----------
- -- Skipc --
- -----------
-
- procedure Skipc is
- C : Character;
- pragma Unreferenced (C);
- begin
- C := Getc;
- end Skipc;
-
- -- Subprograms used by Put_SPARK_Xrefs, which write information to
- -- Outfile_2.
-
- function Write_Info_Col return Positive;
- procedure Write_Info_Char (C : Character);
- procedure Write_Info_Initiate (Key : Character);
- procedure Write_Info_Nat (N : Nat);
- procedure Write_Info_Terminate;
-
- --------------------
- -- Write_Info_Col --
- --------------------
-
- function Write_Info_Col return Positive is
- begin
- return Get_Output_Col;
- end Write_Info_Col;
-
- ---------------------
- -- Write_Info_Char --
- ---------------------
-
- procedure Write_Info_Char (C : Character) is
- begin
- Put_Char (Outfile_2, C);
- end Write_Info_Char;
-
- -------------------------
- -- Write_Info_Initiate --
- -------------------------
-
- procedure Write_Info_Initiate (Key : Character) is
- begin
- Write_Info_Char (Key);
- end Write_Info_Initiate;
-
- --------------------
- -- Write_Info_Nat --
- --------------------
-
- procedure Write_Info_Nat (N : Nat) is
- begin
- if N > 9 then
- Write_Info_Nat (N / 10);
- end if;
-
- Write_Info_Char (Character'Val (48 + N mod 10));
- end Write_Info_Nat;
-
- --------------------------
- -- Write_Info_Terminate --
- --------------------------
-
- procedure Write_Info_Terminate is
- begin
- Write_Info_Char (LF);
- end Write_Info_Terminate;
-
- -- Local instantiations of Put_SPARK_Xrefs and Get_SPARK_Xrefs
-
- procedure Get_SPARK_Xrefs_Info is new Get_SPARK_Xrefs;
- procedure Put_SPARK_Xrefs_Info is new Put_SPARK_Xrefs;
-
- -- Start of processing for Process
-
- begin
- -- Loop to skip till first 'F' line
-
- loop
- C := Get_Char (Infile);
-
- if C = EOF then
- raise Stop;
-
- elsif C = LF or else C = CR then
- loop
- C := Get_Char (Infile);
- exit when C /= LF and then C /= CR;
- end loop;
-
- exit when C = 'F';
- end if;
- end loop;
-
- -- Position back to initial 'F' of first 'F' line
-
- Set_Index (Infile, Index (Infile) - 1);
-
- -- Read SPARK cross-reference information to internal SPARK tables, also
- -- copying SPARK xrefs info to Outfile_1.
-
- Initialize_SPARK_Tables;
- Get_SPARK_Xrefs_Info;
-
- -- Write SPARK cross-reference information from internal SPARK tables to
- -- Outfile_2.
-
- Put_SPARK_Xrefs_Info;
-
- -- Junk blank line (see comment at end of Lib.Writ)
-
- Write_Info_Terminate;
-
- -- Flush to disk
-
- Close (Outfile_1);
- Close (Outfile_2);
-
- -- Now Outfile_1 and Outfile_2 should be identical
-
- Diff_Result :=
- Spawn (Diff_Exec.all,
- Argument_String_To_List
- ("-u " & Name1.all & " " & Name2.all).all);
-
- if Diff_Result /= 0 then
- Ada.Text_IO.Put_Line ("diff(1) exit status" & Diff_Result'Img);
- end if;
-
- OS_Exit (Diff_Result);
-
- end Process;
-
-exception
- when Stop =>
- null;
-end SPARK_Xrefs_Test;
diff --git a/gcc/ada/sprint.adb b/gcc/ada/sprint.adb
index ac2dcd8a14d..428e91a73cd 100644
--- a/gcc/ada/sprint.adb
+++ b/gcc/ada/sprint.adb
@@ -3459,6 +3459,25 @@ package body Sprint is
Sprint_Node (Target_Type (Node));
Write_Str (");");
+ when N_Variable_Reference_Marker =>
+ null;
+
+ -- Enable the following code for debugging purposes only
+
+ -- if Is_Read (Node) and then Is_Write (Node) then
+ -- Write_Indent_Str ("rw#");
+
+ -- elsif Is_Read (Node) then
+ -- Write_Indent_Str ("r#");
+
+ -- else
+ -- pragma Assert (Is_Write (Node));
+ -- Write_Indent_Str ("w#");
+ -- end if;
+
+ -- Write_Id (Target (Node));
+ -- Write_Char ('#');
+
when N_Variant =>
Write_Indent_Str_Sloc ("when ");
Sprint_Bar_List (Discrete_Choices (Node));
diff --git a/gcc/ada/style.adb b/gcc/ada/style.adb
index a0d61aa37b4..df043d0669b 100644
--- a/gcc/ada/style.adb
+++ b/gcc/ada/style.adb
@@ -166,7 +166,7 @@ package body Style is
Error_Msg_Node_1 := Def;
Error_Msg_Sloc := Sloc (Def);
Error_Msg -- CODEFIX
- ("(style) bad casing of & declared#", Sref);
+ ("(style) bad casing of & declared#", Sref, Ref);
return;
-- Else end of identifiers, and they match
diff --git a/gcc/ada/stylesw.adb b/gcc/ada/stylesw.adb
index ff8155adfc9..e851a2466b8 100644
--- a/gcc/ada/stylesw.adb
+++ b/gcc/ada/stylesw.adb
@@ -6,7 +6,7 @@
-- --
-- B o d y --
-- --
--- Copyright (C) 1992-2016, Free Software Foundation, Inc. --
+-- Copyright (C) 1992-2017, Free Software Foundation, Inc. --
-- --
-- GNAT is free software; you can redistribute it and/or modify it under --
-- terms of the GNU General Public License as published by the Free Soft- --
@@ -150,10 +150,6 @@ package body Stylesw is
-- Start of processing for Save_Style_Check_Options
begin
- for K in Options'Range loop
- Options (K) := ' ';
- end loop;
-
Add (Character'Val (Style_Check_Indentation + Character'Pos ('0')),
Style_Check_Indentation /= 0);
@@ -165,7 +161,8 @@ package body Stylesw is
if Style_Check_Comments then
if Style_Check_Comments_Spacing = 2 then
Add ('c', Style_Check_Comments);
- elsif Style_Check_Comments_Spacing = 1 then
+ else
+ pragma Assert (Style_Check_Comments_Spacing = 1);
Add ('C', Style_Check_Comments);
end if;
end if;
diff --git a/gcc/ada/switch-c.adb b/gcc/ada/switch-c.adb
index 5ad10e348a5..c1ff88d234e 100644
--- a/gcc/ada/switch-c.adb
+++ b/gcc/ada/switch-c.adb
@@ -337,19 +337,7 @@ package body Switch.C is
when 'C' =>
Ptr := Ptr + 1;
-
- if not CodePeer_Mode then
- CodePeer_Mode := True;
-
- -- Suppress compiler warnings by default, since what we are
- -- interested in here is what CodePeer can find out. Note
- -- that if -gnatwxxx is specified after -gnatC on the
- -- command line, we do not want to override this setting in
- -- Adjust_Global_Switches, and assume that the user wants to
- -- get both warnings from GNAT and CodePeer messages.
-
- Warning_Mode := Suppress;
- end if;
+ CodePeer_Mode := True;
-- -gnatd (compiler debug options)
diff --git a/gcc/ada/terminals.c b/gcc/ada/terminals.c
index 9133a3bd88c..9f300514ced 100644
--- a/gcc/ada/terminals.c
+++ b/gcc/ada/terminals.c
@@ -6,7 +6,7 @@
* *
* C Implementation File *
* *
- * Copyright (C) 2008-2016, AdaCore *
+ * Copyright (C) 2008-2017, AdaCore *
* *
* GNAT is free software; you can redistribute it and/or modify it under *
* terms of the GNU General Public License as published by the Free Soft- *
@@ -1111,7 +1111,7 @@ __gnat_setup_winsize (void *desc, int rows, int columns)
/* On some system termio is either absent or including it will disable termios
(HP-UX) */
#if !defined (__hpux__) && !defined (BSD) && !defined (__APPLE__) \
- && !defined (__rtems__)
+ && !defined (__rtems__) && !defined (__QNXNTO__)
# include <termio.h>
#endif
diff --git a/gcc/ada/tracebak.c b/gcc/ada/tracebak.c
index 7532ca2d71b..e5eb0fefc26 100644
--- a/gcc/ada/tracebak.c
+++ b/gcc/ada/tracebak.c
@@ -6,7 +6,7 @@
* *
* C Implementation File *
* *
- * Copyright (C) 2000-2016, Free Software Foundation, Inc. *
+ * Copyright (C) 2000-2017, Free Software Foundation, Inc. *
* *
* GNAT is free software; you can redistribute it and/or modify it under *
* terms of the GNU General Public License as published by the Free Soft- *
@@ -500,6 +500,18 @@ struct layout
|| ((*((ptr) - 1) & 0xff) == 0xff) \
|| (((*(ptr) & 0xd0ff) == 0xd0ff))))
+/*----------------------------- qnx ----------------------------------*/
+
+#elif defined (__QNX__)
+
+#define USE_GCC_UNWINDER
+
+#if defined (__aarch64__)
+#define PC_ADJUST -4
+#else
+#error Unhandled QNX architecture.
+#endif
+
/*----------------------------- ia64 ---------------------------------*/
#elif defined (__ia64__) && (defined (__linux__) || defined (__hpux__))
diff --git a/gcc/asan.c b/gcc/asan.c
index d00089d04dc..b3c481c4ae3 100644
--- a/gcc/asan.c
+++ b/gcc/asan.c
@@ -2806,14 +2806,17 @@ initialize_sanitizer_builtins (void)
#define ATTR_PURE_NOTHROW_LEAF_LIST ECF_PURE | ATTR_NOTHROW_LEAF_LIST
#undef DEF_BUILTIN_STUB
#define DEF_BUILTIN_STUB(ENUM, NAME)
-#undef DEF_SANITIZER_BUILTIN
-#define DEF_SANITIZER_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
+#undef DEF_SANITIZER_BUILTIN_1
+#define DEF_SANITIZER_BUILTIN_1(ENUM, NAME, TYPE, ATTRS) \
do { \
decl = add_builtin_function ("__builtin_" NAME, TYPE, ENUM, \
BUILT_IN_NORMAL, NAME, NULL_TREE); \
set_call_expr_flags (decl, ATTRS); \
set_builtin_decl (ENUM, decl, true); \
- } while (0);
+ } while (0)
+#undef DEF_SANITIZER_BUILTIN
+#define DEF_SANITIZER_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
+ DEF_SANITIZER_BUILTIN_1 (ENUM, NAME, TYPE, ATTRS);
#include "sanitizer.def"
@@ -2822,10 +2825,11 @@ initialize_sanitizer_builtins (void)
DEF_SANITIZER_BUILTIN here only as a convenience macro. */
if ((flag_sanitize & SANITIZE_OBJECT_SIZE)
&& !builtin_decl_implicit_p (BUILT_IN_OBJECT_SIZE))
- DEF_SANITIZER_BUILTIN (BUILT_IN_OBJECT_SIZE, "object_size",
- BT_FN_SIZE_CONST_PTR_INT,
- ATTR_PURE_NOTHROW_LEAF_LIST)
+ DEF_SANITIZER_BUILTIN_1 (BUILT_IN_OBJECT_SIZE, "object_size",
+ BT_FN_SIZE_CONST_PTR_INT,
+ ATTR_PURE_NOTHROW_LEAF_LIST);
+#undef DEF_SANITIZER_BUILTIN_1
#undef DEF_SANITIZER_BUILTIN
#undef DEF_BUILTIN_STUB
}
diff --git a/gcc/auto-profile.c b/gcc/auto-profile.c
index 130d8df5b1e..5134a795331 100644
--- a/gcc/auto-profile.c
+++ b/gcc/auto-profile.c
@@ -1061,7 +1061,7 @@ afdo_indirect_call (gimple_stmt_iterator *gsi, const icall_target_map &map,
/* FIXME: Count should be initialized. */
struct cgraph_edge *new_edge
= indirect_edge->make_speculative (direct_call,
- profile_count::uninitialized (), 0);
+ profile_count::uninitialized ());
new_edge->redirect_call_stmt_to_callee ();
gimple_remove_histogram_value (cfun, stmt, hist);
inline_call (new_edge, true, NULL, NULL, false);
@@ -1571,7 +1571,7 @@ afdo_annotate_cfg (const stmt_set &promoted_stmts)
if (max_count > profile_count::zero ())
{
afdo_calculate_branch_prob (&annotated_bb, &annotated_edge);
- counts_to_freqs ();
+ update_max_bb_count ();
profile_status_for_fn (cfun) = PROFILE_READ;
}
if (flag_value_profile_transformations)
diff --git a/gcc/bb-reorder.c b/gcc/bb-reorder.c
index f7c1f4c971e..55e6dc647d7 100644
--- a/gcc/bb-reorder.c
+++ b/gcc/bb-reorder.c
@@ -38,7 +38,7 @@
There are two parameters: Branch Threshold and Exec Threshold.
If the probability of an edge to a successor of the current basic block is
- lower than Branch Threshold or its frequency is lower than Exec Threshold,
+ lower than Branch Threshold or its count is lower than Exec Threshold,
then the successor will be the seed in one of the next rounds.
Each round has these parameters lower than the previous one.
The last round has to have these parameters set to zero so that the
@@ -75,7 +75,7 @@
multiple predecessors/ successors during trace discovery. When connecting
traces, only connect Trace n with Trace n + 1. This change reduces most
long jumps compared with the above algorithm.
- (2) Ignore the edge probability and frequency for fallthru edges.
+ (2) Ignore the edge probability and count for fallthru edges.
(3) Keep the original order of blocks when there is no chance to fall
through. We rely on the results of cfg_cleanup.
@@ -134,10 +134,10 @@ struct target_bb_reorder *this_target_bb_reorder = &default_target_bb_reorder;
/* Branch thresholds in thousandths (per mille) of the REG_BR_PROB_BASE. */
static const int branch_threshold[N_ROUNDS] = {400, 200, 100, 0, 0};
-/* Exec thresholds in thousandths (per mille) of the frequency of bb 0. */
+/* Exec thresholds in thousandths (per mille) of the count of bb 0. */
static const int exec_threshold[N_ROUNDS] = {500, 200, 50, 0, 0};
-/* If edge frequency is lower than DUPLICATION_THRESHOLD per mille of entry
+/* If edge count is lower than DUPLICATION_THRESHOLD per mille of entry
block the edge destination is not duplicated while connecting traces. */
#define DUPLICATION_THRESHOLD 100
@@ -196,25 +196,18 @@ struct trace
int length;
};
-/* Maximum frequency and count of one of the entry blocks. */
-static int max_entry_frequency;
+/* Maximum count of one of the entry blocks. */
static profile_count max_entry_count;
/* Local function prototypes. */
-static void find_traces (int *, struct trace *);
-static basic_block rotate_loop (edge, struct trace *, int);
-static void mark_bb_visited (basic_block, int);
-static void find_traces_1_round (int, int, gcov_type, struct trace *, int *,
+static void find_traces_1_round (int, profile_count, struct trace *, int *,
int, bb_heap_t **, int);
static basic_block copy_bb (basic_block, edge, basic_block, int);
static long bb_to_key (basic_block);
static bool better_edge_p (const_basic_block, const_edge, profile_probability,
- int, profile_probability, int, const_edge);
-static bool connect_better_edge_p (const_edge, bool, int, const_edge,
- struct trace *);
-static void connect_traces (int, struct trace *);
+ profile_count, profile_probability, profile_count,
+ const_edge);
static bool copy_bb_p (const_basic_block, int);
-static bool push_to_next_round_p (const_basic_block, int, int, int, gcov_type);
/* Return the trace number in which BB was visited. */
@@ -249,15 +242,14 @@ mark_bb_visited (basic_block bb, int trace)
static bool
push_to_next_round_p (const_basic_block bb, int round, int number_of_rounds,
- int exec_th, gcov_type count_th)
+ profile_count count_th)
{
bool there_exists_another_round;
bool block_not_hot_enough;
there_exists_another_round = round < number_of_rounds - 1;
- block_not_hot_enough = (bb->count.to_frequency (cfun) < exec_th
- || bb->count.ipa () < count_th
+ block_not_hot_enough = (bb->count < count_th
|| probably_never_executed_bb_p (cfun, bb));
if (there_exists_another_round
@@ -287,33 +279,26 @@ find_traces (int *n_traces, struct trace *traces)
number_of_rounds = N_ROUNDS - 1;
/* Insert entry points of function into heap. */
- max_entry_frequency = 0;
max_entry_count = profile_count::zero ();
FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs)
{
bbd[e->dest->index].heap = heap;
bbd[e->dest->index].node = heap->insert (bb_to_key (e->dest), e->dest);
- if (e->dest->count.to_frequency (cfun) > max_entry_frequency)
- max_entry_frequency = e->dest->count.to_frequency (cfun);
- if (e->dest->count.ipa_p () && e->dest->count > max_entry_count)
+ if (e->dest->count > max_entry_count)
max_entry_count = e->dest->count;
}
/* Find the traces. */
for (i = 0; i < number_of_rounds; i++)
{
- gcov_type count_threshold;
+ profile_count count_threshold;
if (dump_file)
fprintf (dump_file, "STC - round %d\n", i + 1);
- if (max_entry_count < INT_MAX / 1000)
- count_threshold = max_entry_count.to_gcov_type () * exec_threshold[i] / 1000;
- else
- count_threshold = max_entry_count.to_gcov_type () / 1000 * exec_threshold[i];
+ count_threshold = max_entry_count.apply_scale (exec_threshold[i], 1000);
find_traces_1_round (REG_BR_PROB_BASE * branch_threshold[i] / 1000,
- max_entry_frequency * exec_threshold[i] / 1000,
count_threshold, traces, n_traces, i, &heap,
number_of_rounds);
}
@@ -329,10 +314,14 @@ find_traces (int *n_traces, struct trace *traces)
for (bb = traces[i].first;
bb != traces[i].last;
bb = (basic_block) bb->aux)
- fprintf (dump_file, "%d [%d] ", bb->index,
- bb->count.to_frequency (cfun));
- fprintf (dump_file, "%d [%d]\n", bb->index,
- bb->count.to_frequency (cfun));
+ {
+ fprintf (dump_file, "%d [", bb->index);
+ bb->count.dump (dump_file);
+ fprintf (dump_file, "] ");
+ }
+ fprintf (dump_file, "%d [", bb->index);
+ bb->count.dump (dump_file);
+ fprintf (dump_file, "]\n");
}
fflush (dump_file);
}
@@ -349,7 +338,6 @@ rotate_loop (edge back_edge, struct trace *trace, int trace_n)
/* Information about the best end (end after rotation) of the loop. */
basic_block best_bb = NULL;
edge best_edge = NULL;
- int best_freq = -1;
profile_count best_count = profile_count::uninitialized ();
/* The best edge is preferred when its destination is not visited yet
or is a start block of some trace. */
@@ -375,12 +363,9 @@ rotate_loop (edge back_edge, struct trace *trace, int trace_n)
|| bbd[e->dest->index].start_of_trace >= 0)
{
/* The current edge E is also preferred. */
- int freq = EDGE_FREQUENCY (e);
- if (freq > best_freq || e->count () > best_count)
+ if (e->count () > best_count)
{
- best_freq = freq;
- if (e->count ().initialized_p ())
- best_count = e->count ();
+ best_count = e->count ();
best_edge = e;
best_bb = bb;
}
@@ -393,17 +378,14 @@ rotate_loop (edge back_edge, struct trace *trace, int trace_n)
{
/* The current edge E is preferred. */
is_preferred = true;
- best_freq = EDGE_FREQUENCY (e);
best_count = e->count ();
best_edge = e;
best_bb = bb;
}
else
{
- int freq = EDGE_FREQUENCY (e);
- if (!best_edge || freq > best_freq || e->count () > best_count)
+ if (!best_edge || e->count () > best_count)
{
- best_freq = freq;
best_count = e->count ();
best_edge = e;
best_bb = bb;
@@ -457,14 +439,14 @@ rotate_loop (edge back_edge, struct trace *trace, int trace_n)
/* One round of finding traces. Find traces for BRANCH_TH and EXEC_TH i.e. do
not include basic blocks whose probability is lower than BRANCH_TH or whose
- frequency is lower than EXEC_TH into traces (or whose count is lower than
+ count is lower than EXEC_TH into traces (or whose count is lower than
COUNT_TH). Store the new traces into TRACES and modify the number of
traces *N_TRACES. Set the round (which the trace belongs to) to ROUND.
The function expects starting basic blocks to be in *HEAP and will delete
*HEAP and store starting points for the next round into new *HEAP. */
static void
-find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
+find_traces_1_round (int branch_th, profile_count count_th,
struct trace *traces, int *n_traces, int round,
bb_heap_t **heap, int number_of_rounds)
{
@@ -488,13 +470,13 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
if (dump_file)
fprintf (dump_file, "Getting bb %d\n", bb->index);
- /* If the BB's frequency is too low, send BB to the next round. When
+ /* If the BB's count is too low, send BB to the next round. When
partitioning hot/cold blocks into separate sections, make sure all
the cold blocks (and ONLY the cold blocks) go into the (extra) final
round. When optimizing for size, do not push to next round. */
if (!for_size
- && push_to_next_round_p (bb, round, number_of_rounds, exec_th,
+ && push_to_next_round_p (bb, round, number_of_rounds,
count_th))
{
int key = bb_to_key (bb);
@@ -517,13 +499,11 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
do
{
- profile_probability prob;
- int freq;
bool ends_in_call;
- /* The probability and frequency of the best edge. */
+ /* The probability and count of the best edge. */
profile_probability best_prob = profile_probability::uninitialized ();
- int best_freq = INT_MIN / 2;
+ profile_count best_count = profile_count::uninitialized ();
best_edge = NULL;
mark_bb_visited (bb, *n_traces);
@@ -552,8 +532,8 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
if (BB_PARTITION (e->dest) != BB_PARTITION (bb))
continue;
- prob = e->probability;
- freq = e->dest->count.to_frequency (cfun);
+ profile_probability prob = e->probability;
+ profile_count count = e->dest->count;
/* The only sensible preference for a call instruction is the
fallthru edge. Don't bother selecting anything else. */
@@ -563,27 +543,26 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
{
best_edge = e;
best_prob = prob;
- best_freq = freq;
+ best_count = count;
}
continue;
}
/* Edge that cannot be fallthru or improbable or infrequent
successor (i.e. it is unsuitable successor). When optimizing
- for size, ignore the probability and frequency. */
+ for size, ignore the probability and count. */
if (!(e->flags & EDGE_CAN_FALLTHRU) || (e->flags & EDGE_COMPLEX)
|| !prob.initialized_p ()
|| ((prob.to_reg_br_prob_base () < branch_th
- || EDGE_FREQUENCY (e) < exec_th
- || e->count ().ipa () < count_th) && (!for_size)))
+ || e->count () < count_th) && (!for_size)))
continue;
- if (better_edge_p (bb, e, prob, freq, best_prob, best_freq,
+ if (better_edge_p (bb, e, prob, count, best_prob, best_count,
best_edge))
{
best_edge = e;
best_prob = prob;
- best_freq = freq;
+ best_count = count;
}
}
@@ -665,15 +644,13 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
{
bb_heap_t *which_heap = *heap;
- prob = e->probability;
- freq = EDGE_FREQUENCY (e);
+ profile_probability prob = e->probability;
if (!(e->flags & EDGE_CAN_FALLTHRU)
|| (e->flags & EDGE_COMPLEX)
|| !prob.initialized_p ()
|| prob.to_reg_br_prob_base () < branch_th
- || freq < exec_th
- || e->count ().ipa () < count_th)
+ || e->count () < count_th)
{
/* When partitioning hot/cold basic blocks, make sure
the cold blocks (and only the cold blocks) all get
@@ -682,7 +659,7 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
if (!for_size && push_to_next_round_p (e->dest, round,
number_of_rounds,
- exec_th, count_th))
+ count_th))
which_heap = new_heap;
}
@@ -707,8 +684,8 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
/* We do nothing with one basic block loops. */
if (best_edge->dest != bb)
{
- if (EDGE_FREQUENCY (best_edge)
- > 4 * best_edge->dest->count.to_frequency (cfun) / 5)
+ if (best_edge->count ()
+ > best_edge->dest->count.apply_scale (4, 5))
{
/* The loop has at least 4 iterations. If the loop
header is not the first block of the function
@@ -759,9 +736,8 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
C
where
- EDGE_FREQUENCY (AB) + EDGE_FREQUENCY (BC)
- >= EDGE_FREQUENCY (AC).
- (i.e. 2 * B->frequency >= EDGE_FREQUENCY (AC) )
+ AB->count () + BC->count () >= AC->count ().
+ (i.e. 2 * B->count >= AC->count )
Best ordering is then A B C.
When optimizing for size, A B C is always the best order.
@@ -785,8 +761,8 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
& EDGE_CAN_FALLTHRU)
&& !(single_succ_edge (e->dest)->flags & EDGE_COMPLEX)
&& single_succ (e->dest) == best_edge->dest
- && (2 * e->dest->count.to_frequency (cfun)
- >= EDGE_FREQUENCY (best_edge) || for_size))
+ && (e->dest->count.apply_scale (2, 1)
+ >= best_edge->count () || for_size))
{
best_edge = e;
if (dump_file)
@@ -954,22 +930,21 @@ bb_to_key (basic_block bb)
/* Return true when the edge E from basic block BB is better than the temporary
best edge (details are in function). The probability of edge E is PROB. The
- frequency of the successor is FREQ. The current best probability is
- BEST_PROB, the best frequency is BEST_FREQ.
+ count of the successor is COUNT. The current best probability is
+ BEST_PROB, the best count is BEST_COUNT.
The edge is considered to be equivalent when PROB does not differ much from
- BEST_PROB; similarly for frequency. */
+ BEST_PROB; similarly for count. */
static bool
better_edge_p (const_basic_block bb, const_edge e, profile_probability prob,
- int freq, profile_probability best_prob, int best_freq,
- const_edge cur_best_edge)
+ profile_count count, profile_probability best_prob,
+ profile_count best_count, const_edge cur_best_edge)
{
bool is_better_edge;
/* The BEST_* values do not have to be best, but can be a bit smaller than
maximum values. */
profile_probability diff_prob = best_prob.apply_scale (1, 10);
- int diff_freq = best_freq / 10;
/* The smaller one is better to keep the original order. */
if (optimize_function_for_size_p (cfun))
@@ -989,21 +964,27 @@ better_edge_p (const_basic_block bb, const_edge e, profile_probability prob,
else if (prob < best_prob - diff_prob)
/* The edge has lower probability than the temporary best edge. */
is_better_edge = false;
- else if (freq < best_freq - diff_freq)
- /* The edge and the temporary best edge have almost equivalent
- probabilities. The higher frequency of a successor now means
- that there is another edge going into that successor.
- This successor has lower frequency so it is better. */
- is_better_edge = true;
- else if (freq > best_freq + diff_freq)
- /* This successor has higher frequency so it is worse. */
- is_better_edge = false;
- else if (e->dest->prev_bb == bb)
- /* The edges have equivalent probabilities and the successors
- have equivalent frequencies. Select the previous successor. */
- is_better_edge = true;
else
- is_better_edge = false;
+ {
+ profile_count diff_count = best_count.apply_scale (1, 10);
+ if (count < best_count - diff_count
+ || (!best_count.initialized_p ()
+ && count.nonzero_p ()))
+ /* The edge and the temporary best edge have almost equivalent
+ probabilities. The higher countuency of a successor now means
+ that there is another edge going into that successor.
+ This successor has lower countuency so it is better. */
+ is_better_edge = true;
+ else if (count > best_count + diff_count)
+ /* This successor has higher countuency so it is worse. */
+ is_better_edge = false;
+ else if (e->dest->prev_bb == bb)
+ /* The edges have equivalent probabilities and the successors
+ have equivalent frequencies. Select the previous successor. */
+ is_better_edge = true;
+ else
+ is_better_edge = false;
+ }
return is_better_edge;
}
@@ -1041,6 +1022,16 @@ connect_better_edge_p (const_edge e, bool src_index_p, int best_len,
{
e_index = e->src->index;
+ /* We are looking for predecessor, so probabilities are not that
+ informative. We do not want to connect A to B becuse A has
+ only one sucessor (probablity is 100%) while there is edge
+ A' to B where probability is 90% but which is much more frequent. */
+ if (e->count () > cur_best_edge->count ())
+ /* The edge has higher probability than the temporary best edge. */
+ is_better_edge = true;
+ else if (e->count () < cur_best_edge->count ())
+ /* The edge has lower probability than the temporary best edge. */
+ is_better_edge = false;
if (e->probability > cur_best_edge->probability)
/* The edge has higher probability than the temporary best edge. */
is_better_edge = true;
@@ -1086,15 +1077,10 @@ connect_traces (int n_traces, struct trace *traces)
int last_trace;
int current_pass;
int current_partition;
- int freq_threshold;
- gcov_type count_threshold;
+ profile_count count_threshold;
bool for_size = optimize_function_for_size_p (cfun);
- freq_threshold = max_entry_frequency * DUPLICATION_THRESHOLD / 1000;
- if (max_entry_count.to_gcov_type () < INT_MAX / 1000)
- count_threshold = max_entry_count.to_gcov_type () * DUPLICATION_THRESHOLD / 1000;
- else
- count_threshold = max_entry_count.to_gcov_type () / 1000 * DUPLICATION_THRESHOLD;
+ count_threshold = max_entry_count.apply_scale (DUPLICATION_THRESHOLD, 1000);
connected = XCNEWVEC (bool, n_traces);
last_trace = -1;
@@ -1291,8 +1277,7 @@ connect_traces (int n_traces, struct trace *traces)
&& bbd[di].start_of_trace >= 0
&& !connected[bbd[di].start_of_trace]
&& BB_PARTITION (e2->dest) == current_partition
- && EDGE_FREQUENCY (e2) >= freq_threshold
- && e2->count ().ipa () >= count_threshold
+ && e2->count () >= count_threshold
&& (!best2
|| e2->probability > best2->probability
|| (e2->probability == best2->probability
@@ -1317,9 +1302,8 @@ connect_traces (int n_traces, struct trace *traces)
&& BB_PARTITION (best->src) == BB_PARTITION (best->dest)
&& copy_bb_p (best->dest,
optimize_edge_for_speed_p (best)
- && EDGE_FREQUENCY (best) >= freq_threshold
&& (!best->count ().initialized_p ()
- || best->count ().ipa () >= count_threshold)))
+ || best->count () >= count_threshold)))
{
basic_block new_bb;
@@ -1377,8 +1361,6 @@ copy_bb_p (const_basic_block bb, int code_may_grow)
int max_size = uncond_jump_length;
rtx_insn *insn;
- if (!bb->count.to_frequency (cfun))
- return false;
if (EDGE_COUNT (bb->preds) < 2)
return false;
if (!can_duplicate_block_p (bb))
@@ -1542,8 +1524,8 @@ sanitize_hot_paths (bool walk_up, unsigned int cold_bb_count,
break;
}
/* The following loop will look for the hottest edge via
- the edge count, if it is non-zero, then fallback to the edge
- frequency and finally the edge probability. */
+ the edge count, if it is non-zero, then fallback to
+ the edge probability. */
if (!(e->count () > highest_count))
highest_count = e->count ();
if (!highest_probability.initialized_p ()
@@ -1568,8 +1550,7 @@ sanitize_hot_paths (bool walk_up, unsigned int cold_bb_count,
|| e->count () == profile_count::zero ())
continue;
/* Select the hottest edge using the edge count, if it is non-zero,
- then fallback to the edge frequency and finally the edge
- probability. */
+ then fallback to the edge probability. */
if (highest_count.initialized_p ())
{
if (!(e->count () >= highest_count))
@@ -2312,7 +2293,7 @@ reorder_basic_blocks_software_trace_cache (void)
static bool
edge_order (edge e1, edge e2)
{
- return EDGE_FREQUENCY (e1) > EDGE_FREQUENCY (e2);
+ return e1->count () > e2->count ();
}
/* Reorder basic blocks using the "simple" algorithm. This tries to
diff --git a/gcc/builtins.c b/gcc/builtins.c
index b0fe2a42980..b3cad6fe4b5 100644
--- a/gcc/builtins.c
+++ b/gcc/builtins.c
@@ -3267,18 +3267,60 @@ check_sizes (int opt, tree exp, tree size, tree maxlen, tree src, tree objsize)
}
/* Helper to compute the size of the object referenced by the DEST
- expression which must of of pointer type, using Object Size type
+ expression which must have pointer type, using Object Size type
OSTYPE (only the least significant 2 bits are used). Return
the size of the object if successful or NULL when the size cannot
be determined. */
-static inline tree
+tree
compute_objsize (tree dest, int ostype)
{
unsigned HOST_WIDE_INT size;
- if (compute_builtin_object_size (dest, ostype & 3, &size))
+
+ /* Only the two least significant bits are meaningful. */
+ ostype &= 3;
+
+ if (compute_builtin_object_size (dest, ostype, &size))
return build_int_cst (sizetype, size);
+ /* Unless computing the largest size (for memcpy and other raw memory
+ functions), try to determine the size of the object from its type. */
+ if (!ostype)
+ return NULL_TREE;
+
+ if (TREE_CODE (dest) == SSA_NAME)
+ {
+ gimple *stmt = SSA_NAME_DEF_STMT (dest);
+ if (!is_gimple_assign (stmt))
+ return NULL_TREE;
+
+ tree_code code = gimple_assign_rhs_code (stmt);
+ if (code != ADDR_EXPR && code != POINTER_PLUS_EXPR)
+ return NULL_TREE;
+
+ dest = gimple_assign_rhs1 (stmt);
+ }
+
+ if (TREE_CODE (dest) != ADDR_EXPR)
+ return NULL_TREE;
+
+ tree type = TREE_TYPE (dest);
+ if (TREE_CODE (type) == POINTER_TYPE)
+ type = TREE_TYPE (type);
+
+ type = TYPE_MAIN_VARIANT (type);
+
+ if (TREE_CODE (type) == ARRAY_TYPE
+ && !array_at_struct_end_p (dest))
+ {
+ /* Return the constant size unless it's zero (that's a zero-length
+ array likely at the end of a struct). */
+ tree size = TYPE_SIZE_UNIT (type);
+ if (size && TREE_CODE (size) == INTEGER_CST
+ && !integer_zerop (size))
+ return size;
+ }
+
return NULL_TREE;
}
@@ -3930,6 +3972,22 @@ expand_builtin_strncat (tree exp, rtx)
return NULL_RTX;
}
+/* Helper to check the sizes of sequences and the destination of calls
+ to __builtin_strncpy (DST, SRC, CNT) and __builtin___strncpy_chk.
+ Returns true on success (no overflow warning), false otherwise. */
+
+static bool
+check_strncpy_sizes (tree exp, tree dst, tree src, tree cnt)
+{
+ tree dstsize = compute_objsize (dst, warn_stringop_overflow - 1);
+
+ if (!check_sizes (OPT_Wstringop_overflow_,
+ exp, cnt, /*maxlen=*/NULL_TREE, src, dstsize))
+ return false;
+
+ return true;
+}
+
/* Expand expression EXP, which is a call to the strncpy builtin. Return
NULL_RTX if we failed the caller should emit a normal call. */
@@ -3948,16 +4006,7 @@ expand_builtin_strncpy (tree exp, rtx target)
/* The length of the source sequence. */
tree slen = c_strlen (src, 1);
- if (warn_stringop_overflow)
- {
- tree destsize = compute_objsize (dest,
- warn_stringop_overflow - 1);
-
- /* The number of bytes to write is LEN but check_sizes will also
- check SLEN if LEN's value isn't known. */
- check_sizes (OPT_Wstringop_overflow_,
- exp, len, /*maxlen=*/NULL_TREE, src, destsize);
- }
+ check_strncpy_sizes (exp, dest, src, len);
/* We must be passed a constant len and src parameter. */
if (!tree_fits_uhwi_p (len) || !slen || !tree_fits_uhwi_p (slen))
diff --git a/gcc/builtins.h b/gcc/builtins.h
index 8bcae4a2299..b5ce4e02d90 100644
--- a/gcc/builtins.h
+++ b/gcc/builtins.h
@@ -89,6 +89,7 @@ extern tree fold_call_stmt (gcall *, bool);
extern void set_builtin_user_assembler_name (tree decl, const char *asmspec);
extern bool is_simple_builtin (tree);
extern bool is_inexpensive_builtin (tree);
+extern tree compute_objsize (tree, int);
extern bool readonly_data_expr (tree exp);
extern bool init_target_chars (void);
diff --git a/gcc/c-family/ChangeLog b/gcc/c-family/ChangeLog
index e8476426fcb..3127635056c 100644
--- a/gcc/c-family/ChangeLog
+++ b/gcc/c-family/ChangeLog
@@ -1,3 +1,23 @@
+2017-11-15 Joseph Myers <joseph@codesourcery.com>
+
+ PR c/81156
+ * c-common.c (c_common_reswords): Add __builtin_tgmath.
+ * c-common.h (enum rid): Add RID_BUILTIN_TGMATH.
+
+2017-11-10 Martin Sebor <msebor@redhat.com>
+
+ PR c/81117
+ * c-common.c (catenate_strings): Use memcpy instead of strncpy.
+ * c-warn.c (sizeof_pointer_memaccess_warning): Handle arrays.
+ * c.opt (-Wstringop-truncation): New option.
+
+2017-11-06 Martin Liska <mliska@suse.cz>
+
+ PR middle-end/82404
+ * c-opts.c (c_common_post_options): Set -Wreturn-type for C++
+ FE.
+ * c.opt: Set default value of warn_return_type.
+
2017-10-31 David Malcolm <dmalcolm@redhat.com>
* c-common.c (binary_op_error): Update for renaming of
diff --git a/gcc/c-family/c-common.c b/gcc/c-family/c-common.c
index 83c6aadda27..94c9ebc7655 100644
--- a/gcc/c-family/c-common.c
+++ b/gcc/c-family/c-common.c
@@ -376,6 +376,7 @@ const struct c_common_resword c_common_reswords[] =
{ "__builtin_complex", RID_BUILTIN_COMPLEX, D_CONLY },
{ "__builtin_launder", RID_BUILTIN_LAUNDER, D_CXXONLY },
{ "__builtin_shuffle", RID_BUILTIN_SHUFFLE, 0 },
+ { "__builtin_tgmath", RID_BUILTIN_TGMATH, D_CONLY },
{ "__builtin_offsetof", RID_OFFSETOF, 0 },
{ "__builtin_types_compatible_p", RID_TYPES_COMPATIBLE_P, D_CONLY },
{ "__builtin_va_arg", RID_VA_ARG, 0 },
@@ -5900,10 +5901,10 @@ check_builtin_function_arguments (location_t loc, vec<location_t> arg_loc,
static char *
catenate_strings (const char *lhs, const char *rhs_start, int rhs_size)
{
- const int lhs_size = strlen (lhs);
+ const size_t lhs_size = strlen (lhs);
char *result = XNEWVEC (char, lhs_size + rhs_size);
- strncpy (result, lhs, lhs_size);
- strncpy (result + lhs_size, rhs_start, rhs_size);
+ memcpy (result, lhs, lhs_size);
+ memcpy (result + lhs_size, rhs_start, rhs_size);
return result;
}
diff --git a/gcc/c-family/c-common.h b/gcc/c-family/c-common.h
index 7e1877e8d16..5bb86191d2b 100644
--- a/gcc/c-family/c-common.h
+++ b/gcc/c-family/c-common.h
@@ -101,6 +101,7 @@ enum rid
RID_ASM, RID_TYPEOF, RID_ALIGNOF, RID_ATTRIBUTE, RID_VA_ARG,
RID_EXTENSION, RID_IMAGPART, RID_REALPART, RID_LABEL, RID_CHOOSE_EXPR,
RID_TYPES_COMPATIBLE_P, RID_BUILTIN_COMPLEX, RID_BUILTIN_SHUFFLE,
+ RID_BUILTIN_TGMATH,
RID_DFLOAT32, RID_DFLOAT64, RID_DFLOAT128,
/* TS 18661-3 keywords, in the same sequence as the TI_* values. */
diff --git a/gcc/c-family/c-opts.c b/gcc/c-family/c-opts.c
index 32120e636c2..cead15e7a63 100644
--- a/gcc/c-family/c-opts.c
+++ b/gcc/c-family/c-opts.c
@@ -989,6 +989,9 @@ c_common_post_options (const char **pfilename)
flag_extern_tls_init = 1;
}
+ if (warn_return_type == -1)
+ warn_return_type = c_dialect_cxx ();
+
if (num_in_fnames > 1)
error ("too many filenames given. Type %s --help for usage",
progname);
diff --git a/gcc/c-family/c-warn.c b/gcc/c-family/c-warn.c
index 09ef6856cf9..6cfded97e24 100644
--- a/gcc/c-family/c-warn.c
+++ b/gcc/c-family/c-warn.c
@@ -693,7 +693,8 @@ sizeof_pointer_memaccess_warning (location_t *sizeof_arg_loc, tree callee,
|| vec_safe_length (params) <= 1)
return;
- switch (DECL_FUNCTION_CODE (callee))
+ enum built_in_function fncode = DECL_FUNCTION_CODE (callee);
+ switch (fncode)
{
case BUILT_IN_STRNCMP:
case BUILT_IN_STRNCASECMP:
@@ -775,8 +776,27 @@ sizeof_pointer_memaccess_warning (location_t *sizeof_arg_loc, tree callee,
type = TYPE_P (sizeof_arg[idx])
? sizeof_arg[idx] : TREE_TYPE (sizeof_arg[idx]);
+
if (!POINTER_TYPE_P (type))
- return;
+ {
+ /* The argument type may be an array. Diagnose bounded string
+ copy functions that specify the bound in terms of the source
+ argument rather than the destination. */
+ if (strop && !cmp && fncode != BUILT_IN_STRNDUP && src)
+ {
+ tem = tree_strip_nop_conversions (src);
+ if (TREE_CODE (tem) == ADDR_EXPR)
+ tem = TREE_OPERAND (tem, 0);
+ if (operand_equal_p (tem, sizeof_arg[idx], OEP_ADDRESS_OF))
+ warning_at (sizeof_arg_loc[idx], OPT_Wsizeof_pointer_memaccess,
+ "argument to %<sizeof%> in %qD call is the same "
+ "expression as the source; did you mean to use "
+ "the size of the destination?",
+ callee);
+ }
+
+ return;
+ }
if (dest
&& (tem = tree_strip_nop_conversions (dest))
diff --git a/gcc/c-family/c.opt b/gcc/c-family/c.opt
index dae124ac1c2..479ae63bb0e 100644
--- a/gcc/c-family/c.opt
+++ b/gcc/c-family/c.opt
@@ -744,6 +744,10 @@ C ObjC C++ ObjC++ Joined RejectNegative UInteger Var(warn_stringop_overflow) Ini
Under the control of Object Size type, warn about buffer overflow in string
manipulation functions like memcpy and strcpy.
+Wstringop-truncation
+C ObjC C++ ObjC++ Var(warn_stringop_truncation) Warning Init (1) LangEnabledBy(C ObjC C++ ObjC++, Wall)
+Warn about truncation in string manipulation functions like strncat and strncpy.
+
Wsuggest-attribute=format
C ObjC C++ ObjC++ Var(warn_suggest_attribute_format) Warning
Warn about functions which might be candidates for format attributes.
@@ -960,7 +964,7 @@ C++ ObjC++ Var(warn_reorder) Warning LangEnabledBy(C++ ObjC++,Wall)
Warn when the compiler reorders code.
Wreturn-type
-C ObjC C++ ObjC++ Var(warn_return_type) Warning LangEnabledBy(C ObjC C++ ObjC++,Wall)
+C ObjC C++ ObjC++ Var(warn_return_type) Warning LangEnabledBy(C ObjC C++ ObjC++,Wall) Init(-1)
Warn whenever a function's return type defaults to \"int\" (C), or about inconsistent return types (C++).
Wscalar-storage-order
diff --git a/gcc/c/ChangeLog b/gcc/c/ChangeLog
index 60feeea9022..5622c8ae787 100644
--- a/gcc/c/ChangeLog
+++ b/gcc/c/ChangeLog
@@ -1,3 +1,10 @@
+2017-11-15 Joseph Myers <joseph@codesourcery.com>
+
+ PR c/81156
+ * c-parser.c (check_tgmath_function): New function.
+ (enum tgmath_parm_kind): New enum.
+ (c_parser_postfix_expression): Handle __builtin_tgmath.
+
2017-10-31 David Malcolm <dmalcolm@redhat.com>
* c-decl.c (implicit_decl_warning): Update for renaming of
diff --git a/gcc/c/c-parser.c b/gcc/c/c-parser.c
index 7bca5f1a2a7..3d90e28caad 100644
--- a/gcc/c/c-parser.c
+++ b/gcc/c/c-parser.c
@@ -7829,6 +7829,61 @@ c_parser_generic_selection (c_parser *parser)
return matched_assoc.expression;
}
+/* Check the validity of a function pointer argument *EXPR (argument
+ position POS) to __builtin_tgmath. Return the number of function
+ arguments if possibly valid; return 0 having reported an error if
+ not valid. */
+
+static unsigned int
+check_tgmath_function (c_expr *expr, unsigned int pos)
+{
+ tree type = TREE_TYPE (expr->value);
+ if (!FUNCTION_POINTER_TYPE_P (type))
+ {
+ error_at (expr->get_location (),
+ "argument %u of %<__builtin_tgmath%> is not a function pointer",
+ pos);
+ return 0;
+ }
+ type = TREE_TYPE (type);
+ if (!prototype_p (type))
+ {
+ error_at (expr->get_location (),
+ "argument %u of %<__builtin_tgmath%> is unprototyped", pos);
+ return 0;
+ }
+ if (stdarg_p (type))
+ {
+ error_at (expr->get_location (),
+ "argument %u of %<__builtin_tgmath%> has variable arguments",
+ pos);
+ return 0;
+ }
+ unsigned int nargs = 0;
+ function_args_iterator iter;
+ tree t;
+ FOREACH_FUNCTION_ARGS (type, t, iter)
+ {
+ if (t == void_type_node)
+ break;
+ nargs++;
+ }
+ if (nargs == 0)
+ {
+ error_at (expr->get_location (),
+ "argument %u of %<__builtin_tgmath%> has no arguments", pos);
+ return 0;
+ }
+ return nargs;
+}
+
+/* Ways in which a parameter or return value of a type-generic macro
+ may vary between the different functions the macro may call. */
+enum tgmath_parm_kind
+ {
+ tgmath_fixed, tgmath_real, tgmath_complex
+ };
+
/* Parse a postfix expression (C90 6.3.1-6.3.2, C99 6.5.1-6.5.2,
C11 6.5.1-6.5.2). Compound literals aren't handled here; callers have to
call c_parser_postfix_expression_after_paren_type on encountering them.
@@ -7869,6 +7924,7 @@ c_parser_generic_selection (c_parser *parser)
assignment-expression ,
assignment-expression )
__builtin_types_compatible_p ( type-name , type-name )
+ __builtin_tgmath ( expr-list )
__builtin_complex ( assignment-expression , assignment-expression )
__builtin_shuffle ( assignment-expression , assignment-expression )
__builtin_shuffle ( assignment-expression ,
@@ -8295,6 +8351,513 @@ c_parser_postfix_expression (c_parser *parser)
set_c_expr_source_range (&expr, loc, close_paren_loc);
}
break;
+ case RID_BUILTIN_TGMATH:
+ {
+ vec<c_expr_t, va_gc> *cexpr_list;
+ location_t close_paren_loc;
+
+ c_parser_consume_token (parser);
+ if (!c_parser_get_builtin_args (parser,
+ "__builtin_tgmath",
+ &cexpr_list, false,
+ &close_paren_loc))
+ {
+ expr.set_error ();
+ break;
+ }
+
+ if (vec_safe_length (cexpr_list) < 3)
+ {
+ error_at (loc, "too few arguments to %<__builtin_tgmath%>");
+ expr.set_error ();
+ break;
+ }
+
+ unsigned int i;
+ c_expr_t *p;
+ FOR_EACH_VEC_ELT (*cexpr_list, i, p)
+ *p = convert_lvalue_to_rvalue (loc, *p, true, true);
+ unsigned int nargs = check_tgmath_function (&(*cexpr_list)[0], 1);
+ if (nargs == 0)
+ {
+ expr.set_error ();
+ break;
+ }
+ if (vec_safe_length (cexpr_list) < nargs)
+ {
+ error_at (loc, "too few arguments to %<__builtin_tgmath%>");
+ expr.set_error ();
+ break;
+ }
+ unsigned int num_functions = vec_safe_length (cexpr_list) - nargs;
+ if (num_functions < 2)
+ {
+ error_at (loc, "too few arguments to %<__builtin_tgmath%>");
+ expr.set_error ();
+ break;
+ }
+
+ /* The first NUM_FUNCTIONS expressions are the function
+ pointers. The remaining NARGS expressions are the
+ arguments that are to be passed to one of those
+ functions, chosen following <tgmath.h> rules. */
+ for (unsigned int j = 1; j < num_functions; j++)
+ {
+ unsigned int this_nargs
+ = check_tgmath_function (&(*cexpr_list)[j], j + 1);
+ if (this_nargs == 0)
+ {
+ expr.set_error ();
+ goto out;
+ }
+ if (this_nargs != nargs)
+ {
+ error_at ((*cexpr_list)[j].get_location (),
+ "argument %u of %<__builtin_tgmath%> has "
+ "wrong number of arguments", j + 1);
+ expr.set_error ();
+ goto out;
+ }
+ }
+
+ /* The functions all have the same number of arguments.
+ Determine whether arguments and return types vary in
+ ways permitted for <tgmath.h> functions. */
+ /* The first entry in each of these vectors is for the
+ return type, subsequent entries for parameter
+ types. */
+ auto_vec<enum tgmath_parm_kind> parm_kind (nargs + 1);
+ auto_vec<tree> parm_first (nargs + 1);
+ auto_vec<bool> parm_complex (nargs + 1);
+ auto_vec<bool> parm_varies (nargs + 1);
+ tree first_type = TREE_TYPE (TREE_TYPE ((*cexpr_list)[0].value));
+ tree first_ret = TYPE_MAIN_VARIANT (TREE_TYPE (first_type));
+ parm_first.quick_push (first_ret);
+ parm_complex.quick_push (TREE_CODE (first_ret) == COMPLEX_TYPE);
+ parm_varies.quick_push (false);
+ function_args_iterator iter;
+ tree t;
+ unsigned int argpos;
+ FOREACH_FUNCTION_ARGS (first_type, t, iter)
+ {
+ if (t == void_type_node)
+ break;
+ parm_first.quick_push (TYPE_MAIN_VARIANT (t));
+ parm_complex.quick_push (TREE_CODE (t) == COMPLEX_TYPE);
+ parm_varies.quick_push (false);
+ }
+ for (unsigned int j = 1; j < num_functions; j++)
+ {
+ tree type = TREE_TYPE (TREE_TYPE ((*cexpr_list)[j].value));
+ tree ret = TYPE_MAIN_VARIANT (TREE_TYPE (type));
+ if (ret != parm_first[0])
+ {
+ parm_varies[0] = true;
+ if (!SCALAR_FLOAT_TYPE_P (parm_first[0])
+ && !COMPLEX_FLOAT_TYPE_P (parm_first[0]))
+ {
+ error_at ((*cexpr_list)[0].get_location (),
+ "invalid type-generic return type for "
+ "argument %u of %<__builtin_tgmath%>",
+ 1);
+ expr.set_error ();
+ goto out;
+ }
+ if (!SCALAR_FLOAT_TYPE_P (ret)
+ && !COMPLEX_FLOAT_TYPE_P (ret))
+ {
+ error_at ((*cexpr_list)[j].get_location (),
+ "invalid type-generic return type for "
+ "argument %u of %<__builtin_tgmath%>",
+ j + 1);
+ expr.set_error ();
+ goto out;
+ }
+ }
+ if (TREE_CODE (ret) == COMPLEX_TYPE)
+ parm_complex[0] = true;
+ argpos = 1;
+ FOREACH_FUNCTION_ARGS (type, t, iter)
+ {
+ if (t == void_type_node)
+ break;
+ t = TYPE_MAIN_VARIANT (t);
+ if (t != parm_first[argpos])
+ {
+ parm_varies[argpos] = true;
+ if (!SCALAR_FLOAT_TYPE_P (parm_first[argpos])
+ && !COMPLEX_FLOAT_TYPE_P (parm_first[argpos]))
+ {
+ error_at ((*cexpr_list)[0].get_location (),
+ "invalid type-generic type for "
+ "argument %u of argument %u of "
+ "%<__builtin_tgmath%>", argpos, 1);
+ expr.set_error ();
+ goto out;
+ }
+ if (!SCALAR_FLOAT_TYPE_P (t)
+ && !COMPLEX_FLOAT_TYPE_P (t))
+ {
+ error_at ((*cexpr_list)[j].get_location (),
+ "invalid type-generic type for "
+ "argument %u of argument %u of "
+ "%<__builtin_tgmath%>", argpos, j + 1);
+ expr.set_error ();
+ goto out;
+ }
+ }
+ if (TREE_CODE (t) == COMPLEX_TYPE)
+ parm_complex[argpos] = true;
+ argpos++;
+ }
+ }
+ enum tgmath_parm_kind max_variation = tgmath_fixed;
+ for (unsigned int j = 0; j <= nargs; j++)
+ {
+ enum tgmath_parm_kind this_kind;
+ if (parm_varies[j])
+ {
+ if (parm_complex[j])
+ max_variation = this_kind = tgmath_complex;
+ else
+ {
+ this_kind = tgmath_real;
+ if (max_variation != tgmath_complex)
+ max_variation = tgmath_real;
+ }
+ }
+ else
+ this_kind = tgmath_fixed;
+ parm_kind.quick_push (this_kind);
+ }
+ if (max_variation == tgmath_fixed)
+ {
+ error_at (loc, "function arguments of %<__builtin_tgmath%> "
+ "all have the same type");
+ expr.set_error ();
+ break;
+ }
+
+ /* Identify a parameter (not the return type) that varies,
+ including with complex types if any variation includes
+ complex types; there must be at least one such
+ parameter. */
+ unsigned int tgarg = 0;
+ for (unsigned int j = 1; j <= nargs; j++)
+ if (parm_kind[j] == max_variation)
+ {
+ tgarg = j;
+ break;
+ }
+ if (tgarg == 0)
+ {
+ error_at (loc, "function arguments of %<__builtin_tgmath%> "
+ "lack type-generic parameter");
+ expr.set_error ();
+ break;
+ }
+
+ /* Determine the type of the relevant parameter for each
+ function. */
+ auto_vec<tree> tg_type (num_functions);
+ for (unsigned int j = 0; j < num_functions; j++)
+ {
+ tree type = TREE_TYPE (TREE_TYPE ((*cexpr_list)[j].value));
+ argpos = 1;
+ FOREACH_FUNCTION_ARGS (type, t, iter)
+ {
+ if (argpos == tgarg)
+ {
+ tg_type.quick_push (TYPE_MAIN_VARIANT (t));
+ break;
+ }
+ argpos++;
+ }
+ }
+
+ /* Verify that the corresponding types are different for
+ all the listed functions. Also determine whether all
+ the types are complex, whether all the types are
+ standard or binary, and whether all the types are
+ decimal. */
+ bool all_complex = true;
+ bool all_binary = true;
+ bool all_decimal = true;
+ hash_set<tree> tg_types;
+ FOR_EACH_VEC_ELT (tg_type, i, t)
+ {
+ if (TREE_CODE (t) == COMPLEX_TYPE)
+ all_decimal = false;
+ else
+ {
+ all_complex = false;
+ if (DECIMAL_FLOAT_TYPE_P (t))
+ all_binary = false;
+ else
+ all_decimal = false;
+ }
+ if (tg_types.add (t))
+ {
+ error_at ((*cexpr_list)[i].get_location (),
+ "duplicate type-generic parameter type for "
+ "function argument %u of %<__builtin_tgmath%>",
+ i + 1);
+ expr.set_error ();
+ goto out;
+ }
+ }
+
+ /* Verify that other parameters and the return type whose
+ types vary have their types varying in the correct
+ way. */
+ for (unsigned int j = 0; j < num_functions; j++)
+ {
+ tree exp_type = tg_type[j];
+ tree exp_real_type = exp_type;
+ if (TREE_CODE (exp_type) == COMPLEX_TYPE)
+ exp_real_type = TREE_TYPE (exp_type);
+ tree type = TREE_TYPE (TREE_TYPE ((*cexpr_list)[j].value));
+ tree ret = TYPE_MAIN_VARIANT (TREE_TYPE (type));
+ if ((parm_kind[0] == tgmath_complex && ret != exp_type)
+ || (parm_kind[0] == tgmath_real && ret != exp_real_type))
+ {
+ error_at ((*cexpr_list)[j].get_location (),
+ "bad return type for function argument %u "
+ "of %<__builtin_tgmath%>", j + 1);
+ expr.set_error ();
+ goto out;
+ }
+ argpos = 1;
+ FOREACH_FUNCTION_ARGS (type, t, iter)
+ {
+ if (t == void_type_node)
+ break;
+ t = TYPE_MAIN_VARIANT (t);
+ if ((parm_kind[argpos] == tgmath_complex
+ && t != exp_type)
+ || (parm_kind[argpos] == tgmath_real
+ && t != exp_real_type))
+ {
+ error_at ((*cexpr_list)[j].get_location (),
+ "bad type for argument %u of "
+ "function argument %u of "
+ "%<__builtin_tgmath%>", argpos, j + 1);
+ expr.set_error ();
+ goto out;
+ }
+ argpos++;
+ }
+ }
+
+ /* The functions listed are a valid set of functions for a
+ <tgmath.h> macro to select between. Identify the
+ matching function, if any. First, the argument types
+ must be combined following <tgmath.h> rules. Integer
+ types are treated as _Decimal64 if any type-generic
+ argument is decimal, or if the only alternatives for
+ type-generic arguments are of decimal types, and are
+ otherwise treated as double (or _Complex double for
+ complex integer types). After that adjustment, types
+ are combined following the usual arithmetic
+ conversions. If the function only accepts complex
+ arguments, a complex type is produced. */
+ bool arg_complex = all_complex;
+ bool arg_binary = all_binary;
+ bool arg_int_decimal = all_decimal;
+ for (unsigned int j = 1; j <= nargs; j++)
+ {
+ if (parm_kind[j] == tgmath_fixed)
+ continue;
+ c_expr_t *ce = &(*cexpr_list)[num_functions + j - 1];
+ tree type = TREE_TYPE (ce->value);
+ if (!INTEGRAL_TYPE_P (type)
+ && !SCALAR_FLOAT_TYPE_P (type)
+ && TREE_CODE (type) != COMPLEX_TYPE)
+ {
+ error_at (ce->get_location (),
+ "invalid type of argument %u of type-generic "
+ "function", j);
+ expr.set_error ();
+ goto out;
+ }
+ if (DECIMAL_FLOAT_TYPE_P (type))
+ {
+ arg_int_decimal = true;
+ if (all_complex)
+ {
+ error_at (ce->get_location (),
+ "decimal floating-point argument %u to "
+ "complex-only type-generic function", j);
+ expr.set_error ();
+ goto out;
+ }
+ else if (all_binary)
+ {
+ error_at (ce->get_location (),
+ "decimal floating-point argument %u to "
+ "binary-only type-generic function", j);
+ expr.set_error ();
+ goto out;
+ }
+ else if (arg_complex)
+ {
+ error_at (ce->get_location (),
+ "both complex and decimal floating-point "
+ "arguments to type-generic function");
+ expr.set_error ();
+ goto out;
+ }
+ else if (arg_binary)
+ {
+ error_at (ce->get_location (),
+ "both binary and decimal floating-point "
+ "arguments to type-generic function");
+ expr.set_error ();
+ goto out;
+ }
+ }
+ else if (TREE_CODE (type) == COMPLEX_TYPE)
+ {
+ arg_complex = true;
+ if (COMPLEX_FLOAT_TYPE_P (type))
+ arg_binary = true;
+ if (all_decimal)
+ {
+ error_at (ce->get_location (),
+ "complex argument %u to "
+ "decimal-only type-generic function", j);
+ expr.set_error ();
+ goto out;
+ }
+ else if (arg_int_decimal)
+ {
+ error_at (ce->get_location (),
+ "both complex and decimal floating-point "
+ "arguments to type-generic function");
+ expr.set_error ();
+ goto out;
+ }
+ }
+ else if (SCALAR_FLOAT_TYPE_P (type))
+ {
+ arg_binary = true;
+ if (all_decimal)
+ {
+ error_at (ce->get_location (),
+ "binary argument %u to "
+ "decimal-only type-generic function", j);
+ expr.set_error ();
+ goto out;
+ }
+ else if (arg_int_decimal)
+ {
+ error_at (ce->get_location (),
+ "both binary and decimal floating-point "
+ "arguments to type-generic function");
+ expr.set_error ();
+ goto out;
+ }
+ }
+ }
+ tree arg_real = NULL_TREE;
+ for (unsigned int j = 1; j <= nargs; j++)
+ {
+ if (parm_kind[j] == tgmath_fixed)
+ continue;
+ c_expr_t *ce = &(*cexpr_list)[num_functions + j - 1];
+ tree type = TYPE_MAIN_VARIANT (TREE_TYPE (ce->value));
+ if (TREE_CODE (type) == COMPLEX_TYPE)
+ type = TREE_TYPE (type);
+ if (INTEGRAL_TYPE_P (type))
+ type = (arg_int_decimal
+ ? dfloat64_type_node
+ : double_type_node);
+ if (arg_real == NULL_TREE)
+ arg_real = type;
+ else
+ arg_real = common_type (arg_real, type);
+ if (arg_real == error_mark_node)
+ {
+ expr.set_error ();
+ goto out;
+ }
+ }
+ tree arg_type = (arg_complex
+ ? build_complex_type (arg_real)
+ : arg_real);
+
+ /* Look for a function to call with type-generic parameter
+ type ARG_TYPE. */
+ c_expr_t *fn = NULL;
+ for (unsigned int j = 0; j < num_functions; j++)
+ {
+ if (tg_type[j] == arg_type)
+ {
+ fn = &(*cexpr_list)[j];
+ break;
+ }
+ }
+ if (fn == NULL
+ && parm_kind[0] == tgmath_fixed
+ && SCALAR_FLOAT_TYPE_P (parm_first[0]))
+ {
+ /* Presume this is a macro that rounds its result to a
+ narrower type, and look for the first function with
+ at least the range and precision of the argument
+ type. */
+ for (unsigned int j = 0; j < num_functions; j++)
+ {
+ if (arg_complex
+ != (TREE_CODE (tg_type[j]) == COMPLEX_TYPE))
+ continue;
+ tree real_tg_type = (arg_complex
+ ? TREE_TYPE (tg_type[j])
+ : tg_type[j]);
+ if (DECIMAL_FLOAT_TYPE_P (arg_real)
+ != DECIMAL_FLOAT_TYPE_P (real_tg_type))
+ continue;
+ scalar_float_mode arg_mode
+ = SCALAR_FLOAT_TYPE_MODE (arg_real);
+ scalar_float_mode tg_mode
+ = SCALAR_FLOAT_TYPE_MODE (real_tg_type);
+ const real_format *arg_fmt = REAL_MODE_FORMAT (arg_mode);
+ const real_format *tg_fmt = REAL_MODE_FORMAT (tg_mode);
+ if (arg_fmt->b == tg_fmt->b
+ && arg_fmt->p <= tg_fmt->p
+ && arg_fmt->emax <= tg_fmt->emax
+ && (arg_fmt->emin - arg_fmt->p
+ >= tg_fmt->emin - tg_fmt->p))
+ {
+ fn = &(*cexpr_list)[j];
+ break;
+ }
+ }
+ }
+ if (fn == NULL)
+ {
+ error_at (loc, "no matching function for type-generic call");
+ expr.set_error ();
+ break;
+ }
+
+ /* Construct a call to FN. */
+ vec<tree, va_gc> *args;
+ vec_alloc (args, nargs);
+ vec<tree, va_gc> *origtypes;
+ vec_alloc (origtypes, nargs);
+ auto_vec<location_t> arg_loc (nargs);
+ for (unsigned int j = 0; j < nargs; j++)
+ {
+ c_expr_t *ce = &(*cexpr_list)[num_functions + j];
+ args->quick_push (ce->value);
+ arg_loc.quick_push (ce->get_location ());
+ origtypes->quick_push (ce->original_type);
+ }
+ expr.value = c_build_function_call_vec (loc, arg_loc, fn->value,
+ args, origtypes);
+ set_c_expr_source_range (&expr, loc, close_paren_loc);
+ break;
+ }
case RID_BUILTIN_CALL_WITH_STATIC_CHAIN:
{
vec<c_expr_t, va_gc> *cexpr_list;
@@ -8563,6 +9126,7 @@ c_parser_postfix_expression (c_parser *parser)
expr.set_error ();
break;
}
+ out:
return c_parser_postfix_expression_after_primary
(parser, EXPR_LOC_OR_LOC (expr.value, loc), expr);
}
diff --git a/gcc/cfgexpand.c b/gcc/cfgexpand.c
index 06a8af8a166..cc2212969af 100644
--- a/gcc/cfgexpand.c
+++ b/gcc/cfgexpand.c
@@ -5073,7 +5073,7 @@ expand_debug_expr (tree exp)
case REDUC_AND_EXPR:
case REDUC_IOR_EXPR:
case REDUC_XOR_EXPR:
- case STRICT_REDUC_PLUS_EXPR:
+ case FOLD_LEFT_PLUS_EXPR:
case VEC_COND_EXPR:
case VEC_PACK_FIX_TRUNC_EXPR:
case VEC_PACK_SAT_EXPR:
diff --git a/gcc/cgraph.c b/gcc/cgraph.c
index 7c3507c6ece..bc60fc90f56 100644
--- a/gcc/cgraph.c
+++ b/gcc/cgraph.c
@@ -822,7 +822,7 @@ cgraph_edge::set_call_stmt (gcall *new_stmt, bool update_speculative)
cgraph_edge *
symbol_table::create_edge (cgraph_node *caller, cgraph_node *callee,
- gcall *call_stmt, profile_count count, int freq,
+ gcall *call_stmt, profile_count count,
bool indir_unknown_callee)
{
cgraph_edge *edge;
@@ -862,10 +862,7 @@ symbol_table::create_edge (cgraph_node *caller, cgraph_node *callee,
edge->next_callee = NULL;
edge->lto_stmt_uid = 0;
- edge->count = count.ipa ();
- edge->frequency = freq;
- gcc_checking_assert (freq >= 0);
- gcc_checking_assert (freq <= CGRAPH_FREQ_MAX);
+ edge->count = count;
edge->call_stmt = call_stmt;
push_cfun (DECL_STRUCT_FUNCTION (caller->decl));
@@ -907,10 +904,10 @@ symbol_table::create_edge (cgraph_node *caller, cgraph_node *callee,
cgraph_edge *
cgraph_node::create_edge (cgraph_node *callee,
- gcall *call_stmt, profile_count count, int freq)
+ gcall *call_stmt, profile_count count)
{
cgraph_edge *edge = symtab->create_edge (this, callee, call_stmt, count,
- freq, false);
+ false);
initialize_inline_failed (edge);
@@ -944,11 +941,11 @@ cgraph_allocate_init_indirect_info (void)
cgraph_edge *
cgraph_node::create_indirect_edge (gcall *call_stmt, int ecf_flags,
- profile_count count, int freq,
+ profile_count count,
bool compute_indirect_info)
{
cgraph_edge *edge = symtab->create_edge (this, NULL, call_stmt,
- count, freq, true);
+ count, true);
tree target;
initialize_inline_failed (edge);
@@ -1060,8 +1057,7 @@ cgraph_edge::remove (void)
Return direct edge created. */
cgraph_edge *
-cgraph_edge::make_speculative (cgraph_node *n2, profile_count direct_count,
- int direct_frequency)
+cgraph_edge::make_speculative (cgraph_node *n2, profile_count direct_count)
{
cgraph_node *n = caller;
ipa_ref *ref = NULL;
@@ -1071,7 +1067,7 @@ cgraph_edge::make_speculative (cgraph_node *n2, profile_count direct_count,
fprintf (dump_file, "Indirect call -> speculative call %s => %s\n",
n->dump_name (), n2->dump_name ());
speculative = true;
- e2 = n->create_edge (n2, call_stmt, direct_count, direct_frequency);
+ e2 = n->create_edge (n2, call_stmt, direct_count);
initialize_inline_failed (e2);
e2->speculative = true;
if (TREE_NOTHROW (n2->decl))
@@ -1081,7 +1077,6 @@ cgraph_edge::make_speculative (cgraph_node *n2, profile_count direct_count,
e2->lto_stmt_uid = lto_stmt_uid;
e2->in_polymorphic_cdtor = in_polymorphic_cdtor;
count -= e2->count;
- frequency -= e2->frequency;
symtab->call_edge_duplication_hooks (this, e2);
ref = n->create_reference (n2, IPA_REF_ADDR, call_stmt);
ref->lto_stmt_uid = lto_stmt_uid;
@@ -1198,9 +1193,6 @@ cgraph_edge::resolve_speculation (tree callee_decl)
in the functions inlined through it. */
}
edge->count += e2->count;
- edge->frequency += e2->frequency;
- if (edge->frequency > CGRAPH_FREQ_MAX)
- edge->frequency = CGRAPH_FREQ_MAX;
edge->speculative = false;
e2->speculative = false;
ref->remove_reference ();
@@ -1308,9 +1300,7 @@ cgraph_edge::redirect_call_stmt_to_callee (void)
/* We are producing the final function body and will throw away the
callgraph edges really soon. Reset the counts/frequencies to
keep verifier happy in the case of roundoff errors. */
- e->count = gimple_bb (e->call_stmt)->count.ipa ();
- e->frequency = compute_call_stmt_bb_frequency
- (e->caller->decl, gimple_bb (e->call_stmt));
+ e->count = gimple_bb (e->call_stmt)->count;
}
/* Expand speculation into GIMPLE code. */
else
@@ -1329,12 +1319,7 @@ cgraph_edge::redirect_call_stmt_to_callee (void)
profile_probability prob = e->count.probability_in (e->count
+ e2->count);
- if (prob.initialized_p ())
- ;
- else if (e->frequency || e2->frequency)
- prob = profile_probability::probability_in_gcov_type
- (e->frequency, e->frequency + e2->frequency).guessed ();
- else
+ if (!prob.initialized_p ())
prob = profile_probability::even ();
new_stmt = gimple_ic (e->call_stmt,
dyn_cast<cgraph_node *> (ref->referred),
@@ -1355,24 +1340,11 @@ cgraph_edge::redirect_call_stmt_to_callee (void)
gcall *ibndret = chkp_retbnd_call_by_val (iresult);
struct cgraph_edge *iedge
= e2->caller->cgraph_node::get_edge (ibndret);
- struct cgraph_edge *dedge;
if (dbndret)
- {
- dedge = iedge->caller->create_edge (iedge->callee,
- dbndret, e->count,
- e->frequency);
- dedge->frequency = compute_call_stmt_bb_frequency
- (dedge->caller->decl, gimple_bb (dedge->call_stmt));
- }
- iedge->frequency = compute_call_stmt_bb_frequency
- (iedge->caller->decl, gimple_bb (iedge->call_stmt));
+ iedge->caller->create_edge (iedge->callee, dbndret, e->count);
}
- e->frequency = compute_call_stmt_bb_frequency
- (e->caller->decl, gimple_bb (e->call_stmt));
- e2->frequency = compute_call_stmt_bb_frequency
- (e2->caller->decl, gimple_bb (e2->call_stmt));
e2->speculative = false;
ref->speculative = false;
ref->stmt = NULL;
@@ -1610,7 +1582,6 @@ cgraph_update_edges_for_call_stmt_node (cgraph_node *node,
cgraph_edge *e = node->get_edge (old_stmt);
cgraph_edge *ne = NULL;
profile_count count;
- int frequency;
if (e)
{
@@ -1644,8 +1615,7 @@ cgraph_update_edges_for_call_stmt_node (cgraph_node *node,
/* Otherwise remove edge and create new one; we can't simply redirect
since function has changed, so inline plan and other information
attached to edge is invalid. */
- count = e->count.ipa ();
- frequency = e->frequency;
+ count = e->count;
if (e->indirect_unknown_callee || e->inline_failed)
e->remove ();
else
@@ -1655,16 +1625,13 @@ cgraph_update_edges_for_call_stmt_node (cgraph_node *node,
{
/* We are seeing new direct call; compute profile info based on BB. */
basic_block bb = gimple_bb (new_stmt);
- count = bb->count.ipa ();
- frequency = compute_call_stmt_bb_frequency (current_function_decl,
- bb);
+ count = bb->count;
}
if (new_call)
{
ne = node->create_edge (cgraph_node::get_create (new_call),
- as_a <gcall *> (new_stmt), count,
- frequency);
+ as_a <gcall *> (new_stmt), count);
gcc_assert (ne->inline_failed);
}
}
@@ -2056,10 +2023,9 @@ cgraph_edge::dump_edge_flags (FILE *f)
{
fprintf (f, "(");
count.dump (f);
- fprintf (f, ")");
+ fprintf (f, ",");
+ fprintf (f, "%.2f per call) ", frequency () / (double)CGRAPH_FREQ_BASE);
}
- if (frequency)
- fprintf (f, "(%.2f per call) ", frequency / (double)CGRAPH_FREQ_BASE);
if (can_throw_external)
fprintf (f, "(can throw external) ");
}
@@ -2205,7 +2171,7 @@ cgraph_node::dump (FILE *f)
}
fprintf (f, "\n");
- if (count.initialized_p ())
+ if (count.ipa ().initialized_p ())
{
bool ok = true;
bool min = false;
@@ -2213,14 +2179,14 @@ cgraph_node::dump (FILE *f)
FOR_EACH_ALIAS (this, ref)
if (dyn_cast <cgraph_node *> (ref->referring)->count.initialized_p ())
- sum += dyn_cast <cgraph_node *> (ref->referring)->count;
+ sum += dyn_cast <cgraph_node *> (ref->referring)->count.ipa ();
if (global.inlined_to
|| (symtab->state < EXPANSION
&& ultimate_alias_target () == this && only_called_directly_p ()))
ok = !count.differs_from_p (sum);
- else if (count > profile_count::from_gcov_type (100)
- && count < sum.apply_scale (99, 100))
+ else if (count.ipa () > profile_count::from_gcov_type (100)
+ && count.ipa () < sum.apply_scale (99, 100))
ok = false, min = true;
if (!ok)
{
@@ -2826,7 +2792,7 @@ cgraph_edge::cannot_lead_to_return_p (void)
bool
cgraph_edge::maybe_hot_p (void)
{
- if (!maybe_hot_count_p (NULL, count))
+ if (!maybe_hot_count_p (NULL, count.ipa ()))
return false;
if (caller->frequency == NODE_FREQUENCY_UNLIKELY_EXECUTED
|| (callee
@@ -2845,12 +2811,12 @@ cgraph_edge::maybe_hot_p (void)
if (symtab->state < IPA_SSA)
return true;
if (caller->frequency == NODE_FREQUENCY_EXECUTED_ONCE
- && frequency < CGRAPH_FREQ_BASE * 3 / 2)
+ && frequency () < CGRAPH_FREQ_BASE * 3 / 2)
return false;
if (opt_for_fn (caller->decl, flag_guess_branch_prob))
{
if (PARAM_VALUE (HOT_BB_FREQUENCY_FRACTION) == 0
- || frequency <= (CGRAPH_FREQ_BASE
+ || frequency () <= (CGRAPH_FREQ_BASE
/ PARAM_VALUE (HOT_BB_FREQUENCY_FRACTION)))
return false;
}
@@ -3079,7 +3045,7 @@ clone_of_p (cgraph_node *node, cgraph_node *node2)
/* Verify edge count and frequency. */
bool
-cgraph_edge::verify_count_and_frequency ()
+cgraph_edge::verify_count ()
{
bool error_found = false;
if (!count.verify ())
@@ -3087,21 +3053,6 @@ cgraph_edge::verify_count_and_frequency ()
error ("caller edge count invalid");
error_found = true;
}
- if (count.initialized_p () && !(count.ipa () == count))
- {
- error ("caller edge count is local");
- error_found = true;
- }
- if (frequency < 0)
- {
- error ("caller edge frequency is negative");
- error_found = true;
- }
- if (frequency > CGRAPH_FREQ_MAX)
- {
- error ("caller edge frequency is too large");
- error_found = true;
- }
return error_found;
}
@@ -3193,11 +3144,6 @@ cgraph_node::verify_node (void)
error ("cgraph count invalid");
error_found = true;
}
- if (count.initialized_p () && !(count.ipa () == count))
- {
- error ("cgraph count is local");
- error_found = true;
- }
if (global.inlined_to && same_comdat_group)
{
error ("inline clone in same comdat group list");
@@ -3244,7 +3190,7 @@ cgraph_node::verify_node (void)
bool check_comdat = comdat_local_p ();
for (e = callers; e; e = e->next_caller)
{
- if (e->verify_count_and_frequency ())
+ if (e->verify_count ())
error_found = true;
if (check_comdat
&& !in_same_comdat_group_p (e->caller))
@@ -3277,46 +3223,49 @@ cgraph_node::verify_node (void)
}
for (e = callees; e; e = e->next_callee)
{
- if (e->verify_count_and_frequency ())
+ if (e->verify_count ())
error_found = true;
- /* FIXME: re-enable once cgraph is converted to counts. */
if (gimple_has_body_p (e->caller->decl)
- && 0
&& !e->caller->global.inlined_to
&& !e->speculative
/* Optimized out calls are redirected to __builtin_unreachable. */
- && (e->frequency
+ && (e->count.nonzero_p ()
|| ! e->callee->decl
|| DECL_BUILT_IN_CLASS (e->callee->decl) != BUILT_IN_NORMAL
|| DECL_FUNCTION_CODE (e->callee->decl) != BUILT_IN_UNREACHABLE)
- && (e->frequency
- != compute_call_stmt_bb_frequency (e->caller->decl,
- gimple_bb (e->call_stmt))))
+ && count
+ == ENTRY_BLOCK_PTR_FOR_FN (DECL_STRUCT_FUNCTION (decl))->count
+ && (!e->count.ipa_p ()
+ && e->count.differs_from_p (gimple_bb (e->call_stmt)->count)))
{
- error ("caller edge frequency %i does not match BB frequency %i",
- e->frequency,
- compute_call_stmt_bb_frequency (e->caller->decl,
- gimple_bb (e->call_stmt)));
+ error ("caller edge count does not match BB count");
+ fprintf (stderr, "edge count: ");
+ e->count.dump (stderr);
+ fprintf (stderr, "\n bb count: ");
+ gimple_bb (e->call_stmt)->count.dump (stderr);
+ fprintf (stderr, "\n");
error_found = true;
}
}
for (e = indirect_calls; e; e = e->next_callee)
{
- if (e->verify_count_and_frequency ())
+ if (e->verify_count ())
error_found = true;
- /* FIXME: re-enable once cgraph is converted to counts. */
if (gimple_has_body_p (e->caller->decl)
&& !e->caller->global.inlined_to
&& !e->speculative
- && 0
- && (e->frequency
- != compute_call_stmt_bb_frequency (e->caller->decl,
- gimple_bb (e->call_stmt))))
+ && e->count.ipa_p ()
+ && count
+ == ENTRY_BLOCK_PTR_FOR_FN (DECL_STRUCT_FUNCTION (decl))->count
+ && (!e->count.ipa_p ()
+ && e->count.differs_from_p (gimple_bb (e->call_stmt)->count)))
{
- error ("indirect call frequency %i does not match BB frequency %i",
- e->frequency,
- compute_call_stmt_bb_frequency (e->caller->decl,
- gimple_bb (e->call_stmt)));
+ error ("indirect call count does not match BB count");
+ fprintf (stderr, "edge count: ");
+ e->count.dump (stderr);
+ fprintf (stderr, "\n bb count: ");
+ gimple_bb (e->call_stmt)->count.dump (stderr);
+ fprintf (stderr, "\n");
error_found = true;
}
}
@@ -3931,4 +3880,16 @@ cgraph_node::has_thunk_p (cgraph_node *node, void *)
return false;
}
+/* Expected frequency of executions within the function.
+ When set to CGRAPH_FREQ_BASE, the edge is expected to be called once
+ per function call. The range is 0 to CGRAPH_FREQ_MAX. */
+
+sreal
+cgraph_edge::sreal_frequency ()
+{
+ return count.to_sreal_scale (caller->global.inlined_to
+ ? caller->global.inlined_to->count
+ : caller->count);
+}
+
#include "gt-cgraph.h"
diff --git a/gcc/cgraph.h b/gcc/cgraph.h
index 84824e9f814..1c952eb5094 100644
--- a/gcc/cgraph.h
+++ b/gcc/cgraph.h
@@ -942,7 +942,7 @@ public:
All hooks will see this in node's global.inlined_to, when invoked.
Can be NULL if the node is not inlined. SUFFIX is string that is appended
to the original name. */
- cgraph_node *create_clone (tree decl, profile_count count, int freq,
+ cgraph_node *create_clone (tree decl, profile_count count,
bool update_original,
vec<cgraph_edge *> redirect_callers,
bool call_duplication_hook,
@@ -1110,14 +1110,13 @@ public:
/* Create edge from a given function to CALLEE in the cgraph. */
cgraph_edge *create_edge (cgraph_node *callee,
- gcall *call_stmt, profile_count count,
- int freq);
+ gcall *call_stmt, profile_count count);
/* Create an indirect edge with a yet-undetermined callee where the call
statement destination is a formal parameter of the caller with index
PARAM_INDEX. */
cgraph_edge *create_indirect_edge (gcall *call_stmt, int ecf_flags,
- profile_count count, int freq,
+ profile_count count,
bool compute_indirect_info = true);
/* Like cgraph_create_edge walk the clone tree and update all clones sharing
@@ -1126,7 +1125,6 @@ public:
void create_edge_including_clones (cgraph_node *callee,
gimple *old_stmt, gcall *stmt,
profile_count count,
- int freq,
cgraph_inline_failed_t reason);
/* Return the callgraph edge representing the GIMPLE_CALL statement
@@ -1665,8 +1663,7 @@ struct GTY((chain_next ("%h.next_caller"), chain_prev ("%h.prev_caller"),
/* Turn edge into speculative call calling N2. Update
the profile so the direct call is taken COUNT times
with FREQUENCY. */
- cgraph_edge *make_speculative (cgraph_node *n2, profile_count direct_count,
- int direct_frequency);
+ cgraph_edge *make_speculative (cgraph_node *n2, profile_count direct_count);
/* Given speculative call edge, return all three components. */
void speculative_call_info (cgraph_edge *&direct, cgraph_edge *&indirect,
@@ -1684,11 +1681,11 @@ struct GTY((chain_next ("%h.next_caller"), chain_prev ("%h.prev_caller"),
/* Create clone of edge in the node N represented
by CALL_EXPR the callgraph. */
cgraph_edge * clone (cgraph_node *n, gcall *call_stmt, unsigned stmt_uid,
- profile_count num, profile_count den, int freq_scale,
+ profile_count num, profile_count den,
bool update_original);
/* Verify edge count and frequency. */
- bool verify_count_and_frequency ();
+ bool verify_count ();
/* Return true when call of edge can not lead to return from caller
and thus it is safe to ignore its side effects for IPA analysis
@@ -1728,10 +1725,6 @@ struct GTY((chain_next ("%h.next_caller"), chain_prev ("%h.prev_caller"),
/* The stmt_uid of call_stmt. This is used by LTO to recover the call_stmt
when the function is serialized in. */
unsigned int lto_stmt_uid;
- /* Expected frequency of executions within the function.
- When set to CGRAPH_FREQ_BASE, the edge is expected to be called once
- per function call. The range is 0 to CGRAPH_FREQ_MAX. */
- int frequency;
/* Unique id of the edge. */
int uid;
/* Whether this edge was made direct by indirect inlining. */
@@ -1769,6 +1762,13 @@ struct GTY((chain_next ("%h.next_caller"), chain_prev ("%h.prev_caller"),
/* Return true if call must bind to current definition. */
bool binds_to_current_def_p ();
+ /* Expected frequency of executions within the function.
+ When set to CGRAPH_FREQ_BASE, the edge is expected to be called once
+ per function call. The range is 0 to CGRAPH_FREQ_MAX. */
+ int frequency ();
+
+ /* Expected frequency of executions within the function. */
+ sreal sreal_frequency ();
private:
/* Remove the edge from the list of the callers of the callee. */
void remove_caller (void);
@@ -2287,7 +2287,7 @@ private:
parameters of which only CALLEE can be NULL (when creating an indirect call
edge). */
cgraph_edge *create_edge (cgraph_node *caller, cgraph_node *callee,
- gcall *call_stmt, profile_count count, int freq,
+ gcall *call_stmt, profile_count count,
bool indir_unknown_callee);
/* Put the edge onto the free list. */
@@ -3111,6 +3111,19 @@ cgraph_edge::binds_to_current_def_p ()
return false;
}
+/* Expected frequency of executions within the function.
+ When set to CGRAPH_FREQ_BASE, the edge is expected to be called once
+ per function call. The range is 0 to CGRAPH_FREQ_MAX. */
+
+inline int
+cgraph_edge::frequency ()
+{
+ return count.to_cgraph_frequency (caller->global.inlined_to
+ ? caller->global.inlined_to->count
+ : caller->count);
+}
+
+
/* Return true if the TM_CLONE bit is set for a given FNDECL. */
static inline bool
decl_is_tm_clone (const_tree fndecl)
diff --git a/gcc/cgraphbuild.c b/gcc/cgraphbuild.c
index dd4bf9a7fa3..efb333cbae0 100644
--- a/gcc/cgraphbuild.c
+++ b/gcc/cgraphbuild.c
@@ -317,17 +317,15 @@ pass_build_cgraph_edges::execute (function *fun)
if (gcall *call_stmt = dyn_cast <gcall *> (stmt))
{
- int freq = compute_call_stmt_bb_frequency (current_function_decl,
- bb);
decl = gimple_call_fndecl (call_stmt);
if (decl)
- node->create_edge (cgraph_node::get_create (decl), call_stmt, bb->count, freq);
+ node->create_edge (cgraph_node::get_create (decl), call_stmt, bb->count);
else if (gimple_call_internal_p (call_stmt))
;
else
node->create_indirect_edge (call_stmt,
gimple_call_flags (call_stmt),
- bb->count, freq);
+ bb->count);
}
node->record_stmt_references (stmt);
if (gomp_parallel *omp_par_stmt = dyn_cast <gomp_parallel *> (stmt))
@@ -402,7 +400,7 @@ cgraph_edge::rebuild_edges (void)
node->remove_callees ();
node->remove_all_references ();
- node->count = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count.ipa ();
+ node->count = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;
FOR_EACH_BB_FN (bb, cfun)
{
@@ -413,18 +411,16 @@ cgraph_edge::rebuild_edges (void)
if (gcall *call_stmt = dyn_cast <gcall *> (stmt))
{
- int freq = compute_call_stmt_bb_frequency (current_function_decl,
- bb);
decl = gimple_call_fndecl (call_stmt);
if (decl)
node->create_edge (cgraph_node::get_create (decl), call_stmt,
- bb->count, freq);
+ bb->count);
else if (gimple_call_internal_p (call_stmt))
;
else
node->create_indirect_edge (call_stmt,
gimple_call_flags (call_stmt),
- bb->count, freq);
+ bb->count);
}
node->record_stmt_references (stmt);
}
diff --git a/gcc/cgraphclones.c b/gcc/cgraphclones.c
index 6513aa768be..a575a34b0c6 100644
--- a/gcc/cgraphclones.c
+++ b/gcc/cgraphclones.c
@@ -87,20 +87,11 @@ along with GCC; see the file COPYING3. If not see
cgraph_edge *
cgraph_edge::clone (cgraph_node *n, gcall *call_stmt, unsigned stmt_uid,
profile_count num, profile_count den,
- int freq_scale, bool update_original)
+ bool update_original)
{
cgraph_edge *new_edge;
- profile_count gcov_count
- = (num == profile_count::zero () || den > 0)
- ? count.apply_scale (num, den) : count;
- gcov_type freq;
-
- /* We do not want to ignore loop nest after frequency drops to 0. */
- if (!freq_scale)
- freq_scale = 1;
- freq = frequency * (gcov_type) freq_scale / CGRAPH_FREQ_BASE;
- if (freq > CGRAPH_FREQ_MAX)
- freq = CGRAPH_FREQ_MAX;
+ profile_count::adjust_for_ipa_scaling (&num, &den);
+ profile_count gcov_count = count.apply_scale (num, den);
if (indirect_unknown_callee)
{
@@ -113,19 +104,19 @@ cgraph_edge::clone (cgraph_node *n, gcall *call_stmt, unsigned stmt_uid,
{
cgraph_node *callee = cgraph_node::get (decl);
gcc_checking_assert (callee);
- new_edge = n->create_edge (callee, call_stmt, gcov_count, freq);
+ new_edge = n->create_edge (callee, call_stmt, gcov_count);
}
else
{
new_edge = n->create_indirect_edge (call_stmt,
indirect_info->ecf_flags,
- gcov_count, freq, false);
+ gcov_count, false);
*new_edge->indirect_info = *indirect_info;
}
}
else
{
- new_edge = n->create_edge (callee, call_stmt, gcov_count, freq);
+ new_edge = n->create_edge (callee, call_stmt, gcov_count);
if (indirect_info)
{
new_edge->indirect_info
@@ -142,10 +133,14 @@ cgraph_edge::clone (cgraph_node *n, gcall *call_stmt, unsigned stmt_uid,
new_edge->call_stmt_cannot_inline_p = call_stmt_cannot_inline_p;
new_edge->speculative = speculative;
new_edge->in_polymorphic_cdtor = in_polymorphic_cdtor;
- if (update_original)
- {
- count -= new_edge->count;
- }
+
+ /* Update IPA profile. Local profiles need no updating in original. */
+ if (update_original
+ && count.ipa () == count && new_edge->count.ipa () == new_edge->count)
+ count -= new_edge->count;
+ else if (caller->count.global0 () == caller->count
+ && !(count == profile_count::zero ()))
+ count = count.global0 ();
symtab->call_edge_duplication_hooks (this, new_edge);
return new_edge;
}
@@ -337,8 +332,7 @@ duplicate_thunk_for_node (cgraph_node *thunk, cgraph_node *node)
new_thunk->clone.args_to_skip = node->clone.args_to_skip;
new_thunk->clone.combined_args_to_skip = node->clone.combined_args_to_skip;
- cgraph_edge *e = new_thunk->create_edge (node, NULL, new_thunk->count,
- CGRAPH_FREQ_BASE);
+ cgraph_edge *e = new_thunk->create_edge (node, NULL, new_thunk->count);
symtab->call_edge_duplication_hooks (thunk->callees, e);
symtab->call_cgraph_duplication_hooks (thunk, new_thunk);
return new_thunk;
@@ -422,7 +416,7 @@ dump_callgraph_transformation (const cgraph_node *original,
node is not inlined. */
cgraph_node *
-cgraph_node::create_clone (tree new_decl, profile_count prof_count, int freq,
+cgraph_node::create_clone (tree new_decl, profile_count prof_count,
bool update_original,
vec<cgraph_edge *> redirect_callers,
bool call_duplication_hook,
@@ -432,11 +426,27 @@ cgraph_node::create_clone (tree new_decl, profile_count prof_count, int freq,
cgraph_node *new_node = symtab->create_empty ();
cgraph_edge *e;
unsigned i;
+ profile_count old_count = count;
if (new_inlined_to)
dump_callgraph_transformation (this, new_inlined_to, "inlining to");
+ if (prof_count == profile_count::zero ()
+ && !(count == profile_count::zero ()))
+ prof_count = count.global0 ();
+
new_node->count = prof_count;
+
+ /* Update IPA profile. Local profiles need no updating in original. */
+ if (update_original && !(count == profile_count::zero ())
+ && count.ipa () == count && prof_count.ipa () == prof_count)
+ {
+ if (count.nonzero_p ()
+ && !(count - prof_count).nonzero_p ())
+ count = count.global0 ();
+ else
+ count -= prof_count;
+ }
new_node->decl = new_decl;
new_node->register_symbol ();
new_node->origin = origin;
@@ -489,12 +499,12 @@ cgraph_node::create_clone (tree new_decl, profile_count prof_count, int freq,
new_node->expand_all_artificial_thunks ();
for (e = callees;e; e=e->next_callee)
- e->clone (new_node, e->call_stmt, e->lto_stmt_uid, new_node->count, count,
- freq, update_original);
+ e->clone (new_node, e->call_stmt, e->lto_stmt_uid, new_node->count, old_count,
+ update_original);
for (e = indirect_calls; e; e = e->next_callee)
e->clone (new_node, e->call_stmt, e->lto_stmt_uid,
- new_node->count, count, freq, update_original);
+ new_node->count, old_count, update_original);
new_node->clone_references (this);
new_node->next_sibling_clone = clones;
@@ -503,9 +513,6 @@ cgraph_node::create_clone (tree new_decl, profile_count prof_count, int freq,
clones = new_node;
new_node->clone_of = this;
- if (update_original)
- count -= prof_count;
-
if (call_duplication_hook)
symtab->call_cgraph_duplication_hooks (this, new_node);
@@ -591,7 +598,7 @@ cgraph_node::create_virtual_clone (vec<cgraph_edge *> redirect_callers,
SET_DECL_ASSEMBLER_NAME (new_decl, clone_function_name (old_decl, suffix));
SET_DECL_RTL (new_decl, NULL);
- new_node = create_clone (new_decl, count, CGRAPH_FREQ_BASE, false,
+ new_node = create_clone (new_decl, count, false,
redirect_callers, false, NULL, args_to_skip, suffix);
/* Update the properties.
@@ -773,7 +780,6 @@ void
cgraph_node::create_edge_including_clones (cgraph_node *callee,
gimple *old_stmt, gcall *stmt,
profile_count count,
- int freq,
cgraph_inline_failed_t reason)
{
cgraph_node *node;
@@ -781,7 +787,7 @@ cgraph_node::create_edge_including_clones (cgraph_node *callee,
if (!get_edge (stmt))
{
- edge = create_edge (callee, stmt, count, freq);
+ edge = create_edge (callee, stmt, count);
edge->inline_failed = reason;
}
@@ -801,7 +807,7 @@ cgraph_node::create_edge_including_clones (cgraph_node *callee,
edge->set_call_stmt (stmt);
else if (! node->get_edge (stmt))
{
- edge = node->create_edge (callee, stmt, count, freq);
+ edge = node->create_edge (callee, stmt, count);
edge->inline_failed = reason;
}
@@ -904,14 +910,12 @@ cgraph_node::create_version_clone (tree new_decl,
|| bitmap_bit_p (bbs_to_copy, gimple_bb (e->call_stmt)->index))
e->clone (new_version, e->call_stmt,
e->lto_stmt_uid, count, count,
- CGRAPH_FREQ_BASE,
true);
for (e = indirect_calls; e; e=e->next_callee)
if (!bbs_to_copy
|| bitmap_bit_p (bbs_to_copy, gimple_bb (e->call_stmt)->index))
e->clone (new_version, e->call_stmt,
e->lto_stmt_uid, count, count,
- CGRAPH_FREQ_BASE,
true);
FOR_EACH_VEC_ELT (redirect_callers, i, e)
{
diff --git a/gcc/cgraphunit.c b/gcc/cgraphunit.c
index c5183a02058..dec5c8b5736 100644
--- a/gcc/cgraphunit.c
+++ b/gcc/cgraphunit.c
@@ -620,7 +620,7 @@ cgraph_node::analyze (void)
{
cgraph_node *t = cgraph_node::get (thunk.alias);
- create_edge (t, NULL, t->count, CGRAPH_FREQ_BASE);
+ create_edge (t, NULL, t->count);
callees->can_throw_external = !TREE_NOTHROW (t->decl);
/* Target code in expand_thunk may need the thunk's target
to be analyzed, so recurse here. */
@@ -1950,7 +1950,7 @@ cgraph_node::expand_thunk (bool output_asm_thunks, bool force_gimple_thunk)
resbnd = chkp_insert_retbnd_call (NULL, restmp, &bsi);
create_edge (get_create (gimple_call_fndecl (gsi_stmt (bsi))),
as_a <gcall *> (gsi_stmt (bsi)),
- callees->count, callees->frequency);
+ callees->count);
}
if (restmp && !this_adjusting
@@ -2026,7 +2026,7 @@ cgraph_node::expand_thunk (bool output_asm_thunks, bool force_gimple_thunk)
}
cfun->gimple_df->in_ssa_p = true;
- counts_to_freqs ();
+ update_max_bb_count ();
profile_status_for_fn (cfun)
= cfg_count.initialized_p () && cfg_count.ipa_p ()
? PROFILE_READ : PROFILE_GUESSED;
@@ -2759,7 +2759,7 @@ cgraph_node::create_wrapper (cgraph_node *target)
memset (&thunk, 0, sizeof (cgraph_thunk_info));
thunk.thunk_p = true;
- create_edge (target, NULL, count, CGRAPH_FREQ_BASE);
+ create_edge (target, NULL, count);
callees->can_throw_external = !TREE_NOTHROW (target->decl);
tree arguments = DECL_ARGUMENTS (decl);
diff --git a/gcc/collect2.c b/gcc/collect2.c
index ddbd2be8bf8..d25b75697c0 100644
--- a/gcc/collect2.c
+++ b/gcc/collect2.c
@@ -614,7 +614,7 @@ static const char *const target_machine = TARGET_MACHINE;
Return 0 if not found, otherwise return its name, allocated with malloc. */
-#ifdef OBJECT_FORMAT_NONE
+#if defined (OBJECT_FORMAT_NONE) || defined (OBJECT_FORMAT_COFF)
/* Add an entry for the object file NAME to object file list LIST.
New entries are added at the end of the list. The original pointer
@@ -634,7 +634,7 @@ add_lto_object (struct lto_object_list *list, const char *name)
list->last = n;
}
-#endif /* OBJECT_FORMAT_NONE */
+#endif
/* Perform a link-time recompilation and relink if any of the object
@@ -2641,17 +2641,6 @@ scan_libraries (const char *prog_name)
#ifdef OBJECT_FORMAT_COFF
-#if defined (EXTENDED_COFF)
-
-# define GCC_SYMBOLS(X) (SYMHEADER (X).isymMax + SYMHEADER (X).iextMax)
-# define GCC_SYMENT SYMR
-# define GCC_OK_SYMBOL(X) ((X).st == stProc || (X).st == stGlobal)
-# define GCC_SYMINC(X) (1)
-# define GCC_SYMZERO(X) (SYMHEADER (X).isymMax)
-# define GCC_CHECK_HDR(X) (PSYMTAB (X) != 0)
-
-#else
-
# define GCC_SYMBOLS(X) (HEADER (ldptr).f_nsyms)
# define GCC_SYMENT SYMENT
# if defined (C_WEAKEXT)
@@ -2690,8 +2679,6 @@ scan_libraries (const char *prog_name)
&& !(HEADER (X).f_flags & F_LOADONLY))
#endif
-#endif
-
#ifdef COLLECT_EXPORT_LIST
/* Array of standard AIX libraries which should not
be scanned for ctors/dtors. */
@@ -2750,8 +2737,10 @@ scan_prog_file (const char *prog_name, scanpass which_pass,
LDFILE *ldptr = NULL;
int sym_index, sym_count;
int is_shared = 0;
+ int found_lto = 0;
- if (which_pass != PASS_FIRST && which_pass != PASS_OBJ)
+ if (which_pass != PASS_FIRST && which_pass != PASS_OBJ
+ && which_pass != PASS_LTOINFO)
return;
#ifdef COLLECT_EXPORT_LIST
@@ -2764,6 +2753,7 @@ scan_prog_file (const char *prog_name, scanpass which_pass,
eliminate scan_libraries() function. */
do
{
+ found_lto = 0;
#endif
/* Some platforms (e.g. OSF4) declare ldopen as taking a
non-const char * filename parameter, even though it will not
@@ -2806,6 +2796,19 @@ scan_prog_file (const char *prog_name, scanpass which_pass,
++name;
#endif
+ if (which_pass == PASS_LTOINFO)
+ {
+ if (found_lto)
+ continue;
+ if (strncmp (name, "__gnu_lto_v1", 12) == 0)
+ {
+ add_lto_object (&lto_objects, prog_name);
+ found_lto = 1;
+ break;
+ }
+ continue;
+ }
+
switch (is_ctor_dtor (name))
{
#if TARGET_AIX_VERSION
@@ -2904,16 +2907,10 @@ scan_prog_file (const char *prog_name, scanpass which_pass,
}
if (debug)
-#if !defined(EXTENDED_COFF)
fprintf (stderr, "\tsec=%d class=%d type=%s%o %s\n",
symbol.n_scnum, symbol.n_sclass,
(symbol.n_type ? "0" : ""), symbol.n_type,
name);
-#else
- fprintf (stderr,
- "\tiss = %5d, value = %5ld, index = %5d, name = %s\n",
- symbol.iss, (long) symbol.value, symbol.index, name);
-#endif
}
}
}
diff --git a/gcc/common/config/i386/i386-common.c b/gcc/common/config/i386/i386-common.c
index ada918e6f2a..b7a0ff5feb8 100644
--- a/gcc/common/config/i386/i386-common.c
+++ b/gcc/common/config/i386/i386-common.c
@@ -80,6 +80,7 @@ along with GCC; see the file COPYING3. If not see
(OPTION_MASK_ISA_AVX512VBMI | OPTION_MASK_ISA_AVX512BW_SET)
#define OPTION_MASK_ISA_AVX5124FMAPS_SET OPTION_MASK_ISA_AVX5124FMAPS
#define OPTION_MASK_ISA_AVX5124VNNIW_SET OPTION_MASK_ISA_AVX5124VNNIW
+#define OPTION_MASK_ISA_AVX512VBMI2_SET OPTION_MASK_ISA_AVX512VBMI2
#define OPTION_MASK_ISA_AVX512VPOPCNTDQ_SET OPTION_MASK_ISA_AVX512VPOPCNTDQ
#define OPTION_MASK_ISA_RTM_SET OPTION_MASK_ISA_RTM
#define OPTION_MASK_ISA_PRFCHW_SET OPTION_MASK_ISA_PRFCHW
@@ -191,6 +192,7 @@ along with GCC; see the file COPYING3. If not see
#define OPTION_MASK_ISA_AVX512VBMI_UNSET OPTION_MASK_ISA_AVX512VBMI
#define OPTION_MASK_ISA_AVX5124FMAPS_UNSET OPTION_MASK_ISA_AVX5124FMAPS
#define OPTION_MASK_ISA_AVX5124VNNIW_UNSET OPTION_MASK_ISA_AVX5124VNNIW
+#define OPTION_MASK_ISA_AVX512VBMI2_UNSET OPTION_MASK_ISA_AVX512VBMI2
#define OPTION_MASK_ISA_AVX512VPOPCNTDQ_UNSET OPTION_MASK_ISA_AVX512VPOPCNTDQ
#define OPTION_MASK_ISA_RTM_UNSET OPTION_MASK_ISA_RTM
#define OPTION_MASK_ISA_PRFCHW_UNSET OPTION_MASK_ISA_PRFCHW
@@ -242,8 +244,7 @@ along with GCC; see the file COPYING3. If not see
#define OPTION_MASK_ISA_GENERAL_REGS_ONLY_UNSET \
(OPTION_MASK_ISA_MMX_UNSET \
- | OPTION_MASK_ISA_SSE_UNSET \
- | OPTION_MASK_ISA_MPX)
+ | OPTION_MASK_ISA_SSE_UNSET)
/* Implement TARGET_HANDLE_OPTION. */
@@ -265,8 +266,12 @@ ix86_handle_option (struct gcc_options *opts,
general registers are allowed. */
opts->x_ix86_isa_flags
&= ~OPTION_MASK_ISA_GENERAL_REGS_ONLY_UNSET;
+ opts->x_ix86_isa_flags2
+ &= ~OPTION_MASK_ISA_MPX;
opts->x_ix86_isa_flags_explicit
|= OPTION_MASK_ISA_GENERAL_REGS_ONLY_UNSET;
+ opts->x_ix86_isa_flags2_explicit
+ |= OPTION_MASK_ISA_MPX;
opts->x_target_flags &= ~MASK_80387;
}
@@ -493,13 +498,13 @@ ix86_handle_option (struct gcc_options *opts,
case OPT_mgfni:
if (value)
{
- opts->x_ix86_isa_flags2 |= OPTION_MASK_ISA_GFNI_SET;
- opts->x_ix86_isa_flags2_explicit |= OPTION_MASK_ISA_GFNI_SET;
+ opts->x_ix86_isa_flags |= OPTION_MASK_ISA_GFNI_SET;
+ opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_GFNI_SET;
}
else
{
- opts->x_ix86_isa_flags2 &= ~OPTION_MASK_ISA_GFNI_UNSET;
- opts->x_ix86_isa_flags2_explicit |= OPTION_MASK_ISA_GFNI_UNSET;
+ opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_GFNI_UNSET;
+ opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_GFNI_UNSET;
}
return true;
@@ -562,6 +567,21 @@ ix86_handle_option (struct gcc_options *opts,
}
return true;
+ case OPT_mavx512vbmi2:
+ if (value)
+ {
+ opts->x_ix86_isa_flags2 |= OPTION_MASK_ISA_AVX512VBMI2_SET;
+ opts->x_ix86_isa_flags2_explicit |= OPTION_MASK_ISA_AVX512VBMI2_SET;
+ opts->x_ix86_isa_flags |= OPTION_MASK_ISA_AVX512F_SET;
+ opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX512F_SET;
+ }
+ else
+ {
+ opts->x_ix86_isa_flags2 &= ~OPTION_MASK_ISA_AVX512VBMI2_UNSET;
+ opts->x_ix86_isa_flags2_explicit |= OPTION_MASK_ISA_AVX512VBMI2_UNSET;
+ }
+ return true;
+
case OPT_mavx512vpopcntdq:
if (value)
{
diff --git a/gcc/compare-elim.c b/gcc/compare-elim.c
index 17d08842d15..b34a07f8336 100644
--- a/gcc/compare-elim.c
+++ b/gcc/compare-elim.c
@@ -683,6 +683,8 @@ try_merge_compare (struct comparison *cmp)
rtx_insn *def_insn = cmp->in_a_setter;
rtx set = single_set (def_insn);
+ if (!set)
+ return false;
if (!can_merge_compare_into_arith (cmp_insn, def_insn))
return false;
diff --git a/gcc/config.gcc b/gcc/config.gcc
index 3dace854c95..8ee8e8c7c8b 100644
--- a/gcc/config.gcc
+++ b/gcc/config.gcc
@@ -860,8 +860,8 @@ case ${target} in
sol2_tm_file_tail="${cpu_type}/sol2.h sol2.h"
sol2_tm_file="${sol2_tm_file_head} ${sol2_tm_file_tail}"
case ${target} in
- *-*-solaris2.1[2-9]*)
- # __cxa_atexit was introduced in Solaris 12.
+ *-*-solaris2.1[1-9]*)
+ # __cxa_atexit was introduced in Solaris 11.4.
default_use_cxa_atexit=yes
;;
esac
diff --git a/gcc/config/aarch64/aarch64-modes.def b/gcc/config/aarch64/aarch64-modes.def
index 11bbdfcb55e..0680404f7d4 100644
--- a/gcc/config/aarch64/aarch64-modes.def
+++ b/gcc/config/aarch64/aarch64-modes.def
@@ -31,20 +31,20 @@ ADJUST_FLOAT_FORMAT (HF, &ieee_half_format);
/* Vector modes. */
-VECTOR_BOOL_MODE (32, 4);
-VECTOR_BOOL_MODE (16, 4);
-VECTOR_BOOL_MODE (8, 4);
-VECTOR_BOOL_MODE (4, 4);
+VECTOR_BOOL_MODE (VNx16BI, 16, 2);
+VECTOR_BOOL_MODE (VNx8BI, 8, 2);
+VECTOR_BOOL_MODE (VNx4BI, 4, 2);
+VECTOR_BOOL_MODE (VNx2BI, 2, 2);
-ADJUST_NUNITS (V32BI, aarch64_sve_vg * 8);
-ADJUST_NUNITS (V16BI, aarch64_sve_vg * 4);
-ADJUST_NUNITS (V8BI, aarch64_sve_vg * 2);
-ADJUST_NUNITS (V4BI, aarch64_sve_vg);
+ADJUST_NUNITS (VNx16BI, aarch64_sve_vg * 8);
+ADJUST_NUNITS (VNx8BI, aarch64_sve_vg * 4);
+ADJUST_NUNITS (VNx4BI, aarch64_sve_vg * 2);
+ADJUST_NUNITS (VNx2BI, aarch64_sve_vg);
-ADJUST_ALIGNMENT (V32BI, 2);
-ADJUST_ALIGNMENT (V16BI, 2);
-ADJUST_ALIGNMENT (V8BI, 2);
-ADJUST_ALIGNMENT (V4BI, 2);
+ADJUST_ALIGNMENT (VNx16BI, 2);
+ADJUST_ALIGNMENT (VNx8BI, 2);
+ADJUST_ALIGNMENT (VNx4BI, 2);
+ADJUST_ALIGNMENT (VNx2BI, 2);
VECTOR_MODES (INT, 8); /* V8QI V4HI V2SI. */
VECTOR_MODES (INT, 16); /* V16QI V8HI V4SI V2DI. */
@@ -65,8 +65,8 @@ INT_MODE (XI, 64);
strictly necessary to set the alignment here, since the default would
be clamped to BIGGEST_ALIGNMENT anyhow, but it seems clearer. */
#define SVE_MODES(NVECS, VB, VH, VS, VD) \
- VECTOR_MODES (INT, 32 * NVECS); \
- VECTOR_MODES (FLOAT, 32 * NVECS); \
+ VECTOR_MODES_WITH_PREFIX (VNx, INT, 16 * NVECS); \
+ VECTOR_MODES_WITH_PREFIX (VNx, FLOAT, 16 * NVECS); \
\
ADJUST_NUNITS (VB##QI, aarch64_sve_vg * NVECS * 8); \
ADJUST_NUNITS (VH##HI, aarch64_sve_vg * NVECS * 4); \
@@ -86,10 +86,10 @@ INT_MODE (XI, 64);
/* Give SVE vectors the names normally used for 256-bit vectors.
The actual number depends on command-line flags. */
-SVE_MODES (1, V32, V16, V8, V4)
-SVE_MODES (2, V64, V32, V16, V8)
-SVE_MODES (3, V96, V48, V24, V12)
-SVE_MODES (4, V128, V64, V32, V16)
+SVE_MODES (1, VNx16, VNx8, VNx4, VNx2)
+SVE_MODES (2, VNx32, VNx16, VNx8, VNx4)
+SVE_MODES (3, VNx48, VNx24, VNx12, VNx6)
+SVE_MODES (4, VNx64, VNx32, VNx16, VNx8)
/* Quad float: 128-bit floating mode for long doubles. */
FLOAT_MODE (TF, 16, ieee_quad_format);
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index fcc49e3a2f8..4311b1814e0 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -172,7 +172,7 @@
(vec_select:<VEL> (match_operand:VALL_F16 1 "register_operand" "w")
(parallel [(match_operand 2 "const_int_operand" "n")])))]
"TARGET_SIMD
- && ENDIAN_LANE_N (<MODE>mode, INTVAL (operands[2])) == 0"
+ && ENDIAN_LANE_N (<nunits>, INTVAL (operands[2])) == 0"
"str\\t%<Vetype>1, %0"
[(set_attr "type" "neon_store1_1reg<q>")]
)
@@ -449,8 +449,7 @@
DOTPROD)))]
"TARGET_DOTPROD"
{
- operands[4]
- = GEN_INT (ENDIAN_LANE_N (V8QImode, INTVAL (operands[4])));
+ operands[4] = aarch64_endian_lane_rtx (V8QImode, INTVAL (operands[4]));
return "<sur>dot\\t%0.<Vtype>, %2.<Vdottype>, %3.4b[%4]";
}
[(set_attr "type" "neon_dot")]
@@ -465,8 +464,7 @@
DOTPROD)))]
"TARGET_DOTPROD"
{
- operands[4]
- = GEN_INT (ENDIAN_LANE_N (V16QImode, INTVAL (operands[4])));
+ operands[4] = aarch64_endian_lane_rtx (V16QImode, INTVAL (operands[4]));
return "<sur>dot\\t%0.<Vtype>, %2.<Vdottype>, %3.4b[%4]";
}
[(set_attr "type" "neon_dot")]
@@ -733,9 +731,9 @@
(match_operand:SI 2 "immediate_operand" "i")))]
"TARGET_SIMD"
{
- int elt = ENDIAN_LANE_N (<MODE>mode, exact_log2 (INTVAL (operands[2])));
+ int elt = ENDIAN_LANE_N (<nunits>, exact_log2 (INTVAL (operands[2])));
operands[2] = GEN_INT (HOST_WIDE_INT_1 << elt);
- operands[4] = GEN_INT (ENDIAN_LANE_N (<MODE>mode, INTVAL (operands[4])));
+ operands[4] = aarch64_endian_lane_rtx (<MODE>mode, INTVAL (operands[4]));
return "ins\t%0.<Vetype>[%p2], %3.<Vetype>[%4]";
}
@@ -754,10 +752,10 @@
(match_operand:SI 2 "immediate_operand" "i")))]
"TARGET_SIMD"
{
- int elt = ENDIAN_LANE_N (<MODE>mode, exact_log2 (INTVAL (operands[2])));
+ int elt = ENDIAN_LANE_N (<nunits>, exact_log2 (INTVAL (operands[2])));
operands[2] = GEN_INT (HOST_WIDE_INT_1 << elt);
- operands[4] = GEN_INT (ENDIAN_LANE_N (<VSWAP_WIDTH>mode,
- INTVAL (operands[4])));
+ operands[4] = aarch64_endian_lane_rtx (<VSWAP_WIDTH>mode,
+ INTVAL (operands[4]));
return "ins\t%0.<Vetype>[%p2], %3.<Vetype>[%4]";
}
@@ -2420,13 +2418,13 @@
;; in *aarch64_simd_bsl<mode>_alt.
(define_insn "aarch64_simd_bsl<mode>_internal"
- [(set (match_operand:VSDQ_I_DI 0 "register_operand" "=w,w,w")
- (xor:VSDQ_I_DI
- (and:VSDQ_I_DI
- (xor:VSDQ_I_DI
+ [(set (match_operand:VDQ_I 0 "register_operand" "=w,w,w")
+ (xor:VDQ_I
+ (and:VDQ_I
+ (xor:VDQ_I
(match_operand:<V_INT_EQUIV> 3 "register_operand" "w,0,w")
- (match_operand:VSDQ_I_DI 2 "register_operand" "w,w,0"))
- (match_operand:VSDQ_I_DI 1 "register_operand" "0,w,w"))
+ (match_operand:VDQ_I 2 "register_operand" "w,w,0"))
+ (match_operand:VDQ_I 1 "register_operand" "0,w,w"))
(match_dup:<V_INT_EQUIV> 3)
))]
"TARGET_SIMD"
@@ -2444,14 +2442,14 @@
;; permutations of commutative operations, we have to have a separate pattern.
(define_insn "*aarch64_simd_bsl<mode>_alt"
- [(set (match_operand:VSDQ_I_DI 0 "register_operand" "=w,w,w")
- (xor:VSDQ_I_DI
- (and:VSDQ_I_DI
- (xor:VSDQ_I_DI
- (match_operand:VSDQ_I_DI 3 "register_operand" "w,w,0")
- (match_operand:VSDQ_I_DI 2 "register_operand" "w,0,w"))
- (match_operand:VSDQ_I_DI 1 "register_operand" "0,w,w"))
- (match_dup:VSDQ_I_DI 2)))]
+ [(set (match_operand:VDQ_I 0 "register_operand" "=w,w,w")
+ (xor:VDQ_I
+ (and:VDQ_I
+ (xor:VDQ_I
+ (match_operand:VDQ_I 3 "register_operand" "w,w,0")
+ (match_operand:<V_INT_EQUIV> 2 "register_operand" "w,0,w"))
+ (match_operand:VDQ_I 1 "register_operand" "0,w,w"))
+ (match_dup:<V_INT_EQUIV> 2)))]
"TARGET_SIMD"
"@
bsl\\t%0.<Vbtype>, %3.<Vbtype>, %2.<Vbtype>
@@ -2460,6 +2458,100 @@
[(set_attr "type" "neon_bsl<q>")]
)
+;; DImode is special, we want to avoid computing operations which are
+;; more naturally computed in general purpose registers in the vector
+;; registers. If we do that, we need to move all three operands from general
+;; purpose registers to vector registers, then back again. However, we
+;; don't want to make this pattern an UNSPEC as we'd lose scope for
+;; optimizations based on the component operations of a BSL.
+;;
+;; That means we need a splitter back to the individual operations, if they
+;; would be better calculated on the integer side.
+
+(define_insn_and_split "aarch64_simd_bsldi_internal"
+ [(set (match_operand:DI 0 "register_operand" "=w,w,w,&r")
+ (xor:DI
+ (and:DI
+ (xor:DI
+ (match_operand:DI 3 "register_operand" "w,0,w,r")
+ (match_operand:DI 2 "register_operand" "w,w,0,r"))
+ (match_operand:DI 1 "register_operand" "0,w,w,r"))
+ (match_dup:DI 3)
+ ))]
+ "TARGET_SIMD"
+ "@
+ bsl\\t%0.8b, %2.8b, %3.8b
+ bit\\t%0.8b, %2.8b, %1.8b
+ bif\\t%0.8b, %3.8b, %1.8b
+ #"
+ "&& GP_REGNUM_P (REGNO (operands[0]))"
+ [(match_dup 1) (match_dup 1) (match_dup 2) (match_dup 3)]
+{
+ /* Split back to individual operations. If we're before reload, and
+ able to create a temporary register, do so. If we're after reload,
+ we've got an early-clobber destination register, so use that.
+ Otherwise, we can't create pseudos and we can't yet guarantee that
+ operands[0] is safe to write, so FAIL to split. */
+
+ rtx scratch;
+ if (reload_completed)
+ scratch = operands[0];
+ else if (can_create_pseudo_p ())
+ scratch = gen_reg_rtx (DImode);
+ else
+ FAIL;
+
+ emit_insn (gen_xordi3 (scratch, operands[2], operands[3]));
+ emit_insn (gen_anddi3 (scratch, scratch, operands[1]));
+ emit_insn (gen_xordi3 (operands[0], scratch, operands[3]));
+ DONE;
+}
+ [(set_attr "type" "neon_bsl,neon_bsl,neon_bsl,multiple")
+ (set_attr "length" "4,4,4,12")]
+)
+
+(define_insn_and_split "aarch64_simd_bsldi_alt"
+ [(set (match_operand:DI 0 "register_operand" "=w,w,w,&r")
+ (xor:DI
+ (and:DI
+ (xor:DI
+ (match_operand:DI 3 "register_operand" "w,w,0,r")
+ (match_operand:DI 2 "register_operand" "w,0,w,r"))
+ (match_operand:DI 1 "register_operand" "0,w,w,r"))
+ (match_dup:DI 2)
+ ))]
+ "TARGET_SIMD"
+ "@
+ bsl\\t%0.8b, %3.8b, %2.8b
+ bit\\t%0.8b, %3.8b, %1.8b
+ bif\\t%0.8b, %2.8b, %1.8b
+ #"
+ "&& GP_REGNUM_P (REGNO (operands[0]))"
+ [(match_dup 0) (match_dup 1) (match_dup 2) (match_dup 3)]
+{
+ /* Split back to individual operations. If we're before reload, and
+ able to create a temporary register, do so. If we're after reload,
+ we've got an early-clobber destination register, so use that.
+ Otherwise, we can't create pseudos and we can't yet guarantee that
+ operands[0] is safe to write, so FAIL to split. */
+
+ rtx scratch;
+ if (reload_completed)
+ scratch = operands[0];
+ else if (can_create_pseudo_p ())
+ scratch = gen_reg_rtx (DImode);
+ else
+ FAIL;
+
+ emit_insn (gen_xordi3 (scratch, operands[2], operands[3]));
+ emit_insn (gen_anddi3 (scratch, scratch, operands[1]));
+ emit_insn (gen_xordi3 (operands[0], scratch, operands[2]));
+ DONE;
+}
+ [(set_attr "type" "neon_bsl,neon_bsl,neon_bsl,multiple")
+ (set_attr "length" "4,4,4,12")]
+)
+
(define_expand "aarch64_simd_bsl<mode>"
[(match_operand:VALLDIF 0 "register_operand")
(match_operand:<V_INT_EQUIV> 1 "register_operand")
@@ -2934,14 +3026,40 @@
[(set_attr "type" "neon_to_gp<q>, neon_dup<q>, neon_store1_one_lane<q>")]
)
+(define_insn "load_pair_lanes<mode>"
+ [(set (match_operand:<VDBL> 0 "register_operand" "=w")
+ (vec_concat:<VDBL>
+ (match_operand:VDC 1 "memory_operand" "Utq")
+ (match_operand:VDC 2 "memory_operand" "m")))]
+ "TARGET_SIMD && !STRICT_ALIGNMENT
+ && rtx_equal_p (XEXP (operands[2], 0),
+ plus_constant (Pmode,
+ XEXP (operands[1], 0),
+ GET_MODE_SIZE (<MODE>mode)))"
+ "ldr\\t%q0, %1"
+ [(set_attr "type" "neon_load1_1reg_q")]
+)
+
+(define_insn "store_pair_lanes<mode>"
+ [(set (match_operand:<VDBL> 0 "aarch64_mem_pair_lanes_operand" "=Uml, Uml")
+ (vec_concat:<VDBL>
+ (match_operand:VDC 1 "register_operand" "w, r")
+ (match_operand:VDC 2 "register_operand" "w, r")))]
+ "TARGET_SIMD"
+ "@
+ stp\\t%d1, %d2, %0
+ stp\\t%x1, %x2, %0"
+ [(set_attr "type" "neon_stp, store_16")]
+)
+
;; In this insn, operand 1 should be low, and operand 2 the high part of the
;; dest vector.
(define_insn "*aarch64_combinez<mode>"
[(set (match_operand:<VDBL> 0 "register_operand" "=w,w,w")
- (vec_concat:<VDBL>
- (match_operand:VD_BHSI 1 "general_operand" "w,?r,m")
- (match_operand:VD_BHSI 2 "aarch64_simd_imm_zero" "Dz,Dz,Dz")))]
+ (vec_concat:<VDBL>
+ (match_operand:VDC 1 "general_operand" "w,?r,m")
+ (match_operand:VDC 2 "aarch64_simd_or_scalar_imm_zero")))]
"TARGET_SIMD && !BYTES_BIG_ENDIAN"
"@
mov\\t%0.8b, %1.8b
@@ -2955,8 +3073,8 @@
(define_insn "*aarch64_combinez_be<mode>"
[(set (match_operand:<VDBL> 0 "register_operand" "=w,w,w")
(vec_concat:<VDBL>
- (match_operand:VD_BHSI 2 "aarch64_simd_imm_zero" "Dz,Dz,Dz")
- (match_operand:VD_BHSI 1 "general_operand" "w,?r,m")))]
+ (match_operand:VDC 2 "aarch64_simd_or_scalar_imm_zero")
+ (match_operand:VDC 1 "general_operand" "w,?r,m")))]
"TARGET_SIMD && BYTES_BIG_ENDIAN"
"@
mov\\t%0.8b, %1.8b
diff --git a/gcc/config/aarch64/aarch64-sve.md b/gcc/config/aarch64/aarch64-sve.md
index 7052063bb23..a2c35e7e958 100644
--- a/gcc/config/aarch64/aarch64-sve.md
+++ b/gcc/config/aarch64/aarch64-sve.md
@@ -122,11 +122,11 @@
[(parallel
[(set (match_operand 0)
(match_operand 1))
- (clobber (match_operand:V32BI 2 "register_operand" "=Upl"))])]
+ (clobber (match_operand:VNx16BI 2 "register_operand" "=Upl"))])]
"TARGET_SVE && BYTES_BIG_ENDIAN"
{
/* Create a PTRUE. */
- emit_move_insn (operands[2], CONSTM1_RTX (V32BImode));
+ emit_move_insn (operands[2], CONSTM1_RTX (VNx16BImode));
/* Refer to the PTRUE in the appropriate mode for this move. */
machine_mode mode = GET_MODE (operands[0]);
@@ -189,6 +189,63 @@
"st1<Vesize>\t%1.<Vetype>, %2, %0"
)
+;; Unpredicated gather loads.
+(define_expand "gather_load<mode>"
+ [(set (match_operand:SVE_SD 0 "register_operand")
+ (unspec:SVE_SD
+ [(match_dup 5)
+ (match_operand:DI 1 "aarch64_reg_or_zero")
+ (match_operand:<V_INT_EQUIV> 2 "register_operand")
+ (match_operand:DI 3 "const_int_operand")
+ (match_operand:DI 4 "aarch64_gather_scale_operand_<Vesize>")
+ (mem:BLK (scratch))]
+ UNSPEC_LD1_GATHER))]
+ "TARGET_SVE"
+ {
+ operands[5] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
+ }
+)
+
+;; Predicated gather loads for 32-bit elements. Operand 3 is true for
+;; unsigned extension and false for signed extension.
+(define_insn "mask_gather_load<mode>"
+ [(set (match_operand:SVE_S 0 "register_operand" "=w, w, w, w, w")
+ (unspec:SVE_S
+ [(match_operand:<VPRED> 5 "register_operand" "Upl, Upl, Upl, Upl, Upl")
+ (match_operand:DI 1 "aarch64_reg_or_zero" "Z, rk, rk, rk, rk")
+ (match_operand:<V_INT_EQUIV> 2 "register_operand" "w, w, w, w, w")
+ (match_operand:DI 3 "const_int_operand" "i, Z, Ui1, Z, Ui1")
+ (match_operand:DI 4 "aarch64_gather_scale_operand_w" "Ui1, Ui1, Ui1, i, i")
+ (mem:BLK (scratch))]
+ UNSPEC_LD1_GATHER))]
+ "TARGET_SVE"
+ "@
+ ld1w\t%0.s, %5/z, [%2.s]
+ ld1w\t%0.s, %5/z, [%1, %2.s, sxtw]
+ ld1w\t%0.s, %5/z, [%1, %2.s, uxtw]
+ ld1w\t%0.s, %5/z, [%1, %2.s, sxtw %p4]
+ ld1w\t%0.s, %5/z, [%1, %2.s, uxtw %p4]"
+)
+
+;; Predicated gather loads for 64-bit elements. The value of operand 3
+;; doesn't matter in this case.
+(define_insn "mask_gather_load<mode>"
+ [(set (match_operand:SVE_D 0 "register_operand" "=w, w, w")
+ (unspec:SVE_D
+ [(match_operand:<VPRED> 5 "register_operand" "Upl, Upl, Upl")
+ (match_operand:DI 1 "aarch64_reg_or_zero" "Z, rk, rk")
+ (match_operand:<V_INT_EQUIV> 2 "register_operand" "w, w, w")
+ (match_operand:DI 3 "const_int_operand")
+ (match_operand:DI 4 "aarch64_gather_scale_operand_d" "Ui1, Ui1, i")
+ (mem:BLK (scratch))]
+ UNSPEC_LD1_GATHER))]
+ "TARGET_SVE"
+ "@
+ ld1d\t%0.d, %5/z, [%2.d]
+ ld1d\t%0.d, %5/z, [%1, %2.d]
+ ld1d\t%0.d, %5/z, [%1, %2.d, lsl %p4]"
+)
+
(define_expand "firstfault_load<mode>"
[(set (match_operand:SVE_ALL 0 "register_operand")
(unspec:SVE_ALL
@@ -213,17 +270,79 @@
"ldff1<Vesize>\t%0.<Vetype>, %2/z, %j1";
)
+;; Unpredicated scatter store.
+(define_expand "scatter_store<mode>"
+ [(set (mem:BLK (scratch))
+ (unspec:BLK
+ [(match_dup 5)
+ (match_operand:DI 0 "aarch64_reg_or_zero")
+ (match_operand:<V_INT_EQUIV> 1 "register_operand")
+ (match_operand:DI 2 "const_int_operand")
+ (match_operand:DI 3 "aarch64_gather_scale_operand_<Vesize>")
+ (match_operand:SVE_SD 4 "register_operand")]
+ UNSPEC_ST1_SCATTER))]
+ "TARGET_SVE"
+ {
+ operands[5] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
+ }
+)
+
+;; Predicated scatter stores for 32-bit elements. Operand 2 is true for
+;; unsigned extension and false for signed extension.
+(define_insn "mask_scatter_store<mode>"
+ [(set (mem:BLK (scratch))
+ (unspec:BLK
+ [(match_operand:<VPRED> 5 "register_operand" "Upl, Upl, Upl, Upl, Upl")
+ (match_operand:DI 0 "aarch64_reg_or_zero" "Z, rk, rk, rk, rk")
+ (match_operand:<V_INT_EQUIV> 1 "register_operand" "w, w, w, w, w")
+ (match_operand:DI 2 "const_int_operand" "i, Z, Ui1, Z, Ui1")
+ (match_operand:DI 3 "aarch64_gather_scale_operand_w" "Ui1, Ui1, Ui1, i, i")
+ (match_operand:SVE_S 4 "register_operand" "w, w, w, w, w")]
+ UNSPEC_ST1_SCATTER))]
+ "TARGET_SVE"
+ "@
+ st1w\t%4.s, %5, [%1.s]
+ st1w\t%4.s, %5, [%0, %1.s, sxtw]
+ st1w\t%4.s, %5, [%0, %1.s, uxtw]
+ st1w\t%4.s, %5, [%0, %1.s, sxtw %p3]
+ st1w\t%4.s, %5, [%0, %1.s, uxtw %p3]"
+)
+
+;; Predicated scatter stores for 64-bit elements. The value of operand 2
+;; doesn't matter in this case.
+(define_insn "mask_scatter_store<mode>"
+ [(set (mem:BLK (scratch))
+ (unspec:BLK
+ [(match_operand:<VPRED> 5 "register_operand" "Upl, Upl, Upl")
+ (match_operand:DI 0 "aarch64_reg_or_zero" "Z, rk, rk")
+ (match_operand:<V_INT_EQUIV> 1 "register_operand" "w, w, w")
+ (match_operand:DI 2 "const_int_operand")
+ (match_operand:DI 3 "aarch64_gather_scale_operand_d" "Ui1, Ui1, i")
+ (match_operand:SVE_D 4 "register_operand" "w, w, w")]
+ UNSPEC_ST1_SCATTER))]
+ "TARGET_SVE"
+ "@
+ st1d\t%4.d, %5, [%1.d]
+ st1d\t%4.d, %5, [%0, %1.d]
+ st1d\t%4.d, %5, [%0, %1.d, lsl %p3]"
+)
+
;; SVE structure moves.
(define_expand "mov<mode>"
[(set (match_operand:SVE_STRUCT 0 "nonimmediate_operand")
(match_operand:SVE_STRUCT 1 "general_operand"))]
"TARGET_SVE"
{
- if (MEM_P (operands[0]) || MEM_P (operands[1]))
+ /* Big-endian loads and stores need to be done via LD1 and ST1;
+ see the comment at the head of the file for details. */
+ if ((MEM_P (operands[0]) || MEM_P (operands[1]))
+ && BYTES_BIG_ENDIAN)
{
+ gcc_assert (can_create_pseudo_p ());
aarch64_expand_sve_mem_move (operands[0], operands[1], <VPRED>mode);
DONE;
}
+
if (CONSTANT_P (operands[1]))
{
aarch64_expand_mov_immediate (operands[0], operands[1]);
@@ -232,17 +351,11 @@
}
)
-;; Unpredicated structure moves (little-endian). Only allow memory operations
-;; during and after RA; before RA we want the predicated load and store
-;; patterns to be used instead.
+;; Unpredicated structure moves (little-endian).
(define_insn "*aarch64_sve_mov<mode>_le"
[(set (match_operand:SVE_STRUCT 0 "aarch64_sve_nonimmediate_operand" "=w, Utr, w, w")
(match_operand:SVE_STRUCT 1 "aarch64_sve_general_operand" "Utr, w, w, Dn"))]
- "TARGET_SVE
- && !BYTES_BIG_ENDIAN
- && ((lra_in_progress || reload_completed)
- || (register_operand (operands[0], <MODE>mode)
- && nonmemory_operand (operands[1], <MODE>mode)))"
+ "TARGET_SVE && !BYTES_BIG_ENDIAN"
"#"
[(set_attr "length" "<insn_length>")]
)
@@ -283,6 +396,8 @@
}
)
+;; Predicated structure moves. This works for both endiannesses but in
+;; practice is only useful for big-endian.
(define_insn_and_split "pred_mov<mode>"
[(set (match_operand:SVE_STRUCT 0 "aarch64_sve_struct_nonimmediate_operand" "=w, Utx")
(unspec:SVE_STRUCT
@@ -368,7 +483,7 @@
/* The last element can be extracted with a LASTB and a false
predicate. */
rtx sel = force_reg (<VPRED>mode, CONST0_RTX (<VPRED>mode));
- emit_insn (gen_extract_last_<mode> (operands[0], operands[1], sel));
+ emit_insn (gen_extract_last_<mode> (operands[0], sel, operands[1]));
DONE;
}
if (!CONST_INT_P (operands[2]))
@@ -387,7 +502,7 @@
emit_insn (gen_vec_cmp<v_int_equiv><vpred> (sel, cmp, series, zero));
/* Select the element using LASTB. */
- emit_insn (gen_extract_last_<mode> (operands[0], operands[1], sel));
+ emit_insn (gen_extract_last_<mode> (operands[0], sel, operands[1]));
DONE;
}
}
@@ -455,13 +570,13 @@
(define_insn "extract_last_<mode>"
[(set (match_operand:<VEL> 0 "register_operand" "=r, w")
(unspec:<VEL>
- [(match_operand:SVE_ALL 1 "register_operand" "w, w")
- (match_operand:<VPRED> 2 "register_operand" "Upl, Upl")]
+ [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ (match_operand:SVE_ALL 2 "register_operand" "w, w")]
UNSPEC_LASTB))]
"TARGET_SVE"
"@
- lastb\t%<vwcore>0, %2, %1.<Vetype>
- lastb\t%<Vetype>0, %2, %1.<Vetype>"
+ lastb\t%<vwcore>0, %1, %2.<Vetype>
+ lastb\t%<Vetype>0, %1, %2.<Vetype>"
)
(define_expand "vec_duplicate<mode>"
@@ -489,7 +604,7 @@
(define_insn_and_split "*vec_duplicate<mode>_reg"
[(set (match_operand:SVE_ALL 0 "register_operand" "=w, w, w")
(vec_duplicate:SVE_ALL
- (match_operand:<VEL> 1 "aarch64_sve_dup_operand" "r, w, Utw")))
+ (match_operand:<VEL> 1 "aarch64_sve_dup_operand" "r, w, Uty")))
(clobber (match_scratch:<VPRED> 2 "=X, X, Upl"))]
"TARGET_SVE"
"@
@@ -517,7 +632,7 @@
(unspec:SVE_ALL
[(match_operand:<VPRED> 1 "register_operand" "Upl")
(vec_duplicate:SVE_ALL
- (match_operand:<VEL> 2 "aarch64_sve_ld1r_operand" "Utw"))
+ (match_operand:<VEL> 2 "aarch64_sve_ld1r_operand" "Uty"))
(match_operand:SVE_ALL 3 "aarch64_simd_imm_zero")]
UNSPEC_SEL))]
"TARGET_SVE"
@@ -552,130 +667,6 @@
index\t%0.<Vetype>, %<vw>1, %<vw>2"
)
-(define_expand "vec_gather_loads<mode>"
- [(set (match_operand:SVE_SD 0 "register_operand")
- (unspec:SVE_SD
- [(match_dup 4)
- (match_operand:DI 1 "aarch64_reg_or_zero")
- (match_operand:<V_INT_EQUIV> 2 "register_operand")
- (match_operand:DI 3 "aarch64_gather_scale_operand_<Vetype>")
- (mem:BLK (scratch))]
- UNSPEC_GATHER_LOADS))]
- "TARGET_SVE"
- {
- operands[4] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
- }
-)
-
-(define_insn "vec_mask_gather_loads<mode>"
- [(set (match_operand:SVE_SD 0 "register_operand" "=w, w, w")
- (unspec:SVE_SD
- [(match_operand:<VPRED> 4 "register_operand" "Upl, Upl, Upl")
- (match_operand:DI 1 "aarch64_reg_or_zero" "rk, Z, rk")
- (match_operand:<V_INT_EQUIV> 2 "register_operand" "w, w, w")
- (match_operand:DI 3 "aarch64_gather_scale_operand_<Vetype>" "Ui1, Ui1, i")
- (mem:BLK (scratch))]
- UNSPEC_GATHER_LOADS))]
- "TARGET_SVE"
- "@
- ld1<Vesize>\t%0.<Vetype>, %4/z, [%1, %2.<Vetype><gather_unscaled_mods>]
- ld1<Vesize>\t%0.<Vetype>, %4/z, [%2.<Vetype>]
- ld1<Vesize>\t%0.<Vetype>, %4/z, [%1, %2.<Vetype><gather_scaled_mods>]"
-)
-
-(define_expand "vec_gather_loadu<mode>"
- [(set (match_operand:SVE_SD 0 "register_operand")
- (unspec:SVE_SD
- [(match_dup 4)
- (match_operand:DI 1 "aarch64_reg_or_zero")
- (match_operand:<V_INT_EQUIV> 2 "register_operand")
- (match_operand:DI 3 "aarch64_gather_scale_operand_<Vetype>")
- (mem:BLK (scratch))]
- UNSPEC_GATHER_LOADU))]
- "TARGET_SVE"
- {
- operands[4] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
- }
-)
-
-(define_insn "vec_mask_gather_loadu<mode>"
- [(set (match_operand:SVE_SD 0 "register_operand" "=w, w, w")
- (unspec:SVE_SD
- [(match_operand:<VPRED> 4 "register_operand" "Upl, Upl, Upl")
- (match_operand:DI 1 "aarch64_reg_or_zero" "rk, Z, rk")
- (match_operand:<V_INT_EQUIV> 2 "register_operand" "w, w, w")
- (match_operand:DI 3 "aarch64_gather_scale_operand_<Vetype>" "Ui1, Ui1, i")
- (mem:BLK (scratch))]
- UNSPEC_GATHER_LOADU))]
- "TARGET_SVE"
- "@
- ld1<Vesize>\t%0.<Vetype>, %4/z, [%1, %2.<Vetype><gather_unscaled_modu>]
- ld1<Vesize>\t%0.<Vetype>, %4/z, [%2.<Vetype>]
- ld1<Vesize>\t%0.<Vetype>, %4/z, [%1, %2.<Vetype><gather_scaled_modu>]"
-)
-
-(define_expand "vec_scatter_stores<mode>"
- [(set (mem:BLK (scratch))
- (unspec:BLK
- [(match_dup 4)
- (match_operand:DI 0 "aarch64_reg_or_zero")
- (match_operand:<V_INT_EQUIV> 1 "register_operand")
- (match_operand:DI 2 "aarch64_gather_scale_operand_<Vetype>")
- (match_operand:SVE_SD 3 "register_operand")]
- UNSPEC_SCATTER_STORES))]
- "TARGET_SVE"
- {
- operands[4] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
- }
-)
-
-(define_insn "vec_mask_scatter_stores<mode>"
- [(set (mem:BLK (scratch))
- (unspec:BLK
- [(match_operand:<VPRED> 4 "register_operand" "Upl, Upl, Upl")
- (match_operand:DI 0 "aarch64_reg_or_zero" "rk, Z, rk")
- (match_operand:<V_INT_EQUIV> 1 "register_operand" "w, w, w")
- (match_operand:DI 2 "aarch64_gather_scale_operand_<Vetype>" "Ui1, Ui1, i")
- (match_operand:SVE_SD 3 "register_operand" "w, w, w")]
- UNSPEC_SCATTER_STORES))]
- "TARGET_SVE"
- "@
- st1<Vesize>\t%3.<Vetype>, %4, [%0, %1.<Vetype><gather_unscaled_mods>]
- st1<Vesize>\t%3.<Vetype>, %4, [%1.<Vetype>]
- st1<Vesize>\t%3.<Vetype>, %4, [%0, %1.<Vetype><gather_scaled_mods>]"
-)
-
-(define_expand "vec_scatter_storeu<mode>"
- [(set (mem:BLK (scratch))
- (unspec:BLK
- [(match_dup 4)
- (match_operand:DI 0 "aarch64_reg_or_zero")
- (match_operand:<V_INT_EQUIV> 1 "register_operand")
- (match_operand:DI 2 "aarch64_gather_scale_operand_<Vetype>")
- (match_operand:SVE_SD 3 "register_operand")]
- UNSPEC_SCATTER_STOREU))]
- "TARGET_SVE"
- {
- operands[4] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
- }
-)
-
-(define_insn "vec_mask_scatter_storeu<mode>"
- [(set (mem:BLK (scratch))
- (unspec:BLK
- [(match_operand:<VPRED> 4 "register_operand" "Upl, Upl, Upl")
- (match_operand:DI 0 "aarch64_reg_or_zero" "rk, Z, rk")
- (match_operand:<V_INT_EQUIV> 1 "register_operand" "w, w, w")
- (match_operand:DI 2 "aarch64_gather_scale_operand_<Vetype>" "Ui1, Ui1, i")
- (match_operand:SVE_SD 3 "register_operand" "w, w, w")]
- UNSPEC_SCATTER_STOREU))]
- "TARGET_SVE"
- "@
- st1<Vesize>\t%3.<Vetype>, %4, [%0, %1.<Vetype><gather_unscaled_modu>]
- st1<Vesize>\t%3.<Vetype>, %4, [%1.<Vetype>]
- st1<Vesize>\t%3.<Vetype>, %4, [%0, %1.<Vetype><gather_scaled_modu>]"
-)
-
;; Optimize {x, x, x, x, ...} + {0, n, 2*n, 3*n, ...} if n is in range
;; of an INDEX instruction.
(define_insn "*vec_series<mode>_plus"
@@ -691,160 +682,58 @@
}
)
-(define_expand "vec_load_lanes<vrl2><mode>"
- [(set (match_operand:<VRL2> 0 "register_operand")
- (unspec:<VRL2>
- [(match_dup 2)
- (match_operand:<VRL2> 1 "memory_operand")
- (unspec:SVE_ALL [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
- UNSPEC_LD2))]
- "TARGET_SVE"
- {
- operands[2] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
- }
-)
-
-(define_insn "vec_mask_load_lanes<vrl2><mode>"
- [(set (match_operand:<VRL2> 0 "register_operand" "=w")
- (unspec:<VRL2>
- [(match_operand:<VPRED> 2 "register_operand" "Upl")
- (match_operand:<VRL2> 1 "memory_operand" "m")
- (unspec:SVE_ALL [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
- UNSPEC_LD2))]
- "TARGET_SVE"
- "ld2<Vesize>\t{%S0.<Vetype> - %T0.<Vetype>}, %2/z, %1"
-)
-
-(define_expand "vec_load_lanes<vrl3><mode>"
- [(set (match_operand:<VRL3> 0 "register_operand")
- (unspec:<VRL3>
- [(match_dup 2)
- (match_operand:<VRL3> 1 "memory_operand")
- (unspec:SVE_ALL [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
- UNSPEC_LD3))]
- "TARGET_SVE"
- {
- operands[2] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
- }
-)
-
-(define_insn "vec_mask_load_lanes<vrl3><mode>"
- [(set (match_operand:<VRL3> 0 "register_operand" "=w")
- (unspec:<VRL3>
- [(match_operand:<VPRED> 2 "register_operand" "Upl")
- (match_operand:<VRL3> 1 "memory_operand" "m")
- (unspec:SVE_ALL [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
- UNSPEC_LD3))]
- "TARGET_SVE"
- "ld3<Vesize>\t{%S0.<Vetype> - %U0.<Vetype>}, %2/z, %1"
-)
-
-(define_expand "vec_load_lanes<vrl4><mode>"
- [(set (match_operand:<VRL4> 0 "register_operand")
- (unspec:<VRL4>
+;; Unpredicated LD[234].
+(define_expand "vec_load_lanes<mode><vsingle>"
+ [(set (match_operand:SVE_STRUCT 0 "register_operand")
+ (unspec:SVE_STRUCT
[(match_dup 2)
- (match_operand:<VRL4> 1 "memory_operand")
- (unspec:SVE_ALL [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
- UNSPEC_LD4))]
+ (match_operand:SVE_STRUCT 1 "memory_operand")]
+ UNSPEC_LDN))]
"TARGET_SVE"
{
operands[2] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
}
)
-(define_insn "vec_mask_load_lanes<vrl4><mode>"
- [(set (match_operand:<VRL4> 0 "register_operand" "=w")
- (unspec:<VRL4>
+;; Predicated LD[234].
+(define_insn "vec_mask_load_lanes<mode><vsingle>"
+ [(set (match_operand:SVE_STRUCT 0 "register_operand" "=w")
+ (unspec:SVE_STRUCT
[(match_operand:<VPRED> 2 "register_operand" "Upl")
- (match_operand:<VRL4> 1 "memory_operand" "m")
- (unspec:SVE_ALL [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
- UNSPEC_LD4))]
+ (match_operand:SVE_STRUCT 1 "memory_operand" "m")]
+ UNSPEC_LDN))]
"TARGET_SVE"
- "ld4<Vesize>\t{%S0.<Vetype> - %V0.<Vetype>}, %2/z, %1"
+ "ld<vector_count><Vesize>\t%0, %2/z, %1"
)
-;; This is always a full update, so the (match_dup 0) is redundant.
+;; Unpredicated ST[234]. This is always a full update, so the dependence
+;; on the old value of the memory location (via (match_dup 0)) is redundant.
;; There doesn't seem to be any obvious benefit to treating the all-true
;; case differently though. In particular, it's very unlikely that we'll
;; only find out during RTL that a store_lanes is dead.
-(define_expand "vec_store_lanes<vrl2><mode>"
- [(set (match_operand:<VRL2> 0 "memory_operand")
- (unspec:<VRL2>
- [(match_dup 2)
- (match_operand:<VRL2> 1 "register_operand")
- (unspec:SVE_ALL [(const_int 0)] UNSPEC_VSTRUCTDUMMY)
- (match_dup 0)]
- UNSPEC_ST2))]
- "TARGET_SVE"
- {
- operands[2] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
- }
-)
-
-(define_insn "vec_mask_store_lanes<vrl2><mode>"
- [(set (match_operand:<VRL2> 0 "memory_operand" "+m")
- (unspec:<VRL2>
- [(match_operand:<VPRED> 2 "register_operand" "Upl")
- (match_operand:<VRL2> 1 "register_operand" "w")
- (unspec:SVE_ALL [(const_int 0)] UNSPEC_VSTRUCTDUMMY)
- (match_dup 0)]
- UNSPEC_ST2))]
- "TARGET_SVE"
- "st2<Vesize>\t{%S1.<Vetype> - %T1.<Vetype>}, %2, %0"
-)
-
-;; See the comment above vec_store_lanes<vrl2><mode>.
-(define_expand "vec_store_lanes<vrl3><mode>"
- [(set (match_operand:<VRL3> 0 "memory_operand")
- (unspec:<VRL3>
- [(match_dup 2)
- (match_operand:<VRL3> 1 "register_operand")
- (unspec:SVE_ALL [(const_int 0)] UNSPEC_VSTRUCTDUMMY)
- (match_dup 0)]
- UNSPEC_ST3))]
- "TARGET_SVE"
- {
- operands[2] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
- }
-)
-
-(define_insn "vec_mask_store_lanes<vrl3><mode>"
- [(set (match_operand:<VRL3> 0 "memory_operand" "+m")
- (unspec:<VRL3>
- [(match_operand:<VPRED> 2 "register_operand" "Upl")
- (match_operand:<VRL3> 1 "register_operand" "w")
- (unspec:SVE_ALL [(const_int 0)] UNSPEC_VSTRUCTDUMMY)
- (match_dup 0)]
- UNSPEC_ST3))]
- "TARGET_SVE"
- "st3<Vesize>\t{%S1.<Vetype> - %U1.<Vetype>}, %2, %0"
-)
-
-;; See the comment above vec_store_lanes<vrl2><mode>.
-(define_expand "vec_store_lanes<vrl4><mode>"
- [(set (match_operand:<VRL4> 0 "memory_operand")
- (unspec:<VRL4>
+(define_expand "vec_store_lanes<mode><vsingle>"
+ [(set (match_operand:SVE_STRUCT 0 "memory_operand")
+ (unspec:SVE_STRUCT
[(match_dup 2)
- (match_operand:<VRL4> 1 "register_operand")
- (unspec:SVE_ALL [(const_int 0)] UNSPEC_VSTRUCTDUMMY)
+ (match_operand:SVE_STRUCT 1 "register_operand")
(match_dup 0)]
- UNSPEC_ST4))]
+ UNSPEC_STN))]
"TARGET_SVE"
{
operands[2] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
}
)
-(define_insn "vec_mask_store_lanes<vrl4><mode>"
- [(set (match_operand:<VRL4> 0 "memory_operand" "+m")
- (unspec:<VRL4>
+;; Predicated ST[234].
+(define_insn "vec_mask_store_lanes<mode><vsingle>"
+ [(set (match_operand:SVE_STRUCT 0 "memory_operand" "+m")
+ (unspec:SVE_STRUCT
[(match_operand:<VPRED> 2 "register_operand" "Upl")
- (match_operand:<VRL4> 1 "register_operand" "w")
- (unspec:SVE_ALL [(const_int 0)] UNSPEC_VSTRUCTDUMMY)
+ (match_operand:SVE_STRUCT 1 "register_operand" "w")
(match_dup 0)]
- UNSPEC_ST4))]
+ UNSPEC_STN))]
"TARGET_SVE"
- "st4<Vesize>\t{%S1.<Vetype> - %V1.<Vetype>}, %2, %0"
+ "st<vector_count><Vesize>\t%1, %2, %0"
)
(define_expand "vec_perm_const<mode>"
@@ -889,14 +778,13 @@
(unspec:SVE_ALL [(match_operand:SVE_ALL 1 "register_operand")
(match_operand:SVE_ALL 2 "register_operand")]
OPTAB_PERMUTE))]
- "TARGET_SVE")
+ "TARGET_SVE && !GET_MODE_NUNITS (<MODE>mode).is_constant ()")
-(define_insn "vec_reverse_<mode>"
- [(set (match_operand:SVE_ALL 0 "register_operand" "=w")
- (unspec:SVE_ALL [(match_operand:SVE_ALL 1 "register_operand" "w")]
+(define_expand "vec_reverse_<mode>"
+ [(set (match_operand:SVE_ALL 0 "register_operand")
+ (unspec:SVE_ALL [(match_operand:SVE_ALL 1 "register_operand")]
UNSPEC_REV))]
- "TARGET_SVE"
- "rev\t%0.<Vetype>, %1.<Vetype>")
+ "TARGET_SVE && !GET_MODE_NUNITS (<MODE>mode).is_constant ()")
(define_insn "*aarch64_sve_tbl<mode>"
[(set (match_operand:SVE_ALL 0 "register_operand" "=w")
@@ -929,7 +817,7 @@
(define_insn "*aarch64_sve_rev64<mode>"
[(set (match_operand:SVE_BHS 0 "register_operand" "=w")
(unspec:SVE_BHS
- [(match_operand:V4BI 1 "register_operand" "Upl")
+ [(match_operand:VNx2BI 1 "register_operand" "Upl")
(unspec:SVE_BHS [(match_operand:SVE_BHS 2 "register_operand" "w")]
UNSPEC_REV64)]
UNSPEC_MERGE_PTRUE))]
@@ -940,7 +828,7 @@
(define_insn "*aarch64_sve_rev32<mode>"
[(set (match_operand:SVE_BH 0 "register_operand" "=w")
(unspec:SVE_BH
- [(match_operand:V8BI 1 "register_operand" "Upl")
+ [(match_operand:VNx4BI 1 "register_operand" "Upl")
(unspec:SVE_BH [(match_operand:SVE_BH 2 "register_operand" "w")]
UNSPEC_REV32)]
UNSPEC_MERGE_PTRUE))]
@@ -948,17 +836,24 @@
"rev<Vesize>\t%0.s, %1/m, %2.s"
)
-(define_insn "*aarch64_sve_rev16v32qi"
- [(set (match_operand:V32QI 0 "register_operand" "=w")
- (unspec:V32QI
- [(match_operand:V16BI 1 "register_operand" "Upl")
- (unspec:V32QI [(match_operand:V32QI 2 "register_operand" "w")]
- UNSPEC_REV16)]
+(define_insn "*aarch64_sve_rev16vnx16qi"
+ [(set (match_operand:VNx16QI 0 "register_operand" "=w")
+ (unspec:VNx16QI
+ [(match_operand:VNx8BI 1 "register_operand" "Upl")
+ (unspec:VNx16QI [(match_operand:VNx16QI 2 "register_operand" "w")]
+ UNSPEC_REV16)]
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
"revb\t%0.h, %1/m, %2.h"
)
+(define_insn "*aarch64_sve_rev<mode>"
+ [(set (match_operand:SVE_ALL 0 "register_operand" "=w")
+ (unspec:SVE_ALL [(match_operand:SVE_ALL 1 "register_operand" "w")]
+ UNSPEC_REV))]
+ "TARGET_SVE"
+ "rev\t%0.<Vetype>, %1.<Vetype>")
+
(define_insn "*aarch64_sve_dup_lane<mode>"
[(set (match_operand:SVE_ALL 0 "register_operand" "=w")
(vec_duplicate:SVE_ALL
@@ -1237,16 +1132,6 @@
"<logical_nn>\t%0.b, %1/z, %2.b, %3.b"
)
-(define_insn "break_after_<mode>"
- [(set (match_operand:PRED_ALL 0 "register_operand" "=Upa")
- (unspec:PRED_ALL
- [(match_operand:PRED_ALL 1 "register_operand" "Upa")
- (match_operand:PRED_ALL 2 "register_operand" "Upa")]
- UNSPEC_BRKA))]
- "TARGET_SVE"
- "brka\t%0.b, %1/z, %2.b"
-)
-
;; Unpredicated LSL, LSR and ASR by a vector.
(define_expand "v<optab><mode>3"
[(set (match_operand:SVE_I 0 "register_operand")
@@ -1773,17 +1658,19 @@
"<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>"
)
-(define_insn "clastb<mode>"
+;; Set operand 0 to the last active element in operand 3, or to tied
+;; operand 1 if no elements are active.
+(define_insn "fold_extract_last_<mode>"
[(set (match_operand:<VEL> 0 "register_operand" "=r, w")
(unspec:<VEL>
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
- (match_operand:<VEL> 2 "register_operand" "0, 0")
+ [(match_operand:<VEL> 1 "register_operand" "0, 0")
+ (match_operand:<VPRED> 2 "register_operand" "Upl, Upl")
(match_operand:SVE_ALL 3 "register_operand" "w, w")]
UNSPEC_CLASTB))]
"TARGET_SVE"
"@
- clastb\t%<vwcore>0, %1, %<vwcore>0, %3.<Vetype>
- clastb\t%<vw>0, %1, %<vw>0, %3.<Vetype>"
+ clastb\t%<vwcore>0, %2, %<vwcore>0, %3.<Vetype>
+ clastb\t%<vw>0, %2, %<vw>0, %3.<Vetype>"
)
;; Unpredicated integer add reduction.
@@ -1874,7 +1761,7 @@
"<maxmin_uns_op>v\t%<Vetype>0, %1, %2.<Vetype>"
)
-(define_expand "reduc_<bit_reduc>_scal_<mode>"
+(define_expand "reduc_<optab>_scal_<mode>"
[(set (match_operand:<VEL> 0 "register_operand")
(unspec:<VEL> [(match_dup 2)
(match_operand:SVE_I 1 "register_operand")]
@@ -1885,7 +1772,7 @@
}
)
-(define_insn "*reduc_<bit_reduc>_scal_<mode>"
+(define_insn "*reduc_<optab>_scal_<mode>"
[(set (match_operand:<VEL> 0 "register_operand" "=w")
(unspec:<VEL> [(match_operand:<VPRED> 1 "register_operand" "Upl")
(match_operand:SVE_I 2 "register_operand" "w")]
@@ -1894,9 +1781,8 @@
"<bit_reduc_op>\t%<Vetype>0, %1, %2.<Vetype>"
)
-;; Strict FP reductions, i.e. in order as opposed to the tree based
-;; reductions used when -ffast-math is enabled
-(define_expand "strict_reduc_plus_scal_<mode>"
+;; Unpredicated in-order FP reductions.
+(define_expand "fold_left_plus_<mode>"
[(set (match_operand:<VEL> 0 "register_operand")
(unspec:<VEL> [(match_dup 3)
(match_operand:<VEL> 1 "register_operand")
@@ -1908,7 +1794,8 @@
}
)
-(define_insn "*strict_reduc_plus_scal_<mode>"
+;; In-order FP reductions predicated with PTRUE.
+(define_insn "*fold_left_plus_<mode>"
[(set (match_operand:<VEL> 0 "register_operand" "=w")
(unspec:<VEL> [(match_operand:<VPRED> 1 "register_operand" "Upl")
(match_operand:<VEL> 2 "register_operand" "0")
@@ -1918,18 +1805,18 @@
"fadda\t%<Vetype>0, %1, %<Vetype>0, %3.<Vetype>"
)
-;; Predicated form of the above strict FP reduction.
-(define_insn "strict_reduc_plus_pred_scal_<mode>"
+;; Predicated form of the above in-order reduction.
+(define_insn "*pred_fold_left_plus_<mode>"
[(set (match_operand:<VEL> 0 "register_operand" "=w")
(unspec:<VEL>
[(match_operand:<VEL> 1 "register_operand" "0")
(unspec:SVE_F
[(match_operand:<VPRED> 2 "register_operand" "Upl")
(match_operand:SVE_F 3 "register_operand" "w")
- (match_operand:SVE_F 4 "aarch64_constant_vector_operand")]
+ (match_operand:SVE_F 4 "aarch64_simd_imm_zero")]
UNSPEC_SEL)]
UNSPEC_FADDA))]
- "TARGET_SVE && aarch64_simd_identity_value (PLUS, <MODE>mode, operands[4])"
+ "TARGET_SVE"
"fadda\t%<Vetype>0, %2, %<Vetype>0, %3.<Vetype>"
)
@@ -2255,31 +2142,31 @@
(unspec:SVE_HSDI
[(match_operand:<VPRED> 1 "register_operand" "Upl")
(FIXUORS:SVE_HSDI
- (match_operand:V16HF 2 "register_operand" "w"))]
+ (match_operand:VNx8HF 2 "register_operand" "w"))]
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
"fcvtz<su>\t%0.<Vetype>, %1/m, %2.h"
)
;; Conversion of SF to DI or SI, predicated with a PTRUE.
-(define_insn "*<fix_trunc_optab>v8sf<mode>2"
+(define_insn "*<fix_trunc_optab>vnx4sf<mode>2"
[(set (match_operand:SVE_SDI 0 "register_operand" "=w")
(unspec:SVE_SDI
[(match_operand:<VPRED> 1 "register_operand" "Upl")
(FIXUORS:SVE_SDI
- (match_operand:V8SF 2 "register_operand" "w"))]
+ (match_operand:VNx4SF 2 "register_operand" "w"))]
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
"fcvtz<su>\t%0.<Vetype>, %1/m, %2.s"
)
;; Conversion of DF to DI or SI, predicated with a PTRUE.
-(define_insn "*<fix_trunc_optab>v4df<mode>2"
+(define_insn "*<fix_trunc_optab>vnx2df<mode>2"
[(set (match_operand:SVE_SDI 0 "register_operand" "=w")
(unspec:SVE_SDI
- [(match_operand:V4BI 1 "register_operand" "Upl")
+ [(match_operand:VNx2BI 1 "register_operand" "Upl")
(FIXUORS:SVE_SDI
- (match_operand:V4DF 2 "register_operand" "w"))]
+ (match_operand:VNx2DF 2 "register_operand" "w"))]
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
"fcvtz<su>\t%0.<Vetype>, %1/m, %2.d"
@@ -2302,11 +2189,11 @@
;; Conversion of DI, SI or HI to the same number of HFs, predicated
;; with a PTRUE.
-(define_insn "*<optab><mode>v16hf2"
- [(set (match_operand:V16HF 0 "register_operand" "=w")
- (unspec:V16HF
+(define_insn "*<optab><mode>vnx8hf2"
+ [(set (match_operand:VNx8HF 0 "register_operand" "=w")
+ (unspec:VNx8HF
[(match_operand:<VPRED> 1 "register_operand" "Upl")
- (FLOATUORS:V16HF
+ (FLOATUORS:VNx8HF
(match_operand:SVE_HSDI 2 "register_operand" "w"))]
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
@@ -2314,11 +2201,11 @@
)
;; Conversion of DI or SI to the same number of SFs, predicated with a PTRUE.
-(define_insn "*<optab><mode>v8sf2"
- [(set (match_operand:V8SF 0 "register_operand" "=w")
- (unspec:V8SF
+(define_insn "*<optab><mode>vnx4sf2"
+ [(set (match_operand:VNx4SF 0 "register_operand" "=w")
+ (unspec:VNx4SF
[(match_operand:<VPRED> 1 "register_operand" "Upl")
- (FLOATUORS:V8SF
+ (FLOATUORS:VNx4SF
(match_operand:SVE_SDI 2 "register_operand" "w"))]
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
@@ -2326,11 +2213,11 @@
)
;; Conversion of DI or SI to DF, predicated with a PTRUE.
-(define_insn "*<optab><mode>v4df2"
- [(set (match_operand:V4DF 0 "register_operand" "=w")
- (unspec:V4DF
- [(match_operand:V4BI 1 "register_operand" "Upl")
- (FLOATUORS:V4DF
+(define_insn "*<optab><mode>vnx2df2"
+ [(set (match_operand:VNx2DF 0 "register_operand" "=w")
+ (unspec:VNx2DF
+ [(match_operand:VNx2BI 1 "register_operand" "Upl")
+ (FLOATUORS:VNx2DF
(match_operand:SVE_SDI 2 "register_operand" "w"))]
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
@@ -2384,8 +2271,8 @@
)
;; Used by the vec_unpacks_<perm_hilo>_<mode> expander to unpack the bit
-;; representation of a V8SF or V16HF without conversion. The choice between
-;; signed and unsigned isn't significant.
+;; representation of a VNx4SF or VNx8HF without conversion. The choice
+;; between signed and unsigned isn't significant.
(define_insn "*vec_unpacku_<perm_hilo>_<mode>_no_convert"
[(set (match_operand:SVE_HSF 0 "register_operand" "=w")
(unspec:SVE_HSF [(match_operand:SVE_HSF 1 "register_operand" "w")]
@@ -2394,7 +2281,7 @@
"uunpk<perm_hilo>\t%0.<Vewtype>, %1.<Vetype>"
)
-;; Unpack one half of a V8SF to V4DF, or one half of a V16HF to V8SF.
+;; Unpack one half of a VNx4SF to VNx2DF, or one half of a VNx8HF to VNx4SF.
;; First unpack the source without conversion, then float-convert the
;; unpacked source.
(define_expand "vec_unpacks_<perm_hilo>_<mode>"
@@ -2412,21 +2299,22 @@
}
)
-;; Unpack one half of a V8SI to V4DF. First unpack from V8SI to V4DI,
-;; reinterpret the V4DI as a V8SI, then convert the unpacked V8SI to V4DF.
-(define_expand "vec_unpack<su_optab>_float_<perm_hilo>_v8si"
+;; Unpack one half of a VNx4SI to VNx2DF. First unpack from VNx4SI
+;; to VNx2DI, reinterpret the VNx2DI as a VNx4SI, then convert the
+;; unpacked VNx4SI to VNx2DF.
+(define_expand "vec_unpack<su_optab>_float_<perm_hilo>_vnx4si"
[(set (match_dup 2)
- (unspec:V4DI [(match_operand:V8SI 1 "register_operand")]
- UNPACK_UNSIGNED))
- (set (match_operand:V4DF 0 "register_operand")
- (unspec:V4DF [(match_dup 3)
- (FLOATUORS:V4DF (match_dup 4))]
- UNSPEC_MERGE_PTRUE))]
+ (unspec:VNx2DI [(match_operand:VNx4SI 1 "register_operand")]
+ UNPACK_UNSIGNED))
+ (set (match_operand:VNx2DF 0 "register_operand")
+ (unspec:VNx2DF [(match_dup 3)
+ (FLOATUORS:VNx2DF (match_dup 4))]
+ UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
{
- operands[2] = gen_reg_rtx (V4DImode);
- operands[3] = force_reg (V4BImode, CONSTM1_RTX (V4BImode));
- operands[4] = gen_rtx_SUBREG (V8SImode, operands[2], 0);
+ operands[2] = gen_reg_rtx (VNx2DImode);
+ operands[3] = force_reg (VNx2BImode, CONSTM1_RTX (VNx2BImode));
+ operands[4] = gen_rtx_SUBREG (VNx4SImode, operands[2], 0);
}
)
@@ -2480,24 +2368,24 @@
)
;; Convert two vectors of DF to SI and pack the results into a single vector.
-(define_expand "vec_pack_<su>fix_trunc_v4df"
+(define_expand "vec_pack_<su>fix_trunc_vnx2df"
[(set (match_dup 4)
- (unspec:V8SI
+ (unspec:VNx4SI
[(match_dup 3)
- (FIXUORS:V8SI (match_operand:V4DF 1 "register_operand"))]
+ (FIXUORS:VNx4SI (match_operand:VNx2DF 1 "register_operand"))]
UNSPEC_MERGE_PTRUE))
(set (match_dup 5)
- (unspec:V8SI
+ (unspec:VNx4SI
[(match_dup 3)
- (FIXUORS:V8SI (match_operand:V4DF 2 "register_operand"))]
+ (FIXUORS:VNx4SI (match_operand:VNx2DF 2 "register_operand"))]
UNSPEC_MERGE_PTRUE))
- (set (match_operand:V8SI 0 "register_operand")
- (unspec:V8SI [(match_dup 4) (match_dup 5)] UNSPEC_UZP1))]
+ (set (match_operand:VNx4SI 0 "register_operand")
+ (unspec:VNx4SI [(match_dup 4) (match_dup 5)] UNSPEC_UZP1))]
"TARGET_SVE"
{
- operands[3] = force_reg (V4BImode, CONSTM1_RTX (V4BImode));
- operands[4] = gen_reg_rtx (V8SImode);
- operands[5] = gen_reg_rtx (V8SImode);
+ operands[3] = force_reg (VNx2BImode, CONSTM1_RTX (VNx2BImode));
+ operands[4] = gen_reg_rtx (VNx4SImode);
+ operands[5] = gen_reg_rtx (VNx4SImode);
}
)
@@ -2538,6 +2426,16 @@
"<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>"
)
+(define_insn "break_after_<mode>"
+ [(set (match_operand:PRED_ALL 0 "register_operand" "=Upa")
+ (unspec:PRED_ALL
+ [(match_operand:PRED_ALL 1 "register_operand" "Upa")
+ (match_operand:PRED_ALL 2 "register_operand" "Upa")]
+ UNSPEC_BRKA))]
+ "TARGET_SVE"
+ "brka\t%0.b, %1/z, %2.b"
+)
+
(define_insn "read_nf<mode>"
[(set (match_operand:PRED_ALL 0 "register_operand" "=Upa")
(unspec:PRED_ALL [(reg:SI FFRT_REGNUM)] UNSPEC_READ_NF))
@@ -2575,6 +2473,7 @@
"cntp\t%0, %1, %2.<Vetype>"
)
+;; Shift an SVE vector left and insert a scalar into element 0.
(define_insn "vec_shl_insert_<mode>"
[(set (match_operand:SVE_ALL 0 "register_operand" "=w, w")
(unspec:SVE_ALL
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index b5a179784a6..1a70d4cde57 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -1141,10 +1141,10 @@ static bool
aarch64_sve_pred_mode_p (machine_mode mode)
{
return (TARGET_SVE
- && (mode == V32BImode
- || mode == V16BImode
- || mode == V8BImode
- || mode == V4BImode));
+ && (mode == VNx16BImode
+ || mode == VNx8BImode
+ || mode == VNx4BImode
+ || mode == VNx2BImode));
}
/* Three mutually-exclusive flags describing a vector or predicate type. */
@@ -1250,13 +1250,13 @@ aarch64_sve_pred_mode (unsigned int elem_nbytes)
if (TARGET_SVE)
{
if (elem_nbytes == 1)
- return V32BImode;
+ return VNx16BImode;
if (elem_nbytes == 2)
- return V16BImode;
+ return VNx8BImode;
if (elem_nbytes == 4)
- return V8BImode;
+ return VNx4BImode;
if (elem_nbytes == 8)
- return V4BImode;
+ return VNx2BImode;
}
return opt_machine_mode ();
}
@@ -3682,21 +3682,6 @@ aarch64_output_probe_stack_range (rtx reg1, rtx reg2)
return "";
}
-static bool
-aarch64_frame_pointer_required (void)
-{
- /* Use the frame pointer if enabled and it is not a leaf function, unless
- leaf frame pointer omission is disabled. If the frame pointer is enabled,
- force the frame pointer in leaf functions which use LR. */
- if (flag_omit_frame_pointer == 2
- && !(flag_omit_leaf_frame_pointer
- && crtl->is_leaf
- && !df_regs_ever_live_p (LR_REGNUM)))
- return true;
-
- return false;
-}
-
/* Mark the registers that need to be saved by the callee and calculate
the size of the callee-saved registers area and frame record (both FP
and LR may be omitted). */
@@ -3713,6 +3698,14 @@ aarch64_layout_frame (void)
cfun->machine->frame.emit_frame_chain
= frame_pointer_needed || crtl->calls_eh_return;
+ /* Emit a frame chain if the frame pointer is enabled.
+ If -momit-leaf-frame-pointer is used, do not use a frame chain
+ in leaf functions which do not use LR. */
+ if (flag_omit_frame_pointer == 2
+ && !(flag_omit_leaf_frame_pointer && crtl->is_leaf
+ && !df_regs_ever_live_p (LR_REGNUM)))
+ cfun->machine->frame.emit_frame_chain = true;
+
#define SLOT_NOT_REQUIRED (-2)
#define SLOT_REQUIRED (-1)
@@ -4504,15 +4497,16 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
{
/* We don't use aarch64_sub_sp here because we don't want to
repeatedly load TEMP1. */
+ rtx step = GEN_INT (-probe_interval);
if (probe_interval > ARITH_FACTOR)
- emit_move_insn (temp1, GEN_INT (-probe_interval));
- else
- temp1 = GEN_INT (-probe_interval);
+ {
+ emit_move_insn (temp1, step);
+ step = temp1;
+ }
for (HOST_WIDE_INT i = 0; i < rounded_size; i += probe_interval)
{
- rtx_insn *insn = emit_insn (gen_add2_insn (stack_pointer_rtx,
- temp1));
+ rtx_insn *insn = emit_insn (gen_add2_insn (stack_pointer_rtx, step));
add_reg_note (insn, REG_STACK_CHECK, const0_rtx);
if (probe_interval > ARITH_FACTOR)
@@ -4753,14 +4747,16 @@ aarch64_expand_prologue (void)
aarch64_add_offset (Pmode, hard_frame_pointer_rtx,
stack_pointer_rtx, callee_offset,
ip1_rtx, ip0_rtx, frame_pointer_needed);
- if (!frame_size.is_constant ())
+ if (frame_pointer_needed && !frame_size.is_constant ())
{
- /* Variable-sized frames need to describe the save slot address
- using DW_CFA_expression rather than DW_CFA_offset. This means
- that the locations of the registers that we've already saved
- do not automatically change as the CFA definition changes.
- We instead need to re-express the save slots with addresses
- based on the frame pointer rather than the stack pointer. */
+ /* Variable-sized frames need to describe the save slot
+ address using DW_CFA_expression rather than DW_CFA_offset.
+ This means that, without taking further action, the
+ locations of the registers that we've already saved would
+ remain based on the stack pointer even after we redefine
+ the CFA based on the frame pointer. We therefore need new
+ DW_CFA_expressions to re-express the save slots with addresses
+ based on the frame pointer. */
rtx_insn *insn = get_last_insn ();
gcc_assert (RTX_FRAME_RELATED_P (insn));
@@ -5907,6 +5903,80 @@ aarch64_legitimate_address_p (machine_mode mode, rtx x, bool strict_p,
return aarch64_classify_address (&addr, x, mode, strict_p, type);
}
+/* Implement TARGET_LEGITIMIZE_ADDRESS_DISPLACEMENT. */
+
+static bool
+aarch64_legitimize_address_displacement (rtx *offset1, rtx *offset2,
+ poly_int64 orig_offset,
+ machine_mode mode)
+{
+ HOST_WIDE_INT size;
+ if (GET_MODE_SIZE (mode).is_constant (&size))
+ {
+ HOST_WIDE_INT const_offset, second_offset;
+
+ /* A general SVE offset is A * VQ + B. Remove the A component from
+ coefficient 0 in order to get the constant B. */
+ const_offset = orig_offset.coeffs[0] - orig_offset.coeffs[1];
+
+ /* Split an out-of-range address displacement into a base and
+ offset. Use 4KB range for 1- and 2-byte accesses and a 16KB
+ range otherwise to increase opportunities for sharing the base
+ address of different sizes. Unaligned accesses use the signed
+ 9-bit range, TImode/TFmode use the intersection of signed
+ scaled 7-bit and signed 9-bit offset. */
+ if (mode == TImode || mode == TFmode)
+ second_offset = ((const_offset + 0x100) & 0x1f8) - 0x100;
+ else if ((const_offset & (size - 1)) != 0)
+ second_offset = ((const_offset + 0x100) & 0x1ff) - 0x100;
+ else
+ second_offset = const_offset & (size < 4 ? 0xfff : 0x3ffc);
+
+ if (second_offset == 0 || must_eq (orig_offset, second_offset))
+ return false;
+
+ /* Split the offset into second_offset and the rest. */
+ *offset1 = gen_int_mode (orig_offset - second_offset, Pmode);
+ *offset2 = gen_int_mode (second_offset, Pmode);
+ return true;
+ }
+ else
+ {
+ /* Get the mode we should use as the basis of the range. For structure
+ modes this is the mode of one vector. */
+ unsigned int vec_flags = aarch64_classify_vector_mode (mode);
+ machine_mode step_mode
+ = (vec_flags & VEC_STRUCT) != 0 ? SVE_BYTE_MODE : mode;
+
+ /* Get the "mul vl" multiplier we'd like to use. */
+ HOST_WIDE_INT factor = GET_MODE_SIZE (step_mode).coeffs[1];
+ HOST_WIDE_INT vnum = orig_offset.coeffs[1] / factor;
+ if (vec_flags & VEC_SVE_DATA)
+ /* LDR supports a 9-bit range, but the move patterns for
+ structure modes require all vectors to be in range of the
+ same base. The simplest way of accomodating that while still
+ promoting reuse of anchor points between different modes is
+ to use an 8-bit range unconditionally. */
+ vnum = ((vnum + 128) & 255) - 128;
+ else
+ /* Predicates are only handled singly, so we might as well use
+ the full range. */
+ vnum = ((vnum + 256) & 511) - 256;
+ if (vnum == 0)
+ return false;
+
+ /* Convert the "mul vl" multiplier into a byte offset. */
+ poly_int64 second_offset = GET_MODE_SIZE (step_mode) * vnum;
+ if (must_eq (second_offset, orig_offset))
+ return false;
+
+ /* Split the offset into second_offset and the rest. */
+ *offset1 = gen_int_mode (orig_offset - second_offset, Pmode);
+ *offset2 = gen_int_mode (second_offset, Pmode);
+ return true;
+ }
+}
+
/* Return the binary representation of floating point constant VALUE in INTVAL.
If the value cannot be converted, return false without setting INTVAL.
The conversion is done in the given MODE. */
@@ -6432,6 +6502,20 @@ aarch64_print_vector_float_operand (FILE *f, rtx x, bool negate)
return true;
}
+/* Return the equivalent letter for size. */
+static char
+sizetochar (int size)
+{
+ switch (size)
+ {
+ case 64: return 'd';
+ case 32: return 's';
+ case 16: return 'h';
+ case 8 : return 'b';
+ default: gcc_unreachable ();
+ }
+}
+
/* Print operand X to file F in a target specific manner according to CODE.
The acceptable formatting commands given by CODE are:
'c': An integer or symbol address without a preceding #
@@ -6715,7 +6799,18 @@ aarch64_print_operand (FILE *f, rtx x, int code)
{
case REG:
if (aarch64_sve_data_mode_p (GET_MODE (x)))
- asm_fprintf (f, "z%d", REGNO (x) - V0_REGNUM);
+ {
+ if (REG_NREGS (x) == 1)
+ asm_fprintf (f, "z%d", REGNO (x) - V0_REGNUM);
+ else
+ {
+ char suffix
+ = sizetochar (GET_MODE_UNIT_BITSIZE (GET_MODE (x)));
+ asm_fprintf (f, "{z%d.%c - z%d.%c}",
+ REGNO (x) - V0_REGNUM, suffix,
+ END_REGNO (x) - V0_REGNUM - 1, suffix);
+ }
+ }
else
asm_fprintf (f, "%s", reg_names [REGNO (x)]);
break;
@@ -7193,80 +7288,6 @@ aarch64_legitimize_address (rtx x, rtx /* orig_x */, machine_mode mode)
return x;
}
-/* Implement TARGET_LEGITIMIZE_ADDRESS_DISPLACEMENT. */
-
-static bool
-aarch64_legitimize_address_displacement (rtx *offset1, rtx *offset2,
- poly_int64 orig_offset,
- machine_mode mode)
-{
- HOST_WIDE_INT size;
- if (GET_MODE_SIZE (mode).is_constant (&size))
- {
- HOST_WIDE_INT const_offset, second_offset;
-
- /* Remove the polynomial part of the offset to get a purely
- constant one. */
- const_offset = orig_offset.coeffs[0] - orig_offset.coeffs[1];
-
- /* Split an out-of-range address displacement into a base and
- offset. Use 4KB range for 1- and 2-byte accesses and a 16KB
- range otherwise to increase opportunities for sharing the base
- address of different sizes. Unaligned accesses use the signed
- 9-bit range, TImode/TFmode use the intersection of signed
- scaled 7-bit and signed 9-bit offset. */
- if (mode == TImode || mode == TFmode)
- second_offset = ((const_offset + 0x100) & 0x1f8) - 0x100;
- else if ((const_offset & (size - 1)) != 0)
- second_offset = ((const_offset + 0x100) & 0x1ff) - 0x100;
- else
- second_offset = const_offset & (size < 4 ? 0xfff : 0x3ffc);
-
- if (second_offset == 0 || must_eq (orig_offset, second_offset))
- return false;
-
- /* Split the offset into second_offset and the rest. */
- *offset1 = gen_int_mode (orig_offset - second_offset, Pmode);
- *offset2 = gen_int_mode (second_offset, Pmode);
- return true;
- }
- else
- {
- /* Get the mode we should use as the basis of the range. For structure
- modes this is the mode of one vector. */
- unsigned int vec_flags = aarch64_classify_vector_mode (mode);
- machine_mode step_mode
- = (vec_flags & VEC_STRUCT) != 0 ? SVE_BYTE_MODE : mode;
-
- /* Get the "mul vl" multiplier we'd like to use. */
- HOST_WIDE_INT factor = GET_MODE_SIZE (step_mode).coeffs[1];
- HOST_WIDE_INT vnum = orig_offset.coeffs[1] / factor;
- if (vec_flags & VEC_SVE_DATA)
- /* LDR supports a 9-bit range, but the move patterns for
- structure modes require all vectors to be in range of the
- same base. The simplest way of accomodating that while still
- promoting reuse of anchor points between different modes is
- to use an 8-bit range unconditionally. */
- vnum = ((vnum + 128) & 255) - 128;
- else
- /* Predicates are only handled singly, so we might as well use
- the full range. */
- vnum = ((vnum + 256) & 511) - 256;
- if (vnum == 0)
- return false;
-
- /* Convert the "mul vl" multiplier into a byte offset. */
- poly_int64 second_offset = GET_MODE_SIZE (step_mode) * vnum;
- if (must_eq (second_offset, orig_offset))
- return false;
-
- /* Split the offset into second_offset and the rest. */
- *offset1 = gen_int_mode (orig_offset - second_offset, Pmode);
- *offset2 = gen_int_mode (second_offset, Pmode);
- return true;
- }
-}
-
/* Return the reload icode required for a constant pool in mode. */
static enum insn_code
aarch64_constant_pool_reload_icode (machine_mode mode)
@@ -7365,37 +7386,14 @@ aarch64_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x,
}
static bool
-aarch64_can_eliminate (const int from, const int to)
+aarch64_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
{
- /* If we need a frame pointer, we must eliminate FRAME_POINTER_REGNUM into
- HARD_FRAME_POINTER_REGNUM and not into STACK_POINTER_REGNUM. */
+ gcc_assert (from == ARG_POINTER_REGNUM || from == FRAME_POINTER_REGNUM);
+ /* If we need a frame pointer, ARG_POINTER_REGNUM and FRAME_POINTER_REGNUM
+ can only eliminate to HARD_FRAME_POINTER_REGNUM. */
if (frame_pointer_needed)
- {
- if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
- return true;
- if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
- return false;
- if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM
- && !cfun->calls_alloca)
- return true;
- if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
- return true;
-
- return false;
- }
- else
- {
- /* If we decided that we didn't need a leaf frame pointer but then used
- LR in the function, then we'll want a frame pointer after all, so
- prevent this elimination to ensure a frame pointer is used. */
- if (to == STACK_POINTER_REGNUM
- && flag_omit_frame_pointer == 2
- && flag_omit_leaf_frame_pointer
- && df_regs_ever_live_p (LR_REGNUM))
- return false;
- }
-
+ return to == HARD_FRAME_POINTER_REGNUM;
return true;
}
@@ -12750,19 +12748,19 @@ aarch64_simd_container_mode (scalar_mode mode, poly_int64 width)
switch (mode)
{
case E_DFmode:
- return V4DFmode;
+ return VNx2DFmode;
case E_SFmode:
- return V8SFmode;
+ return VNx4SFmode;
case E_HFmode:
- return V16HFmode;
+ return VNx8HFmode;
case E_DImode:
- return V4DImode;
+ return VNx2DImode;
case E_SImode:
- return V8SImode;
+ return VNx4SImode;
case E_HImode:
- return V16HImode;
+ return VNx8HImode;
case E_QImode:
- return V32QImode;
+ return VNx16QImode;
default:
return word_mode;
}
@@ -12972,20 +12970,6 @@ aarch64_final_prescan_insn (rtx_insn *insn)
}
-/* Return the equivalent letter for size. */
-static char
-sizetochar (int size)
-{
- switch (size)
- {
- case 64: return 'd';
- case 32: return 's';
- case 16: return 'h';
- case 8 : return 'b';
- default: gcc_unreachable ();
- }
-}
-
/* Return true if BASE_OR_STEP is a valid immediate operand for an SVE INDEX
instruction. */
@@ -13608,15 +13592,13 @@ aarch64_sve_struct_memory_operand_p (rtx op)
machine_mode mode = GET_MODE (op);
struct aarch64_address_info addr;
- if (!aarch64_classify_address (&addr, XEXP (op, 0), mode, false,
+ if (!aarch64_classify_address (&addr, XEXP (op, 0), SVE_BYTE_MODE, false,
ADDR_QUERY_ANY)
|| addr.type != ADDRESS_REG_IMM)
return false;
poly_int64 first = addr.const_offset;
- poly_int64 last = (first
- + GET_MODE_SIZE (mode)
- - GET_MODE_SIZE (SVE_BYTE_MODE));
+ poly_int64 last = first + GET_MODE_SIZE (mode) - BYTES_PER_SVE_VECTOR;
return (offset_4bit_signed_scaled_p (SVE_BYTE_MODE, first)
&& offset_4bit_signed_scaled_p (SVE_BYTE_MODE, last));
}
@@ -15388,10 +15370,11 @@ aarch64_evpc_ext (struct expand_vec_perm_d *d)
return true;
}
-/* Recognize patterns for the REV insns. */
+/* Recognize patterns for the REV{64,32,16} insns, which reverse elements
+ within each 64-bit, 32-bit or 16-bit granule. */
static bool
-aarch64_evpc_rev (struct expand_vec_perm_d *d)
+aarch64_evpc_rev_local (struct expand_vec_perm_d *d)
{
unsigned int i, j, diff, size, unspec, nelt = d->perm.length ();
machine_mode pred_mode;
@@ -15404,17 +15387,17 @@ aarch64_evpc_rev (struct expand_vec_perm_d *d)
if (size == 8)
{
unspec = UNSPEC_REV64;
- pred_mode = V4BImode;
+ pred_mode = VNx2BImode;
}
else if (size == 4)
{
unspec = UNSPEC_REV32;
- pred_mode = V8BImode;
+ pred_mode = VNx4BImode;
}
else if (size == 2)
{
unspec = UNSPEC_REV16;
- pred_mode = V16BImode;
+ pred_mode = VNx8BImode;
}
else
return false;
@@ -15447,6 +15430,30 @@ aarch64_evpc_rev (struct expand_vec_perm_d *d)
return true;
}
+/* Recognize patterns for the REV insn, which reverses elements within
+ a full vector. */
+
+static bool
+aarch64_evpc_rev_global (struct expand_vec_perm_d *d)
+{
+ unsigned int i, nelt = d->perm.length ();
+
+ if (!d->one_vector_p || d->vec_flags != VEC_SVE_DATA)
+ return false;
+
+ for (i = 0; i < nelt; ++i)
+ if (d->perm[i] != nelt - i - 1)
+ return false;
+
+ /* Success! */
+ if (d->testing_p)
+ return true;
+
+ rtx src = gen_rtx_UNSPEC (d->vmode, gen_rtvec (1, d->op0), UNSPEC_REV);
+ emit_set_insn (d->target, src);
+ return true;
+}
+
static bool
aarch64_evpc_dup (struct expand_vec_perm_d *d)
{
@@ -15551,7 +15558,9 @@ aarch64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
if ((d->vec_flags == VEC_ADVSIMD || d->vec_flags == VEC_SVE_DATA)
&& nelt > 1)
{
- if (aarch64_evpc_rev (d))
+ if (aarch64_evpc_rev_local (d))
+ return true;
+ else if (aarch64_evpc_rev_global (d))
return true;
else if (aarch64_evpc_ext (d))
return true;
@@ -16214,16 +16223,6 @@ aarch64_asan_shadow_offset (void)
return (HOST_WIDE_INT_1 << 36);
}
-/* Implement the TARGET_GATHER_SCATTER_SUPPORTS_SCALE_P hook */
-
-static bool
-aarch64_gather_scatter_supports_scale_p (bool gather_p ATTRIBUTE_UNUSED,
- unsigned int offset_bitsize,
- unsigned int scale)
-{
- return offset_bitsize == scale * 8 || scale == 1;
-}
-
static bool
aarch64_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT size,
unsigned int align,
@@ -17429,6 +17428,22 @@ aarch64_can_change_mode_class (machine_mode from,
return true;
}
+/* Implement TARGET_EARLY_REMAT_MODES. */
+
+static void
+aarch64_select_early_remat_modes (sbitmap modes)
+{
+ /* SVE values are not normally live across a call, so it should be
+ worth doing early rematerialization even in VL-specific mode. */
+ for (int i = 0; i < NUM_MACHINE_MODES; ++i)
+ {
+ machine_mode mode = (machine_mode) i;
+ unsigned int vec_flags = aarch64_classify_vector_mode (mode);
+ if (vec_flags & VEC_ANY_SVE)
+ bitmap_set_bit (modes, i);
+ }
+}
+
/* Target-specific selftests. */
#if CHECKING_P
@@ -17591,9 +17606,6 @@ aarch64_run_selftests (void)
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P aarch64_function_value_regno_p
-#undef TARGET_FRAME_POINTER_REQUIRED
-#define TARGET_FRAME_POINTER_REQUIRED aarch64_frame_pointer_required
-
#undef TARGET_GIMPLE_FOLD_BUILTIN
#define TARGET_GIMPLE_FOLD_BUILTIN aarch64_gimple_fold_builtin
@@ -17833,10 +17845,6 @@ aarch64_libgcc_floating_mode_supported_p
#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS aarch64_legitimize_address
-#undef TARGET_GATHER_SCATTER_SUPPORTS_SCALE_P
-#define TARGET_GATHER_SCATTER_SUPPORTS_SCALE_P \
- aarch64_gather_scatter_supports_scale_p
-
#undef TARGET_USE_BY_PIECES_INFRASTRUCTURE_P
#define TARGET_USE_BY_PIECES_INFRASTRUCTURE_P \
aarch64_use_by_pieces_infrastructure_p
@@ -17910,6 +17918,9 @@ aarch64_libgcc_floating_mode_supported_p
#undef TARGET_CAN_CHANGE_MODE_CLASS
#define TARGET_CAN_CHANGE_MODE_CLASS aarch64_can_change_mode_class
+#undef TARGET_SELECT_EARLY_REMAT_MODES
+#define TARGET_SELECT_EARLY_REMAT_MODES aarch64_select_early_remat_modes
+
#if CHECKING_P
#undef TARGET_RUN_TARGET_SELFTESTS
#define TARGET_RUN_TARGET_SELFTESTS selftest::aarch64_run_selftests
diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h
index 5816bc6c1e4..bc87f6e601b 100644
--- a/gcc/config/aarch64/aarch64.h
+++ b/gcc/config/aarch64/aarch64.h
@@ -1009,7 +1009,7 @@ extern poly_uint16 aarch64_sve_vg;
#define BYTES_PER_SVE_PRED aarch64_sve_vg
/* The SVE mode for a vector of bytes. */
-#define SVE_BYTE_MODE V32QImode
+#define SVE_BYTE_MODE VNx16QImode
/* The maximum number of bytes in a fixed-size vector. This is 256 bytes
(for -msve-vector-bits=2048) multiplied by the maximum number of
diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
index 6a15ff0b61d..898cc7bbeb8 100644
--- a/gcc/config/aarch64/aarch64.md
+++ b/gcc/config/aarch64/aarch64.md
@@ -150,14 +150,10 @@
UNSPEC_RSQRTS
UNSPEC_NZCV
UNSPEC_XPACLRI
- UNSPEC_GATHER_LOADS
- UNSPEC_GATHER_LOADU
- UNSPEC_PRED_GATHER_LOADS
- UNSPEC_PRED_GATHER_LOADU
- UNSPEC_SCATTER_STORES
- UNSPEC_SCATTER_STOREU
UNSPEC_LD1_SVE
UNSPEC_ST1_SVE
+ UNSPEC_LD1_GATHER
+ UNSPEC_ST1_SCATTER
UNSPEC_MERGE_PTRUE
UNSPEC_PTEST_PTRUE
UNSPEC_UNPACKSHI
@@ -167,12 +163,16 @@
UNSPEC_PACK
UNSPEC_FLOAT_CONVERT
UNSPEC_WHILE_LO
+ UNSPEC_LDN
+ UNSPEC_STN
+ UNSPEC_INSR
UNSPEC_CLASTB
+ UNSPEC_FADDA
+ UNSPEC_CNTP
+ UNSPEC_BRKA
UNSPEC_LDFF1
UNSPEC_READ_NF
UNSPEC_WRITE_NF
- UNSPEC_CNTP
- UNSPEC_INSR
])
(define_c_enum "unspecv" [
@@ -3519,8 +3519,8 @@
;; A = UQDEC[BHWD] (B, X)
;;
;; We don't use match_operand predicates because the order of the operands
-;; can vary: CNT[BHWD] will come first if the other operand is a simpler
-;; constant (such as a CONST_INT), otherwise it will come second.
+;; can vary: the CNT[BHWD] constant will come first if the other operand is
+;; a simpler constant (such as a CONST_INT), otherwise it will come second.
(define_expand "umax<mode>3"
[(set (match_operand:GPI 0 "register_operand")
(umax:GPI (match_operand:GPI 1 "")
@@ -3539,6 +3539,7 @@
}
)
+;; Saturating unsigned subtraction of a CNT[BHWD] immediate.
(define_insn "aarch64_uqdec<mode>"
[(set (match_operand:GPI 0 "register_operand" "=r")
(minus:GPI
diff --git a/gcc/config/aarch64/constraints.md b/gcc/config/aarch64/constraints.md
index 2a8722c4c86..8b46f719f3f 100644
--- a/gcc/config/aarch64/constraints.md
+++ b/gcc/config/aarch64/constraints.md
@@ -190,6 +190,15 @@
(match_test "aarch64_legitimate_address_p (GET_MODE (op), XEXP (op, 0),
true, ADDR_QUERY_LDP_STP)")))
+;; Used for storing two 64-bit values in an AdvSIMD register using an STP
+;; as a 128-bit vec_concat.
+(define_memory_constraint "Uml"
+ "@internal
+ A memory address suitable for a load/store pair operation."
+ (and (match_code "mem")
+ (match_test "aarch64_legitimate_address_p (DFmode, XEXP (op, 0), 1,
+ ADDR_QUERY_LDP_STP)")))
+
(define_memory_constraint "Utf"
"@internal
An address valid for SVE LDFF1s."
@@ -210,7 +219,14 @@
(and (match_code "mem")
(match_test "aarch64_simd_mem_operand_p (op)")))
-(define_memory_constraint "Utw"
+(define_memory_constraint "Utq"
+ "@internal
+ An address valid for loading or storing a 128-bit AdvSIMD register"
+ (and (match_code "mem")
+ (match_test "aarch64_legitimate_address_p (V2DImode,
+ XEXP (op, 0), 1)")))
+
+(define_memory_constraint "Uty"
"@internal
An address valid for SVE LD1Rs."
(and (match_code "mem")
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index 2a4e26fb940..7eea775ce54 100644
--- a/gcc/config/aarch64/iterators.md
+++ b/gcc/config/aarch64/iterators.md
@@ -247,45 +247,55 @@
(define_mode_iterator VMUL_CHANGE_NLANES [V4HI V8HI V2SI V4SI V2SF V4SF])
;; All SVE vector modes.
-(define_mode_iterator SVE_ALL [V32QI V16HI V8SI V4DI V16HF V8SF V4DF])
+(define_mode_iterator SVE_ALL [VNx16QI VNx8HI VNx4SI VNx2DI
+ VNx8HF VNx4SF VNx2DF])
;; All SVE vector structure modes.
-(define_mode_iterator SVE_STRUCT [V64QI V32HI V16SI V8DI V32HF V16SF V8DF
- V96QI V48HI V24SI V12DI V48HF V24SF V12DF
- V128QI V64HI V32SI V16DI V64HF V32SF V16DF])
+(define_mode_iterator SVE_STRUCT [VNx32QI VNx16HI VNx8SI VNx4DI
+ VNx16HF VNx8SF VNx4DF
+ VNx48QI VNx24HI VNx12SI VNx6DI
+ VNx24HF VNx12SF VNx6DF
+ VNx64QI VNx32HI VNx16SI VNx8DI
+ VNx32HF VNx16SF VNx8DF])
;; All SVE vector modes that have 8-bit or 16-bit elements.
-(define_mode_iterator SVE_BH [V32QI V16HI V16HF])
+(define_mode_iterator SVE_BH [VNx16QI VNx8HI VNx8HF])
;; All SVE vector modes that have 8-bit, 16-bit or 32-bit elements.
-(define_mode_iterator SVE_BHS [V32QI V16HI V8SI V16HF V8SF])
+(define_mode_iterator SVE_BHS [VNx16QI VNx8HI VNx4SI VNx8HF VNx4SF])
;; All SVE integer vector modes that have 8-bit, 16-bit or 32-bit elements.
-(define_mode_iterator SVE_BHSI [V32QI V16HI V8SI])
+(define_mode_iterator SVE_BHSI [VNx16QI VNx8HI VNx4SI])
;; All SVE integer vector modes that have 16-bit, 32-bit or 64-bit elements.
-(define_mode_iterator SVE_HSDI [V32QI V16HI V8SI])
+(define_mode_iterator SVE_HSDI [VNx16QI VNx8HI VNx4SI])
;; All SVE floating-point vector modes that have 16-bit or 32-bit elements.
-(define_mode_iterator SVE_HSF [V16HF V8SF])
+(define_mode_iterator SVE_HSF [VNx8HF VNx4SF])
;; All SVE vector modes that have 32-bit or 64-bit elements.
-(define_mode_iterator SVE_SD [V8SI V4DI V8SF V4DF])
+(define_mode_iterator SVE_SD [VNx4SI VNx2DI VNx4SF VNx2DF])
+
+;; All SVE vector modes that have 32-bit elements.
+(define_mode_iterator SVE_S [VNx4SI VNx4SF])
+
+;; All SVE vector modes that have 64-bit elements.
+(define_mode_iterator SVE_D [VNx2DI VNx2DF])
;; All SVE integer vector modes that have 32-bit or 64-bit elements.
-(define_mode_iterator SVE_SDI [V8SI V4DI])
+(define_mode_iterator SVE_SDI [VNx4SI VNx2DI])
;; All SVE integer vector modes.
-(define_mode_iterator SVE_I [V32QI V16HI V8SI V4DI])
+(define_mode_iterator SVE_I [VNx16QI VNx8HI VNx4SI VNx2DI])
;; All SVE floating-point vector modes.
-(define_mode_iterator SVE_F [V16HF V8SF V4DF])
+(define_mode_iterator SVE_F [VNx8HF VNx4SF VNx2DF])
;; All SVE predicate modes.
-(define_mode_iterator PRED_ALL [V32BI V16BI V8BI V4BI])
+(define_mode_iterator PRED_ALL [VNx16BI VNx8BI VNx4BI VNx2BI])
;; SVE predicate modes that control 8-bit, 16-bit or 32-bit elements.
-(define_mode_iterator PRED_BHS [V32BI V16BI V8BI])
+(define_mode_iterator PRED_BHS [VNx16BI VNx8BI VNx4BI])
;; ------------------------------------------------------------------
;; Unspec enumerations for Advance SIMD. These could well go into
@@ -411,7 +421,6 @@
UNSPEC_ANDF ; Used in aarch64-sve.md.
UNSPEC_IORF ; Used in aarch64-sve.md.
UNSPEC_XORF ; Used in aarch64-sve.md.
- UNSPEC_FADDA ; Used in aarch64-sve.md.
UNSPEC_COND_ADD ; Used in aarch64-sve.md.
UNSPEC_COND_SUB ; Used in aarch64-sve.md.
UNSPEC_COND_MUL ; Used in aarch64-sve.md.
@@ -438,7 +447,6 @@
UNSPEC_COND_HI ; Used in aarch64-sve.md.
UNSPEC_COND_UO ; Used in aarch64-sve.md.
UNSPEC_LASTB ; Used in aarch64-sve.md.
- UNSPEC_BRKA ; Used in aarch64-sve.md.
])
;; ------------------------------------------------------------------
@@ -596,23 +604,30 @@
(HI "")])
;; Mode-to-individual element type mapping.
-(define_mode_attr Vetype [(V8QI "b") (V16QI "b") (V32QI "b") (V32BI "b")
- (V4HI "h") (V8HI "h") (V16HI "h") (V16BI "h")
- (V2SI "s") (V4SI "s") (V8SI "s") (V8BI "s")
- (V2DI "d") (V4DI "d") (V4BI "d")
- (V4HF "h") (V8HF "h") (V16HF "h")
- (V2SF "s") (V4SF "s") (V8SF "s")
- (V2DF "d") (V4DF "d")
+(define_mode_attr Vetype [(V8QI "b") (V16QI "b") (VNx16QI "b") (VNx16BI "b")
+ (V4HI "h") (V8HI "h") (VNx8HI "h") (VNx8BI "h")
+ (V2SI "s") (V4SI "s") (VNx4SI "s") (VNx4BI "s")
+ (V2DI "d") (VNx2DI "d") (VNx2BI "d")
+ (V4HF "h") (V8HF "h") (VNx8HF "h")
+ (V2SF "s") (V4SF "s") (VNx4SF "s")
+ (V2DF "d") (VNx2DF "d")
(HF "h")
(SF "s") (DF "d")
(QI "b") (HI "h")
(SI "s") (DI "d")])
;; Equivalent of "size" for a vector element.
-(define_mode_attr Vesize [(V32QI "b")
- (V16HI "h") (V16HF "h")
- (V8SI "w") (V8SF "w")
- (V4DI "d") (V4DF "d")])
+(define_mode_attr Vesize [(VNx16QI "b")
+ (VNx8HI "h") (VNx8HF "h")
+ (VNx4SI "w") (VNx4SF "w")
+ (VNx2DI "d") (VNx2DF "d")
+ (VNx32QI "b") (VNx48QI "b") (VNx64QI "b")
+ (VNx16HI "h") (VNx24HI "h") (VNx32HI "h")
+ (VNx16HF "h") (VNx24HF "h") (VNx32HF "h")
+ (VNx8SI "w") (VNx12SI "w") (VNx16SI "w")
+ (VNx8SF "w") (VNx12SF "w") (VNx16SF "w")
+ (VNx4DI "d") (VNx6DI "d") (VNx8DI "d")
+ (VNx4DF "d") (VNx6DF "d") (VNx8DF "d")])
;; Vetype is used everywhere in scheduling type and assembly output,
;; sometimes they are not the same, for example HF modes on some
@@ -635,44 +650,44 @@
(SI "8b")])
;; Define element mode for each vector mode.
-(define_mode_attr VEL [(V8QI "QI") (V16QI "QI") (V32QI "QI")
- (V4HI "HI") (V8HI "HI") (V16HI "HI")
- (V2SI "SI") (V4SI "SI") (V8SI "SI")
- (DI "DI") (V2DI "DI") (V4DI "DI")
- (V4HF "HF") (V8HF "HF") (V16HF "HF")
- (V2SF "SF") (V4SF "SF") (V8SF "SF")
- (DF "DF") (V2DF "DF") (V4DF "DF")
+(define_mode_attr VEL [(V8QI "QI") (V16QI "QI") (VNx16QI "QI")
+ (V4HI "HI") (V8HI "HI") (VNx8HI "HI")
+ (V2SI "SI") (V4SI "SI") (VNx4SI "SI")
+ (DI "DI") (V2DI "DI") (VNx2DI "DI")
+ (V4HF "HF") (V8HF "HF") (VNx8HF "HF")
+ (V2SF "SF") (V4SF "SF") (VNx4SF "SF")
+ (DF "DF") (V2DF "DF") (VNx2DF "DF")
(SI "SI") (HI "HI")
(QI "QI")])
;; Define element mode for each vector mode (lower case).
-(define_mode_attr Vel [(V8QI "qi") (V16QI "qi") (V32QI "qi")
- (V4HI "hi") (V8HI "hi") (V16HI "hi")
- (V2SI "si") (V4SI "si") (V8SI "si")
- (DI "di") (V2DI "di") (V4DI "di")
- (V4HF "hf") (V8HF "hf") (V16HF "hf")
- (V2SF "sf") (V4SF "sf") (V8SF "sf")
- (V2DF "df") (DF "df") (V4DF "df")
+(define_mode_attr Vel [(V8QI "qi") (V16QI "qi") (VNx16QI "qi")
+ (V4HI "hi") (V8HI "hi") (VNx8HI "hi")
+ (V2SI "si") (V4SI "si") (VNx4SI "si")
+ (DI "di") (V2DI "di") (VNx2DI "di")
+ (V4HF "hf") (V8HF "hf") (VNx8HF "hf")
+ (V2SF "sf") (V4SF "sf") (VNx4SF "sf")
+ (V2DF "df") (DF "df") (VNx2DF "df")
(SI "si") (HI "hi")
(QI "qi")])
;; Element mode with floating-point values replaced by like-sized integers.
-(define_mode_attr VEL_INT [(V32QI "QI")
- (V16HI "HI") (V16HF "HI")
- (V8SI "SI") (V8SF "SI")
- (V4DI "DI") (V4DF "DI")])
+(define_mode_attr VEL_INT [(VNx16QI "QI")
+ (VNx8HI "HI") (VNx8HF "HI")
+ (VNx4SI "SI") (VNx4SF "SI")
+ (VNx2DI "DI") (VNx2DF "DI")])
;; Gives the mode of the 128-bit lowpart of an SVE vector.
-(define_mode_attr V128 [(V32QI "V16QI")
- (V16HI "V8HI") (V16HF "V8HF")
- (V8SI "V4SI") (V8SF "V4SF")
- (V4DI "V2DI") (V4DF "V2DF")])
+(define_mode_attr V128 [(VNx16QI "V16QI")
+ (VNx8HI "V8HI") (VNx8HF "V8HF")
+ (VNx4SI "V4SI") (VNx4SF "V4SF")
+ (VNx2DI "V2DI") (VNx2DF "V2DF")])
;; ...and again in lower case.
-(define_mode_attr v128 [(V32QI "v16qi")
- (V16HI "v8hi") (V16HF "v8hf")
- (V8SI "v4si") (V8SF "v4sf")
- (V4DI "v2di") (V4DF "v2df")])
+(define_mode_attr v128 [(VNx16QI "v16qi")
+ (VNx8HI "v8hi") (VNx8HF "v8hf")
+ (VNx4SI "v4si") (VNx4SF "v4sf")
+ (VNx2DI "v2di") (VNx2DF "v2df")])
;; 64-bit container modes the inner or scalar source mode.
(define_mode_attr VCOND [(HI "V4HI") (SI "V2SI")
@@ -758,22 +773,22 @@
(HI "SI") (SI "DI")
(V8HF "V4SF") (V4SF "V2DF")
(V4HF "V4SF") (V2SF "V2DF")
- (V16HF "V8SF") (V8SF "V4DF")
- (V32QI "V16HI") (V16HI "V8SI")
- (V8SI "V4DI")
- (V32BI "V16BI") (V16BI "V8BI")
- (V8BI "V4BI")])
+ (VNx8HF "VNx4SF") (VNx4SF "VNx2DF")
+ (VNx16QI "VNx8HI") (VNx8HI "VNx4SI")
+ (VNx4SI "VNx2DI")
+ (VNx16BI "VNx8BI") (VNx8BI "VNx4BI")
+ (VNx4BI "VNx2BI")])
;; Predicate mode associated with VWIDE.
-(define_mode_attr VWIDE_PRED [(V16HF "V8BI") (V8SF "V4BI")])
+(define_mode_attr VWIDE_PRED [(VNx8HF "VNx4BI") (VNx4SF "VNx2BI")])
;; Widened modes of vector modes, lowercase
(define_mode_attr Vwide [(V2SF "v2df") (V4HF "v4sf")
- (V32QI "v16hi") (V16HI "v8si")
- (V8SI "v4di")
- (V16HF "v8sf") (V8SF "v4df")
- (V32BI "v16bi") (V16BI "v8bi")
- (V8BI "v4bi")])
+ (VNx16QI "vnx8hi") (VNx8HI "vnx4si")
+ (VNx4SI "vnx2di")
+ (VNx8HF "vnx4sf") (VNx4SF "vnx2df")
+ (VNx16BI "vnx8bi") (VNx8BI "vnx4bi")
+ (VNx4BI "vnx2bi")])
;; Widened mode register suffixes for VD_BHSI/VQW/VQ_HSF.
(define_mode_attr Vwtype [(V8QI "8h") (V4HI "4s")
@@ -782,9 +797,9 @@
(V8HF "4s") (V4SF "2d")])
;; SVE vector after widening
-(define_mode_attr Vewtype [(V32QI "h")
- (V16HI "s") (V16HF "s")
- (V8SI "d") (V8SF "d")])
+(define_mode_attr Vewtype [(VNx16QI "h")
+ (VNx8HI "s") (VNx8HF "s")
+ (VNx4SI "d") (VNx4SF "d")])
;; Widened mode register suffixes for VDW/VQW.
(define_mode_attr Vmwtype [(V8QI ".8h") (V4HI ".4s")
@@ -799,52 +814,62 @@
(V4SF "2s")])
;; Define corresponding core/FP element mode for each vector mode.
-(define_mode_attr vw [(V8QI "w") (V16QI "w") (V32QI "w")
- (V4HI "w") (V8HI "w") (V16HI "w")
- (V2SI "w") (V4SI "w") (V8SI "w")
- (DI "x") (V2DI "x") (V4DI "x")
- (V16HF "h")
- (V2SF "s") (V4SF "s") (V8SF "s")
- (V2DF "d") (V4DF "d")])
+(define_mode_attr vw [(V8QI "w") (V16QI "w") (VNx16QI "w")
+ (V4HI "w") (V8HI "w") (VNx8HI "w")
+ (V2SI "w") (V4SI "w") (VNx4SI "w")
+ (DI "x") (V2DI "x") (VNx2DI "x")
+ (VNx8HF "h")
+ (V2SF "s") (V4SF "s") (VNx4SF "s")
+ (V2DF "d") (VNx2DF "d")])
;; Corresponding core element mode for each vector mode. This is a
;; variation on <vw> mapping FP modes to GP regs.
-(define_mode_attr vwcore [(V8QI "w") (V16QI "w") (V32QI "w")
- (V4HI "w") (V8HI "w") (V16HI "w")
- (V2SI "w") (V4SI "w") (V8SI "w")
- (DI "x") (V2DI "x") (V4DI "x")
- (V4HF "w") (V8HF "w") (V16HF "w")
- (V2SF "w") (V4SF "w") (V8SF "w")
- (V2DF "x") (V4DF "x")])
+(define_mode_attr vwcore [(V8QI "w") (V16QI "w") (VNx16QI "w")
+ (V4HI "w") (V8HI "w") (VNx8HI "w")
+ (V2SI "w") (V4SI "w") (VNx4SI "w")
+ (DI "x") (V2DI "x") (VNx2DI "x")
+ (V4HF "w") (V8HF "w") (VNx8HF "w")
+ (V2SF "w") (V4SF "w") (VNx4SF "w")
+ (V2DF "x") (VNx2DF "x")])
;; Double vector types for ALLX.
(define_mode_attr Vallxd [(QI "8b") (HI "4h") (SI "2s")])
;; Mode with floating-point values replaced by like-sized integers.
-(define_mode_attr V_INT_EQUIV [(V8QI "V8QI") (V16QI "V16QI") (V32QI "V32QI")
- (V4HI "V4HI") (V8HI "V8HI") (V16HI "V16HI")
- (V2SI "V2SI") (V4SI "V4SI") (V8SI "V8SI")
- (DI "DI") (V2DI "V2DI") (V4DI "V4DI")
- (V4HF "V4HI") (V8HF "V8HI") (V16HF "V16HI")
- (V2SF "V2SI") (V4SF "V4SI") (V8SF "V8SI")
- (DF "DI") (V2DF "V2DI") (V4DF "V4DI")
- (SF "SI") (HF "HI")])
+(define_mode_attr V_INT_EQUIV [(V8QI "V8QI") (V16QI "V16QI")
+ (V4HI "V4HI") (V8HI "V8HI")
+ (V2SI "V2SI") (V4SI "V4SI")
+ (DI "DI") (V2DI "V2DI")
+ (V4HF "V4HI") (V8HF "V8HI")
+ (V2SF "V2SI") (V4SF "V4SI")
+ (DF "DI") (V2DF "V2DI")
+ (SF "SI") (HF "HI")
+ (VNx16QI "VNx16QI")
+ (VNx8HI "VNx8HI") (VNx8HF "VNx8HI")
+ (VNx4SI "VNx4SI") (VNx4SF "VNx4SI")
+ (VNx2DI "VNx2DI") (VNx2DF "VNx2DI")
+])
;; Lower case mode with floating-point values replaced by like-sized integers.
-(define_mode_attr v_int_equiv [(V8QI "v8qi") (V16QI "v16qi") (V32QI "v32qi")
- (V4HI "v4hi") (V8HI "v8hi") (V16HI "v16hi")
- (V2SI "v2si") (V4SI "v4si") (V8SI "v8si")
- (DI "di") (V2DI "v2di") (V4DI "v4di")
- (V4HF "v4hi") (V8HF "v8hi") (V16HF "v16hi")
- (V2SF "v2si") (V4SF "v4si") (V8SF "v8si")
- (DF "di") (V2DF "v2di") (V4DF "v4di")
- (SF "si")])
+(define_mode_attr v_int_equiv [(V8QI "v8qi") (V16QI "v16qi")
+ (V4HI "v4hi") (V8HI "v8hi")
+ (V2SI "v2si") (V4SI "v4si")
+ (DI "di") (V2DI "v2di")
+ (V4HF "v4hi") (V8HF "v8hi")
+ (V2SF "v2si") (V4SF "v4si")
+ (DF "di") (V2DF "v2di")
+ (SF "si")
+ (VNx16QI "vnx16qi")
+ (VNx8HI "vnx8hi") (VNx8HF "vnx8hi")
+ (VNx4SI "vnx4si") (VNx4SF "vnx4si")
+ (VNx2DI "vnx2di") (VNx2DF "vnx2di")
+])
;; Floating-point equivalent of selected modes.
-(define_mode_attr V_FP_EQUIV [(V8SI "V8SF") (V8SF "V8SF")
- (V4DI "V4DF") (V4DF "V4DF")])
-(define_mode_attr v_fp_equiv [(V8SI "v8sf") (V8SF "v8sf")
- (V4DI "v4df") (V4DF "v4df")])
+(define_mode_attr V_FP_EQUIV [(VNx4SI "VNx4SF") (VNx4SF "VNx4SF")
+ (VNx2DI "VNx2DF") (VNx2DF "VNx2DF")])
+(define_mode_attr v_fp_equiv [(VNx4SI "vnx4sf") (VNx4SF "vnx4sf")
+ (VNx2DI "vnx2df") (VNx2DF "vnx2df")])
;; Mode for vector conditional operations where the comparison has
;; different type from the lhs.
@@ -875,43 +900,6 @@
;; ld..._lane and st..._lane operations.
(define_mode_attr nregs [(OI "2") (CI "3") (XI "4")])
-;; Map the mode of a single vector to a list of two vectors.
-(define_mode_attr VRL2 [(V32QI "V64QI") (V16HI "V32HI") (V16HF "V32HF")
- (V8SI "V16SI") (V8SF "V16SF")
- (V4DI "V8DI") (V4DF "V8DF")])
-
-(define_mode_attr vrl2 [(V32QI "v64qi") (V16HI "v32hi") (V16HF "v32hf")
- (V8SI "v16si") (V8SF "v16sf")
- (V4DI "v8di") (V4DF "v8df")])
-
-;; Map the mode of a single vector to a list of three vectors.
-(define_mode_attr VRL3 [(V32QI "V96QI") (V16HI "V48HI") (V16HF "V48HF")
- (V8SI "V24SI") (V8SF "V24SF")
- (V4DI "V12DI") (V4DF "V12DF")])
-
-(define_mode_attr vrl3 [(V32QI "v96qi") (V16HI "v48hi") (V16HF "v48hf")
- (V8SI "v24si") (V8SF "v24sf")
- (V4DI "v12di") (V4DF "v12df")])
-
-;; Map the mode of a single vector to a list of four vectors.
-(define_mode_attr VRL4 [(V32QI "V128QI") (V16HI "V64HI") (V16HF "V64HF")
- (V8SI "V32SI") (V8SF "V32SF")
- (V4DI "V16DI") (V4DF "V16DF")])
-
-(define_mode_attr vrl4 [(V32QI "v128qi") (V16HI "v64hi") (V16HF "v64hf")
- (V8SI "v32si") (V8SF "v32sf")
- (V4DI "v16di") (V4DF "v16df")])
-
-;; SVE gather
-(define_mode_attr gather_unscaled_mods [(V8SI ", sxtw") (V8SF ", sxtw")
- (V4DI "") (V4DF "")])
-(define_mode_attr gather_scaled_mods [(V8SI ", sxtw 2") (V8SF ", sxtw 2")
- (V4DI ", lsl 3") (V4DF ", lsl 3")])
-(define_mode_attr gather_unscaled_modu [(V8SI ", uxtw") (V8SF ", uxtw")
- (V4DI "") (V4DF "")])
-(define_mode_attr gather_scaled_modu [(V8SI ", uxtw 2") (V8SF ", uxtw 2")
- (V4DI ", lsl 3") (V4DF ", lsl 3")])
-
;; Mode for atomic operation suffixes
(define_mode_attr atomic_sfx
[(QI "b") (HI "h") (SI "") (DI "")])
@@ -1008,86 +996,92 @@
(define_mode_attr got_modifier [(SI "gotpage_lo14") (DI "gotpage_lo15")])
;; The number of subvectors in an SVE_STRUCT.
-(define_mode_attr vector_count [(V64QI "2") (V32HI "2")
- (V16SI "2") (V8DI "2")
- (V32HF "2") (V16SF "2") (V8DF "2")
- (V96QI "3") (V48HI "3")
- (V24SI "3") (V12DI "3")
- (V48HF "3") (V24SF "3") (V12DF "3")
- (V128QI "4") (V64HI "4")
- (V32SI "4") (V16DI "4")
- (V64HF "4") (V32SF "4") (V16DF "4")])
+(define_mode_attr vector_count [(VNx32QI "2") (VNx16HI "2")
+ (VNx8SI "2") (VNx4DI "2")
+ (VNx16HF "2") (VNx8SF "2") (VNx4DF "2")
+ (VNx48QI "3") (VNx24HI "3")
+ (VNx12SI "3") (VNx6DI "3")
+ (VNx24HF "3") (VNx12SF "3") (VNx6DF "3")
+ (VNx64QI "4") (VNx32HI "4")
+ (VNx16SI "4") (VNx8DI "4")
+ (VNx32HF "4") (VNx16SF "4") (VNx8DF "4")])
;; The number of instruction bytes needed for an SVE_STRUCT move. This is
;; equal to vector_count * 4.
-(define_mode_attr insn_length [(V64QI "8") (V32HI "8")
- (V16SI "8") (V8DI "8")
- (V32HF "8") (V16SF "8") (V8DF "8")
- (V96QI "12") (V48HI "12")
- (V24SI "12") (V12DI "12")
- (V48HF "12") (V24SF "12") (V12DF "12")
- (V128QI "16") (V64HI "16")
- (V32SI "16") (V16DI "16")
- (V64HF "16") (V32SF "16") (V16DF "16")])
+(define_mode_attr insn_length [(VNx32QI "8") (VNx16HI "8")
+ (VNx8SI "8") (VNx4DI "8")
+ (VNx16HF "8") (VNx8SF "8") (VNx4DF "8")
+ (VNx48QI "12") (VNx24HI "12")
+ (VNx12SI "12") (VNx6DI "12")
+ (VNx24HF "12") (VNx12SF "12") (VNx6DF "12")
+ (VNx64QI "16") (VNx32HI "16")
+ (VNx16SI "16") (VNx8DI "16")
+ (VNx32HF "16") (VNx16SF "16") (VNx8DF "16")])
;; The type of a subvector in an SVE_STRUCT.
-(define_mode_attr VSINGLE [(V64QI "V32QI") (V32HI "V16HI")
- (V16SI "V8SI") (V8DI "V4DI")
- (V32HF "V16HF") (V16SF "V8SF") (V8DF "V4DF")
- (V96QI "V32QI") (V48HI "V16HI")
- (V24SI "V8SI") (V12DI "V4DI")
- (V48HF "V16HF") (V24SF "V8SF") (V12DF "V4DF")
- (V128QI "V32QI") (V64HI "V16HI")
- (V32SI "V8SI") (V16DI "V4DI")
- (V64HF "V16HF") (V32SF "V8SF") (V16DF "V4DF")])
+(define_mode_attr VSINGLE [(VNx32QI "VNx16QI")
+ (VNx16HI "VNx8HI") (VNx16HF "VNx8HF")
+ (VNx8SI "VNx4SI") (VNx8SF "VNx4SF")
+ (VNx4DI "VNx2DI") (VNx4DF "VNx2DF")
+ (VNx48QI "VNx16QI")
+ (VNx24HI "VNx8HI") (VNx24HF "VNx8HF")
+ (VNx12SI "VNx4SI") (VNx12SF "VNx4SF")
+ (VNx6DI "VNx2DI") (VNx6DF "VNx2DF")
+ (VNx64QI "VNx16QI")
+ (VNx32HI "VNx8HI") (VNx32HF "VNx8HF")
+ (VNx16SI "VNx4SI") (VNx16SF "VNx4SF")
+ (VNx8DI "VNx2DI") (VNx8DF "VNx2DF")])
;; ...and again in lower case.
-(define_mode_attr vsingle [(V64QI "v32qi") (V32HI "v16hi")
- (V16SI "v8si") (V8DI "v4di")
- (V32HF "v16hf") (V16SF "v8sf") (V8DF "v4df")
- (V96QI "v32qi") (V48HI "v16hi")
- (V24SI "v8si") (V12DI "v4di")
- (V48HF "v16hf") (V24SF "v8sf") (V12DF "v4df")
- (V128QI "v32qi") (V64HI "v16hi")
- (V32SI "v8si") (V16DI "v4di")
- (V64HF "v16hf") (V32SF "v8sf") (V16DF "v4df")])
+(define_mode_attr vsingle [(VNx32QI "vnx16qi")
+ (VNx16HI "vnx8hi") (VNx16HF "vnx8hf")
+ (VNx8SI "vnx4si") (VNx8SF "vnx4sf")
+ (VNx4DI "vnx2di") (VNx4DF "vnx2df")
+ (VNx48QI "vnx16qi")
+ (VNx24HI "vnx8hi") (VNx24HF "vnx8hf")
+ (VNx12SI "vnx4si") (VNx12SF "vnx4sf")
+ (VNx6DI "vnx2di") (VNx6DF "vnx2df")
+ (VNx64QI "vnx16qi")
+ (VNx32HI "vnx8hi") (VNx32HF "vnx8hf")
+ (VNx16SI "vnx4si") (VNx16SF "vnx4sf")
+ (VNx8DI "vnx2di") (VNx8DF "vnx2df")])
;; The predicate mode associated with an SVE data mode. For structure modes
;; this is equivalent to the <VPRED> of the subvector mode.
-(define_mode_attr VPRED [(V32QI "V32BI")
- (V16HI "V16BI") (V16HF "V16BI")
- (V8SI "V8BI") (V8SF "V8BI")
- (V4DI "V4BI") (V4DF "V4BI")
- (V64QI "V32BI")
- (V32HI "V16BI") (V32HF "V16BI")
- (V16SI "V8BI") (V16SF "V8BI")
- (V8DI "V4BI") (V8DF "V4BI")
- (V96QI "V32BI")
- (V48HI "V16BI") (V48HF "V16BI")
- (V24SI "V8BI") (V24SF "V8BI")
- (V12DI "V4BI") (V12DF "V4BI")
- (V128QI "V32BI")
- (V64HI "V16BI") (V64HF "V16BI")
- (V32SI "V8BI") (V32SF "V8BI")
- (V16DI "V4BI") (V16DF "V4BI")])
+(define_mode_attr VPRED [(VNx16QI "VNx16BI")
+ (VNx8HI "VNx8BI") (VNx8HF "VNx8BI")
+ (VNx4SI "VNx4BI") (VNx4SF "VNx4BI")
+ (VNx2DI "VNx2BI") (VNx2DF "VNx2BI")
+ (VNx32QI "VNx16BI")
+ (VNx16HI "VNx8BI") (VNx16HF "VNx8BI")
+ (VNx8SI "VNx4BI") (VNx8SF "VNx4BI")
+ (VNx4DI "VNx2BI") (VNx4DF "VNx2BI")
+ (VNx48QI "VNx16BI")
+ (VNx24HI "VNx8BI") (VNx24HF "VNx8BI")
+ (VNx12SI "VNx4BI") (VNx12SF "VNx4BI")
+ (VNx6DI "VNx2BI") (VNx6DF "VNx2BI")
+ (VNx64QI "VNx16BI")
+ (VNx32HI "VNx8BI") (VNx32HF "VNx8BI")
+ (VNx16SI "VNx4BI") (VNx16SF "VNx4BI")
+ (VNx8DI "VNx2BI") (VNx8DF "VNx2BI")])
;; ...and again in lower case.
-(define_mode_attr vpred [(V32QI "v32bi")
- (V16HI "v16bi") (V16HF "v16bi")
- (V8SI "v8bi") (V8SF "v8bi")
- (V4DI "v4bi") (V4DF "v4bi")
- (V64QI "v32bi")
- (V32HI "v16bi") (V32HF "v16bi")
- (V16SI "v8bi") (V16SF "v8bi")
- (V8DI "v4bi") (V8DF "v4bi")
- (V96QI "v32bi")
- (V48HI "v16bi") (V48HF "v16bi")
- (V24SI "v8bi") (V24SF "v8bi")
- (V12DI "v4bi") (V12DF "v4bi")
- (V128QI "v32bi")
- (V64HI "v16bi") (V64HF "v8bi")
- (V32SI "v8bi") (V32SF "v8bi")
- (V16DI "v4bi") (V16DF "v4bi")])
+(define_mode_attr vpred [(VNx16QI "vnx16bi")
+ (VNx8HI "vnx8bi") (VNx8HF "vnx8bi")
+ (VNx4SI "vnx4bi") (VNx4SF "vnx4bi")
+ (VNx2DI "vnx2bi") (VNx2DF "vnx2bi")
+ (VNx32QI "vnx16bi")
+ (VNx16HI "vnx8bi") (VNx16HF "vnx8bi")
+ (VNx8SI "vnx4bi") (VNx8SF "vnx4bi")
+ (VNx4DI "vnx2bi") (VNx4DF "vnx2bi")
+ (VNx48QI "vnx16bi")
+ (VNx24HI "vnx8bi") (VNx24HF "vnx8bi")
+ (VNx12SI "vnx4bi") (VNx12SF "vnx4bi")
+ (VNx6DI "vnx2bi") (VNx6DF "vnx2bi")
+ (VNx64QI "vnx16bi")
+ (VNx32HI "vnx8bi") (VNx32HF "vnx4bi")
+ (VNx16SI "vnx4bi") (VNx16SF "vnx4bi")
+ (VNx8DI "vnx2bi") (VNx8DF "vnx2bi")])
;; -------------------------------------------------------------------
;; Code Iterators
@@ -1489,6 +1483,9 @@
(define_int_attr optab [(UNSPEC_ANDF "and")
(UNSPEC_IORF "ior")
(UNSPEC_XORF "xor")
+ (UNSPEC_ANDV "and")
+ (UNSPEC_IORV "ior")
+ (UNSPEC_XORV "xor")
(UNSPEC_COND_ADD "add")
(UNSPEC_COND_SUB "sub")
(UNSPEC_COND_MUL "mul")
@@ -1530,10 +1527,6 @@
(UNSPEC_FMAXNM "fmaxnm")
(UNSPEC_FMINNM "fminnm")])
-(define_int_attr bit_reduc [(UNSPEC_ANDV "and")
- (UNSPEC_IORV "ior")
- (UNSPEC_XORV "xor")])
-
(define_int_attr bit_reduc_op [(UNSPEC_ANDV "andv")
(UNSPEC_IORV "orv")
(UNSPEC_XORV "eorv")])
@@ -1649,8 +1642,8 @@
(UNSPEC_PACI1716 "8")
(UNSPEC_AUTI1716 "12")])
-(define_int_attr perm_optab [(UNSPEC_ZIP1 "vec_interleave_hi")
- (UNSPEC_ZIP2 "vec_interleave_lo")
+(define_int_attr perm_optab [(UNSPEC_ZIP1 "vec_interleave_lo")
+ (UNSPEC_ZIP2 "vec_interleave_hi")
(UNSPEC_UZP1 "vec_extract_even")
(UNSPEC_UZP2 "vec_extract_odd")])
diff --git a/gcc/config/aarch64/predicates.md b/gcc/config/aarch64/predicates.md
index 972ab2182d5..f84e48aa078 100644
--- a/gcc/config/aarch64/predicates.md
+++ b/gcc/config/aarch64/predicates.md
@@ -206,6 +206,13 @@
(match_test "aarch64_legitimate_address_p (mode, XEXP (op, 0), false,
ADDR_QUERY_LDP_STP)")))
+;; Used for storing two 64-bit values in an AdvSIMD register using an STP
+;; as a 128-bit vec_concat.
+(define_predicate "aarch64_mem_pair_lanes_operand"
+ (and (match_code "mem")
+ (match_test "aarch64_legitimate_address_p (DFmode, XEXP (op, 0), 1,
+ ADDR_QUERY_LDP_STP)")))
+
(define_predicate "aarch64_prefetch_operand"
(match_test "aarch64_address_valid_for_prefetch_p (op, false)"))
@@ -363,6 +370,10 @@
(and (match_code "const,const_vector")
(match_test "op == CONST0_RTX (GET_MODE (op))")))
+(define_predicate "aarch64_simd_or_scalar_imm_zero"
+ (and (match_code "const_int,const_double,const,const_vector")
+ (match_test "op == CONST0_RTX (GET_MODE (op))")))
+
(define_predicate "aarch64_simd_imm_minus_one"
(and (match_code "const,const_vector")
(match_test "op == CONSTM1_RTX (GET_MODE (op))")))
@@ -592,7 +603,7 @@
(ior (match_operand 0 "register_operand")
(match_operand 0 "aarch64_constant_vector_operand")))
-(define_predicate "aarch64_gather_scale_operand_s"
+(define_predicate "aarch64_gather_scale_operand_w"
(and (match_code "const_int")
(match_test "INTVAL (op) == 1 || INTVAL (op) == 4")))
diff --git a/gcc/config/arc/arc.h b/gcc/config/arc/arc.h
index 0481bc64f5d..276d5624105 100644
--- a/gcc/config/arc/arc.h
+++ b/gcc/config/arc/arc.h
@@ -1407,7 +1407,7 @@ do { \
(GET_MODE (PATTERN (VEC_INSN))))))
#undef ASM_OUTPUT_BEFORE_CASE_LABEL
#define ASM_OUTPUT_BEFORE_CASE_LABEL(FILE, PREFIX, NUM, TABLE) \
- ASM_OUTPUT_ALIGN ((FILE), ADDR_VEC_ALIGN (TABLE));
+ ASM_OUTPUT_ALIGN ((FILE), ADDR_VEC_ALIGN (TABLE))
#define INSN_LENGTH_ALIGNMENT(INSN) \
((JUMP_TABLE_DATA_P (INSN) \
diff --git a/gcc/config/arm/arm-cpus.in b/gcc/config/arm/arm-cpus.in
index 0820ad74c2e..281ec162db8 100644
--- a/gcc/config/arm/arm-cpus.in
+++ b/gcc/config/arm/arm-cpus.in
@@ -114,9 +114,12 @@ define feature iwmmxt2
# Architecture rel 8.1.
define feature armv8_1
-# Architecutre rel 8.2.
+# Architecture rel 8.2.
define feature armv8_2
+# Architecture rel 8.3.
+define feature armv8_3
+
# M-Profile security extensions.
define feature cmse
@@ -238,6 +241,7 @@ define fgroup ARMv7em ARMv7m armv7em
define fgroup ARMv8a ARMv7ve armv8
define fgroup ARMv8_1a ARMv8a crc32 armv8_1
define fgroup ARMv8_2a ARMv8_1a armv8_2
+define fgroup ARMv8_3a ARMv8_2a armv8_3
define fgroup ARMv8m_base ARMv6m armv8 cmse tdiv
define fgroup ARMv8m_main ARMv7m armv8 cmse
define fgroup ARMv8r ARMv8a
@@ -579,6 +583,20 @@ begin arch armv8.2-a
option dotprod add FP_ARMv8 DOTPROD
end arch armv8.2-a
+begin arch armv8.3-a
+ tune for cortex-a53
+ tune flags CO_PROC
+ base 8A
+ profile A
+ isa ARMv8_3a
+ option simd add FP_ARMv8 NEON
+ option fp16 add fp16 FP_ARMv8 NEON
+ option crypto add FP_ARMv8 CRYPTO
+ option nocrypto remove ALL_CRYPTO
+ option nofp remove ALL_FP
+ option dotprod add FP_ARMv8 DOTPROD
+end arch armv8.3-a
+
begin arch armv8-m.base
tune for cortex-m23
base 8M_BASE
diff --git a/gcc/config/arm/arm-fixed.md b/gcc/config/arm/arm-fixed.md
index ca721437792..6730a2bbad6 100644
--- a/gcc/config/arm/arm-fixed.md
+++ b/gcc/config/arm/arm-fixed.md
@@ -35,7 +35,6 @@
"TARGET_INT_SIMD"
"sadd<qaddsub_suf>%?\\t%0, %1, %2"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "alu_dsp_reg")])
(define_insn "usadd<mode>3"
@@ -45,7 +44,6 @@
"TARGET_INT_SIMD"
"uqadd<qaddsub_suf>%?\\t%0, %1, %2"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "alu_dsp_reg")])
(define_insn "ssadd<mode>3"
@@ -55,7 +53,6 @@
"TARGET_INT_SIMD"
"qadd<qaddsub_suf>%?\\t%0, %1, %2"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "alu_dsp_reg")])
(define_insn "sub<mode>3"
@@ -75,7 +72,6 @@
"TARGET_INT_SIMD"
"ssub<qaddsub_suf>%?\\t%0, %1, %2"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "alu_dsp_reg")])
(define_insn "ussub<mode>3"
@@ -86,7 +82,6 @@
"TARGET_INT_SIMD"
"uqsub<qaddsub_suf>%?\\t%0, %1, %2"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "alu_dsp_reg")])
(define_insn "sssub<mode>3"
@@ -96,7 +91,6 @@
"TARGET_INT_SIMD"
"qsub<qaddsub_suf>%?\\t%0, %1, %2"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "alu_dsp_reg")])
;; Fractional multiplies.
@@ -414,7 +408,6 @@
"TARGET_32BIT && arm_arch6"
"ssat%?\\t%0, #16, %2%S1"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "shift" "1")
(set_attr "type" "alu_shift_imm")])
@@ -424,6 +417,5 @@
"TARGET_INT_SIMD"
"usat%?\\t%0, #16, %1"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "alu_imm")]
)
diff --git a/gcc/config/arm/arm-protos.h b/gcc/config/arm/arm-protos.h
index 4538078fe60..10c96390ea6 100644
--- a/gcc/config/arm/arm-protos.h
+++ b/gcc/config/arm/arm-protos.h
@@ -224,6 +224,8 @@ extern tree arm_valid_target_attribute_tree (tree, struct gcc_options *,
extern void arm_configure_build_target (struct arm_build_target *,
struct cl_target_option *,
struct gcc_options *, bool);
+extern void arm_option_reconfigure_globals (void);
+extern void arm_options_perform_arch_sanity_checks (void);
extern void arm_pr_long_calls (struct cpp_reader *);
extern void arm_pr_no_long_calls (struct cpp_reader *);
extern void arm_pr_long_calls_off (struct cpp_reader *);
diff --git a/gcc/config/arm/arm-tables.opt b/gcc/config/arm/arm-tables.opt
index 4e508b1555a..f7937256cd7 100644
--- a/gcc/config/arm/arm-tables.opt
+++ b/gcc/config/arm/arm-tables.opt
@@ -452,19 +452,22 @@ EnumValue
Enum(arm_arch) String(armv8.2-a) Value(28)
EnumValue
-Enum(arm_arch) String(armv8-m.base) Value(29)
+Enum(arm_arch) String(armv8.3-a) Value(29)
EnumValue
-Enum(arm_arch) String(armv8-m.main) Value(30)
+Enum(arm_arch) String(armv8-m.base) Value(30)
EnumValue
-Enum(arm_arch) String(armv8-r) Value(31)
+Enum(arm_arch) String(armv8-m.main) Value(31)
EnumValue
-Enum(arm_arch) String(iwmmxt) Value(32)
+Enum(arm_arch) String(armv8-r) Value(32)
EnumValue
-Enum(arm_arch) String(iwmmxt2) Value(33)
+Enum(arm_arch) String(iwmmxt) Value(33)
+
+EnumValue
+Enum(arm_arch) String(iwmmxt2) Value(34)
Enum
Name(arm_fpu) Type(enum fpu_type)
diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c
index 47ba0dd09e3..67ec3bd9056 100644
--- a/gcc/config/arm/arm.c
+++ b/gcc/config/arm/arm.c
@@ -3337,8 +3337,9 @@ arm_option_override (void)
SUBTARGET_OVERRIDE_OPTIONS;
#endif
- sprintf (arm_arch_name, "__ARM_ARCH_%s__", arm_active_target.arch_pp_name);
- arm_base_arch = arm_active_target.base_arch;
+ /* Initialize boolean versions of the architectural flags, for use
+ in the arm.md file and for enabling feature flags. */
+ arm_option_reconfigure_globals ();
arm_tune = arm_active_target.tune_core;
tune_flags = arm_active_target.tune_flags;
@@ -3348,16 +3349,6 @@ arm_option_override (void)
if (TARGET_APCS_FRAME)
flag_shrink_wrap = false;
- /* BPABI targets use linker tricks to allow interworking on cores
- without thumb support. */
- if (TARGET_INTERWORK
- && !TARGET_BPABI
- && !bitmap_bit_p (arm_active_target.isa, isa_bit_thumb))
- {
- warning (0, "target CPU does not support interworking" );
- target_flags &= ~MASK_INTERWORK;
- }
-
if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
{
warning (0, "-mapcs-stack-check incompatible with -mno-apcs-frame");
@@ -3373,43 +3364,6 @@ arm_option_override (void)
if (TARGET_APCS_REENT)
warning (0, "APCS reentrant code not supported. Ignored");
- /* Initialize boolean versions of the architectural flags, for use
- in the arm.md file. */
- arm_arch3m = bitmap_bit_p (arm_active_target.isa, isa_bit_armv3m);
- arm_arch4 = bitmap_bit_p (arm_active_target.isa, isa_bit_armv4);
- arm_arch4t = arm_arch4 && bitmap_bit_p (arm_active_target.isa, isa_bit_thumb);
- arm_arch5 = bitmap_bit_p (arm_active_target.isa, isa_bit_armv5);
- arm_arch5e = bitmap_bit_p (arm_active_target.isa, isa_bit_armv5e);
- arm_arch5te = arm_arch5e
- && bitmap_bit_p (arm_active_target.isa, isa_bit_thumb);
- arm_arch6 = bitmap_bit_p (arm_active_target.isa, isa_bit_armv6);
- arm_arch6k = bitmap_bit_p (arm_active_target.isa, isa_bit_armv6k);
- arm_arch_notm = bitmap_bit_p (arm_active_target.isa, isa_bit_notm);
- arm_arch6m = arm_arch6 && !arm_arch_notm;
- arm_arch7 = bitmap_bit_p (arm_active_target.isa, isa_bit_armv7);
- arm_arch7em = bitmap_bit_p (arm_active_target.isa, isa_bit_armv7em);
- arm_arch8 = bitmap_bit_p (arm_active_target.isa, isa_bit_armv8);
- arm_arch8_1 = bitmap_bit_p (arm_active_target.isa, isa_bit_armv8_1);
- arm_arch8_2 = bitmap_bit_p (arm_active_target.isa, isa_bit_armv8_2);
- arm_arch_thumb1 = bitmap_bit_p (arm_active_target.isa, isa_bit_thumb);
- arm_arch_thumb2 = bitmap_bit_p (arm_active_target.isa, isa_bit_thumb2);
- arm_arch_xscale = bitmap_bit_p (arm_active_target.isa, isa_bit_xscale);
- arm_arch_iwmmxt = bitmap_bit_p (arm_active_target.isa, isa_bit_iwmmxt);
- arm_arch_iwmmxt2 = bitmap_bit_p (arm_active_target.isa, isa_bit_iwmmxt2);
- arm_arch_thumb_hwdiv = bitmap_bit_p (arm_active_target.isa, isa_bit_tdiv);
- arm_arch_arm_hwdiv = bitmap_bit_p (arm_active_target.isa, isa_bit_adiv);
- arm_arch_crc = bitmap_bit_p (arm_active_target.isa, isa_bit_crc32);
- arm_arch_cmse = bitmap_bit_p (arm_active_target.isa, isa_bit_cmse);
- arm_fp16_inst = bitmap_bit_p (arm_active_target.isa, isa_bit_fp16);
- arm_arch_lpae = bitmap_bit_p (arm_active_target.isa, isa_bit_lpae);
- if (arm_fp16_inst)
- {
- if (arm_fp16_format == ARM_FP16_FORMAT_ALTERNATIVE)
- error ("selected fp16 options are incompatible");
- arm_fp16_format = ARM_FP16_FORMAT_IEEE;
- }
-
-
/* Set up some tuning parameters. */
arm_ld_sched = (tune_flags & TF_LDSCHED) != 0;
arm_tune_strongarm = (tune_flags & TF_STRONG) != 0;
@@ -3418,86 +3372,11 @@ arm_option_override (void)
arm_tune_cortex_a9 = (arm_tune == TARGET_CPU_cortexa9) != 0;
arm_m_profile_small_mul = (tune_flags & TF_SMALLMUL) != 0;
- /* And finally, set up some quirks. */
- arm_arch_no_volatile_ce
- = bitmap_bit_p (arm_active_target.isa, isa_bit_quirk_no_volatile_ce);
- arm_arch6kz = arm_arch6k && bitmap_bit_p (arm_active_target.isa,
- isa_bit_quirk_armv6kz);
-
- /* V5 code we generate is completely interworking capable, so we turn off
- TARGET_INTERWORK here to avoid many tests later on. */
-
- /* XXX However, we must pass the right pre-processor defines to CPP
- or GLD can get confused. This is a hack. */
- if (TARGET_INTERWORK)
- arm_cpp_interwork = 1;
-
- if (arm_arch5)
- target_flags &= ~MASK_INTERWORK;
-
- if (TARGET_IWMMXT && !ARM_DOUBLEWORD_ALIGN)
- error ("iwmmxt requires an AAPCS compatible ABI for proper operation");
-
- if (TARGET_IWMMXT_ABI && !TARGET_IWMMXT)
- error ("iwmmxt abi requires an iwmmxt capable cpu");
-
- /* If soft-float is specified then don't use FPU. */
- if (TARGET_SOFT_FLOAT)
- arm_fpu_attr = FPU_NONE;
- else
- arm_fpu_attr = FPU_VFP;
-
- if (TARGET_AAPCS_BASED)
- {
- if (TARGET_CALLER_INTERWORKING)
- error ("AAPCS does not support -mcaller-super-interworking");
- else
- if (TARGET_CALLEE_INTERWORKING)
- error ("AAPCS does not support -mcallee-super-interworking");
- }
-
- /* __fp16 support currently assumes the core has ldrh. */
- if (!arm_arch4 && arm_fp16_format != ARM_FP16_FORMAT_NONE)
- sorry ("__fp16 and no ldrh");
-
- if (TARGET_AAPCS_BASED)
- {
- if (arm_abi == ARM_ABI_IWMMXT)
- arm_pcs_default = ARM_PCS_AAPCS_IWMMXT;
- else if (TARGET_HARD_FLOAT_ABI)
- {
- arm_pcs_default = ARM_PCS_AAPCS_VFP;
- if (!bitmap_bit_p (arm_active_target.isa, isa_bit_vfpv2))
- error ("-mfloat-abi=hard: selected processor lacks an FPU");
- }
- else
- arm_pcs_default = ARM_PCS_AAPCS;
- }
- else
- {
- if (arm_float_abi == ARM_FLOAT_ABI_HARD)
- sorry ("-mfloat-abi=hard and VFP");
-
- if (arm_abi == ARM_ABI_APCS)
- arm_pcs_default = ARM_PCS_APCS;
- else
- arm_pcs_default = ARM_PCS_ATPCS;
- }
-
/* For arm2/3 there is no need to do any scheduling if we are doing
software floating-point. */
if (TARGET_SOFT_FLOAT && (tune_flags & TF_NO_MODE32))
flag_schedule_insns = flag_schedule_insns_after_reload = 0;
- /* Use the cp15 method if it is available. */
- if (target_thread_pointer == TP_AUTO)
- {
- if (arm_arch6k && !TARGET_THUMB1)
- target_thread_pointer = TP_CP15;
- else
- target_thread_pointer = TP_SOFT;
- }
-
/* Override the default structure alignment for AAPCS ABI. */
if (!global_options_set.x_arm_structure_size_boundary)
{
@@ -3671,14 +3550,6 @@ arm_option_override (void)
if (target_slow_flash_data || target_pure_code)
arm_disable_literal_pool = true;
- if (use_cmse && !arm_arch_cmse)
- error ("target CPU does not support ARMv8-M Security Extensions");
-
- /* We don't clear D16-D31 VFP registers for cmse_nonsecure_call functions
- and ARMv8-M Baseline and Mainline do not allow such configuration. */
- if (use_cmse && LAST_VFP_REGNUM > LAST_LO_VFP_REGNUM)
- error ("ARMv8-M Security Extensions incompatible with selected FPU");
-
/* Disable scheduling fusion by default if it's not armv7 processor
or doesn't prefer ldrd/strd. */
if (flag_schedule_fusion == 2
@@ -3688,6 +3559,7 @@ arm_option_override (void)
/* Need to remember initial options before they are overriden. */
init_optimize = build_optimization_node (&global_options);
+ arm_options_perform_arch_sanity_checks ();
arm_option_override_internal (&global_options, &global_options_set);
arm_option_check_internal (&global_options);
arm_option_params_internal ();
@@ -3703,6 +3575,151 @@ arm_option_override (void)
thumb_flipper = TARGET_THUMB;
}
+
+/* Reconfigure global status flags from the active_target.isa. */
+void
+arm_option_reconfigure_globals (void)
+{
+ sprintf (arm_arch_name, "__ARM_ARCH_%s__", arm_active_target.arch_pp_name);
+ arm_base_arch = arm_active_target.base_arch;
+
+ /* Initialize boolean versions of the architectural flags, for use
+ in the arm.md file. */
+ arm_arch3m = bitmap_bit_p (arm_active_target.isa, isa_bit_armv3m);
+ arm_arch4 = bitmap_bit_p (arm_active_target.isa, isa_bit_armv4);
+ arm_arch4t = arm_arch4 && bitmap_bit_p (arm_active_target.isa, isa_bit_thumb);
+ arm_arch5 = bitmap_bit_p (arm_active_target.isa, isa_bit_armv5);
+ arm_arch5e = bitmap_bit_p (arm_active_target.isa, isa_bit_armv5e);
+ arm_arch5te = arm_arch5e
+ && bitmap_bit_p (arm_active_target.isa, isa_bit_thumb);
+ arm_arch6 = bitmap_bit_p (arm_active_target.isa, isa_bit_armv6);
+ arm_arch6k = bitmap_bit_p (arm_active_target.isa, isa_bit_armv6k);
+ arm_arch_notm = bitmap_bit_p (arm_active_target.isa, isa_bit_notm);
+ arm_arch6m = arm_arch6 && !arm_arch_notm;
+ arm_arch7 = bitmap_bit_p (arm_active_target.isa, isa_bit_armv7);
+ arm_arch7em = bitmap_bit_p (arm_active_target.isa, isa_bit_armv7em);
+ arm_arch8 = bitmap_bit_p (arm_active_target.isa, isa_bit_armv8);
+ arm_arch8_1 = bitmap_bit_p (arm_active_target.isa, isa_bit_armv8_1);
+ arm_arch8_2 = bitmap_bit_p (arm_active_target.isa, isa_bit_armv8_2);
+ arm_arch_thumb1 = bitmap_bit_p (arm_active_target.isa, isa_bit_thumb);
+ arm_arch_thumb2 = bitmap_bit_p (arm_active_target.isa, isa_bit_thumb2);
+ arm_arch_xscale = bitmap_bit_p (arm_active_target.isa, isa_bit_xscale);
+ arm_arch_iwmmxt = bitmap_bit_p (arm_active_target.isa, isa_bit_iwmmxt);
+ arm_arch_iwmmxt2 = bitmap_bit_p (arm_active_target.isa, isa_bit_iwmmxt2);
+ arm_arch_thumb_hwdiv = bitmap_bit_p (arm_active_target.isa, isa_bit_tdiv);
+ arm_arch_arm_hwdiv = bitmap_bit_p (arm_active_target.isa, isa_bit_adiv);
+ arm_arch_crc = bitmap_bit_p (arm_active_target.isa, isa_bit_crc32);
+ arm_arch_cmse = bitmap_bit_p (arm_active_target.isa, isa_bit_cmse);
+ arm_fp16_inst = bitmap_bit_p (arm_active_target.isa, isa_bit_fp16);
+ arm_arch_lpae = bitmap_bit_p (arm_active_target.isa, isa_bit_lpae);
+ if (arm_fp16_inst)
+ {
+ if (arm_fp16_format == ARM_FP16_FORMAT_ALTERNATIVE)
+ error ("selected fp16 options are incompatible");
+ arm_fp16_format = ARM_FP16_FORMAT_IEEE;
+ }
+
+ /* And finally, set up some quirks. */
+ arm_arch_no_volatile_ce
+ = bitmap_bit_p (arm_active_target.isa, isa_bit_quirk_no_volatile_ce);
+ arm_arch6kz = arm_arch6k && bitmap_bit_p (arm_active_target.isa,
+ isa_bit_quirk_armv6kz);
+
+ /* Use the cp15 method if it is available. */
+ if (target_thread_pointer == TP_AUTO)
+ {
+ if (arm_arch6k && !TARGET_THUMB1)
+ target_thread_pointer = TP_CP15;
+ else
+ target_thread_pointer = TP_SOFT;
+ }
+}
+
+/* Perform some validation between the desired architecture and the rest of the
+ options. */
+void
+arm_options_perform_arch_sanity_checks (void)
+{
+ /* V5 code we generate is completely interworking capable, so we turn off
+ TARGET_INTERWORK here to avoid many tests later on. */
+
+ /* XXX However, we must pass the right pre-processor defines to CPP
+ or GLD can get confused. This is a hack. */
+ if (TARGET_INTERWORK)
+ arm_cpp_interwork = 1;
+
+ if (arm_arch5)
+ target_flags &= ~MASK_INTERWORK;
+
+ if (TARGET_IWMMXT && !ARM_DOUBLEWORD_ALIGN)
+ error ("iwmmxt requires an AAPCS compatible ABI for proper operation");
+
+ if (TARGET_IWMMXT_ABI && !TARGET_IWMMXT)
+ error ("iwmmxt abi requires an iwmmxt capable cpu");
+
+ /* BPABI targets use linker tricks to allow interworking on cores
+ without thumb support. */
+ if (TARGET_INTERWORK
+ && !TARGET_BPABI
+ && !bitmap_bit_p (arm_active_target.isa, isa_bit_thumb))
+ {
+ warning (0, "target CPU does not support interworking" );
+ target_flags &= ~MASK_INTERWORK;
+ }
+
+ /* If soft-float is specified then don't use FPU. */
+ if (TARGET_SOFT_FLOAT)
+ arm_fpu_attr = FPU_NONE;
+ else
+ arm_fpu_attr = FPU_VFP;
+
+ if (TARGET_AAPCS_BASED)
+ {
+ if (TARGET_CALLER_INTERWORKING)
+ error ("AAPCS does not support -mcaller-super-interworking");
+ else
+ if (TARGET_CALLEE_INTERWORKING)
+ error ("AAPCS does not support -mcallee-super-interworking");
+ }
+
+ /* __fp16 support currently assumes the core has ldrh. */
+ if (!arm_arch4 && arm_fp16_format != ARM_FP16_FORMAT_NONE)
+ sorry ("__fp16 and no ldrh");
+
+ if (use_cmse && !arm_arch_cmse)
+ error ("target CPU does not support ARMv8-M Security Extensions");
+
+ /* We don't clear D16-D31 VFP registers for cmse_nonsecure_call functions
+ and ARMv8-M Baseline and Mainline do not allow such configuration. */
+ if (use_cmse && LAST_VFP_REGNUM > LAST_LO_VFP_REGNUM)
+ error ("ARMv8-M Security Extensions incompatible with selected FPU");
+
+
+ if (TARGET_AAPCS_BASED)
+ {
+ if (arm_abi == ARM_ABI_IWMMXT)
+ arm_pcs_default = ARM_PCS_AAPCS_IWMMXT;
+ else if (TARGET_HARD_FLOAT_ABI)
+ {
+ arm_pcs_default = ARM_PCS_AAPCS_VFP;
+ if (!bitmap_bit_p (arm_active_target.isa, isa_bit_vfpv2))
+ error ("-mfloat-abi=hard: selected processor lacks an FPU");
+ }
+ else
+ arm_pcs_default = ARM_PCS_AAPCS;
+ }
+ else
+ {
+ if (arm_float_abi == ARM_FLOAT_ABI_HARD)
+ sorry ("-mfloat-abi=hard and VFP");
+
+ if (arm_abi == ARM_ABI_APCS)
+ arm_pcs_default = ARM_PCS_APCS;
+ else
+ arm_pcs_default = ARM_PCS_ATPCS;
+ }
+}
+
static void
arm_add_gc_roots (void)
{
@@ -9224,7 +9241,7 @@ arm_unspec_cost (rtx x, enum rtx_code /* outer_code */, bool speed_p, int *cost)
return true; \
} \
} \
- while (0);
+ while (0)
/* RTX costs. Make an estimate of the cost of executing the operation
X, which is contained with an operation with code OUTER_CODE.
@@ -9656,8 +9673,8 @@ arm_rtx_costs_internal (rtx x, enum rtx_code code, enum rtx_code outer_code,
/* We check both sides of the MINUS for shifter operands since,
unlike PLUS, it's not commutative. */
- HANDLE_NARROW_SHIFT_ARITH (MINUS, 0)
- HANDLE_NARROW_SHIFT_ARITH (MINUS, 1)
+ HANDLE_NARROW_SHIFT_ARITH (MINUS, 0);
+ HANDLE_NARROW_SHIFT_ARITH (MINUS, 1);
/* Slightly disparage, as we might need to widen the result. */
*cost += 1;
@@ -9768,7 +9785,7 @@ arm_rtx_costs_internal (rtx x, enum rtx_code code, enum rtx_code outer_code,
rtx shift_op, shift_reg;
shift_reg = NULL;
- HANDLE_NARROW_SHIFT_ARITH (PLUS, 0)
+ HANDLE_NARROW_SHIFT_ARITH (PLUS, 0);
if (CONST_INT_P (XEXP (x, 1)))
{
@@ -19419,7 +19436,12 @@ arm_get_vfp_saved_size (void)
/* Generate a function exit sequence. If REALLY_RETURN is false, then do
everything bar the final return instruction. If simple_return is true,
- then do not output epilogue, because it has already been emitted in RTL. */
+ then do not output epilogue, because it has already been emitted in RTL.
+
+ Note: do not forget to update length attribute of corresponding insn pattern
+ when changing assembly output (eg. length attribute of
+ thumb2_cmse_entry_return when updating Armv8-M Mainline Security Extensions
+ register clearing sequences). */
const char *
output_return_instruction (rtx operand, bool really_return, bool reverse,
bool simple_return)
@@ -23952,7 +23974,12 @@ thumb_pop (FILE *f, unsigned long mask)
/* Generate code to return from a thumb function.
If 'reg_containing_return_addr' is -1, then the return address is
- actually on the stack, at the stack pointer. */
+ actually on the stack, at the stack pointer.
+
+ Note: do not forget to update length attribute of corresponding insn pattern
+ when changing assembly output (eg. length attribute of epilogue_insns when
+ updating Armv8-M Baseline Security Extensions register clearing
+ sequences). */
static void
thumb_exit (FILE *f, int reg_containing_return_addr)
{
diff --git a/gcc/config/arm/arm.h b/gcc/config/arm/arm.h
index 65d6db4d086..fa35670ac89 100644
--- a/gcc/config/arm/arm.h
+++ b/gcc/config/arm/arm.h
@@ -210,10 +210,11 @@ extern tree arm_fp16_type_node;
/* FPU supports ARMv8.1 Adv.SIMD extensions. */
#define TARGET_NEON_RDMA (TARGET_NEON && arm_arch8_1)
-/* Supports for Dot Product AdvSIMD extensions. */
+/* Supports the Dot Product AdvSIMD extensions. */
#define TARGET_DOTPROD (TARGET_NEON \
&& bitmap_bit_p (arm_active_target.isa, \
- isa_bit_dotprod))
+ isa_bit_dotprod) \
+ && arm_arch8_2)
/* FPU supports the floating point FP16 instructions for ARMv8.2 and later. */
#define TARGET_VFP_FP16INST \
diff --git a/gcc/config/arm/arm.md b/gcc/config/arm/arm.md
index ddb9d8f3590..fd3aebd428a 100644
--- a/gcc/config/arm/arm.md
+++ b/gcc/config/arm/arm.md
@@ -81,14 +81,17 @@
(const (if_then_else (symbol_ref "TARGET_THUMB1")
(const_string "yes") (const_string "no"))))
-; We use this attribute to disable alternatives that can produce 32-bit
-; instructions inside an IT-block in Thumb2 state. ARMv8 deprecates IT blocks
-; that contain 32-bit instructions.
-(define_attr "enabled_for_depr_it" "no,yes" (const_string "yes"))
-
-; This attribute is used to disable a predicated alternative when we have
-; arm_restrict_it.
-(define_attr "predicable_short_it" "no,yes" (const_string "yes"))
+; Mark an instruction as suitable for "short IT" blocks in Thumb-2.
+; The arm_restrict_it flag enables the "short IT" feature which
+; restricts IT blocks to a single 16-bit instruction.
+; This attribute should only be used on 16-bit Thumb-2 instructions
+; which may be predicated (the "predicable" attribute must be set).
+(define_attr "predicable_short_it" "no,yes" (const_string "no"))
+
+; Mark an instruction as suitable for "short IT" blocks in Thumb-2.
+; This attribute should only be used on instructions which may emit
+; an IT block in their expansion which is not a short IT.
+(define_attr "enabled_for_short_it" "no,yes" (const_string "yes"))
;; Operand number of an input operand that is shifted. Zero if the
;; given instruction does not shift one of its input operands.
@@ -229,7 +232,7 @@
(match_test "arm_restrict_it")))
(const_string "no")
- (and (eq_attr "enabled_for_depr_it" "no")
+ (and (eq_attr "enabled_for_short_it" "no")
(match_test "arm_restrict_it"))
(const_string "no")
@@ -1036,7 +1039,6 @@
"adc%?\\t%0, %1, %3%S2"
[(set_attr "conds" "use")
(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set (attr "type") (if_then_else (match_operand 4 "const_int_operand" "")
(const_string "alu_shift_imm")
(const_string "alu_shift_reg")))]
@@ -1136,7 +1138,6 @@
[(set_attr "conds" "use")
(set_attr "arch" "*,a,t2")
(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "adc_reg,adc_imm,alu_shift_imm")]
)
@@ -1666,8 +1667,7 @@
"TARGET_32BIT && arm_arch6"
"mla%?\\t%0, %2, %1, %3"
[(set_attr "type" "mla")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")]
+ (set_attr "predicable" "yes")]
)
(define_insn "*mulsi3addsi_compare0"
@@ -1743,8 +1743,7 @@
"TARGET_32BIT && arm_arch_thumb2"
"mls%?\\t%0, %2, %1, %3"
[(set_attr "type" "mla")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")]
+ (set_attr "predicable" "yes")]
)
(define_expand "maddsidi4"
@@ -1780,8 +1779,7 @@
"TARGET_32BIT && arm_arch6"
"smlal%?\\t%Q0, %R0, %3, %2"
[(set_attr "type" "smlal")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")]
+ (set_attr "predicable" "yes")]
)
;; 32x32->64 widening multiply.
@@ -1818,8 +1816,7 @@
"TARGET_32BIT && arm_arch6"
"smull%?\\t%Q0, %R0, %1, %2"
[(set_attr "type" "smull")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")]
+ (set_attr "predicable" "yes")]
)
(define_expand "umulsidi3"
@@ -1850,8 +1847,7 @@
"TARGET_32BIT && arm_arch6"
"umull%?\\t%Q0, %R0, %1, %2"
[(set_attr "type" "umull")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")]
+ (set_attr "predicable" "yes")]
)
(define_expand "umaddsidi4"
@@ -1887,8 +1883,7 @@
"TARGET_32BIT && arm_arch6"
"umlal%?\\t%Q0, %R0, %3, %2"
[(set_attr "type" "umlal")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")]
+ (set_attr "predicable" "yes")]
)
(define_expand "smulsi3_highpart"
@@ -1932,8 +1927,7 @@
"TARGET_32BIT && arm_arch6"
"smull%?\\t%3, %0, %2, %1"
[(set_attr "type" "smull")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")]
+ (set_attr "predicable" "yes")]
)
(define_expand "umulsi3_highpart"
@@ -1977,8 +1971,7 @@
"TARGET_32BIT && arm_arch6"
"umull%?\\t%3, %0, %2, %1"
[(set_attr "type" "umull")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")]
+ (set_attr "predicable" "yes")]
)
(define_insn "mulhisi3"
@@ -2003,8 +1996,7 @@
"TARGET_DSP_MULTIPLY"
"smultb%?\\t%0, %1, %2"
[(set_attr "type" "smulxy")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")]
+ (set_attr "predicable" "yes")]
)
(define_insn "*mulhisi3bt"
@@ -2017,8 +2009,7 @@
"TARGET_DSP_MULTIPLY"
"smulbt%?\\t%0, %1, %2"
[(set_attr "type" "smulxy")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")]
+ (set_attr "predicable" "yes")]
)
(define_insn "*mulhisi3tt"
@@ -2032,8 +2023,7 @@
"TARGET_DSP_MULTIPLY"
"smultt%?\\t%0, %1, %2"
[(set_attr "type" "smulxy")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")]
+ (set_attr "predicable" "yes")]
)
(define_insn "maddhisi4"
@@ -2046,8 +2036,7 @@
"TARGET_DSP_MULTIPLY"
"smlabb%?\\t%0, %1, %2, %3"
[(set_attr "type" "smlaxy")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")]
+ (set_attr "predicable" "yes")]
)
;; Note: there is no maddhisi4ibt because this one is canonical form
@@ -2062,8 +2051,7 @@
"TARGET_DSP_MULTIPLY"
"smlatb%?\\t%0, %1, %2, %3"
[(set_attr "type" "smlaxy")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")]
+ (set_attr "predicable" "yes")]
)
(define_insn "*maddhisi4tt"
@@ -2078,8 +2066,7 @@
"TARGET_DSP_MULTIPLY"
"smlatt%?\\t%0, %1, %2, %3"
[(set_attr "type" "smlaxy")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")]
+ (set_attr "predicable" "yes")]
)
(define_insn "maddhidi4"
@@ -2093,8 +2080,7 @@
"TARGET_DSP_MULTIPLY"
"smlalbb%?\\t%Q0, %R0, %1, %2"
[(set_attr "type" "smlalxy")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")])
+ (set_attr "predicable" "yes")])
;; Note: there is no maddhidi4ibt because this one is canonical form
(define_insn "*maddhidi4tb"
@@ -2110,8 +2096,7 @@
"TARGET_DSP_MULTIPLY"
"smlaltb%?\\t%Q0, %R0, %1, %2"
[(set_attr "type" "smlalxy")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")])
+ (set_attr "predicable" "yes")])
(define_insn "*maddhidi4tt"
[(set (match_operand:DI 0 "s_register_operand" "=r")
@@ -2128,8 +2113,7 @@
"TARGET_DSP_MULTIPLY"
"smlaltt%?\\t%Q0, %R0, %1, %2"
[(set_attr "type" "smlalxy")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")])
+ (set_attr "predicable" "yes")])
(define_expand "mulsf3"
[(set (match_operand:SF 0 "s_register_operand" "")
@@ -2518,7 +2502,6 @@
"
[(set_attr "conds" "set")
(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "logics_imm")]
)
@@ -2918,7 +2901,6 @@
"bfc%?\t%0, %2, %1"
[(set_attr "length" "4")
(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "bfm")]
)
@@ -2931,7 +2913,6 @@
"bfi%?\t%0, %3, %2, %1"
[(set_attr "length" "4")
(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "bfm")]
)
@@ -2986,7 +2967,6 @@
}"
[(set_attr "length" "4,8")
(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "multiple")]
)
@@ -3008,7 +2988,6 @@
}"
[(set_attr "length" "8")
(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "multiple")]
)
@@ -3033,7 +3012,6 @@
}"
[(set_attr "length" "8")
(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "multiple")]
)
@@ -3044,7 +3022,6 @@
"TARGET_32BIT"
"bic%?\\t%0, %1, %2"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "logic_reg")]
)
@@ -3078,7 +3055,6 @@
"TARGET_ARM || (TARGET_THUMB2 && CONST_INT_P (operands[2]))"
"bics%?\\t%4, %3, %1%S0"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "conds" "set")
(set_attr "shift" "1")
(set (attr "type") (if_then_else (match_operand 2 "const_int_operand" "")
@@ -3104,7 +3080,6 @@
"TARGET_ARM || (TARGET_THUMB2 && CONST_INT_P (operands[2]))"
"bics%?\\t%4, %3, %1%S0"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "conds" "set")
(set_attr "shift" "1")
(set (attr "type") (if_then_else (match_operand 2 "const_int_operand" "")
@@ -3219,7 +3194,6 @@
#"
[(set_attr "length" "4,8")
(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "logic_reg,multiple")]
)
@@ -3419,7 +3393,6 @@
#"
[(set_attr "length" "4,8")
(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "logic_reg")]
)
@@ -3563,7 +3536,6 @@
[(set_attr "length" "8")
(set_attr "ce_count" "2")
(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "multiple")]
)
@@ -3701,7 +3673,6 @@
"TARGET_32BIT"
"bic%?\\t%0, %1, %1, asr #31"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "logic_shift_reg")]
)
@@ -3712,7 +3683,6 @@
"TARGET_32BIT"
"orr%?\\t%0, %1, %1, asr #31"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "logic_shift_reg")]
)
@@ -3763,7 +3733,6 @@
"TARGET_32BIT"
"and%?\\t%0, %1, %1, asr #31"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "logic_shift_reg")]
)
@@ -4000,7 +3969,6 @@
return "usat%?\t%0, %1, %3";
}
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "alus_imm")]
)
@@ -4027,7 +3995,6 @@
return "usat%?\t%0, %1, %4%S3";
}
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "shift" "3")
(set_attr "type" "logic_shift_reg")])
@@ -4278,7 +4245,6 @@
"TARGET_32BIT"
"mvn%?\\t%0, %1%S3"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "shift" "1")
(set_attr "arch" "32,a")
(set_attr "type" "mvn_shift,mvn_shift_reg")])
@@ -4554,7 +4520,6 @@
"sbfx%?\t%0, %1, %3, %2"
[(set_attr "length" "4")
(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "bfm")]
)
@@ -4569,7 +4534,6 @@
"ubfx%?\t%0, %1, %3, %2"
[(set_attr "length" "4")
(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "bfm")]
)
@@ -4585,7 +4549,6 @@
sdiv\t%0, %1, %2"
[(set_attr "arch" "32,v8mb")
(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "sdiv")]
)
@@ -4599,7 +4562,6 @@
udiv\t%0, %1, %2"
[(set_attr "arch" "32,v8mb")
(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "udiv")]
)
@@ -5377,8 +5339,7 @@
"TARGET_INT_SIMD"
"uxtah%?\\t%0, %2, %1"
[(set_attr "type" "alu_shift_reg")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")]
+ (set_attr "predicable" "yes")]
)
(define_expand "zero_extendqisi2"
@@ -5448,7 +5409,6 @@
"TARGET_INT_SIMD"
"uxtab%?\\t%0, %2, %1"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "alu_shift_reg")]
)
@@ -5501,7 +5461,6 @@
"tst%?\\t%0, #255"
[(set_attr "conds" "set")
(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "logic_imm")]
)
@@ -5611,8 +5570,7 @@
sxth%?\\t%0, %1
ldrsh%?\\t%0, %1"
[(set_attr "type" "extend,load_byte")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")]
+ (set_attr "predicable" "yes")]
)
(define_insn "*arm_extendhisi2addsi"
@@ -5716,8 +5674,7 @@
"TARGET_INT_SIMD"
"sxtab%?\\t%0, %2, %1"
[(set_attr "type" "alu_shift_reg")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")]
+ (set_attr "predicable" "yes")]
)
(define_expand "extendsfdf2"
@@ -6084,7 +6041,6 @@
movt\t%0, #:upper16:%c2"
[(set_attr "arch" "32,v8mb")
(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "length" "4")
(set_attr "type" "alu_sreg")]
)
@@ -6964,8 +6920,7 @@
[(set_attr "conds" "unconditional")
(set_attr "type" "load_4,store_4,mov_reg,multiple")
(set_attr "length" "4,4,4,8")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")]
+ (set_attr "predicable" "yes")]
)
(define_expand "movsf"
@@ -7018,7 +6973,6 @@
ldr%?\\t%0, %1\\t%@ float
str%?\\t%1, %0\\t%@ float"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "mov_reg,load_4,store_4")
(set_attr "arm_pool_range" "*,4096,*")
(set_attr "thumb2_pool_range" "*,4094,*")
@@ -7436,7 +7390,7 @@
operands[1] = gen_lowpart (SImode, operands[1]);
}
[(set_attr "conds" "set")
- (set_attr "enabled_for_depr_it" "yes,yes,no,*")
+ (set_attr "enabled_for_short_it" "yes,yes,no,*")
(set_attr "arch" "t2,t2,t2,a")
(set_attr "length" "6,6,10,8")
(set_attr "type" "multiple")]
@@ -8823,7 +8777,6 @@
"TARGET_32BIT"
"<arith_shift_insn>%?\\t%0, %1, %2, lsl %b3"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "shift" "2")
(set_attr "arch" "a,t2")
(set_attr "type" "alu_shift_imm")])
@@ -8838,7 +8791,6 @@
"TARGET_32BIT && GET_CODE (operands[2]) != MULT"
"<arith_shift_insn>%?\\t%0, %1, %3%S2"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "shift" "3")
(set_attr "arch" "a,t2,a")
(set_attr "type" "alu_shift_imm,alu_shift_imm,alu_shift_reg")])
@@ -8906,6 +8858,7 @@
"TARGET_32BIT"
"sub%?\\t%0, %1, %3%S2"
[(set_attr "predicable" "yes")
+ (set_attr "predicable_short_it" "no")
(set_attr "shift" "3")
(set_attr "arch" "32,a")
(set_attr "type" "alus_shift_imm,alus_shift_reg")])
@@ -9344,6 +9297,7 @@
}"
[(set_attr "conds" "set")
(set_attr "arch" "t2,t2,t2,t2,t2,any,any,any,any")
+ (set_attr "enabled_for_short_it" "yes,no,no,no,no,no,no,no,no")
(set_attr "type" "multiple")
(set_attr_alternative "length"
[(const_int 6)
@@ -9427,6 +9381,7 @@
}"
[(set_attr "conds" "set")
(set_attr "arch" "t2,t2,t2,t2,t2,any,any,any,any")
+ (set_attr "enabled_for_short_it" "yes,no,no,no,no,no,no,no,no")
(set_attr_alternative "length"
[(const_int 6)
(const_int 8)
@@ -9509,7 +9464,7 @@
[(set_attr "conds" "set")
(set_attr "predicable" "no")
(set_attr "arch" "t2,t2,t2,t2,t2,any,any,any,any")
- (set_attr "enabled_for_depr_it" "yes,no,no,no,no,no,no,no,no")
+ (set_attr "enabled_for_short_it" "yes,no,no,no,no,no,no,no,no")
(set_attr_alternative "length"
[(const_int 6)
(const_int 8)
@@ -9592,7 +9547,7 @@
"
[(set_attr "conds" "set")
(set_attr "arch" "t2,t2,t2,t2,t2,any,any,any,any")
- (set_attr "enabled_for_depr_it" "yes,no,no,no,no,no,no,no,no")
+ (set_attr "enabled_for_short_it" "yes,no,no,no,no,no,no,no,no")
(set_attr_alternative "length"
[(const_int 6)
(const_int 8)
@@ -9640,7 +9595,7 @@
DOM_CC_X_OR_Y),
CC_REGNUM);"
[(set_attr "conds" "clob")
- (set_attr "enabled_for_depr_it" "yes,no")
+ (set_attr "enabled_for_short_it" "yes,no")
(set_attr "length" "16")
(set_attr "type" "multiple")]
)
@@ -9671,7 +9626,7 @@
(set (match_dup 7) (ne:SI (match_dup 0) (const_int 0)))]
""
[(set_attr "conds" "set")
- (set_attr "enabled_for_depr_it" "yes,no")
+ (set_attr "enabled_for_short_it" "yes,no")
(set_attr "length" "16")
(set_attr "type" "multiple")]
)
@@ -9704,7 +9659,7 @@
DOM_CC_X_AND_Y),
CC_REGNUM);"
[(set_attr "conds" "clob")
- (set_attr "enabled_for_depr_it" "yes,no")
+ (set_attr "enabled_for_short_it" "yes,no")
(set_attr "length" "16")
(set_attr "type" "multiple")]
)
@@ -9735,7 +9690,7 @@
(set (match_dup 7) (ne:SI (match_dup 0) (const_int 0)))]
""
[(set_attr "conds" "set")
- (set_attr "enabled_for_depr_it" "yes,no")
+ (set_attr "enabled_for_short_it" "yes,no")
(set_attr "length" "16")
(set_attr "type" "multiple")]
)
@@ -9922,7 +9877,7 @@
}
"
[(set_attr "conds" "clob")
- (set_attr "enabled_for_depr_it" "no,yes,yes")
+ (set_attr "enabled_for_short_it" "no,yes,yes")
(set_attr "type" "multiple")]
)
@@ -10540,7 +10495,7 @@
[(set_attr "conds" "use")
(set_attr "length" "4")
(set_attr "arch" "t2,32")
- (set_attr "enabled_for_depr_it" "yes,no")
+ (set_attr "enabled_for_short_it" "yes,no")
(set_attr "type" "logic_shift_imm")]
)
@@ -10586,7 +10541,7 @@
[(set_attr "conds" "use")
(set_attr "length" "4")
(set_attr "arch" "t2,32")
- (set_attr "enabled_for_depr_it" "yes,no")
+ (set_attr "enabled_for_short_it" "yes,no")
(set_attr "type" "logic_shift_imm")]
)
@@ -11322,7 +11277,6 @@
"TARGET_32BIT && arm_arch5"
"clz%?\\t%0, %1"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "clz")])
(define_insn "rbitsi2"
@@ -11331,7 +11285,6 @@
"TARGET_32BIT && arm_arch_thumb2"
"rbit%?\\t%0, %1"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "clz")])
;; Keep this as a CTZ expression until after reload and then split
@@ -11483,7 +11436,6 @@
movt\t%0, %L1"
[(set_attr "arch" "32,v8mb")
(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "length" "4")
(set_attr "type" "alu_sreg")]
)
@@ -11499,7 +11451,6 @@
[(set_attr "arch" "t1,t2,32")
(set_attr "length" "2,2,4")
(set_attr "predicable" "no,yes,yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "rev")]
)
@@ -11747,8 +11698,7 @@
false, true))"
"ldrd%?\t%0, %3, [%1, %2]"
[(set_attr "type" "load_8")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")])
+ (set_attr "predicable" "yes")])
(define_insn "*thumb2_ldrd_base"
[(set (match_operand:SI 0 "s_register_operand" "=r")
@@ -11761,8 +11711,7 @@
operands[1], 0, false, true))"
"ldrd%?\t%0, %2, [%1]"
[(set_attr "type" "load_8")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")])
+ (set_attr "predicable" "yes")])
(define_insn "*thumb2_ldrd_base_neg"
[(set (match_operand:SI 0 "s_register_operand" "=r")
@@ -11775,8 +11724,7 @@
operands[1], -4, false, true))"
"ldrd%?\t%0, %2, [%1, #-4]"
[(set_attr "type" "load_8")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")])
+ (set_attr "predicable" "yes")])
(define_insn "*thumb2_strd"
[(set (mem:SI (plus:SI (match_operand:SI 0 "s_register_operand" "rk")
@@ -11792,8 +11740,7 @@
false, false))"
"strd%?\t%2, %4, [%0, %1]"
[(set_attr "type" "store_8")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")])
+ (set_attr "predicable" "yes")])
(define_insn "*thumb2_strd_base"
[(set (mem:SI (match_operand:SI 0 "s_register_operand" "rk"))
@@ -11806,8 +11753,7 @@
operands[0], 0, false, false))"
"strd%?\t%1, %2, [%0]"
[(set_attr "type" "store_8")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")])
+ (set_attr "predicable" "yes")])
(define_insn "*thumb2_strd_base_neg"
[(set (mem:SI (plus:SI (match_operand:SI 0 "s_register_operand" "rk")
@@ -11820,8 +11766,7 @@
operands[0], -4, false, false))"
"strd%?\t%1, %2, [%0, #-4]"
[(set_attr "type" "store_8")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")])
+ (set_attr "predicable" "yes")])
;; ARMv8 CRC32 instructions.
(define_insn "<crc_variant>"
diff --git a/gcc/config/arm/ldmstm.md b/gcc/config/arm/ldmstm.md
index 01fbb553fbc..d7650d4f809 100644
--- a/gcc/config/arm/ldmstm.md
+++ b/gcc/config/arm/ldmstm.md
@@ -37,8 +37,7 @@
"TARGET_32BIT && XVECLEN (operands[0], 0) == 4"
"ldm%?\t%5, {%1, %2, %3, %4}"
[(set_attr "type" "load_16")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")])
+ (set_attr "predicable" "yes")])
(define_insn "*thumb_ldm4_ia"
[(match_parallel 0 "load_multiple_operation"
@@ -75,8 +74,7 @@
"TARGET_32BIT && XVECLEN (operands[0], 0) == 5"
"ldmia%?\t%5!, {%1, %2, %3, %4}"
[(set_attr "type" "load_16")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")])
+ (set_attr "predicable" "yes")])
(define_insn "*thumb_ldm4_ia_update"
[(match_parallel 0 "load_multiple_operation"
@@ -110,8 +108,7 @@
"TARGET_32BIT && XVECLEN (operands[0], 0) == 4"
"stm%?\t%5, {%1, %2, %3, %4}"
[(set_attr "type" "store_16")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")])
+ (set_attr "predicable" "yes")])
(define_insn "*stm4_ia_update"
[(match_parallel 0 "store_multiple_operation"
@@ -128,8 +125,7 @@
"TARGET_32BIT && XVECLEN (operands[0], 0) == 5"
"stmia%?\t%5!, {%1, %2, %3, %4}"
[(set_attr "type" "store_16")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")])
+ (set_attr "predicable" "yes")])
(define_insn "*thumb_stm4_ia_update"
[(match_parallel 0 "store_multiple_operation"
@@ -306,8 +302,7 @@
"TARGET_32BIT && XVECLEN (operands[0], 0) == 4"
"ldmdb%?\t%5, {%1, %2, %3, %4}"
[(set_attr "type" "load_16")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")])
+ (set_attr "predicable" "yes")])
(define_insn "*ldm4_db_update"
[(match_parallel 0 "load_multiple_operation"
@@ -328,8 +323,7 @@
"TARGET_32BIT && XVECLEN (operands[0], 0) == 5"
"ldmdb%?\t%5!, {%1, %2, %3, %4}"
[(set_attr "type" "load_16")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")])
+ (set_attr "predicable" "yes")])
(define_insn "*stm4_db"
[(match_parallel 0 "store_multiple_operation"
@@ -344,8 +338,7 @@
"TARGET_32BIT && XVECLEN (operands[0], 0) == 4"
"stmdb%?\t%5, {%1, %2, %3, %4}"
[(set_attr "type" "store_16")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")])
+ (set_attr "predicable" "yes")])
(define_insn "*stm4_db_update"
[(match_parallel 0 "store_multiple_operation"
@@ -362,8 +355,7 @@
"TARGET_32BIT && XVECLEN (operands[0], 0) == 5"
"stmdb%?\t%5!, {%1, %2, %3, %4}"
[(set_attr "type" "store_16")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")])
+ (set_attr "predicable" "yes")])
(define_peephole2
[(set (match_operand:SI 0 "s_register_operand" "")
@@ -485,8 +477,7 @@
"TARGET_32BIT && XVECLEN (operands[0], 0) == 3"
"ldm%?\t%4, {%1, %2, %3}"
[(set_attr "type" "load_12")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")])
+ (set_attr "predicable" "yes")])
(define_insn "*thumb_ldm3_ia"
[(match_parallel 0 "load_multiple_operation"
@@ -517,8 +508,7 @@
"TARGET_32BIT && XVECLEN (operands[0], 0) == 4"
"ldmia%?\t%4!, {%1, %2, %3}"
[(set_attr "type" "load_12")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")])
+ (set_attr "predicable" "yes")])
(define_insn "*thumb_ldm3_ia_update"
[(match_parallel 0 "load_multiple_operation"
@@ -547,8 +537,7 @@
"TARGET_32BIT && XVECLEN (operands[0], 0) == 3"
"stm%?\t%4, {%1, %2, %3}"
[(set_attr "type" "store_12")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")])
+ (set_attr "predicable" "yes")])
(define_insn "*stm3_ia_update"
[(match_parallel 0 "store_multiple_operation"
@@ -563,8 +552,7 @@
"TARGET_32BIT && XVECLEN (operands[0], 0) == 4"
"stmia%?\t%4!, {%1, %2, %3}"
[(set_attr "type" "store_12")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")])
+ (set_attr "predicable" "yes")])
(define_insn "*thumb_stm3_ia_update"
[(match_parallel 0 "store_multiple_operation"
@@ -716,8 +704,7 @@
"TARGET_32BIT && XVECLEN (operands[0], 0) == 3"
"ldmdb%?\t%4, {%1, %2, %3}"
[(set_attr "type" "load_12")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")])
+ (set_attr "predicable" "yes")])
(define_insn "*ldm3_db_update"
[(match_parallel 0 "load_multiple_operation"
@@ -735,8 +722,7 @@
"TARGET_32BIT && XVECLEN (operands[0], 0) == 4"
"ldmdb%?\t%4!, {%1, %2, %3}"
[(set_attr "type" "load_12")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")])
+ (set_attr "predicable" "yes")])
(define_insn "*stm3_db"
[(match_parallel 0 "store_multiple_operation"
@@ -749,8 +735,7 @@
"TARGET_32BIT && XVECLEN (operands[0], 0) == 3"
"stmdb%?\t%4, {%1, %2, %3}"
[(set_attr "type" "store_12")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")])
+ (set_attr "predicable" "yes")])
(define_insn "*stm3_db_update"
[(match_parallel 0 "store_multiple_operation"
@@ -765,8 +750,7 @@
"TARGET_32BIT && XVECLEN (operands[0], 0) == 4"
"stmdb%?\t%4!, {%1, %2, %3}"
[(set_attr "type" "store_12")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")])
+ (set_attr "predicable" "yes")])
(define_peephole2
[(set (match_operand:SI 0 "s_register_operand" "")
@@ -871,8 +855,7 @@
"TARGET_32BIT && XVECLEN (operands[0], 0) == 2"
"ldm%?\t%3, {%1, %2}"
[(set_attr "type" "load_8")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")])
+ (set_attr "predicable" "yes")])
(define_insn "*thumb_ldm2_ia"
[(match_parallel 0 "load_multiple_operation"
@@ -897,8 +880,7 @@
"TARGET_32BIT && XVECLEN (operands[0], 0) == 3"
"ldmia%?\t%3!, {%1, %2}"
[(set_attr "type" "load_8")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")])
+ (set_attr "predicable" "yes")])
(define_insn "*thumb_ldm2_ia_update"
[(match_parallel 0 "load_multiple_operation"
@@ -922,8 +904,7 @@
"TARGET_32BIT && XVECLEN (operands[0], 0) == 2"
"stm%?\t%3, {%1, %2}"
[(set_attr "type" "store_8")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")])
+ (set_attr "predicable" "yes")])
(define_insn "*stm2_ia_update"
[(match_parallel 0 "store_multiple_operation"
@@ -936,8 +917,7 @@
"TARGET_32BIT && XVECLEN (operands[0], 0) == 3"
"stmia%?\t%3!, {%1, %2}"
[(set_attr "type" "store_8")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")])
+ (set_attr "predicable" "yes")])
(define_insn "*thumb_stm2_ia_update"
[(match_parallel 0 "store_multiple_operation"
@@ -1064,8 +1044,7 @@
"TARGET_32BIT && XVECLEN (operands[0], 0) == 2"
"ldmdb%?\t%3, {%1, %2}"
[(set_attr "type" "load_8")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")])
+ (set_attr "predicable" "yes")])
(define_insn "*ldm2_db_update"
[(match_parallel 0 "load_multiple_operation"
@@ -1080,8 +1059,7 @@
"TARGET_32BIT && XVECLEN (operands[0], 0) == 3"
"ldmdb%?\t%3!, {%1, %2}"
[(set_attr "type" "load_8")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")])
+ (set_attr "predicable" "yes")])
(define_insn "*stm2_db"
[(match_parallel 0 "store_multiple_operation"
@@ -1092,8 +1070,7 @@
"TARGET_32BIT && XVECLEN (operands[0], 0) == 2"
"stmdb%?\t%3, {%1, %2}"
[(set_attr "type" "store_8")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")])
+ (set_attr "predicable" "yes")])
(define_insn "*stm2_db_update"
[(match_parallel 0 "store_multiple_operation"
@@ -1106,8 +1083,7 @@
"TARGET_32BIT && XVECLEN (operands[0], 0) == 3"
"stmdb%?\t%3!, {%1, %2}"
[(set_attr "type" "store_8")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")])
+ (set_attr "predicable" "yes")])
(define_peephole2
[(set (match_operand:SI 0 "s_register_operand" "")
diff --git a/gcc/config/arm/sync.md b/gcc/config/arm/sync.md
index b4b4f2e6815..37a4cb3f1c5 100644
--- a/gcc/config/arm/sync.md
+++ b/gcc/config/arm/sync.md
@@ -87,8 +87,7 @@
}
}
[(set_attr "arch" "32,v8mb,any")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")])
+ (set_attr "predicable" "yes")])
(define_insn "atomic_store<mode>"
[(set (match_operand:QHSI 0 "memory_operand" "=Q,Q,Q")
@@ -115,8 +114,7 @@
}
}
[(set_attr "arch" "32,v8mb,any")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")])
+ (set_attr "predicable" "yes")])
;; An LDRD instruction usable by the atomic_loaddi expander on LPAE targets
@@ -127,8 +125,7 @@
VUNSPEC_LDRD_ATOMIC))]
"ARM_DOUBLEWORD_ALIGN && TARGET_HAVE_LPAE"
"ldrd%?\t%0, %H0, %C1"
- [(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")])
+ [(set_attr "predicable" "yes")])
;; There are three ways to expand this depending on the architecture
;; features available. As for the barriers, a load needs a barrier
@@ -461,8 +458,7 @@
ldrex<sync_sfx>%?\t%0, %C1
ldrex<sync_sfx>\t%0, %C1"
[(set_attr "arch" "32,v8mb")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")])
+ (set_attr "predicable" "yes")])
(define_insn "arm_load_acquire_exclusive<mode>"
[(set (match_operand:SI 0 "s_register_operand" "=r,r")
@@ -475,8 +471,7 @@
ldaex<sync_sfx>%?\\t%0, %C1
ldaex<sync_sfx>\\t%0, %C1"
[(set_attr "arch" "32,v8mb")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")])
+ (set_attr "predicable" "yes")])
(define_insn "arm_load_exclusivesi"
[(set (match_operand:SI 0 "s_register_operand" "=r,r")
@@ -488,8 +483,7 @@
ldrex%?\t%0, %C1
ldrex\t%0, %C1"
[(set_attr "arch" "32,v8mb")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")])
+ (set_attr "predicable" "yes")])
(define_insn "arm_load_acquire_exclusivesi"
[(set (match_operand:SI 0 "s_register_operand" "=r,r")
@@ -501,8 +495,7 @@
ldaex%?\t%0, %C1
ldaex\t%0, %C1"
[(set_attr "arch" "32,v8mb")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")])
+ (set_attr "predicable" "yes")])
(define_insn "arm_load_exclusivedi"
[(set (match_operand:DI 0 "s_register_operand" "=r")
@@ -511,8 +504,7 @@
VUNSPEC_LL))]
"TARGET_HAVE_LDREXD"
"ldrexd%?\t%0, %H0, %C1"
- [(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")])
+ [(set_attr "predicable" "yes")])
(define_insn "arm_load_acquire_exclusivedi"
[(set (match_operand:DI 0 "s_register_operand" "=r")
@@ -521,8 +513,7 @@
VUNSPEC_LAX))]
"TARGET_HAVE_LDACQEXD && ARM_DOUBLEWORD_ALIGN"
"ldaexd%?\t%0, %H0, %C1"
- [(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")])
+ [(set_attr "predicable" "yes")])
(define_insn "arm_store_exclusive<mode>"
[(set (match_operand:SI 0 "s_register_operand" "=&r")
@@ -548,8 +539,7 @@
else
return "strex<sync_sfx>%?\t%0, %2, %C1";
}
- [(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")])
+ [(set_attr "predicable" "yes")])
(define_insn "arm_store_release_exclusivedi"
[(set (match_operand:SI 0 "s_register_operand" "=&r")
@@ -564,8 +554,7 @@
gcc_assert ((REGNO (operands[2]) & 1) == 0 || TARGET_THUMB2);
return "stlexd%?\t%0, %2, %H2, %C1";
}
- [(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")])
+ [(set_attr "predicable" "yes")])
(define_insn "arm_store_release_exclusive<mode>"
[(set (match_operand:SI 0 "s_register_operand" "=&r,&r")
@@ -579,5 +568,4 @@
stlex<sync_sfx>%?\t%0, %2, %C1
stlex<sync_sfx>\t%0, %2, %C1"
[(set_attr "arch" "32,v8mb")
- (set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")])
+ (set_attr "predicable" "yes")])
diff --git a/gcc/config/arm/thumb2.md b/gcc/config/arm/thumb2.md
index b78c3d256ae..abe90d4f4e4 100644
--- a/gcc/config/arm/thumb2.md
+++ b/gcc/config/arm/thumb2.md
@@ -34,7 +34,6 @@
"TARGET_THUMB2"
"bic%?\\t%0, %1, %2%S4"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "shift" "2")
(set_attr "type" "alu_shift_imm")]
)
@@ -57,7 +56,7 @@
(match_dup 2)))]
""
[(set_attr "conds" "clob")
- (set_attr "enabled_for_depr_it" "yes,yes,no")
+ (set_attr "enabled_for_short_it" "yes,yes,no")
(set_attr "length" "6,6,10")
(set_attr "type" "multiple")]
)
@@ -78,7 +77,7 @@
(match_dup 2)))]
""
[(set_attr "conds" "clob")
- (set_attr "enabled_for_depr_it" "yes,yes,no")
+ (set_attr "enabled_for_short_it" "yes,yes,no")
(set_attr "length" "6,6,10")
(set_attr "type" "multiple")]
)
@@ -100,7 +99,7 @@
""
[(set_attr "conds" "clob")
(set_attr "length" "6,6,10")
- (set_attr "enabled_for_depr_it" "yes,yes,no")
+ (set_attr "enabled_for_short_it" "yes,yes,no")
(set_attr "type" "multiple")]
)
@@ -121,7 +120,7 @@
""
[(set_attr "conds" "clob")
(set_attr "length" "6,6,10")
- (set_attr "enabled_for_depr_it" "yes,yes,no")
+ (set_attr "enabled_for_short_it" "yes,yes,no")
(set_attr "type" "multiple")]
)
@@ -172,8 +171,7 @@
[(set_attr "conds" "*,clob,clob")
(set_attr "shift" "1")
(set_attr "predicable" "yes,no,no")
- (set_attr "predicable_short_it" "no")
- (set_attr "enabled_for_depr_it" "yes,yes,no")
+ (set_attr "enabled_for_short_it" "yes,yes,no")
(set_attr "ce_count" "2")
(set_attr "length" "8,6,10")
(set_attr "type" "multiple")]
@@ -226,8 +224,7 @@
[(set_attr "conds" "*,clob,clob")
(set_attr "shift" "1")
(set_attr "predicable" "yes,no,no")
- (set_attr "enabled_for_depr_it" "yes,yes,no")
- (set_attr "predicable_short_it" "no")
+ (set_attr "enabled_for_short_it" "yes,yes,no")
(set_attr "ce_count" "2")
(set_attr "length" "8,6,10")
(set_attr "type" "multiple")]
@@ -354,7 +351,7 @@
(const_int 0)))]
""
[(set_attr "conds" "use")
- (set_attr "enabled_for_depr_it" "yes,no")
+ (set_attr "enabled_for_short_it" "yes,no")
(set_attr "length" "8,10")
(set_attr "type" "multiple")]
)
@@ -504,7 +501,7 @@
DONE;
}
[(set_attr "length" "4,4,6,6,6,6,10,8,10,10,10,6")
- (set_attr "enabled_for_depr_it" "yes,yes,no,no,no,no,no,no,no,no,no,yes")
+ (set_attr "enabled_for_short_it" "yes,yes,no,no,no,no,no,no,no,no,no,yes")
(set_attr "conds" "use")
(set_attr_alternative "type"
[(if_then_else (match_operand 2 "const_int_operand" "")
@@ -1044,7 +1041,6 @@
ldrsb%?\\t%0, %1"
[(set_attr "type" "extend,load_byte")
(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "pool_range" "*,4094")
(set_attr "neg_pool_range" "*,250")]
)
@@ -1058,7 +1054,6 @@
ldrh%?\\t%0, %1"
[(set_attr "type" "extend,load_byte")
(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "pool_range" "*,4094")
(set_attr "neg_pool_range" "*,250")]
)
@@ -1072,7 +1067,6 @@
ldrb%?\\t%0, %1\\t%@ zero_extendqisi2"
[(set_attr "type" "extend,load_byte")
(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "pool_range" "*,4094")
(set_attr "neg_pool_range" "*,250")]
)
@@ -1132,7 +1126,7 @@
; we adapt the length accordingly.
(set (attr "length")
(if_then_else (match_test "TARGET_HARD_FLOAT")
- (const_int 12)
+ (const_int 34)
(const_int 8)))
; We do not support predicate execution of returns from cmse_nonsecure_entry
; functions because we need to clear the APSR. Since predicable has to be
@@ -1538,7 +1532,6 @@
"TARGET_THUMB2"
"orn%?\\t%0, %1, %2"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "logic_reg")]
)
@@ -1551,7 +1544,6 @@
"TARGET_THUMB2"
"orn%?\\t%0, %1, %2%S4"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "shift" "2")
(set_attr "type" "alu_shift_imm")]
)
diff --git a/gcc/config/arm/vfp.md b/gcc/config/arm/vfp.md
index a541413c263..075a938d22e 100644
--- a/gcc/config/arm/vfp.md
+++ b/gcc/config/arm/vfp.md
@@ -612,7 +612,6 @@
}
"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type"
"f_mcr,f_mrc,fconsts,f_loads,f_stores,load_4,store_4,fmov,mov_reg")
(set_attr "pool_range" "*,*,*,1018,*,4090,*,*,*")
@@ -824,7 +823,6 @@
"TARGET_32BIT && TARGET_HARD_FLOAT"
"vabs%?.f32\\t%0, %1"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "ffariths")]
)
@@ -834,7 +832,6 @@
"TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
"vabs%?.f64\\t%P0, %P1"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "ffarithd")]
)
@@ -846,7 +843,6 @@
vneg%?.f32\\t%0, %1
eor%?\\t%0, %1, #-2147483648"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "ffariths")]
)
@@ -892,7 +888,6 @@
}
"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "length" "4,4,8")
(set_attr "type" "ffarithd")]
)
@@ -961,7 +956,6 @@
"TARGET_32BIT && TARGET_HARD_FLOAT"
"vadd%?.f32\\t%0, %1, %2"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "fadds")]
)
@@ -972,7 +966,6 @@
"TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
"vadd%?.f64\\t%P0, %P1, %P2"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "faddd")]
)
@@ -995,7 +988,6 @@
"TARGET_32BIT && TARGET_HARD_FLOAT"
"vsub%?.f32\\t%0, %1, %2"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "fadds")]
)
@@ -1006,7 +998,6 @@
"TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
"vsub%?.f64\\t%P0, %P1, %P2"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "faddd")]
)
@@ -1036,7 +1027,6 @@
"TARGET_32BIT && TARGET_HARD_FLOAT"
"vdiv%?.f32\\t%0, %1, %2"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "arch" "*,armv6_or_vfpv3")
(set_attr "type" "fdivs")]
)
@@ -1048,7 +1038,6 @@
"TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
"vdiv%?.f64\\t%P0, %P1, %P2"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "arch" "*,armv6_or_vfpv3")
(set_attr "type" "fdivd")]
)
@@ -1074,7 +1063,6 @@
"TARGET_32BIT && TARGET_HARD_FLOAT"
"vmul%?.f32\\t%0, %1, %2"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "fmuls")]
)
@@ -1085,7 +1073,6 @@
"TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
"vmul%?.f64\\t%P0, %P1, %P2"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "fmuld")]
)
@@ -1116,7 +1103,6 @@
"TARGET_32BIT && TARGET_HARD_FLOAT && !flag_rounding_math"
"vnmul%?.f32\\t%0, %1, %2"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "fmuls")]
)
@@ -1127,7 +1113,6 @@
"TARGET_32BIT && TARGET_HARD_FLOAT"
"vnmul%?.f32\\t%0, %1, %2"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "fmuls")]
)
@@ -1139,7 +1124,6 @@
&& !flag_rounding_math"
"vnmul%?.f64\\t%P0, %P1, %P2"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "fmuld")]
)
@@ -1150,7 +1134,6 @@
"TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
"vnmul%?.f64\\t%P0, %P1, %P2"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "fmuld")]
)
@@ -1178,7 +1161,6 @@
"TARGET_32BIT && TARGET_HARD_FLOAT"
"vmla%?.f32\\t%0, %2, %3"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "fmacs")]
)
@@ -1190,7 +1172,6 @@
"TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
"vmla%?.f64\\t%P0, %P2, %P3"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "fmacd")]
)
@@ -1214,7 +1195,6 @@
"TARGET_32BIT && TARGET_HARD_FLOAT"
"vnmls%?.f32\\t%0, %2, %3"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "fmacs")]
)
@@ -1226,7 +1206,6 @@
"TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
"vnmls%?.f64\\t%P0, %P2, %P3"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "fmacd")]
)
@@ -1250,7 +1229,6 @@
"TARGET_32BIT && TARGET_HARD_FLOAT"
"vmls%?.f32\\t%0, %2, %3"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "fmacs")]
)
@@ -1262,7 +1240,6 @@
"TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
"vmls%?.f64\\t%P0, %P2, %P3"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "fmacd")]
)
@@ -1289,7 +1266,6 @@
"TARGET_32BIT && TARGET_HARD_FLOAT"
"vnmla%?.f32\\t%0, %2, %3"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "fmacs")]
)
@@ -1302,7 +1278,6 @@
"TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
"vnmla%?.f64\\t%P0, %P2, %P3"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "fmacd")]
)
@@ -1340,7 +1315,6 @@
"TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FMA"
"vfma%?.<V_if_elem>\\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "ffma<vfp_type>")]
)
@@ -1377,7 +1351,6 @@
"TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FMA"
"vfms%?.<V_if_elem>\\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "ffma<vfp_type>")]
)
@@ -1400,7 +1373,6 @@
"TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FMA"
"vfnms%?.<V_if_elem>\\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "ffma<vfp_type>")]
)
@@ -1424,7 +1396,6 @@
"TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FMA"
"vfnma%?.<V_if_elem>\\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "ffma<vfp_type>")]
)
@@ -1437,7 +1408,6 @@
"TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
"vcvt%?.f64.f32\\t%P0, %1"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "f_cvt")]
)
@@ -1447,7 +1417,6 @@
"TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
"vcvt%?.f32.f64\\t%0, %P1"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "f_cvt")]
)
@@ -1457,7 +1426,6 @@
"TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FP16 || TARGET_VFP_FP16INST)"
"vcvtb%?.f32.f16\\t%0, %1"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "f_cvt")]
)
@@ -1467,7 +1435,6 @@
"TARGET_32BIT && TARGET_FP16_TO_DOUBLE"
"vcvtb%?.f16.f64\\t%0, %P1"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "f_cvt")]
)
@@ -1477,7 +1444,6 @@
"TARGET_32BIT && TARGET_FP16_TO_DOUBLE"
"vcvtb%?.f64.f16\\t%P0, %1"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "f_cvt")]
)
@@ -1487,7 +1453,6 @@
"TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FP16 || TARGET_VFP_FP16INST)"
"vcvtb%?.f16.f32\\t%0, %1"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "f_cvt")]
)
@@ -1497,7 +1462,6 @@
"TARGET_32BIT && TARGET_HARD_FLOAT"
"vcvt%?.s32.f32\\t%0, %1"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "f_cvtf2i")]
)
@@ -1507,7 +1471,6 @@
"TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
"vcvt%?.s32.f64\\t%0, %P1"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "f_cvtf2i")]
)
@@ -1518,7 +1481,6 @@
"TARGET_32BIT && TARGET_HARD_FLOAT"
"vcvt%?.u32.f32\\t%0, %1"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "f_cvtf2i")]
)
@@ -1528,7 +1490,6 @@
"TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
"vcvt%?.u32.f64\\t%0, %P1"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "f_cvtf2i")]
)
@@ -1539,7 +1500,6 @@
"TARGET_32BIT && TARGET_HARD_FLOAT"
"vcvt%?.f32.s32\\t%0, %1"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "f_cvti2f")]
)
@@ -1549,7 +1509,6 @@
"TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
"vcvt%?.f64.s32\\t%P0, %1"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "f_cvti2f")]
)
@@ -1560,7 +1519,6 @@
"TARGET_32BIT && TARGET_HARD_FLOAT"
"vcvt%?.f32.u32\\t%0, %1"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "f_cvti2f")]
)
@@ -1570,7 +1528,6 @@
"TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
"vcvt%?.f64.u32\\t%P0, %1"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "f_cvti2f")]
)
@@ -1607,7 +1564,6 @@
"TARGET_32BIT && TARGET_HARD_FLOAT"
"vsqrt%?.f32\\t%0, %1"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "arch" "*,armv6_or_vfpv3")
(set_attr "type" "fsqrts")]
)
@@ -1618,7 +1574,6 @@
"TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
"vsqrt%?.f64\\t%P0, %P1"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "arch" "*,armv6_or_vfpv3")
(set_attr "type" "fsqrtd")]
)
@@ -1710,7 +1665,6 @@
vcmp%?.f32\\t%0, %1
vcmp%?.f32\\t%0, #0"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "fcmps")]
)
@@ -1723,7 +1677,6 @@
vcmpe%?.f32\\t%0, %1
vcmpe%?.f32\\t%0, #0"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "fcmps")]
)
@@ -1736,7 +1689,6 @@
vcmp%?.f64\\t%P0, %P1
vcmp%?.f64\\t%P0, #0"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "fcmpd")]
)
@@ -1749,7 +1701,6 @@
vcmpe%?.f64\\t%P0, %P1
vcmpe%?.f64\\t%P0, #0"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "fcmpd")]
)
@@ -1762,7 +1713,6 @@
"TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP3 && !flag_rounding_math"
"vcvt%?.f32.<FCVTI32typename>\\t%0, %1, %v2"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "f_cvti2f")]
)
@@ -1781,7 +1731,6 @@
vmov%?.f64\\t%P0, %1, %1\;vcvt%?.f64.<FCVTI32typename>\\t%P0, %P0, %v2"
[(set_attr "predicable" "yes")
(set_attr "ce_count" "2")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "f_cvti2f")
(set_attr "length" "8")]
)
@@ -1794,7 +1743,6 @@
"TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP3 && !flag_rounding_math"
"vcvt%?.s32.f32\\t%0, %1, %v2"
[(set_attr "predicable" "yes")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "f_cvtf2i")]
)
@@ -1940,7 +1888,6 @@
"TARGET_HARD_FLOAT && TARGET_VFP5 <vfp_double_cond>"
"vrint<vrint_variant>%?.<V_if_elem>\\t%<V_reg>0, %<V_reg>1"
[(set_attr "predicable" "<vrint_predicable>")
- (set_attr "predicable_short_it" "no")
(set_attr "type" "f_rint<vfp_type>")
(set_attr "conds" "<vrint_conds>")]
)
@@ -1953,8 +1900,7 @@
"register_operand" "<F_constraint>")] VCVT)))]
"TARGET_HARD_FLOAT && TARGET_VFP5 <vfp_double_cond>"
"vcvt<vrint_variant>.<su>32.<V_if_elem>\\t%0, %<V_reg>1"
- [(set_attr "predicable" "no")
- (set_attr "conds" "unconditional")
+ [(set_attr "conds" "unconditional")
(set_attr "type" "f_cvtf2i")]
)
diff --git a/gcc/config/arm/xgene1.md b/gcc/config/arm/xgene1.md
index c4b377354db..cf0694a5cc7 100644
--- a/gcc/config/arm/xgene1.md
+++ b/gcc/config/arm/xgene1.md
@@ -20,17 +20,26 @@
;; Pipeline description for the xgene1 micro-architecture
-(define_automaton "xgene1")
+(define_automaton "xgene1_main, xgene1_decoder, xgene1_div, xgene1_simd")
-(define_cpu_unit "xgene1_decode_out0" "xgene1")
-(define_cpu_unit "xgene1_decode_out1" "xgene1")
-(define_cpu_unit "xgene1_decode_out2" "xgene1")
-(define_cpu_unit "xgene1_decode_out3" "xgene1")
+(define_cpu_unit "xgene1_decode_out0" "xgene1_decoder")
+(define_cpu_unit "xgene1_decode_out1" "xgene1_decoder")
+(define_cpu_unit "xgene1_decode_out2" "xgene1_decoder")
+(define_cpu_unit "xgene1_decode_out3" "xgene1_decoder")
-(define_cpu_unit "xgene1_divide" "xgene1")
-(define_cpu_unit "xgene1_fp_divide" "xgene1")
-(define_cpu_unit "xgene1_fsu" "xgene1")
-(define_cpu_unit "xgene1_fcmp" "xgene1")
+(define_cpu_unit "xgene1_IXA" "xgene1_main")
+(define_cpu_unit "xgene1_IXB" "xgene1_main")
+(define_cpu_unit "xgene1_IXB_compl" "xgene1_main")
+
+(define_reservation "xgene1_IXn" "(xgene1_IXA | xgene1_IXB)")
+
+(define_cpu_unit "xgene1_multiply" "xgene1_main")
+(define_cpu_unit "xgene1_divide" "xgene1_div")
+(define_cpu_unit "xgene1_fp_divide" "xgene1_div")
+(define_cpu_unit "xgene1_fsu" "xgene1_simd")
+(define_cpu_unit "xgene1_fcmp" "xgene1_simd")
+(define_cpu_unit "xgene1_ld" "xgene1_main")
+(define_cpu_unit "xgene1_st" "xgene1_main")
(define_reservation "xgene1_decode1op"
"( xgene1_decode_out0 )
@@ -68,12 +77,12 @@
(define_insn_reservation "xgene1_f_load" 10
(and (eq_attr "tune" "xgene1")
(eq_attr "type" "f_loadd,f_loads"))
- "xgene1_decode2op")
+ "xgene1_decode2op, xgene1_ld")
(define_insn_reservation "xgene1_f_store" 4
(and (eq_attr "tune" "xgene1")
(eq_attr "type" "f_stored,f_stores"))
- "xgene1_decode2op")
+ "xgene1_decode2op, xgene1_st")
(define_insn_reservation "xgene1_fmov" 2
(and (eq_attr "tune" "xgene1")
@@ -92,85 +101,108 @@
(define_insn_reservation "xgene1_load_pair" 6
(and (eq_attr "tune" "xgene1")
- (eq_attr "type" "load_8, load_16"))
- "xgene1_decodeIsolated")
+ (eq_attr "type" "load_16"))
+ "xgene1_decodeIsolated, xgene1_ld*2")
(define_insn_reservation "xgene1_store_pair" 2
(and (eq_attr "tune" "xgene1")
- (eq_attr "type" "store_8, store_16"))
- "xgene1_decodeIsolated")
+ (eq_attr "type" "store_16"))
+ "xgene1_decodeIsolated, xgene1_st*2")
(define_insn_reservation "xgene1_fp_load1" 10
(and (eq_attr "tune" "xgene1")
- (eq_attr "type" "load_4")
+ (eq_attr "type" "load_4, load_8")
(eq_attr "fp" "yes"))
- "xgene1_decode1op")
+ "xgene1_decode1op, xgene1_ld")
(define_insn_reservation "xgene1_load1" 5
(and (eq_attr "tune" "xgene1")
- (eq_attr "type" "load_4"))
- "xgene1_decode1op")
+ (eq_attr "type" "load_4, load_8"))
+ "xgene1_decode1op, xgene1_ld")
-(define_insn_reservation "xgene1_store1" 2
+(define_insn_reservation "xgene1_store1" 1
(and (eq_attr "tune" "xgene1")
- (eq_attr "type" "store_4"))
- "xgene1_decode2op")
+ (eq_attr "type" "store_4, store_8"))
+ "xgene1_decode1op, xgene1_st")
(define_insn_reservation "xgene1_move" 1
(and (eq_attr "tune" "xgene1")
(eq_attr "type" "mov_reg,mov_imm,mrs"))
- "xgene1_decode1op")
+ "xgene1_decode1op, xgene1_IXn")
+
+(define_insn_reservation "xgene1_alu_cond" 1
+ (and (eq_attr "tune" "xgene1")
+ (eq_attr "type" "csel"))
+ "xgene1_decode1op, xgene1_IXn")
(define_insn_reservation "xgene1_alu" 1
(and (eq_attr "tune" "xgene1")
(eq_attr "type" "alu_imm,alu_sreg,alu_shift_imm,\
- alu_ext,adc_reg,csel,logic_imm,\
+ alu_ext,adc_reg,logic_imm,\
logic_reg,logic_shift_imm,clz,\
- rbit,shift_reg,adr,mov_reg,\
- mov_imm,extend"))
- "xgene1_decode1op")
+ rbit,adr,mov_reg,shift_imm,\
+ mov_imm,extend,multiple"))
+ "xgene1_decode1op, xgene1_IXn")
+
+(define_insn_reservation "xgene1_shift_rotate" 2
+ (and (eq_attr "tune" "xgene1")
+ (eq_attr "type" "shift_reg"))
+ "xgene1_decode1op, xgene1_IXB, xgene1_IXB_compl")
-(define_insn_reservation "xgene1_simd" 1
+(define_insn_reservation "xgene1_simd" 2
(and (eq_attr "tune" "xgene1")
(eq_attr "type" "rev"))
- "xgene1_decode1op")
+ "xgene1_decode1op, xgene1_IXB, xgene1_IXB_compl")
(define_insn_reservation "xgene1_alus" 1
(and (eq_attr "tune" "xgene1")
- (eq_attr "type" "alus_imm,alu_sreg,alus_shift_imm,\
+ (eq_attr "type" "alus_imm,alus_sreg,alus_shift_imm,\
alus_ext,logics_imm,logics_reg,\
logics_shift_imm"))
- "xgene1_decode1op")
+ "xgene1_decode1op, xgene1_IXB, xgene1_IXB_compl")
+
+(define_bypass 2 "xgene1_alus"
+ "xgene1_alu_cond, xgene1_branch")
-(define_insn_reservation "xgene1_mul" 6
+(define_insn_reservation "xgene1_mul32" 4
(and (eq_attr "tune" "xgene1")
- (eq_attr "type" "mul,mla,smull,umull,smlal,umlal"))
- "xgene1_decode2op")
+ (eq_attr "mul32" "yes"))
+ "xgene1_decode2op, xgene1_IXB + xgene1_multiply, xgene1_multiply, nothing, xgene1_IXB_compl")
+
+(define_insn_reservation "xgene1_mul64" 5
+ (and (eq_attr "tune" "xgene1")
+ (eq_attr "mul64" "yes"))
+ "xgene1_decode2op, xgene1_IXB + xgene1_multiply, xgene1_multiply, nothing*2, xgene1_IXB_compl")
(define_insn_reservation "xgene1_div" 34
(and (eq_attr "tune" "xgene1")
(eq_attr "type" "sdiv,udiv"))
- "xgene1_decode1op,xgene1_divide*7")
+ "xgene1_decode1op, xgene1_IXB + xgene1_divide*7")
(define_insn_reservation "xgene1_fcmp" 10
(and (eq_attr "tune" "xgene1")
(eq_attr "type" "fcmpd,fcmps,fccmpd,fccmps"))
- "xgene1_decode1op,xgene1_fsu+xgene1_fcmp*3")
+ "xgene1_decode1op, xgene1_fsu + xgene1_fcmp*3")
(define_insn_reservation "xgene1_fcsel" 3
(and (eq_attr "tune" "xgene1")
(eq_attr "type" "fcsel"))
- "xgene1_decode1op,xgene1_fsu")
+ "xgene1_decode1op, xgene1_fsu")
+
+(define_insn_reservation "xgene1_bfx" 1
+ (and (eq_attr "tune" "xgene1")
+ (eq_attr "type" "bfx"))
+ "xgene1_decode1op, xgene1_IXn")
(define_insn_reservation "xgene1_bfm" 2
(and (eq_attr "tune" "xgene1")
- (eq_attr "type" "bfm,bfx"))
- "xgene1_decode1op,xgene1_fsu")
+ (eq_attr "type" "bfm"))
+ "xgene1_decode1op, xgene1_IXB, xgene1_IXB_compl")
(define_insn_reservation "xgene1_f_rint" 5
(and (eq_attr "tune" "xgene1")
(eq_attr "type" "f_rintd,f_rints"))
- "xgene1_decode1op,xgene1_fsu")
+ "xgene1_decode1op, xgene1_fsu")
(define_insn_reservation "xgene1_f_cvt" 3
(and (eq_attr "tune" "xgene1")
@@ -225,12 +257,12 @@
(define_insn_reservation "xgene1_neon_load1" 11
(and (eq_attr "tune" "xgene1")
(eq_attr "type" "neon_load1_1reg, neon_load1_1reg_q"))
- "xgene1_decode2op,xgene1_fsu")
+ "xgene1_decode2op, xgene1_ld")
(define_insn_reservation "xgene1_neon_store1" 5
(and (eq_attr "tune" "xgene1")
(eq_attr "type" "neon_store1_1reg, neon_store1_1reg_q"))
- "xgene1_decode2op,xgene1_fsu")
+ "xgene1_decode2op, xgene1_st")
(define_insn_reservation "xgene1_neon_logic" 2
(and (eq_attr "tune" "xgene1")
@@ -300,6 +332,8 @@
neon_compare_zero_q,\
neon_tst,\
neon_tst_q,\
+ neon_minmax,\
+ neon_minmax_q,\
"))
"xgene1_decode1op,xgene1_fsu")
@@ -439,8 +473,10 @@
(and (eq_attr "tune" "xgene1")
(eq_attr "type" "neon_store1_one_lane,\
neon_store1_one_lane_q,\
+ neon_stp,\
+ neon_stp_q,\
"))
- "xgene1_decode1op")
+ "xgene1_decodeIsolated, xgene1_st")
(define_insn_reservation "xgene1_neon_halve_narrow" 6
(and (eq_attr "tune" "xgene1")
@@ -499,7 +535,7 @@
(and (eq_attr "tune" "xgene1")
(eq_attr "type" "neon_load1_all_lanes,\
"))
- "xgene1_decode1op")
+ "xgene1_decode1op, xgene1_ld")
(define_insn_reservation "xgene1_neon_fp_recp" 3
(and (eq_attr "tune" "xgene1")
diff --git a/gcc/config/cr16/cr16.h b/gcc/config/cr16/cr16.h
index 1757467d165..44d832bdc95 100644
--- a/gcc/config/cr16/cr16.h
+++ b/gcc/config/cr16/cr16.h
@@ -479,7 +479,7 @@ struct cumulative_args
#undef ASM_OUTPUT_LABELREF
#define ASM_OUTPUT_LABELREF(STREAM, NAME) \
- asm_fprintf (STREAM, "%U%s", (*targetm.strip_name_encoding) (NAME));
+ asm_fprintf (STREAM, "%U%s", (*targetm.strip_name_encoding) (NAME))
#define ASM_OUTPUT_SYMBOL_REF(STREAM, SYMBOL) \
do \
diff --git a/gcc/config/darwin-c.c b/gcc/config/darwin-c.c
index 91f08a0dcee..bfb35b9ae6d 100644
--- a/gcc/config/darwin-c.c
+++ b/gcc/config/darwin-c.c
@@ -284,13 +284,13 @@ framework_construct_pathname (const char *fname, cpp_dir *dir)
frname = XNEWVEC (char, strlen (fname) + dir->len + 2
+ strlen(".framework/") + strlen("PrivateHeaders"));
- strncpy (&frname[0], dir->name, dir->len);
+ memcpy (&frname[0], dir->name, dir->len);
frname_len = dir->len;
if (frname_len && frname[frname_len-1] != '/')
frname[frname_len++] = '/';
- strncpy (&frname[frname_len], fname, fname_len);
+ memcpy (&frname[frname_len], fname, fname_len);
frname_len += fname_len;
- strncpy (&frname[frname_len], ".framework/", strlen (".framework/"));
+ memcpy (&frname[frname_len], ".framework/", strlen (".framework/"));
frname_len += strlen (".framework/");
if (fast_dir == 0)
@@ -316,7 +316,7 @@ framework_construct_pathname (const char *fname, cpp_dir *dir)
/* Append framework_header_dirs and header file name */
for (i = 0; framework_header_dirs[i].dirName; i++)
{
- strncpy (&frname[frname_len],
+ memcpy (&frname[frname_len],
framework_header_dirs[i].dirName,
framework_header_dirs[i].dirNameLen);
strcpy (&frname[frname_len + framework_header_dirs[i].dirNameLen],
@@ -378,23 +378,23 @@ find_subframework_file (const char *fname, const char *pname)
sfrname_len = bufptr - pname;
- strncpy (&sfrname[0], pname, sfrname_len);
+ memcpy (&sfrname[0], pname, sfrname_len);
- strncpy (&sfrname[sfrname_len], "Frameworks/", strlen ("Frameworks/"));
+ memcpy (&sfrname[sfrname_len], "Frameworks/", strlen ("Frameworks/"));
sfrname_len += strlen("Frameworks/");
- strncpy (&sfrname[sfrname_len], fname, fname_len);
+ memcpy (&sfrname[sfrname_len], fname, fname_len);
sfrname_len += fname_len;
- strncpy (&sfrname[sfrname_len], ".framework/", strlen (".framework/"));
+ memcpy (&sfrname[sfrname_len], ".framework/", strlen (".framework/"));
sfrname_len += strlen (".framework/");
/* Append framework_header_dirs and header file name */
for (i = 0; framework_header_dirs[i].dirName; i++)
{
- strncpy (&sfrname[sfrname_len],
- framework_header_dirs[i].dirName,
- framework_header_dirs[i].dirNameLen);
+ memcpy (&sfrname[sfrname_len],
+ framework_header_dirs[i].dirName,
+ framework_header_dirs[i].dirNameLen);
strcpy (&sfrname[sfrname_len + framework_header_dirs[i].dirNameLen],
&fname[fname_len]);
diff --git a/gcc/config/elfos.h b/gcc/config/elfos.h
index 47a07115e32..8149c815262 100644
--- a/gcc/config/elfos.h
+++ b/gcc/config/elfos.h
@@ -135,15 +135,15 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
#ifndef ASM_OUTPUT_BEFORE_CASE_LABEL
#define ASM_OUTPUT_BEFORE_CASE_LABEL(FILE, PREFIX, NUM, TABLE) \
- ASM_OUTPUT_ALIGN ((FILE), 2);
+ ASM_OUTPUT_ALIGN ((FILE), 2)
#endif
#undef ASM_OUTPUT_CASE_LABEL
#define ASM_OUTPUT_CASE_LABEL(FILE, PREFIX, NUM, JUMPTABLE) \
do \
{ \
- ASM_OUTPUT_BEFORE_CASE_LABEL (FILE, PREFIX, NUM, JUMPTABLE) \
- (*targetm.asm_out.internal_label) (FILE, PREFIX, NUM); \
+ ASM_OUTPUT_BEFORE_CASE_LABEL (FILE, PREFIX, NUM, JUMPTABLE); \
+ (*targetm.asm_out.internal_label) (FILE, PREFIX, NUM); \
} \
while (0)
@@ -444,7 +444,7 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
#undef ASM_OUTPUT_ASCII
#define ASM_OUTPUT_ASCII(FILE, STR, LENGTH) \
- default_elf_asm_output_ascii ((FILE), (STR), (LENGTH));
+ default_elf_asm_output_ascii ((FILE), (STR), (LENGTH))
/* Allow the use of the -frecord-gcc-switches switch via the
elf_record_gcc_switches function defined in varasm.c. */
diff --git a/gcc/config/i386/cpuid.h b/gcc/config/i386/cpuid.h
index 8cb1848dff5..4fad5d2b6e1 100644
--- a/gcc/config/i386/cpuid.h
+++ b/gcc/config/i386/cpuid.h
@@ -97,6 +97,7 @@
#define bit_AVX512VBMI (1 << 1)
#define bit_PKU (1 << 3)
#define bit_OSPKE (1 << 4)
+#define bit_AVX512VBMI2 (1 << 6)
#define bit_SHSTK (1 << 7)
#define bit_GFNI (1 << 8)
#define bit_AVX512VPOPCNTDQ (1 << 14)
diff --git a/gcc/config/i386/driver-i386.c b/gcc/config/i386/driver-i386.c
index 973abddcc67..f0d1e54e142 100644
--- a/gcc/config/i386/driver-i386.c
+++ b/gcc/config/i386/driver-i386.c
@@ -417,7 +417,7 @@ const char *host_detect_local_cpu (int argc, const char **argv)
unsigned int has_avx512vbmi = 0, has_avx512ifma = 0, has_clwb = 0;
unsigned int has_mwaitx = 0, has_clzero = 0, has_pku = 0, has_rdpid = 0;
unsigned int has_avx5124fmaps = 0, has_avx5124vnniw = 0;
- unsigned int has_gfni = 0;
+ unsigned int has_gfni = 0, has_avx512vbmi2 = 0;
unsigned int has_ibt = 0, has_shstk = 0;
bool arch;
@@ -507,6 +507,7 @@ const char *host_detect_local_cpu (int argc, const char **argv)
has_prefetchwt1 = ecx & bit_PREFETCHWT1;
has_avx512vbmi = ecx & bit_AVX512VBMI;
has_pku = ecx & bit_OSPKE;
+ has_avx512vbmi2 = ecx & bit_AVX512VBMI2;
has_rdpid = ecx & bit_RDPID;
has_gfni = ecx & bit_GFNI;
@@ -1050,6 +1051,7 @@ const char *host_detect_local_cpu (int argc, const char **argv)
const char *avx512ifma = has_avx512ifma ? " -mavx512ifma" : " -mno-avx512ifma";
const char *avx512vbmi = has_avx512vbmi ? " -mavx512vbmi" : " -mno-avx512vbmi";
const char *avx5124vnniw = has_avx5124vnniw ? " -mavx5124vnniw" : " -mno-avx5124vnniw";
+ const char *avx512vbmi2 = has_avx512vbmi2 ? " -mavx512vbmi2" : " -mno-avx512vbmi2";
const char *avx5124fmaps = has_avx5124fmaps ? " -mavx5124fmaps" : " -mno-avx5124fmaps";
const char *clwb = has_clwb ? " -mclwb" : " -mno-clwb";
const char *mwaitx = has_mwaitx ? " -mmwaitx" : " -mno-mwaitx";
@@ -1068,7 +1070,8 @@ const char *host_detect_local_cpu (int argc, const char **argv)
avx512cd, avx512pf, prefetchwt1, clflushopt,
xsavec, xsaves, avx512dq, avx512bw, avx512vl,
avx512ifma, avx512vbmi, avx5124fmaps, avx5124vnniw,
- clwb, mwaitx, clzero, pku, rdpid, gfni, ibt, shstk, NULL);
+ clwb, mwaitx, clzero, pku, rdpid, gfni, ibt, shstk,
+ avx512vbmi2, NULL);
}
done:
diff --git a/gcc/config/i386/gfniintrin.h b/gcc/config/i386/gfniintrin.h
index f4ca01c5b11..e9fea2ef00a 100644
--- a/gcc/config/i386/gfniintrin.h
+++ b/gcc/config/i386/gfniintrin.h
@@ -28,12 +28,20 @@
#ifndef _GFNIINTRIN_H_INCLUDED
#define _GFNIINTRIN_H_INCLUDED
-#ifndef __GFNI__
+#if !defined(__GFNI__) || !defined(__SSE__)
#pragma GCC push_options
-#pragma GCC target("gfni")
+#pragma GCC target("gfni,sse")
#define __DISABLE_GFNI__
#endif /* __GFNI__ */
+extern __inline __m128i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_gf2p8mul_epi8 (__m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_vgf2p8mulb_v16qi((__v16qi) __A,
+ (__v16qi) __B);
+}
+
#ifdef __OPTIMIZE__
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -43,10 +51,21 @@ _mm_gf2p8affineinv_epi64_epi8 (__m128i __A, __m128i __B, const int __C)
(__v16qi) __B,
__C);
}
+
+extern __inline __m128i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_gf2p8affine_epi64_epi8 (__m128i __A, __m128i __B, const int __C)
+{
+ return (__m128i) __builtin_ia32_vgf2p8affineqb_v16qi ((__v16qi) __A,
+ (__v16qi) __B, __C);
+}
#else
#define _mm_gf2p8affineinv_epi64_epi8(A, B, C) \
((__m128i) __builtin_ia32_vgf2p8affineinvqb_v16qi((__v16qi)(__m128i)(A), \
(__v16qi)(__m128i)(B), (int)(C)))
+#define _mm_gf2p8affine_epi64_epi8(A, B, C) \
+ ((__m128i) __builtin_ia32_vgf2p8affineqb_v16qi ((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (int)(C)))
#endif
#ifdef __DISABLE_GFNI__
@@ -60,6 +79,14 @@ _mm_gf2p8affineinv_epi64_epi8 (__m128i __A, __m128i __B, const int __C)
#define __DISABLE_GFNIAVX__
#endif /* __GFNIAVX__ */
+extern __inline __m256i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_gf2p8mul_epi8 (__m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_vgf2p8mulb_v32qi ((__v32qi) __A,
+ (__v32qi) __B);
+}
+
#ifdef __OPTIMIZE__
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -69,11 +96,22 @@ _mm256_gf2p8affineinv_epi64_epi8 (__m256i __A, __m256i __B, const int __C)
(__v32qi) __B,
__C);
}
+
+extern __inline __m256i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_gf2p8affine_epi64_epi8 (__m256i __A, __m256i __B, const int __C)
+{
+ return (__m256i) __builtin_ia32_vgf2p8affineqb_v32qi ((__v32qi) __A,
+ (__v32qi) __B, __C);
+}
#else
#define _mm256_gf2p8affineinv_epi64_epi8(A, B, C) \
((__m256i) __builtin_ia32_vgf2p8affineinvqb_v32qi((__v32qi)(__m256i)(A), \
(__v32qi)(__m256i)(B), \
(int)(C)))
+#define _mm256_gf2p8affine_epi64_epi8(A, B, C) \
+ ((__m256i) __builtin_ia32_vgf2p8affineqb_v32qi ((__v32qi)(__m256i)(A), \
+ ( __v32qi)(__m256i)(B), (int)(C)))
#endif
#ifdef __DISABLE_GFNIAVX__
@@ -87,6 +125,23 @@ _mm256_gf2p8affineinv_epi64_epi8 (__m256i __A, __m256i __B, const int __C)
#define __DISABLE_GFNIAVX512VL__
#endif /* __GFNIAVX512VL__ */
+extern __inline __m128i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_gf2p8mul_epi8 (__m128i __A, __mmask16 __B, __m128i __C, __m128i __D)
+{
+ return (__m128i) __builtin_ia32_vgf2p8mulb_v16qi_mask ((__v16qi) __C,
+ (__v16qi) __D,
+ (__v16qi)__A, __B);
+}
+
+extern __inline __m128i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_gf2p8mul_epi8 (__mmask16 __A, __m128i __B, __m128i __C)
+{
+ return (__m128i) __builtin_ia32_vgf2p8mulb_v16qi_mask ((__v16qi) __B,
+ (__v16qi) __C, (__v16qi) _mm_setzero_si128 (), __A);
+}
+
#ifdef __OPTIMIZE__
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -110,6 +165,24 @@ _mm_maskz_gf2p8affineinv_epi64_epi8 (__mmask16 __A, __m128i __B, __m128i __C,
(__v16qi) _mm_setzero_si128 (),
__A);
}
+
+extern __inline __m128i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_gf2p8affine_epi64_epi8 (__m128i __A, __mmask16 __B, __m128i __C,
+ __m128i __D, const int __E)
+{
+ return (__m128i) __builtin_ia32_vgf2p8affineqb_v16qi_mask ((__v16qi) __C,
+ (__v16qi) __D, __E, (__v16qi)__A, __B);
+}
+
+extern __inline __m128i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_gf2p8affine_epi64_epi8 (__mmask16 __A, __m128i __B, __m128i __C,
+ const int __D)
+{
+ return (__m128i) __builtin_ia32_vgf2p8affineqb_v16qi_mask ((__v16qi) __B,
+ (__v16qi) __C, __D, (__v16qi) _mm_setzero_si128 (), __A);
+}
#else
#define _mm_mask_gf2p8affineinv_epi64_epi8(A, B, C, D, E) \
((__m128i) __builtin_ia32_vgf2p8affineinvqb_v16qi_mask( \
@@ -120,6 +193,13 @@ _mm_maskz_gf2p8affineinv_epi64_epi8 (__mmask16 __A, __m128i __B, __m128i __C,
(__v16qi)(__m128i)(B), (__v16qi)(__m128i)(C), \
(int)(D), (__v16qi)(__m128i) _mm_setzero_si128 (), \
(__mmask16)(A)))
+#define _mm_mask_gf2p8affine_epi64_epi8(A, B, C, D, E) \
+ ((__m128i) __builtin_ia32_vgf2p8affineqb_v16qi_mask((__v16qi)(__m128i)(C),\
+ (__v16qi)(__m128i)(D), (int)(E), (__v16qi)(__m128i)(A), (__mmask16)(B)))
+#define _mm_maskz_gf2p8affine_epi64_epi8(A, B, C, D) \
+ ((__m128i) __builtin_ia32_vgf2p8affineqb_v16qi_mask((__v16qi)(__m128i)(B),\
+ (__v16qi)(__m128i)(C), (int)(D), \
+ (__v16qi)(__m128i) _mm_setzero_si128 (), (__mmask16)(A)))
#endif
#ifdef __DISABLE_GFNIAVX512VL__
@@ -133,6 +213,24 @@ _mm_maskz_gf2p8affineinv_epi64_epi8 (__mmask16 __A, __m128i __B, __m128i __C,
#define __DISABLE_GFNIAVX512VLBW__
#endif /* __GFNIAVX512VLBW__ */
+extern __inline __m256i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_gf2p8mul_epi8 (__m256i __A, __mmask32 __B, __m256i __C,
+ __m256i __D)
+{
+ return (__m256i) __builtin_ia32_vgf2p8mulb_v32qi_mask ((__v32qi) __C,
+ (__v32qi) __D,
+ (__v32qi)__A, __B);
+}
+
+extern __inline __m256i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_gf2p8mul_epi8 (__mmask32 __A, __m256i __B, __m256i __C)
+{
+ return (__m256i) __builtin_ia32_vgf2p8mulb_v32qi_mask ((__v32qi) __B,
+ (__v32qi) __C, (__v32qi) _mm256_setzero_si256 (), __A);
+}
+
#ifdef __OPTIMIZE__
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -155,6 +253,27 @@ _mm256_maskz_gf2p8affineinv_epi64_epi8 (__mmask32 __A, __m256i __B,
(__v32qi) __C, __D,
(__v32qi) _mm256_setzero_si256 (), __A);
}
+
+extern __inline __m256i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_gf2p8affine_epi64_epi8 (__m256i __A, __mmask32 __B, __m256i __C,
+ __m256i __D, const int __E)
+{
+ return (__m256i) __builtin_ia32_vgf2p8affineqb_v32qi_mask ((__v32qi) __C,
+ (__v32qi) __D,
+ __E,
+ (__v32qi)__A,
+ __B);
+}
+
+extern __inline __m256i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_gf2p8affine_epi64_epi8 (__mmask32 __A, __m256i __B,
+ __m256i __C, const int __D)
+{
+ return (__m256i) __builtin_ia32_vgf2p8affineqb_v32qi_mask ((__v32qi) __B,
+ (__v32qi) __C, __D, (__v32qi)_mm256_setzero_si256 (), __A);
+}
#else
#define _mm256_mask_gf2p8affineinv_epi64_epi8(A, B, C, D, E) \
((__m256i) __builtin_ia32_vgf2p8affineinvqb_v32qi_mask( \
@@ -164,6 +283,13 @@ _mm256_maskz_gf2p8affineinv_epi64_epi8 (__mmask32 __A, __m256i __B,
((__m256i) __builtin_ia32_vgf2p8affineinvqb_v32qi_mask( \
(__v32qi)(__m256i)(B), (__v32qi)(__m256i)(C), (int)(D), \
(__v32qi)(__m256i) _mm256_setzero_si256 (), (__mmask32)(A)))
+#define _mm256_mask_gf2p8affine_epi64_epi8(A, B, C, D, E) \
+ ((__m256i) __builtin_ia32_vgf2p8affineqb_v32qi_mask((__v32qi)(__m256i)(C),\
+ (__v32qi)(__m256i)(D), (int)(E), (__v32qi)(__m256i)(A), (__mmask32)(B)))
+#define _mm256_maskz_gf2p8affine_epi64_epi8(A, B, C, D) \
+ ((__m256i) __builtin_ia32_vgf2p8affineqb_v32qi_mask((__v32qi)(__m256i)(B),\
+ (__v32qi)(__m256i)(C), (int)(D), \
+ (__v32qi)(__m256i) _mm256_setzero_si256 (), (__mmask32)(A)))
#endif
#ifdef __DISABLE_GFNIAVX512VLBW__
@@ -177,6 +303,30 @@ _mm256_maskz_gf2p8affineinv_epi64_epi8 (__mmask32 __A, __m256i __B,
#define __DISABLE_GFNIAVX512FBW__
#endif /* __GFNIAVX512FBW__ */
+extern __inline __m512i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_gf2p8mul_epi8 (__m512i __A, __mmask64 __B, __m512i __C,
+ __m512i __D)
+{
+ return (__m512i) __builtin_ia32_vgf2p8mulb_v64qi_mask ((__v64qi) __C,
+ (__v64qi) __D, (__v64qi)__A, __B);
+}
+
+extern __inline __m512i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_gf2p8mul_epi8 (__mmask64 __A, __m512i __B, __m512i __C)
+{
+ return (__m512i) __builtin_ia32_vgf2p8mulb_v64qi_mask ((__v64qi) __B,
+ (__v64qi) __C, (__v64qi) _mm512_setzero_si512 (), __A);
+}
+extern __inline __m512i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_gf2p8mul_epi8 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vgf2p8mulb_v64qi ((__v64qi) __A,
+ (__v64qi) __B);
+}
+
#ifdef __OPTIMIZE__
extern __inline __m512i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -207,6 +357,31 @@ _mm512_gf2p8affineinv_epi64_epi8 (__m512i __A, __m512i __B, const int __C)
return (__m512i) __builtin_ia32_vgf2p8affineinvqb_v64qi ((__v64qi) __A,
(__v64qi) __B, __C);
}
+
+extern __inline __m512i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_gf2p8affine_epi64_epi8 (__m512i __A, __mmask64 __B, __m512i __C,
+ __m512i __D, const int __E)
+{
+ return (__m512i) __builtin_ia32_vgf2p8affineqb_v64qi_mask ((__v64qi) __C,
+ (__v64qi) __D, __E, (__v64qi)__A, __B);
+}
+
+extern __inline __m512i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_gf2p8affine_epi64_epi8 (__mmask64 __A, __m512i __B, __m512i __C,
+ const int __D)
+{
+ return (__m512i) __builtin_ia32_vgf2p8affineqb_v64qi_mask ((__v64qi) __B,
+ (__v64qi) __C, __D, (__v64qi) _mm512_setzero_si512 (), __A);
+}
+extern __inline __m512i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_gf2p8affine_epi64_epi8 (__m512i __A, __m512i __B, const int __C)
+{
+ return (__m512i) __builtin_ia32_vgf2p8affineqb_v64qi ((__v64qi) __A,
+ (__v64qi) __B, __C);
+}
#else
#define _mm512_mask_gf2p8affineinv_epi64_epi8(A, B, C, D, E) \
((__m512i) __builtin_ia32_vgf2p8affineinvqb_v64qi_mask( \
@@ -219,6 +394,16 @@ _mm512_gf2p8affineinv_epi64_epi8 (__m512i __A, __m512i __B, const int __C)
#define _mm512_gf2p8affineinv_epi64_epi8(A, B, C) \
((__m512i) __builtin_ia32_vgf2p8affineinvqb_v64qi ( \
(__v64qi)(__m512i)(A), (__v64qi)(__m512i)(B), (int)(C)))
+#define _mm512_mask_gf2p8affine_epi64_epi8(A, B, C, D, E) \
+ ((__m512i) __builtin_ia32_vgf2p8affineqb_v64qi_mask((__v64qi)(__m512i)(C),\
+ (__v64qi)(__m512i)(D), (int)(E), (__v64qi)(__m512i)(A), (__mmask64)(B)))
+#define _mm512_maskz_gf2p8affine_epi64_epi8(A, B, C, D) \
+ ((__m512i) __builtin_ia32_vgf2p8affineqb_v64qi_mask((__v64qi)(__m512i)(B),\
+ (__v64qi)(__m512i)(C), (int)(D), \
+ (__v64qi)(__m512i) _mm512_setzero_si512 (), (__mmask64)(A)))
+#define _mm512_gf2p8affine_epi64_epi8(A, B, C) \
+ ((__m512i) __builtin_ia32_vgf2p8affineqb_v64qi ((__v64qi)(__m512i)(A), \
+ (__v64qi)(__m512i)(B), (int)(C)))
#endif
#ifdef __DISABLE_GFNIAVX512FBW__
diff --git a/gcc/config/i386/i386-builtin-types.def b/gcc/config/i386/i386-builtin-types.def
index 5b3b96ea2d0..04fcb99ae8c 100644
--- a/gcc/config/i386/i386-builtin-types.def
+++ b/gcc/config/i386/i386-builtin-types.def
@@ -1218,3 +1218,4 @@ DEF_FUNCTION_TYPE (V64QI, V64QI, V64QI, INT)
DEF_FUNCTION_TYPE (V64QI, V64QI, V64QI, INT, V64QI, UDI)
DEF_FUNCTION_TYPE (V32QI, V32QI, V32QI, INT, V32QI, USI)
DEF_FUNCTION_TYPE (V16QI, V16QI, V16QI, INT, V16QI, UHI)
+DEF_FUNCTION_TYPE (V64QI, V64QI, V64QI)
diff --git a/gcc/config/i386/i386-builtin.def b/gcc/config/i386/i386-builtin.def
index 76e5f0fafdd..577a592892f 100644
--- a/gcc/config/i386/i386-builtin.def
+++ b/gcc/config/i386/i386-builtin.def
@@ -2394,6 +2394,26 @@ BDESC (OPTION_MASK_ISA_AVX512VBMI | OPTION_MASK_ISA_AVX512VL, CODE_FOR_avx512vl_
BDESC (OPTION_MASK_ISA_AVX512VBMI | OPTION_MASK_ISA_AVX512VL, CODE_FOR_avx512vl_vpermi2varv32qi3_mask, "__builtin_ia32_vpermi2varqi256_mask", IX86_BUILTIN_VPERMI2VARQI256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI_V32QI_USI)
BDESC (OPTION_MASK_ISA_AVX512VBMI | OPTION_MASK_ISA_AVX512VL, CODE_FOR_avx512vl_vpermi2varv16qi3_mask, "__builtin_ia32_vpermi2varqi128_mask", IX86_BUILTIN_VPERMI2VARQI128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI_UHI)
+/* GFNI */
+BDESC (OPTION_MASK_ISA_GFNI, CODE_FOR_vgf2p8affineinvqb_v64qi, "__builtin_ia32_vgf2p8affineinvqb_v64qi", IX86_BUILTIN_VGF2P8AFFINEINVQB512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_INT)
+BDESC (OPTION_MASK_ISA_GFNI | OPTION_MASK_ISA_AVX512BW, CODE_FOR_vgf2p8affineinvqb_v64qi_mask, "__builtin_ia32_vgf2p8affineinvqb_v64qi_mask", IX86_BUILTIN_VGF2P8AFFINEINVQB512MASK, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_INT_V64QI_UDI)
+BDESC (OPTION_MASK_ISA_GFNI | OPTION_MASK_ISA_AVX, CODE_FOR_vgf2p8affineinvqb_v32qi, "__builtin_ia32_vgf2p8affineinvqb_v32qi", IX86_BUILTIN_VGF2P8AFFINEINVQB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI_INT)
+BDESC (OPTION_MASK_ISA_GFNI | OPTION_MASK_ISA_AVX512BW | OPTION_MASK_ISA_AVX512VL, CODE_FOR_vgf2p8affineinvqb_v32qi_mask, "__builtin_ia32_vgf2p8affineinvqb_v32qi_mask", IX86_BUILTIN_VGF2P8AFFINEINVQB256MASK, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI_INT_V32QI_USI)
+BDESC (OPTION_MASK_ISA_GFNI | OPTION_MASK_ISA_SSE, CODE_FOR_vgf2p8affineinvqb_v16qi, "__builtin_ia32_vgf2p8affineinvqb_v16qi", IX86_BUILTIN_VGF2P8AFFINEINVQB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_INT)
+BDESC (OPTION_MASK_ISA_GFNI | OPTION_MASK_ISA_SSE, CODE_FOR_vgf2p8affineinvqb_v16qi_mask, "__builtin_ia32_vgf2p8affineinvqb_v16qi_mask", IX86_BUILTIN_VGF2P8AFFINEINVQB128MASK, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_INT_V16QI_UHI)
+BDESC (OPTION_MASK_ISA_GFNI, CODE_FOR_vgf2p8affineqb_v64qi, "__builtin_ia32_vgf2p8affineqb_v64qi", IX86_BUILTIN_VGF2P8AFFINEQB512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_INT)
+BDESC (OPTION_MASK_ISA_GFNI | OPTION_MASK_ISA_AVX512BW, CODE_FOR_vgf2p8affineqb_v64qi_mask, "__builtin_ia32_vgf2p8affineqb_v64qi_mask", IX86_BUILTIN_VGF2P8AFFINEQB512MASK, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_INT_V64QI_UDI)
+BDESC (OPTION_MASK_ISA_GFNI | OPTION_MASK_ISA_AVX, CODE_FOR_vgf2p8affineqb_v32qi, "__builtin_ia32_vgf2p8affineqb_v32qi", IX86_BUILTIN_VGF2P8AFFINEQB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI_INT)
+BDESC (OPTION_MASK_ISA_GFNI | OPTION_MASK_ISA_AVX512BW | OPTION_MASK_ISA_AVX512VL, CODE_FOR_vgf2p8affineqb_v32qi_mask, "__builtin_ia32_vgf2p8affineqb_v32qi_mask", IX86_BUILTIN_VGF2P8AFFINEQB256MASK, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI_INT_V32QI_USI)
+BDESC (OPTION_MASK_ISA_GFNI | OPTION_MASK_ISA_SSE, CODE_FOR_vgf2p8affineqb_v16qi, "__builtin_ia32_vgf2p8affineqb_v16qi", IX86_BUILTIN_VGF2P8AFFINEQB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_INT)
+BDESC (OPTION_MASK_ISA_GFNI | OPTION_MASK_ISA_SSE, CODE_FOR_vgf2p8affineqb_v16qi_mask, "__builtin_ia32_vgf2p8affineqb_v16qi_mask", IX86_BUILTIN_VGF2P8AFFINEQB128MASK, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_INT_V16QI_UHI)
+BDESC (OPTION_MASK_ISA_GFNI, CODE_FOR_vgf2p8mulb_v64qi, "__builtin_ia32_vgf2p8mulb_v64qi", IX86_BUILTIN_VGF2P8MULB512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI)
+BDESC (OPTION_MASK_ISA_GFNI | OPTION_MASK_ISA_AVX512BW, CODE_FOR_vgf2p8mulb_v64qi_mask, "__builtin_ia32_vgf2p8mulb_v64qi_mask", IX86_BUILTIN_VGF2P8MULB512MASK, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_V64QI_UDI)
+BDESC (OPTION_MASK_ISA_GFNI | OPTION_MASK_ISA_AVX, CODE_FOR_vgf2p8mulb_v32qi, "__builtin_ia32_vgf2p8mulb_v32qi", IX86_BUILTIN_VGF2P8MULB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI)
+BDESC (OPTION_MASK_ISA_GFNI | OPTION_MASK_ISA_AVX512BW, CODE_FOR_vgf2p8mulb_v32qi_mask, "__builtin_ia32_vgf2p8mulb_v32qi_mask", IX86_BUILTIN_VGF2P8MULB256MASK, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI_V32QI_USI)
+BDESC (OPTION_MASK_ISA_GFNI | OPTION_MASK_ISA_SSE, CODE_FOR_vgf2p8mulb_v16qi, "__builtin_ia32_vgf2p8mulb_v16qi", IX86_BUILTIN_VGF2P8MULB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI)
+BDESC (OPTION_MASK_ISA_GFNI | OPTION_MASK_ISA_AVX512BW, CODE_FOR_vgf2p8mulb_v16qi_mask, "__builtin_ia32_vgf2p8mulb_v16qi_mask", IX86_BUILTIN_VGF2P8MULB128MASK, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI_UHI)
+
/* Builtins with rounding support. */
BDESC_END (ARGS, ROUND_ARGS)
@@ -2588,14 +2608,6 @@ BDESC (OPTION_MASK_ISA_AVX512VPOPCNTDQ, CODE_FOR_vpopcountv8di_mask, "__builtin_
/* RDPID */
BDESC (OPTION_MASK_ISA_RDPID, CODE_FOR_rdpid, "__builtin_ia32_rdpid", IX86_BUILTIN_RDPID, UNKNOWN, (int) UNSIGNED_FTYPE_VOID)
-
-/* GFNI */
-BDESC (OPTION_MASK_ISA_GFNI, CODE_FOR_vgf2p8affineinvqb_v64qi, "__builtin_ia32_vgf2p8affineinvqb_v64qi", IX86_BUILTIN_VGF2P8AFFINEINVQB512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_INT)
-BDESC (OPTION_MASK_ISA_GFNI | OPTION_MASK_ISA_AVX512BW, CODE_FOR_vgf2p8affineinvqb_v64qi_mask, "__builtin_ia32_vgf2p8affineinvqb_v64qi_mask", IX86_BUILTIN_VGF2P8AFFINEINVQB512MASK, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_INT_V64QI_UDI)
-BDESC (OPTION_MASK_ISA_GFNI, CODE_FOR_vgf2p8affineinvqb_v32qi, "__builtin_ia32_vgf2p8affineinvqb_v32qi", IX86_BUILTIN_VGF2P8AFFINEINVQB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI_INT)
-BDESC (OPTION_MASK_ISA_GFNI | OPTION_MASK_ISA_AVX512BW, CODE_FOR_vgf2p8affineinvqb_v32qi_mask, "__builtin_ia32_vgf2p8affineinvqb_v32qi_mask", IX86_BUILTIN_VGF2P8AFFINEINVQB256MASK, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI_INT_V32QI_USI)
-BDESC (OPTION_MASK_ISA_GFNI, CODE_FOR_vgf2p8affineinvqb_v16qi, "__builtin_ia32_vgf2p8affineinvqb_v16qi", IX86_BUILTIN_VGF2P8AFFINEINVQB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_INT)
-BDESC (OPTION_MASK_ISA_GFNI | OPTION_MASK_ISA_AVX512BW, CODE_FOR_vgf2p8affineinvqb_v16qi_mask, "__builtin_ia32_vgf2p8affineinvqb_v16qi_mask", IX86_BUILTIN_VGF2P8AFFINEINVQB128MASK, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_INT_V16QI_UHI)
BDESC_END (ARGS2, MPX)
/* Builtins for MPX. */
diff --git a/gcc/config/i386/i386-c.c b/gcc/config/i386/i386-c.c
index be99d01f110..b39754868c3 100644
--- a/gcc/config/i386/i386-c.c
+++ b/gcc/config/i386/i386-c.c
@@ -387,6 +387,8 @@ ix86_target_macros_internal (HOST_WIDE_INT isa_flag,
def_or_undef (parse_in, "__AVX512IFMA__");
if (isa_flag2 & OPTION_MASK_ISA_AVX5124VNNIW)
def_or_undef (parse_in, "__AVX5124VNNIW__");
+ if (isa_flag2 & OPTION_MASK_ISA_AVX512VBMI2)
+ def_or_undef (parse_in, "__AVX512VBMI2__");
if (isa_flag2 & OPTION_MASK_ISA_SGX)
def_or_undef (parse_in, "__SGX__");
if (isa_flag2 & OPTION_MASK_ISA_AVX5124FMAPS)
@@ -449,7 +451,7 @@ ix86_target_macros_internal (HOST_WIDE_INT isa_flag,
def_or_undef (parse_in, "__XSAVEC__");
if (isa_flag & OPTION_MASK_ISA_XSAVES)
def_or_undef (parse_in, "__XSAVES__");
- if (isa_flag & OPTION_MASK_ISA_MPX)
+ if (isa_flag2 & OPTION_MASK_ISA_MPX)
def_or_undef (parse_in, "__MPX__");
if (isa_flag & OPTION_MASK_ISA_CLWB)
def_or_undef (parse_in, "__CLWB__");
@@ -459,7 +461,7 @@ ix86_target_macros_internal (HOST_WIDE_INT isa_flag,
def_or_undef (parse_in, "__PKU__");
if (isa_flag2 & OPTION_MASK_ISA_RDPID)
def_or_undef (parse_in, "__RDPID__");
- if (isa_flag2 & OPTION_MASK_ISA_GFNI)
+ if (isa_flag & OPTION_MASK_ISA_GFNI)
def_or_undef (parse_in, "__GFNI__");
if (isa_flag2 & OPTION_MASK_ISA_IBT)
{
diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index 4b684522082..9c543c319cb 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -877,7 +877,7 @@ rest_of_handle_insert_vzeroupper (void)
int i;
/* vzeroupper instructions are inserted immediately after reload to
- account for possible spills from 256bit registers. The pass
+ account for possible spills from 256bit or 512bit registers. The pass
reuses mode switching infrastructure by re-running mode insertion
pass, so disable entities that have already been processed. */
for (i = 0; i < MAX_386_ENTITIES; i++)
@@ -2499,7 +2499,7 @@ public:
/* opt_pass methods: */
virtual bool gate (function *)
{
- return TARGET_AVX && !TARGET_AVX512F
+ return TARGET_AVX
&& TARGET_VZEROUPPER && flag_expensive_optimizations
&& !optimize_size;
}
@@ -2745,7 +2745,8 @@ ix86_target_string (HOST_WIDE_INT isa, HOST_WIDE_INT isa2,
ISAs come first. Target string will be displayed in the same order. */
static struct ix86_target_opts isa2_opts[] =
{
- { "-mgfni", OPTION_MASK_ISA_GFNI },
+ { "-mmpx", OPTION_MASK_ISA_MPX },
+ { "-mavx512vbmi2", OPTION_MASK_ISA_AVX512VBMI2 },
{ "-mrdpid", OPTION_MASK_ISA_RDPID },
{ "-msgx", OPTION_MASK_ISA_SGX },
{ "-mavx5124vnniw", OPTION_MASK_ISA_AVX5124VNNIW },
@@ -2756,6 +2757,7 @@ ix86_target_string (HOST_WIDE_INT isa, HOST_WIDE_INT isa2,
};
static struct ix86_target_opts isa_opts[] =
{
+ { "-mgfni", OPTION_MASK_ISA_GFNI },
{ "-mavx512vbmi", OPTION_MASK_ISA_AVX512VBMI },
{ "-mavx512ifma", OPTION_MASK_ISA_AVX512IFMA },
{ "-mavx512vl", OPTION_MASK_ISA_AVX512VL },
@@ -2813,7 +2815,6 @@ ix86_target_string (HOST_WIDE_INT isa, HOST_WIDE_INT isa2,
{ "-mlwp", OPTION_MASK_ISA_LWP },
{ "-mhle", OPTION_MASK_ISA_HLE },
{ "-mfxsr", OPTION_MASK_ISA_FXSR },
- { "-mmpx", OPTION_MASK_ISA_MPX },
{ "-mclwb", OPTION_MASK_ISA_CLWB }
};
@@ -4081,8 +4082,8 @@ ix86_option_override_internal (bool main_args_p,
&& !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_AVX512VL))
opts->x_ix86_isa_flags |= OPTION_MASK_ISA_AVX512VL;
if (processor_alias_table[i].flags & PTA_MPX
- && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_MPX))
- opts->x_ix86_isa_flags |= OPTION_MASK_ISA_MPX;
+ && !(opts->x_ix86_isa_flags2_explicit & OPTION_MASK_ISA_MPX))
+ opts->x_ix86_isa_flags2 |= OPTION_MASK_ISA_MPX;
if (processor_alias_table[i].flags & PTA_AVX512VBMI
&& !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_AVX512VBMI))
opts->x_ix86_isa_flags |= OPTION_MASK_ISA_AVX512VBMI;
@@ -4125,10 +4126,10 @@ ix86_option_override_internal (bool main_args_p,
break;
}
- if (TARGET_X32 && (opts->x_ix86_isa_flags & OPTION_MASK_ISA_MPX))
+ if (TARGET_X32 && (opts->x_ix86_isa_flags2 & OPTION_MASK_ISA_MPX))
error ("Intel MPX does not support x32");
- if (TARGET_X32 && (ix86_isa_flags & OPTION_MASK_ISA_MPX))
+ if (TARGET_X32 && (ix86_isa_flags2 & OPTION_MASK_ISA_MPX))
error ("Intel MPX does not support x32");
if (i == pta_size)
@@ -4668,7 +4669,8 @@ ix86_option_override_internal (bool main_args_p,
if (TARGET_SEH && TARGET_CALL_MS2SYSV_XLOGUES)
sorry ("-mcall-ms2sysv-xlogues isn%'t currently supported with SEH");
- if (!(opts_set->x_target_flags & MASK_VZEROUPPER))
+ if (!(opts_set->x_target_flags & MASK_VZEROUPPER)
+ && TARGET_EMIT_VZEROUPPER)
opts->x_target_flags |= MASK_VZEROUPPER;
if (!(opts_set->x_target_flags & MASK_STV))
opts->x_target_flags |= MASK_STV;
@@ -5244,6 +5246,7 @@ ix86_valid_target_attribute_inner_p (tree args, char *p_strings[],
IX86_ATTR_ISA ("avx5124fmaps", OPT_mavx5124fmaps),
IX86_ATTR_ISA ("avx5124vnniw", OPT_mavx5124vnniw),
IX86_ATTR_ISA ("avx512vpopcntdq", OPT_mavx512vpopcntdq),
+ IX86_ATTR_ISA ("avx512vbmi2", OPT_mavx512vbmi2),
IX86_ATTR_ISA ("avx512vbmi", OPT_mavx512vbmi),
IX86_ATTR_ISA ("avx512ifma", OPT_mavx512ifma),
@@ -10488,8 +10491,6 @@ symbolic_reference_mentioned_p (rtx op)
bool
ix86_can_use_return_insn_p (void)
{
- struct ix86_frame frame;
-
if (ix86_function_naked (current_function_decl))
return false;
@@ -10504,7 +10505,7 @@ ix86_can_use_return_insn_p (void)
if (crtl->args.pops_args && crtl->args.size >= 32768)
return 0;
- frame = cfun->machine->frame;
+ struct ix86_frame &frame = cfun->machine->frame;
return (frame.stack_pointer_offset == UNITS_PER_WORD
&& (frame.nregs + frame.nsseregs) == 0);
}
@@ -10998,7 +10999,7 @@ ix86_can_eliminate (const int from, const int to)
HOST_WIDE_INT
ix86_initial_elimination_offset (int from, int to)
{
- struct ix86_frame frame = cfun->machine->frame;
+ struct ix86_frame &frame = cfun->machine->frame;
if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
return frame.hard_frame_pointer_offset;
@@ -11519,12 +11520,15 @@ choose_basereg (HOST_WIDE_INT cfa_offset, rtx &base_reg,
an alignment value (in bits) that is preferred or zero and will
recieve the alignment of the base register that was selected,
irrespective of rather or not CFA_OFFSET is a multiple of that
- alignment value.
+ alignment value. If it is possible for the base register offset to be
+ non-immediate then SCRATCH_REGNO should specify a scratch register to
+ use.
The valid base registers are taken from CFUN->MACHINE->FS. */
static rtx
-choose_baseaddr (HOST_WIDE_INT cfa_offset, unsigned int *align)
+choose_baseaddr (HOST_WIDE_INT cfa_offset, unsigned int *align,
+ unsigned int scratch_regno = INVALID_REGNUM)
{
rtx base_reg = NULL;
HOST_WIDE_INT base_offset = 0;
@@ -11538,6 +11542,19 @@ choose_baseaddr (HOST_WIDE_INT cfa_offset, unsigned int *align)
choose_basereg (cfa_offset, base_reg, base_offset, 0, align);
gcc_assert (base_reg != NULL);
+
+ rtx base_offset_rtx = GEN_INT (base_offset);
+
+ if (!x86_64_immediate_operand (base_offset_rtx, Pmode))
+ {
+ gcc_assert (scratch_regno != INVALID_REGNUM);
+
+ rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
+ emit_move_insn (scratch_reg, base_offset_rtx);
+
+ return gen_rtx_PLUS (Pmode, base_reg, scratch_reg);
+ }
+
return plus_constant (Pmode, base_reg, base_offset);
}
@@ -12085,7 +12102,17 @@ release_scratch_register_on_entry (struct scratch_reg *sr)
}
}
-#define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
+/* Return the probing interval for -fstack-clash-protection. */
+
+static HOST_WIDE_INT
+get_probe_interval (void)
+{
+ if (flag_stack_clash_protection)
+ return (HOST_WIDE_INT_1U
+ << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL));
+ else
+ return (HOST_WIDE_INT_1U << STACK_CHECK_PROBE_INTERVAL_EXP);
+}
/* Emit code to adjust the stack pointer by SIZE bytes while probing it.
@@ -12154,8 +12181,7 @@ ix86_adjust_stack_and_probe_stack_clash (const HOST_WIDE_INT size)
/* We're allocating a large enough stack frame that we need to
emit probes. Either emit them inline or in a loop depending
on the size. */
- HOST_WIDE_INT probe_interval
- = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL);
+ HOST_WIDE_INT probe_interval = get_probe_interval ();
if (size <= 4 * probe_interval)
{
HOST_WIDE_INT i;
@@ -12164,7 +12190,7 @@ ix86_adjust_stack_and_probe_stack_clash (const HOST_WIDE_INT size)
/* Allocate PROBE_INTERVAL bytes. */
rtx insn
= pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
- GEN_INT (-PROBE_INTERVAL), -1,
+ GEN_INT (-probe_interval), -1,
m->fs.cfa_reg == stack_pointer_rtx);
add_reg_note (insn, REG_STACK_CHECK, const0_rtx);
@@ -12257,7 +12283,7 @@ ix86_adjust_stack_and_probe (const HOST_WIDE_INT size)
that's the easy case. The run-time loop is made up of 9 insns in the
generic case while the compile-time loop is made up of 3+2*(n-1) insns
for n # of intervals. */
- if (size <= 4 * PROBE_INTERVAL)
+ if (size <= 4 * get_probe_interval ())
{
HOST_WIDE_INT i, adjust;
bool first_probe = true;
@@ -12266,15 +12292,15 @@ ix86_adjust_stack_and_probe (const HOST_WIDE_INT size)
values of N from 1 until it exceeds SIZE. If only one probe is
needed, this will not generate any code. Then adjust and probe
to PROBE_INTERVAL + SIZE. */
- for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
+ for (i = get_probe_interval (); i < size; i += get_probe_interval ())
{
if (first_probe)
{
- adjust = 2 * PROBE_INTERVAL + dope;
+ adjust = 2 * get_probe_interval () + dope;
first_probe = false;
}
else
- adjust = PROBE_INTERVAL;
+ adjust = get_probe_interval ();
emit_insn (gen_rtx_SET (stack_pointer_rtx,
plus_constant (Pmode, stack_pointer_rtx,
@@ -12283,9 +12309,9 @@ ix86_adjust_stack_and_probe (const HOST_WIDE_INT size)
}
if (first_probe)
- adjust = size + PROBE_INTERVAL + dope;
+ adjust = size + get_probe_interval () + dope;
else
- adjust = size + PROBE_INTERVAL - i;
+ adjust = size + get_probe_interval () - i;
emit_insn (gen_rtx_SET (stack_pointer_rtx,
plus_constant (Pmode, stack_pointer_rtx,
@@ -12295,7 +12321,8 @@ ix86_adjust_stack_and_probe (const HOST_WIDE_INT size)
/* Adjust back to account for the additional first interval. */
last = emit_insn (gen_rtx_SET (stack_pointer_rtx,
plus_constant (Pmode, stack_pointer_rtx,
- PROBE_INTERVAL + dope)));
+ (get_probe_interval ()
+ + dope))));
}
/* Otherwise, do the same as above, but in a loop. Note that we must be
@@ -12313,7 +12340,7 @@ ix86_adjust_stack_and_probe (const HOST_WIDE_INT size)
/* Step 1: round SIZE to the previous multiple of the interval. */
- rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
+ rounded_size = ROUND_DOWN (size, get_probe_interval ());
/* Step 2: compute initial and final value of the loop counter. */
@@ -12321,7 +12348,7 @@ ix86_adjust_stack_and_probe (const HOST_WIDE_INT size)
/* SP = SP_0 + PROBE_INTERVAL. */
emit_insn (gen_rtx_SET (stack_pointer_rtx,
plus_constant (Pmode, stack_pointer_rtx,
- - (PROBE_INTERVAL + dope))));
+ - (get_probe_interval () + dope))));
/* LAST_ADDR = SP_0 + PROBE_INTERVAL + ROUNDED_SIZE. */
if (rounded_size <= (HOST_WIDE_INT_1 << 31))
@@ -12366,7 +12393,8 @@ ix86_adjust_stack_and_probe (const HOST_WIDE_INT size)
/* Adjust back to account for the additional first interval. */
last = emit_insn (gen_rtx_SET (stack_pointer_rtx,
plus_constant (Pmode, stack_pointer_rtx,
- PROBE_INTERVAL + dope)));
+ (get_probe_interval ()
+ + dope))));
release_scratch_register_on_entry (&sr);
}
@@ -12383,7 +12411,7 @@ ix86_adjust_stack_and_probe (const HOST_WIDE_INT size)
XVECEXP (expr, 0, 1)
= gen_rtx_SET (stack_pointer_rtx,
plus_constant (Pmode, stack_pointer_rtx,
- PROBE_INTERVAL + dope + size));
+ get_probe_interval () + dope + size));
add_reg_note (last, REG_FRAME_RELATED_EXPR, expr);
RTX_FRAME_RELATED_P (last) = 1;
@@ -12410,7 +12438,7 @@ output_adjust_stack_and_probe (rtx reg)
/* SP = SP + PROBE_INTERVAL. */
xops[0] = stack_pointer_rtx;
- xops[1] = GEN_INT (PROBE_INTERVAL);
+ xops[1] = GEN_INT (get_probe_interval ());
output_asm_insn ("sub%z0\t{%1, %0|%0, %1}", xops);
/* Probe at SP. */
@@ -12440,14 +12468,14 @@ ix86_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
that's the easy case. The run-time loop is made up of 6 insns in the
generic case while the compile-time loop is made up of n insns for n #
of intervals. */
- if (size <= 6 * PROBE_INTERVAL)
+ if (size <= 6 * get_probe_interval ())
{
HOST_WIDE_INT i;
/* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
it exceeds SIZE. If only one probe is needed, this will not
generate any code. Then probe at FIRST + SIZE. */
- for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
+ for (i = get_probe_interval (); i < size; i += get_probe_interval ())
emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
-(first + i)));
@@ -12470,7 +12498,7 @@ ix86_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
/* Step 1: round SIZE to the previous multiple of the interval. */
- rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
+ rounded_size = ROUND_DOWN (size, get_probe_interval ());
/* Step 2: compute initial and final value of the loop counter. */
@@ -12531,7 +12559,7 @@ output_probe_stack_range (rtx reg, rtx end)
/* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
xops[0] = reg;
- xops[1] = GEN_INT (PROBE_INTERVAL);
+ xops[1] = GEN_INT (get_probe_interval ());
output_asm_insn ("sub%z0\t{%1, %0|%0, %1}", xops);
/* Probe at TEST_ADDR. */
@@ -12803,23 +12831,19 @@ ix86_emit_outlined_ms2sysv_save (const struct ix86_frame &frame)
rtx sym, addr;
rtx rax = gen_rtx_REG (word_mode, AX_REG);
const struct xlogue_layout &xlogue = xlogue_layout::get_instance ();
- HOST_WIDE_INT allocate = frame.stack_pointer_offset - m->fs.sp_offset;
/* AL should only be live with sysv_abi. */
gcc_assert (!ix86_eax_live_at_start_p ());
+ gcc_assert (m->fs.sp_offset >= frame.sse_reg_save_offset);
/* Setup RAX as the stub's base pointer. We use stack_realign_offset rather
we've actually realigned the stack or not. */
align = GET_MODE_ALIGNMENT (V4SFmode);
addr = choose_baseaddr (frame.stack_realign_offset
- + xlogue.get_stub_ptr_offset (), &align);
+ + xlogue.get_stub_ptr_offset (), &align, AX_REG);
gcc_assert (align >= GET_MODE_ALIGNMENT (V4SFmode));
- emit_insn (gen_rtx_SET (rax, addr));
- /* Allocate stack if not already done. */
- if (allocate > 0)
- pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
- GEN_INT (-allocate), -1, false);
+ emit_insn (gen_rtx_SET (rax, addr));
/* Get the stub symbol. */
sym = xlogue.get_stub_rtx (frame_pointer_needed ? XLOGUE_STUB_SAVE_HFP
@@ -12851,6 +12875,7 @@ ix86_expand_prologue (void)
HOST_WIDE_INT allocate;
bool int_registers_saved;
bool sse_registers_saved;
+ bool save_stub_call_needed;
rtx static_chain = NULL_RTX;
if (ix86_function_naked (current_function_decl))
@@ -13026,6 +13051,8 @@ ix86_expand_prologue (void)
int_registers_saved = (frame.nregs == 0);
sse_registers_saved = (frame.nsseregs == 0);
+ save_stub_call_needed = (m->call_ms2sysv);
+ gcc_assert (sse_registers_saved || !save_stub_call_needed);
if (frame_pointer_needed && !m->fs.fp_valid)
{
@@ -13120,10 +13147,28 @@ ix86_expand_prologue (void)
target. */
if (TARGET_SEH)
m->fs.sp_valid = false;
- }
- if (m->call_ms2sysv)
- ix86_emit_outlined_ms2sysv_save (frame);
+ /* If SP offset is non-immediate after allocation of the stack frame,
+ then emit SSE saves or stub call prior to allocating the rest of the
+ stack frame. This is less efficient for the out-of-line stub because
+ we can't combine allocations across the call barrier, but it's better
+ than using a scratch register. */
+ else if (!x86_64_immediate_operand (GEN_INT (frame.stack_pointer_offset
+ - m->fs.sp_realigned_offset),
+ Pmode))
+ {
+ if (!sse_registers_saved)
+ {
+ ix86_emit_save_sse_regs_using_mov (frame.sse_reg_save_offset);
+ sse_registers_saved = true;
+ }
+ else if (save_stub_call_needed)
+ {
+ ix86_emit_outlined_ms2sysv_save (frame);
+ save_stub_call_needed = false;
+ }
+ }
+ }
allocate = frame.stack_pointer_offset - m->fs.sp_offset;
@@ -13192,7 +13237,7 @@ ix86_expand_prologue (void)
else if (STACK_CHECK_MOVING_SP)
{
if (!(crtl->is_leaf && !cfun->calls_alloca
- && allocate <= PROBE_INTERVAL))
+ && allocate <= get_probe_interval ()))
{
ix86_adjust_stack_and_probe (allocate);
allocate = 0;
@@ -13209,7 +13254,7 @@ ix86_expand_prologue (void)
{
if (crtl->is_leaf && !cfun->calls_alloca)
{
- if (size > PROBE_INTERVAL)
+ if (size > get_probe_interval ())
ix86_emit_probe_stack_range (0, size);
}
else
@@ -13220,7 +13265,7 @@ ix86_expand_prologue (void)
{
if (crtl->is_leaf && !cfun->calls_alloca)
{
- if (size > PROBE_INTERVAL
+ if (size > get_probe_interval ()
&& size > get_stack_check_protect ())
ix86_emit_probe_stack_range (get_stack_check_protect (),
size - get_stack_check_protect ());
@@ -13351,6 +13396,8 @@ ix86_expand_prologue (void)
ix86_emit_save_regs_using_mov (frame.reg_save_offset);
if (!sse_registers_saved)
ix86_emit_save_sse_regs_using_mov (frame.sse_reg_save_offset);
+ else if (save_stub_call_needed)
+ ix86_emit_outlined_ms2sysv_save (frame);
/* For the mcount profiling on 32 bit PIC mode we need to emit SET_GOT
in PROLOGUE. */
@@ -13591,8 +13638,9 @@ ix86_emit_outlined_ms2sysv_restore (const struct ix86_frame &frame,
/* Setup RSI as the stub's base pointer. */
align = GET_MODE_ALIGNMENT (V4SFmode);
- tmp = choose_baseaddr (rsi_offset, &align);
+ tmp = choose_baseaddr (rsi_offset, &align, SI_REG);
gcc_assert (align >= GET_MODE_ALIGNMENT (V4SFmode));
+
emit_insn (gen_rtx_SET (rsi, tmp));
/* Get a symbol for the stub. */
@@ -14289,7 +14337,6 @@ ix86_split_stack_guard (void)
void
ix86_expand_split_stack_prologue (void)
{
- struct ix86_frame frame;
HOST_WIDE_INT allocate;
unsigned HOST_WIDE_INT args_size;
rtx_code_label *label;
@@ -14301,7 +14348,7 @@ ix86_expand_split_stack_prologue (void)
gcc_assert (flag_split_stack && reload_completed);
ix86_finalize_stack_frame_flags ();
- frame = cfun->machine->frame;
+ struct ix86_frame &frame = cfun->machine->frame;
allocate = frame.stack_pointer_offset - INCOMING_FRAME_SP_OFFSET;
/* This is the label we will branch to if we have enough stack
@@ -18598,16 +18645,17 @@ ix86_dirflag_mode_needed (rtx_insn *insn)
return X86_DIRFLAG_ANY;
}
-/* Check if a 256bit AVX register is referenced inside of EXP. */
+/* Check if a 256bit or 512 bit AVX register is referenced inside of EXP. */
static bool
-ix86_check_avx256_register (const_rtx exp)
+ix86_check_avx_upper_register (const_rtx exp)
{
if (SUBREG_P (exp))
exp = SUBREG_REG (exp);
return (REG_P (exp)
- && VALID_AVX256_REG_OR_OI_MODE (GET_MODE (exp)));
+ && (VALID_AVX256_REG_OR_OI_MODE (GET_MODE (exp))
+ || VALID_AVX512F_REG_OR_XI_MODE (GET_MODE (exp))));
}
/* Return needed mode for entity in optimize_mode_switching pass. */
@@ -18620,7 +18668,7 @@ ix86_avx_u128_mode_needed (rtx_insn *insn)
rtx link;
/* Needed mode is set to AVX_U128_CLEAN if there are
- no 256bit modes used in function arguments. */
+ no 256bit or 512bit modes used in function arguments. */
for (link = CALL_INSN_FUNCTION_USAGE (insn);
link;
link = XEXP (link, 1))
@@ -18629,7 +18677,7 @@ ix86_avx_u128_mode_needed (rtx_insn *insn)
{
rtx arg = XEXP (XEXP (link, 0), 0);
- if (ix86_check_avx256_register (arg))
+ if (ix86_check_avx_upper_register (arg))
return AVX_U128_DIRTY;
}
}
@@ -18637,13 +18685,13 @@ ix86_avx_u128_mode_needed (rtx_insn *insn)
return AVX_U128_CLEAN;
}
- /* Require DIRTY mode if a 256bit AVX register is referenced. Hardware
- changes state only when a 256bit register is written to, but we need
- to prevent the compiler from moving optimal insertion point above
- eventual read from 256bit register. */
+ /* Require DIRTY mode if a 256bit or 512bit AVX register is referenced.
+ Hardware changes state only when a 256bit register is written to,
+ but we need to prevent the compiler from moving optimal insertion
+ point above eventual read from 256bit or 512 bit register. */
subrtx_iterator::array_type array;
FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
- if (ix86_check_avx256_register (*iter))
+ if (ix86_check_avx_upper_register (*iter))
return AVX_U128_DIRTY;
return AVX_U128_ANY;
@@ -18725,12 +18773,12 @@ ix86_mode_needed (int entity, rtx_insn *insn)
return 0;
}
-/* Check if a 256bit AVX register is referenced in stores. */
+/* Check if a 256bit or 512bit AVX register is referenced in stores. */
static void
-ix86_check_avx256_stores (rtx dest, const_rtx, void *data)
+ix86_check_avx_upper_stores (rtx dest, const_rtx, void *data)
{
- if (ix86_check_avx256_register (dest))
+ if (ix86_check_avx_upper_register (dest))
{
bool *used = (bool *) data;
*used = true;
@@ -18749,18 +18797,18 @@ ix86_avx_u128_mode_after (int mode, rtx_insn *insn)
return AVX_U128_CLEAN;
/* We know that state is clean after CALL insn if there are no
- 256bit registers used in the function return register. */
+ 256bit or 512bit registers used in the function return register. */
if (CALL_P (insn))
{
- bool avx_reg256_found = false;
- note_stores (pat, ix86_check_avx256_stores, &avx_reg256_found);
+ bool avx_upper_reg_found = false;
+ note_stores (pat, ix86_check_avx_upper_stores, &avx_upper_reg_found);
- return avx_reg256_found ? AVX_U128_DIRTY : AVX_U128_CLEAN;
+ return avx_upper_reg_found ? AVX_U128_DIRTY : AVX_U128_CLEAN;
}
/* Otherwise, return current mode. Remember that if insn
- references AVX 256bit registers, the mode was already changed
- to DIRTY from MODE_NEEDED. */
+ references AVX 256bit or 512bit registers, the mode was already
+ changed to DIRTY from MODE_NEEDED. */
return mode;
}
@@ -18803,13 +18851,13 @@ ix86_avx_u128_mode_entry (void)
tree arg;
/* Entry mode is set to AVX_U128_DIRTY if there are
- 256bit modes used in function arguments. */
+ 256bit or 512bit modes used in function arguments. */
for (arg = DECL_ARGUMENTS (current_function_decl); arg;
arg = TREE_CHAIN (arg))
{
rtx incoming = DECL_INCOMING_RTL (arg);
- if (incoming && ix86_check_avx256_register (incoming))
+ if (incoming && ix86_check_avx_upper_register (incoming))
return AVX_U128_DIRTY;
}
@@ -18843,9 +18891,9 @@ ix86_avx_u128_mode_exit (void)
{
rtx reg = crtl->return_rtx;
- /* Exit mode is set to AVX_U128_DIRTY if there are
- 256bit modes used in the function return register. */
- if (reg && ix86_check_avx256_register (reg))
+ /* Exit mode is set to AVX_U128_DIRTY if there are 256bit
+ or 512 bit modes used in the function return register. */
+ if (reg && ix86_check_avx_upper_register (reg))
return AVX_U128_DIRTY;
return AVX_U128_CLEAN;
@@ -19736,7 +19784,8 @@ ix86_swap_binary_operands_p (enum rtx_code code, machine_mode mode,
rtx src2 = operands[2];
/* If the operation is not commutative, we can't do anything. */
- if (GET_RTX_CLASS (code) != RTX_COMM_ARITH)
+ if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
+ && GET_RTX_CLASS (code) != RTX_COMM_COMPARE)
return false;
/* Highest priority is that src1 should match dst. */
@@ -19967,7 +20016,7 @@ ix86_binary_operator_ok (enum rtx_code code, machine_mode mode,
/* If the destination is memory, we must have a matching source operand. */
if (MEM_P (dst) && !rtx_equal_p (dst, src1))
- return false;
+ return false;
/* Source 1 cannot be a constant. */
if (CONSTANT_P (src1))
@@ -30748,7 +30797,7 @@ ix86_init_mpx_builtins ()
continue;
ftype = (enum ix86_builtin_func_type) d->flag;
- decl = def_builtin (d->mask, d->name, ftype, d->code);
+ decl = def_builtin2 (d->mask, d->name, ftype, d->code);
/* With no leaf and nothrow flags for MPX builtins
abnormal edges may follow its call when setjmp
@@ -30781,7 +30830,7 @@ ix86_init_mpx_builtins ()
continue;
ftype = (enum ix86_builtin_func_type) d->flag;
- decl = def_builtin_const (d->mask, d->name, ftype, d->code);
+ decl = def_builtin_const2 (d->mask, d->name, ftype, d->code);
if (decl)
{
@@ -33408,6 +33457,7 @@ ix86_expand_args_builtin (const struct builtin_description *d,
case V1DI_FTYPE_V2SI_V2SI:
case V32QI_FTYPE_V16HI_V16HI:
case V16HI_FTYPE_V8SI_V8SI:
+ case V64QI_FTYPE_V64QI_V64QI:
case V32QI_FTYPE_V32QI_V32QI:
case V16HI_FTYPE_V32QI_V32QI:
case V16HI_FTYPE_V16HI_V16HI:
@@ -35136,13 +35186,15 @@ ix86_expand_builtin (tree exp, rtx target, rtx subtarget,
at all, -m64 is a whole TU option. */
if (((ix86_builtins_isa[fcode].isa
& ~(OPTION_MASK_ISA_AVX512VL | OPTION_MASK_ISA_MMX
- | OPTION_MASK_ISA_64BIT))
+ | OPTION_MASK_ISA_64BIT | OPTION_MASK_ISA_GFNI))
&& !(ix86_builtins_isa[fcode].isa
& ~(OPTION_MASK_ISA_AVX512VL | OPTION_MASK_ISA_MMX
- | OPTION_MASK_ISA_64BIT)
+ | OPTION_MASK_ISA_64BIT | OPTION_MASK_ISA_GFNI)
& ix86_isa_flags))
|| ((ix86_builtins_isa[fcode].isa & OPTION_MASK_ISA_AVX512VL)
&& !(ix86_isa_flags & OPTION_MASK_ISA_AVX512VL))
+ || ((ix86_builtins_isa[fcode].isa & OPTION_MASK_ISA_GFNI)
+ && !(ix86_isa_flags & OPTION_MASK_ISA_GFNI))
|| ((ix86_builtins_isa[fcode].isa & OPTION_MASK_ISA_MMX)
&& !(ix86_isa_flags & OPTION_MASK_ISA_MMX))
|| (ix86_builtins_isa[fcode].isa2
@@ -40429,7 +40481,8 @@ static void
x86_print_call_or_nop (FILE *file, const char *target)
{
if (flag_nop_mcount)
- fprintf (file, "1:\tnopl 0x00(%%eax,%%eax,1)\n"); /* 5 byte nop. */
+ /* 5 byte nop: nopl 0(%[re]ax,%[re]ax,1) */
+ fprintf (file, "1:" ASM_BYTE "0x0f, 0x1f, 0x44, 0x00, 0x00\n");
else
fprintf (file, "1:\tcall\t%s\n", target);
}
diff --git a/gcc/config/i386/i386.h b/gcc/config/i386/i386.h
index 4855105c4ac..8011621bc3a 100644
--- a/gcc/config/i386/i386.h
+++ b/gcc/config/i386/i386.h
@@ -85,6 +85,8 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
#define TARGET_AVX5124FMAPS_P(x) TARGET_ISA_AVX5124FMAPS_P(x)
#define TARGET_AVX5124VNNIW TARGET_ISA_AVX5124VNNIW
#define TARGET_AVX5124VNNIW_P(x) TARGET_ISA_AVX5124VNNIW_P(x)
+#define TARGET_AVX512VBMI2 TARGET_ISA_AVX512VBMI2
+#define TARGET_AVX512VBMI2_P(x) TARGET_ISA_AVX512VBMI2_P(x)
#define TARGET_AVX512VPOPCNTDQ TARGET_ISA_AVX512VPOPCNTDQ
#define TARGET_AVX512VPOPCNTDQ_P(x) TARGET_ISA_AVX512VPOPCNTDQ_P(x)
#define TARGET_FMA TARGET_ISA_FMA
@@ -517,6 +519,8 @@ extern unsigned char ix86_tune_features[X86_TUNE_LAST];
ix86_tune_features[X86_TUNE_AVOID_FALSE_DEP_FOR_BMI]
#define TARGET_ONE_IF_CONV_INSN \
ix86_tune_features[X86_TUNE_ONE_IF_CONV_INSN]
+#define TARGET_EMIT_VZEROUPPER \
+ ix86_tune_features[X86_TUNE_EMIT_VZEROUPPER]
/* Feature tests against the various architecture variations. */
enum ix86_arch_indices {
@@ -1097,6 +1101,9 @@ extern const char *host_detect_local_cpu (int argc, const char **argv);
|| (MODE) == V16SImode || (MODE) == V16SFmode || (MODE) == V32HImode \
|| (MODE) == V4TImode)
+#define VALID_AVX512F_REG_OR_XI_MODE(MODE) \
+ (VALID_AVX512F_REG_MODE (MODE) || (MODE) == XImode)
+
#define VALID_AVX512VL_128_REG_MODE(MODE) \
((MODE) == V2DImode || (MODE) == V2DFmode || (MODE) == V16QImode \
|| (MODE) == V4SImode || (MODE) == V4SFmode || (MODE) == V8HImode \
diff --git a/gcc/config/i386/i386.md b/gcc/config/i386/i386.md
index d48decbb7d9..90e622cbaf5 100644
--- a/gcc/config/i386/i386.md
+++ b/gcc/config/i386/i386.md
@@ -1275,6 +1275,26 @@
(compare:CC (match_operand:SWI48 0 "nonimmediate_operand")
(match_operand:SWI48 1 "<general_operand>")))])
+(define_mode_iterator SWI1248_AVX512BWDQ2_64
+ [(QI "TARGET_AVX512DQ") (HI "TARGET_AVX512DQ")
+ (SI "TARGET_AVX512BW") (DI "TARGET_AVX512BW && TARGET_64BIT")])
+
+(define_insn "*cmp<mode>_ccz_1"
+ [(set (reg FLAGS_REG)
+ (compare (match_operand:SWI1248_AVX512BWDQ2_64 0
+ "nonimmediate_operand" "<r>,?m<r>,$k")
+ (match_operand:SWI1248_AVX512BWDQ2_64 1 "const0_operand")))]
+ "ix86_match_ccmode (insn, CCZmode)"
+ "@
+ test{<imodesuffix>}\t%0, %0
+ cmp{<imodesuffix>}\t{%1, %0|%0, %1}
+ ktest<mskmodesuffix>\t%0, %0"
+ [(set_attr "type" "test,icmp,msklog")
+ (set_attr "length_immediate" "0,1,*")
+ (set_attr "modrm_class" "op0,unknown,*")
+ (set_attr "prefix" "*,*,vex")
+ (set_attr "mode" "<MODE>")])
+
(define_insn "*cmp<mode>_ccno_1"
[(set (reg FLAGS_REG)
(compare (match_operand:SWI 0 "nonimmediate_operand" "<r>,?m<r>")
@@ -3864,10 +3884,10 @@
(define_insn "*zero_extendsidi2"
[(set (match_operand:DI 0 "nonimmediate_operand"
- "=r,?r,?o,r ,o,?*Ym,?!*y,?r ,?*Yi,*x,*x,*v,*r")
+ "=r,?r,?o,r ,o,?*Ym,?!*y,$r,$Yi,$x,*x,*v,*r")
(zero_extend:DI
(match_operand:SI 1 "x86_64_zext_operand"
- "0 ,rm,r ,rmWz,0,r ,m ,*Yj,r ,m ,*x,*v,*k")))]
+ "0 ,rm,r ,rmWz,0,r ,m ,Yj,r ,m ,*x,*v,*k")))]
""
{
switch (get_attr_type (insn))
@@ -3983,15 +4003,6 @@
(set (match_dup 4) (const_int 0))]
"split_double_mode (DImode, &operands[0], 1, &operands[3], &operands[4]);")
-(define_peephole2
- [(set (match_operand:DI 0 "general_reg_operand")
- (zero_extend:DI (match_operand:SI 1 "nonimmediate_gr_operand")))
- (set (match_operand:DI 2 "sse_reg_operand") (match_dup 0))]
- "TARGET_64BIT && TARGET_SSE2 && TARGET_INTER_UNIT_MOVES_TO_VEC
- && peep2_reg_dead_p (2, operands[0])"
- [(set (match_dup 2)
- (zero_extend:DI (match_dup 1)))])
-
(define_mode_attr kmov_isa
[(QI "avx512dq") (HI "avx512f") (SI "avx512bw") (DI "avx512bw")])
diff --git a/gcc/config/i386/i386.opt b/gcc/config/i386/i386.opt
index 7c9dd471686..0fb46989ef5 100644
--- a/gcc/config/i386/i386.opt
+++ b/gcc/config/i386/i386.opt
@@ -717,6 +717,10 @@ mavx512vpopcntdq
Target Report Mask(ISA_AVX512VPOPCNTDQ) Var(ix86_isa_flags2) Save
Support MMX, SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2, AVX, AVX2, AVX512F and AVX512VPOPCNTDQ built-in functions and code generation.
+mavx512vbmi2
+Target Report Mask(ISA_AVX512VBMI2) Var(ix86_isa_flags2) Save
+Support MMX, SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2, AVX, AVX2, AVX512F and AVX512VBMI2 built-in functions and code generation.
+
mfma
Target Report Mask(ISA_FMA) Var(ix86_isa_flags) Save
Support MMX, SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2, AVX and FMA built-in functions and code generation.
@@ -754,7 +758,7 @@ Target Report Mask(ISA_RDPID) Var(ix86_isa_flags2) Save
Support RDPID built-in functions and code generation.
mgfni
-Target Report Mask(ISA_GFNI) Var(ix86_isa_flags2) Save
+Target Report Mask(ISA_GFNI) Var(ix86_isa_flags) Save
Support GFNI built-in functions and code generation.
mbmi
@@ -903,7 +907,7 @@ Target Report Mask(ISA_RTM) Var(ix86_isa_flags) Save
Support RTM built-in functions and code generation.
mmpx
-Target Report Mask(ISA_MPX) Var(ix86_isa_flags) Save
+Target Report Mask(ISA_MPX) Var(ix86_isa_flags2) Save
Support MPX code generation.
mmwaitx
@@ -977,3 +981,7 @@ mcet-switch
Target Report Undocumented Var(flag_cet_switch) Init(0)
Turn on CET instrumentation for switch statements, which use jump table and
indirect jump.
+
+mforce-indirect-call
+Target Report Var(flag_force_indirect_call) Init(0)
+Make all function calls indirect.
diff --git a/gcc/config/i386/predicates.md b/gcc/config/i386/predicates.md
index c3f442eb8ac..c6e6e980959 100644
--- a/gcc/config/i386/predicates.md
+++ b/gcc/config/i386/predicates.md
@@ -600,7 +600,8 @@
(define_predicate "constant_call_address_operand"
(match_code "symbol_ref")
{
- if (ix86_cmodel == CM_LARGE || ix86_cmodel == CM_LARGE_PIC)
+ if (ix86_cmodel == CM_LARGE || ix86_cmodel == CM_LARGE_PIC
+ || flag_force_indirect_call)
return false;
if (TARGET_DLLIMPORT_DECL_ATTRIBUTES && SYMBOL_REF_DLLIMPORT_P (op))
return false;
diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md
index 4dfb2f8d3b3..32d241a27b1 100644
--- a/gcc/config/i386/sse.md
+++ b/gcc/config/i386/sse.md
@@ -158,6 +158,8 @@
;; For GFNI support
UNSPEC_GF2P8AFFINEINV
+ UNSPEC_GF2P8AFFINE
+ UNSPEC_GF2P8MUL
])
(define_c_enum "unspecv" [
@@ -1602,7 +1604,8 @@
(plusminus:VF
(match_operand:VF 1 "<round_nimm_predicate>" "<comm>0,v")
(match_operand:VF 2 "<round_nimm_predicate>" "xBm,<round_constraint>")))]
- "TARGET_SSE && ix86_binary_operator_ok (<CODE>, <MODE>mode, operands) && <mask_mode512bit_condition> && <round_mode512bit_condition>"
+ "TARGET_SSE && ix86_binary_operator_ok (<CODE>, <MODE>mode, operands)
+ && <mask_mode512bit_condition> && <round_mode512bit_condition>"
"@
<plusminus_mnemonic><ssemodesuffix>\t{%2, %0|%0, %2}
v<plusminus_mnemonic><ssemodesuffix>\t{<round_mask_op3>%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2<round_mask_op3>}"
@@ -1641,7 +1644,9 @@
(mult:VF
(match_operand:VF 1 "<round_nimm_predicate>" "%0,v")
(match_operand:VF 2 "<round_nimm_predicate>" "xBm,<round_constraint>")))]
- "TARGET_SSE && ix86_binary_operator_ok (MULT, <MODE>mode, operands) && <mask_mode512bit_condition> && <round_mode512bit_condition>"
+ "TARGET_SSE
+ && !(MEM_P (operands[1]) && MEM_P (operands[2]))
+ && <mask_mode512bit_condition> && <round_mode512bit_condition>"
"@
mul<ssemodesuffix>\t{%2, %0|%0, %2}
vmul<ssemodesuffix>\t{<round_mask_op3>%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2<round_mask_op3>}"
@@ -1953,7 +1958,8 @@
(smaxmin:VF
(match_operand:VF 1 "<round_saeonly_nimm_predicate>" "%0,v")
(match_operand:VF 2 "<round_saeonly_nimm_predicate>" "xBm,<round_saeonly_constraint>")))]
- "TARGET_SSE && ix86_binary_operator_ok (<CODE>, <MODE>mode, operands)
+ "TARGET_SSE
+ && !(MEM_P (operands[1]) && MEM_P (operands[2]))
&& <mask_mode512bit_condition> && <round_saeonly_mode512bit_condition>"
"@
<maxmin_float><ssemodesuffix>\t{%2, %0|%0, %2}
@@ -3197,7 +3203,7 @@
(match_operand:VF_128_256 1 "vector_operand" "%0,x,v,v")
(match_operand:VF_128_256 2 "vector_operand" "xBm,xm,vm,vm")))]
"TARGET_SSE && <mask_avx512vl_condition>
- && ix86_binary_operator_ok (<CODE>, <MODE>mode, operands)"
+ && !(MEM_P (operands[1]) && MEM_P (operands[2]))"
{
static char buf[128];
const char *ops;
@@ -3261,7 +3267,7 @@
(any_logic:VF_512
(match_operand:VF_512 1 "nonimmediate_operand" "%v")
(match_operand:VF_512 2 "nonimmediate_operand" "vm")))]
- "TARGET_AVX512F && ix86_binary_operator_ok (<CODE>, <MODE>mode, operands)"
+ "TARGET_AVX512F && !(MEM_P (operands[1]) && MEM_P (operands[2]))"
{
static char buf[128];
const char *ops;
@@ -3515,8 +3521,7 @@
(any_logic:TF
(match_operand:TF 1 "vector_operand" "%0,x,v,v")
(match_operand:TF 2 "vector_operand" "xBm,xm,vm,v")))]
- "TARGET_SSE
- && ix86_binary_operator_ok (<CODE>, TFmode, operands)"
+ "TARGET_SSE && !(MEM_P (operands[1]) && MEM_P (operands[2]))"
{
static char buf[128];
const char *ops;
@@ -9988,8 +9993,7 @@
(plusminus:VI_AVX2
(match_operand:VI_AVX2 1 "vector_operand" "<comm>0,v")
(match_operand:VI_AVX2 2 "vector_operand" "xBm,vm")))]
- "TARGET_SSE2
- && ix86_binary_operator_ok (<CODE>, <MODE>mode, operands)"
+ "TARGET_SSE2 && ix86_binary_operator_ok (<CODE>, <MODE>mode, operands)"
"@
p<plusminus_mnemonic><ssemodesuffix>\t{%2, %0|%0, %2}
vp<plusminus_mnemonic><ssemodesuffix>\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
@@ -10007,8 +10011,7 @@
(match_operand:VI48_AVX512VL 2 "nonimmediate_operand" "vm"))
(match_operand:VI48_AVX512VL 3 "vector_move_operand" "0C")
(match_operand:<avx512fmaskmode> 4 "register_operand" "Yk")))]
- "TARGET_AVX512F
- && ix86_binary_operator_ok (<CODE>, <MODE>mode, operands)"
+ "TARGET_AVX512F && ix86_binary_operator_ok (<CODE>, <MODE>mode, operands)"
"vp<plusminus_mnemonic><ssemodesuffix>\t{%2, %1, %0%{%4%}%N3|%0%{%4%}%N3, %1, %2}"
[(set_attr "type" "sseiadd")
(set_attr "prefix" "evex")
@@ -10073,8 +10076,7 @@
[(set (match_operand:VI2_AVX2 0 "register_operand" "=x,v")
(mult:VI2_AVX2 (match_operand:VI2_AVX2 1 "vector_operand" "%0,v")
(match_operand:VI2_AVX2 2 "vector_operand" "xBm,vm")))]
- "TARGET_SSE2
- && ix86_binary_operator_ok (MULT, <MODE>mode, operands)
+ "TARGET_SSE2 && !(MEM_P (operands[1]) && MEM_P (operands[2]))
&& <mask_mode512bit_condition> && <mask_avx512bw_condition>"
"@
pmullw\t{%2, %0|%0, %2}
@@ -10109,8 +10111,7 @@
(any_extend:<ssedoublemode>
(match_operand:VI2_AVX2 2 "vector_operand" "xBm,vm")))
(const_int 16))))]
- "TARGET_SSE2
- && ix86_binary_operator_ok (MULT, <MODE>mode, operands)
+ "TARGET_SSE2 && !(MEM_P (operands[1]) && MEM_P (operands[2]))
&& <mask_mode512bit_condition> && <mask_avx512bw_condition>"
"@
pmulh<u>w\t{%2, %0|%0, %2}
@@ -10158,7 +10159,7 @@
(const_int 4) (const_int 6)
(const_int 8) (const_int 10)
(const_int 12) (const_int 14)])))))]
- "TARGET_AVX512F && ix86_binary_operator_ok (MULT, V16SImode, operands)"
+ "TARGET_AVX512F && !(MEM_P (operands[1]) && MEM_P (operands[2]))"
"vpmuludq\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "type" "sseimul")
(set_attr "prefix_extra" "1")
@@ -10195,7 +10196,7 @@
(parallel [(const_int 0) (const_int 2)
(const_int 4) (const_int 6)])))))]
"TARGET_AVX2 && <mask_avx512vl_condition>
- && ix86_binary_operator_ok (MULT, V8SImode, operands)"
+ && !(MEM_P (operands[1]) && MEM_P (operands[2]))"
"vpmuludq\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "type" "sseimul")
(set_attr "prefix" "maybe_evex")
@@ -10227,7 +10228,7 @@
(match_operand:V4SI 2 "vector_operand" "xBm,vm")
(parallel [(const_int 0) (const_int 2)])))))]
"TARGET_SSE2 && <mask_avx512vl_condition>
- && ix86_binary_operator_ok (MULT, V4SImode, operands)"
+ && !(MEM_P (operands[1]) && MEM_P (operands[2]))"
"@
pmuludq\t{%2, %0|%0, %2}
vpmuludq\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
@@ -10274,7 +10275,7 @@
(const_int 4) (const_int 6)
(const_int 8) (const_int 10)
(const_int 12) (const_int 14)])))))]
- "TARGET_AVX512F && ix86_binary_operator_ok (MULT, V16SImode, operands)"
+ "TARGET_AVX512F && !(MEM_P (operands[1]) && MEM_P (operands[2]))"
"vpmuldq\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "type" "sseimul")
(set_attr "prefix_extra" "1")
@@ -10310,8 +10311,7 @@
(match_operand:V8SI 2 "nonimmediate_operand" "vm")
(parallel [(const_int 0) (const_int 2)
(const_int 4) (const_int 6)])))))]
- "TARGET_AVX2
- && ix86_binary_operator_ok (MULT, V8SImode, operands)"
+ "TARGET_AVX2 && !(MEM_P (operands[1]) && MEM_P (operands[2]))"
"vpmuldq\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "type" "sseimul")
(set_attr "prefix_extra" "1")
@@ -10344,7 +10344,7 @@
(match_operand:V4SI 2 "vector_operand" "YrBm,*xBm,vm")
(parallel [(const_int 0) (const_int 2)])))))]
"TARGET_SSE4_1 && <mask_avx512vl_condition>
- && ix86_binary_operator_ok (MULT, V4SImode, operands)"
+ && !(MEM_P (operands[1]) && MEM_P (operands[2]))"
"@
pmuldq\t{%2, %0|%0, %2}
pmuldq\t{%2, %0|%0, %2}
@@ -10433,7 +10433,7 @@
(const_int 5) (const_int 7)
(const_int 9) (const_int 11)
(const_int 13) (const_int 15)]))))))]
- "TARGET_AVX2 && ix86_binary_operator_ok (MULT, V16HImode, operands)"
+ "TARGET_AVX2 && !(MEM_P (operands[1]) && MEM_P (operands[2]))"
"vpmaddwd\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sseiadd")
(set_attr "isa" "*,avx512bw")
@@ -10489,7 +10489,7 @@
(vec_select:V4HI (match_dup 2)
(parallel [(const_int 1) (const_int 3)
(const_int 5) (const_int 7)]))))))]
- "TARGET_SSE2 && ix86_binary_operator_ok (MULT, V8HImode, operands)"
+ "TARGET_SSE2 && !(MEM_P (operands[1]) && MEM_P (operands[2]))"
"@
pmaddwd\t{%2, %0|%0, %2}
vpmaddwd\t{%2, %1, %0|%0, %1, %2}
@@ -10539,7 +10539,8 @@
(mult:VI4_AVX512F
(match_operand:VI4_AVX512F 1 "vector_operand" "%0,0,v")
(match_operand:VI4_AVX512F 2 "vector_operand" "YrBm,*xBm,vm")))]
- "TARGET_SSE4_1 && ix86_binary_operator_ok (MULT, <MODE>mode, operands) && <mask_mode512bit_condition>"
+ "TARGET_SSE4_1 && !(MEM_P (operands[1]) && MEM_P (operands[2]))
+ && <mask_mode512bit_condition>"
"@
pmulld\t{%2, %0|%0, %2}
pmulld\t{%2, %0|%0, %2}
@@ -10857,7 +10858,7 @@
(maxmin:VI124_256
(match_operand:VI124_256 1 "nonimmediate_operand" "%v")
(match_operand:VI124_256 2 "nonimmediate_operand" "vm")))]
- "TARGET_AVX2 && ix86_binary_operator_ok (<CODE>, <MODE>mode, operands)"
+ "TARGET_AVX2 && !(MEM_P (operands[1]) && MEM_P (operands[2]))"
"vp<maxmin_int><ssemodesuffix>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sseiadd")
(set_attr "prefix_extra" "1")
@@ -10880,7 +10881,7 @@
(maxmin:VI48_AVX512VL
(match_operand:VI48_AVX512VL 1 "nonimmediate_operand" "%v")
(match_operand:VI48_AVX512VL 2 "nonimmediate_operand" "vm")))]
- "TARGET_AVX512F && ix86_binary_operator_ok (<CODE>, <MODE>mode, operands)"
+ "TARGET_AVX512F && !(MEM_P (operands[1]) && MEM_P (operands[2]))"
"vp<maxmin_int><ssemodesuffix>\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "type" "sseiadd")
(set_attr "prefix_extra" "1")
@@ -10986,7 +10987,7 @@
(match_operand:VI14_128 2 "vector_operand" "YrBm,*xBm,vm")))]
"TARGET_SSE4_1
&& <mask_mode512bit_condition>
- && ix86_binary_operator_ok (<CODE>, <MODE>mode, operands)"
+ && !(MEM_P (operands[1]) && MEM_P (operands[2]))"
"@
p<maxmin_int><ssemodesuffix>\t{%2, %0|%0, %2}
p<maxmin_int><ssemodesuffix>\t{%2, %0|%0, %2}
@@ -11002,7 +11003,7 @@
(smaxmin:V8HI
(match_operand:V8HI 1 "vector_operand" "%0,x,v")
(match_operand:V8HI 2 "vector_operand" "xBm,xm,vm")))]
- "TARGET_SSE2 && ix86_binary_operator_ok (<CODE>, V8HImode, operands)"
+ "TARGET_SSE2 && !(MEM_P (operands[1]) && MEM_P (operands[2]))"
"@
p<maxmin_int>w\t{%2, %0|%0, %2}
vp<maxmin_int>w\t{%2, %1, %0|%0, %1, %2}
@@ -11071,7 +11072,7 @@
(match_operand:VI24_128 2 "vector_operand" "YrBm,*xBm,vm")))]
"TARGET_SSE4_1
&& <mask_mode512bit_condition>
- && ix86_binary_operator_ok (<CODE>, <MODE>mode, operands)"
+ && !(MEM_P (operands[1]) && MEM_P (operands[2]))"
"@
p<maxmin_int><ssemodesuffix>\t{%2, %0|%0, %2}
p<maxmin_int><ssemodesuffix>\t{%2, %0|%0, %2}
@@ -11087,7 +11088,7 @@
(umaxmin:V16QI
(match_operand:V16QI 1 "vector_operand" "%0,x,v")
(match_operand:V16QI 2 "vector_operand" "xBm,xm,vm")))]
- "TARGET_SSE2 && ix86_binary_operator_ok (<CODE>, V16QImode, operands)"
+ "TARGET_SSE2 && !(MEM_P (operands[1]) && MEM_P (operands[2]))"
"@
p<maxmin_int>b\t{%2, %0|%0, %2}
vp<maxmin_int>b\t{%2, %1, %0|%0, %1, %2}
@@ -11118,7 +11119,7 @@
(eq:VI_256
(match_operand:VI_256 1 "nonimmediate_operand" "%x")
(match_operand:VI_256 2 "nonimmediate_operand" "xm")))]
- "TARGET_AVX2 && ix86_binary_operator_ok (EQ, <MODE>mode, operands)"
+ "TARGET_AVX2 && !(MEM_P (operands[1]) && MEM_P (operands[2]))"
"vpcmpeq<ssemodesuffix>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "ssecmp")
(set_attr "prefix_extra" "1")
@@ -11128,7 +11129,7 @@
(define_expand "<avx512>_eq<mode>3<mask_scalar_merge_name>"
[(set (match_operand:<avx512fmaskmode> 0 "register_operand")
(unspec:<avx512fmaskmode>
- [(match_operand:VI12_AVX512VL 1 "register_operand")
+ [(match_operand:VI12_AVX512VL 1 "nonimmediate_operand")
(match_operand:VI12_AVX512VL 2 "nonimmediate_operand")]
UNSPEC_MASKED_EQ))]
"TARGET_AVX512BW"
@@ -11137,7 +11138,7 @@
(define_expand "<avx512>_eq<mode>3<mask_scalar_merge_name>"
[(set (match_operand:<avx512fmaskmode> 0 "register_operand")
(unspec:<avx512fmaskmode>
- [(match_operand:VI48_AVX512VL 1 "register_operand")
+ [(match_operand:VI48_AVX512VL 1 "nonimmediate_operand")
(match_operand:VI48_AVX512VL 2 "nonimmediate_operand")]
UNSPEC_MASKED_EQ))]
"TARGET_AVX512F"
@@ -11146,10 +11147,10 @@
(define_insn "<avx512>_eq<mode>3<mask_scalar_merge_name>_1"
[(set (match_operand:<avx512fmaskmode> 0 "register_operand" "=Yk")
(unspec:<avx512fmaskmode>
- [(match_operand:VI12_AVX512VL 1 "register_operand" "%v")
+ [(match_operand:VI12_AVX512VL 1 "nonimmediate_operand" "%v")
(match_operand:VI12_AVX512VL 2 "nonimmediate_operand" "vm")]
UNSPEC_MASKED_EQ))]
- "TARGET_AVX512F && ix86_binary_operator_ok (EQ, <MODE>mode, operands)"
+ "TARGET_AVX512F && !(MEM_P (operands[1]) && MEM_P (operands[2]))"
"vpcmpeq<ssemodesuffix>\t{%2, %1, %0<mask_scalar_merge_operand3>|%0<mask_scalar_merge_operand3>, %1, %2}"
[(set_attr "type" "ssecmp")
(set_attr "prefix_extra" "1")
@@ -11159,10 +11160,10 @@
(define_insn "<avx512>_eq<mode>3<mask_scalar_merge_name>_1"
[(set (match_operand:<avx512fmaskmode> 0 "register_operand" "=Yk")
(unspec:<avx512fmaskmode>
- [(match_operand:VI48_AVX512VL 1 "register_operand" "%v")
+ [(match_operand:VI48_AVX512VL 1 "nonimmediate_operand" "%v")
(match_operand:VI48_AVX512VL 2 "nonimmediate_operand" "vm")]
UNSPEC_MASKED_EQ))]
- "TARGET_AVX512F && ix86_binary_operator_ok (EQ, <MODE>mode, operands)"
+ "TARGET_AVX512F && !(MEM_P (operands[1]) && MEM_P (operands[2]))"
"vpcmpeq<ssemodesuffix>\t{%2, %1, %0<mask_scalar_merge_operand3>|%0<mask_scalar_merge_operand3>, %1, %2}"
[(set_attr "type" "ssecmp")
(set_attr "prefix_extra" "1")
@@ -11174,7 +11175,7 @@
(eq:V2DI
(match_operand:V2DI 1 "vector_operand" "%0,0,x")
(match_operand:V2DI 2 "vector_operand" "YrBm,*xBm,xm")))]
- "TARGET_SSE4_1 && ix86_binary_operator_ok (EQ, V2DImode, operands)"
+ "TARGET_SSE4_1 && !(MEM_P (operands[1]) && MEM_P (operands[2]))"
"@
pcmpeqq\t{%2, %0|%0, %2}
pcmpeqq\t{%2, %0|%0, %2}
@@ -11191,7 +11192,7 @@
(match_operand:VI124_128 1 "vector_operand" "%0,x")
(match_operand:VI124_128 2 "vector_operand" "xBm,xm")))]
"TARGET_SSE2 && !TARGET_XOP
- && ix86_binary_operator_ok (EQ, <MODE>mode, operands)"
+ && !(MEM_P (operands[1]) && MEM_P (operands[2]))"
"@
pcmpeq<ssemodesuffix>\t{%2, %0|%0, %2}
vpcmpeq<ssemodesuffix>\t{%2, %1, %0|%0, %1, %2}"
@@ -11656,7 +11657,7 @@
(match_operand:VI48_AVX_AVX512F 1 "vector_operand" "%0,x,v")
(match_operand:VI48_AVX_AVX512F 2 "vector_operand" "xBm,xm,vm")))]
"TARGET_SSE && <mask_mode512bit_condition>
- && ix86_binary_operator_ok (<CODE>, <MODE>mode, operands)"
+ && !(MEM_P (operands[1]) && MEM_P (operands[2]))"
{
static char buf[64];
const char *ops;
@@ -11753,10 +11754,10 @@
(define_insn "*<code><mode>3"
[(set (match_operand:VI12_AVX_AVX512F 0 "register_operand" "=x,x,v")
- (any_logic: VI12_AVX_AVX512F
+ (any_logic:VI12_AVX_AVX512F
(match_operand:VI12_AVX_AVX512F 1 "vector_operand" "%0,x,v")
(match_operand:VI12_AVX_AVX512F 2 "vector_operand" "xBm,xm,vm")))]
- "TARGET_SSE && ix86_binary_operator_ok (<CODE>, <MODE>mode, operands)"
+ "TARGET_SSE && !(MEM_P (operands[1]) && MEM_P (operands[2]))"
{
static char buf[64];
const char *ops;
@@ -14067,7 +14068,7 @@
(match_operand:VI12_AVX2 <mask_expand_op3> "const1_operand"))
(const_int 1))))]
"TARGET_SSE2 && <mask_mode512bit_condition> && <mask_avx512bw_condition>
- && ix86_binary_operator_ok (PLUS, <MODE>mode, operands)"
+ && !(MEM_P (operands[1]) && MEM_P (operands[2]))"
"@
pavg<ssemodesuffix>\t{%2, %0|%0, %2}
vpavg<ssemodesuffix>\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
@@ -14741,7 +14742,7 @@
(match_operand:VI2_AVX2 3 "const1_operand"))
(const_int 1))))]
"TARGET_SSSE3 && <mask_mode512bit_condition> && <mask_avx512bw_condition>
- && ix86_binary_operator_ok (MULT, <MODE>mode, operands)"
+ && !(MEM_P (operands[1]) && MEM_P (operands[2]))"
"@
pmulhrsw\t{%2, %0|%0, %2}
vpmulhrsw\t{%2, %1, %0<mask_operand4>|%0<mask_operand4>, %1, %2}
@@ -14767,7 +14768,7 @@
(const_int 14))
(match_operand:V4HI 3 "const1_operand"))
(const_int 1))))]
- "TARGET_SSSE3 && ix86_binary_operator_ok (MULT, V4HImode, operands)"
+ "TARGET_SSSE3 && !(MEM_P (operands[1]) && MEM_P (operands[2]))"
"pmulhrsw\t{%2, %0|%0, %2}"
[(set_attr "type" "sseimul")
(set_attr "prefix_extra" "1")
@@ -19991,3 +19992,36 @@
(set_attr "prefix_extra" "1")
(set_attr "prefix" "orig,maybe_evex,evex")
(set_attr "mode" "<sseinsnmode>")])
+
+(define_insn "vgf2p8affineqb_<mode><mask_name>"
+ [(set (match_operand:VI1_AVX512F 0 "register_operand" "=x,x,v")
+ (unspec:VI1_AVX512F [(match_operand:VI1_AVX512F 1 "register_operand" "%0,x,v")
+ (match_operand:VI1_AVX512F 2 "nonimmediate_operand" "xBm,xm,vm")
+ (match_operand:QI 3 "const_0_to_255_operand" "n,n,n")]
+ UNSPEC_GF2P8AFFINE))]
+ "TARGET_GFNI"
+ "@
+ gf2p8affineqb\t{%3, %2, %0| %0, %2, %3}
+ vgf2p8affineqb\t{%3, %2, %1, %0<mask_operand4>| %0<mask_operand4>, %1, %2, %3}
+ vgf2p8affineqb\t{%3, %2, %1, %0<mask_operand4>| %0<mask_operand4>, %1, %2, %3}"
+ [(set_attr "isa" "noavx,avx,avx512bw")
+ (set_attr "prefix_data16" "1,*,*")
+ (set_attr "prefix_extra" "1")
+ (set_attr "prefix" "orig,maybe_evex,evex")
+ (set_attr "mode" "<sseinsnmode>")])
+
+(define_insn "vgf2p8mulb_<mode><mask_name>"
+ [(set (match_operand:VI1_AVX512F 0 "register_operand" "=x,x,v")
+ (unspec:VI1_AVX512F [(match_operand:VI1_AVX512F 1 "register_operand" "%0,x,v")
+ (match_operand:VI1_AVX512F 2 "nonimmediate_operand" "xBm,xm,vm")]
+ UNSPEC_GF2P8MUL))]
+ "TARGET_GFNI"
+ "@
+ gf2p8mulb\t{%2, %0| %0, %2}
+ vgf2p8mulb\t{%2, %1, %0<mask_operand3>| %0<mask_operand3>, %1, %2}
+ vgf2p8mulb\t{%2, %1, %0<mask_operand3>| %0<mask_operand3>, %1, %2}"
+ [(set_attr "isa" "noavx,avx,avx512bw")
+ (set_attr "prefix_data16" "1,*,*")
+ (set_attr "prefix_extra" "1")
+ (set_attr "prefix" "orig,maybe_evex,evex")
+ (set_attr "mode" "<sseinsnmode>")])
diff --git a/gcc/config/i386/x86-tune.def b/gcc/config/i386/x86-tune.def
index 99282c88341..19fd2b52b30 100644
--- a/gcc/config/i386/x86-tune.def
+++ b/gcc/config/i386/x86-tune.def
@@ -543,3 +543,7 @@ DEF_TUNE (X86_TUNE_QIMODE_MATH, "qimode_math", ~0U)
arithmetic to 32bit via PROMOTE_MODE macro. This code generation scheme
is usually used for RISC targets. */
DEF_TUNE (X86_TUNE_PROMOTE_QI_REGS, "promote_qi_regs", 0U)
+
+/* X86_TUNE_EMIT_VZEROUPPER: This enables vzeroupper instruction insertion
+ before a transfer of control flow out of the function. */
+DEF_TUNE (X86_TUNE_EMIT_VZEROUPPER, "emit_vzeroupper", ~m_KNL)
diff --git a/gcc/config/m68k/m68kelf.h b/gcc/config/m68k/m68kelf.h
index 159223f64c7..408eec89548 100644
--- a/gcc/config/m68k/m68kelf.h
+++ b/gcc/config/m68k/m68kelf.h
@@ -133,7 +133,7 @@ do { \
table. */
#undef ASM_OUTPUT_BEFORE_CASE_LABEL
#define ASM_OUTPUT_BEFORE_CASE_LABEL(FILE,PREFIX,NUM,TABLE) \
- fprintf ((FILE), "%s&%d\n", SWBEG_ASM_OP, XVECLEN (PATTERN (TABLE), 1));
+ fprintf ((FILE), "%s&%d\n", SWBEG_ASM_OP, XVECLEN (PATTERN (TABLE), 1))
/* end of stuff from m68kv4.h */
#undef ENDFILE_SPEC
diff --git a/gcc/config/mips/mips.h b/gcc/config/mips/mips.h
index f5c28bf70e3..bea2ce844ce 100644
--- a/gcc/config/mips/mips.h
+++ b/gcc/config/mips/mips.h
@@ -2566,12 +2566,15 @@ typedef struct mips_args {
/* This handles the magic '..CURRENT_FUNCTION' symbol, which means
'the start of the function that this code is output in'. */
-#define ASM_OUTPUT_LABELREF(FILE,NAME) \
- if (strcmp (NAME, "..CURRENT_FUNCTION") == 0) \
- asm_fprintf ((FILE), "%U%s", \
- XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0)); \
- else \
- asm_fprintf ((FILE), "%U%s", (NAME))
+#define ASM_OUTPUT_LABELREF(FILE,NAME) \
+ do { \
+ if (strcmp (NAME, "..CURRENT_FUNCTION") == 0) \
+ asm_fprintf ((FILE), "%U%s", \
+ XSTR (XEXP (DECL_RTL (current_function_decl), \
+ 0), 0)); \
+ else \
+ asm_fprintf ((FILE), "%U%s", (NAME)); \
+ } while (0)
/* Flag to mark a function decl symbol that requires a long call. */
#define SYMBOL_FLAG_LONG_CALL (SYMBOL_FLAG_MACH_DEP << 0)
@@ -2953,7 +2956,7 @@ do { \
if (JUMP_TABLES_IN_TEXT_SECTION) \
mips_set_text_contents_type (STREAM, "__jump_", NUM, FALSE); \
} \
- while (0);
+ while (0)
/* Reset text marking to code after an inline jump table. Like with
the beginning of a jump table use the label number to keep symbols
@@ -2963,7 +2966,7 @@ do { \
do \
if (JUMP_TABLES_IN_TEXT_SECTION) \
mips_set_text_contents_type (STREAM, "__jend_", NUM, TRUE); \
- while (0);
+ while (0)
/* This is how to output an assembler line
that says to advance the location counter
diff --git a/gcc/config/powerpcspe/aix43.h b/gcc/config/powerpcspe/aix43.h
index d61956d3b28..bd8a2c08da2 100644
--- a/gcc/config/powerpcspe/aix43.h
+++ b/gcc/config/powerpcspe/aix43.h
@@ -39,7 +39,7 @@ do { \
{ \
error ("-maix64 required: 64-bit computation with 32-bit addressing not yet supported"); \
} \
-} while (0);
+} while (0)
#undef ASM_SPEC
#define ASM_SPEC "-u %{maix64:-a64 %{!mcpu*:-mppc64}} %(asm_cpu)"
diff --git a/gcc/config/powerpcspe/aix51.h b/gcc/config/powerpcspe/aix51.h
index af7e38e1775..e9f88df23da 100644
--- a/gcc/config/powerpcspe/aix51.h
+++ b/gcc/config/powerpcspe/aix51.h
@@ -33,7 +33,7 @@ do { \
{ \
error ("-maix64 required: 64-bit computation with 32-bit addressing not yet supported"); \
} \
-} while (0);
+} while (0)
#undef ASM_SPEC
#define ASM_SPEC "-u %{maix64:-a64 %{!mcpu*:-mppc64}} %(asm_cpu)"
diff --git a/gcc/config/powerpcspe/aix52.h b/gcc/config/powerpcspe/aix52.h
index 35d2286e5b3..eade64c5f64 100644
--- a/gcc/config/powerpcspe/aix52.h
+++ b/gcc/config/powerpcspe/aix52.h
@@ -39,7 +39,7 @@ do { \
{ \
error ("-maix64 required: 64-bit computation with 32-bit addressing not yet supported"); \
} \
-} while (0);
+} while (0)
#undef ASM_SPEC
#define ASM_SPEC "-u %{maix64:-a64 %{!mcpu*:-mppc64}} %(asm_cpu)"
diff --git a/gcc/config/powerpcspe/aix53.h b/gcc/config/powerpcspe/aix53.h
index a1fbd834d47..b16488b3ba3 100644
--- a/gcc/config/powerpcspe/aix53.h
+++ b/gcc/config/powerpcspe/aix53.h
@@ -39,7 +39,7 @@ do { \
{ \
error ("-maix64 required: 64-bit computation with 32-bit addressing not yet supported"); \
} \
-} while (0);
+} while (0)
#undef ASM_SPEC
#define ASM_SPEC "-u %{maix64:-a64 %{!mcpu*:-mppc64}} %(asm_cpu)"
diff --git a/gcc/config/powerpcspe/aix61.h b/gcc/config/powerpcspe/aix61.h
index 0b615057087..25fcb62ea95 100644
--- a/gcc/config/powerpcspe/aix61.h
+++ b/gcc/config/powerpcspe/aix61.h
@@ -56,7 +56,7 @@ do { \
{ \
rs6000_current_cmodel = CMODEL_LARGE; \
} \
-} while (0);
+} while (0)
#undef ASM_SPEC
#define ASM_SPEC "-u %{maix64:-a64 %{!mcpu*:-mppc64}} %(asm_cpu)"
diff --git a/gcc/config/powerpcspe/aix71.h b/gcc/config/powerpcspe/aix71.h
index 4b986d6a818..3b8de897ede 100644
--- a/gcc/config/powerpcspe/aix71.h
+++ b/gcc/config/powerpcspe/aix71.h
@@ -56,7 +56,7 @@ do { \
{ \
rs6000_current_cmodel = CMODEL_LARGE; \
} \
-} while (0);
+} while (0)
#undef ASM_SPEC
#define ASM_SPEC "-u %{maix64:-a64 %{!mcpu*:-mppc64}} %(asm_cpu)"
diff --git a/gcc/config/powerpcspe/xcoff.h b/gcc/config/powerpcspe/xcoff.h
index 36f40f4b11e..1eeb75c3e6c 100644
--- a/gcc/config/powerpcspe/xcoff.h
+++ b/gcc/config/powerpcspe/xcoff.h
@@ -179,7 +179,7 @@
`assemble_name' uses this. */
#define ASM_OUTPUT_LABELREF(FILE,NAME) \
- asm_fprintf ((FILE), "%U%s", rs6000_xcoff_strip_dollar (NAME));
+ asm_fprintf ((FILE), "%U%s", rs6000_xcoff_strip_dollar (NAME))
/* This is how to output an internal label prefix. rs6000.c uses this
when generating traceback tables. */
diff --git a/gcc/config/riscv/linux.h b/gcc/config/riscv/linux.h
index ecf424d1a2b..6c7e3c4e819 100644
--- a/gcc/config/riscv/linux.h
+++ b/gcc/config/riscv/linux.h
@@ -24,6 +24,17 @@ along with GCC; see the file COPYING3. If not see
#define GLIBC_DYNAMIC_LINKER "/lib/ld-linux-riscv" XLEN_SPEC "-" ABI_SPEC ".so.1"
+#define MUSL_ABI_SUFFIX \
+ "%{mabi=ilp32:-sf}" \
+ "%{mabi=ilp32f:-sp}" \
+ "%{mabi=ilp32d:}" \
+ "%{mabi=lp64:-sf}" \
+ "%{mabi=lp64f:-sp}" \
+ "%{mabi=lp64d:}" \
+
+#undef MUSL_DYNAMIC_LINKER
+#define MUSL_DYNAMIC_LINKER "/lib/ld-musl-riscv" XLEN_SPEC MUSL_ABI_SUFFIX ".so.1"
+
/* Because RISC-V only has word-sized atomics, it requries libatomic where
others do not. So link libatomic by default, as needed. */
#undef LIB_SPEC
diff --git a/gcc/config/riscv/riscv-protos.h b/gcc/config/riscv/riscv-protos.h
index ae551fb3977..5f65b20e792 100644
--- a/gcc/config/riscv/riscv-protos.h
+++ b/gcc/config/riscv/riscv-protos.h
@@ -67,7 +67,8 @@ extern HOST_WIDE_INT riscv_initial_elimination_offset (int, int);
extern void riscv_expand_prologue (void);
extern void riscv_expand_epilogue (bool);
extern bool riscv_can_use_return_insn (void);
-extern rtx riscv_function_value (const_tree, const_tree, machine_mode);
+extern rtx riscv_function_value (const_tree, const_tree, enum machine_mode);
+extern bool riscv_expand_block_move (rtx, rtx, rtx);
/* Routines implemented in riscv-c.c. */
void riscv_cpu_cpp_builtins (cpp_reader *);
diff --git a/gcc/config/riscv/riscv.c b/gcc/config/riscv/riscv.c
index 8ce93528b99..37fbe9e27bf 100644
--- a/gcc/config/riscv/riscv.c
+++ b/gcc/config/riscv/riscv.c
@@ -53,6 +53,7 @@ along with GCC; see the file COPYING3. If not see
#include "df.h"
#include "diagnostic.h"
#include "builtins.h"
+#include "predict.h"
/* True if X is an UNSPEC wrapper around a SYMBOL_REF or LABEL_REF. */
#define UNSPEC_ADDRESS_P(X) \
@@ -219,7 +220,7 @@ struct riscv_cpu_info {
/* Global variables for machine-dependent things. */
/* Whether unaligned accesses execute very slowly. */
-static bool riscv_slow_unaligned_access_p;
+bool riscv_slow_unaligned_access_p;
/* Which tuning parameters to use. */
static const struct riscv_tune_info *tune_info;
@@ -2644,6 +2645,162 @@ riscv_legitimize_call_address (rtx addr)
return addr;
}
+/* Emit straight-line code to move LENGTH bytes from SRC to DEST.
+ Assume that the areas do not overlap. */
+
+static void
+riscv_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
+{
+ HOST_WIDE_INT offset, delta;
+ unsigned HOST_WIDE_INT bits;
+ int i;
+ enum machine_mode mode;
+ rtx *regs;
+
+ bits = MAX (BITS_PER_UNIT,
+ MIN (BITS_PER_WORD, MIN (MEM_ALIGN (src), MEM_ALIGN (dest))));
+
+ mode = mode_for_size (bits, MODE_INT, 0).require ();
+ delta = bits / BITS_PER_UNIT;
+
+ /* Allocate a buffer for the temporary registers. */
+ regs = XALLOCAVEC (rtx, length / delta);
+
+ /* Load as many BITS-sized chunks as possible. Use a normal load if
+ the source has enough alignment, otherwise use left/right pairs. */
+ for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
+ {
+ regs[i] = gen_reg_rtx (mode);
+ riscv_emit_move (regs[i], adjust_address (src, mode, offset));
+ }
+
+ /* Copy the chunks to the destination. */
+ for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
+ riscv_emit_move (adjust_address (dest, mode, offset), regs[i]);
+
+ /* Mop up any left-over bytes. */
+ if (offset < length)
+ {
+ src = adjust_address (src, BLKmode, offset);
+ dest = adjust_address (dest, BLKmode, offset);
+ move_by_pieces (dest, src, length - offset,
+ MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
+ }
+}
+
+/* Helper function for doing a loop-based block operation on memory
+ reference MEM. Each iteration of the loop will operate on LENGTH
+ bytes of MEM.
+
+ Create a new base register for use within the loop and point it to
+ the start of MEM. Create a new memory reference that uses this
+ register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
+
+static void
+riscv_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
+ rtx *loop_reg, rtx *loop_mem)
+{
+ *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
+
+ /* Although the new mem does not refer to a known location,
+ it does keep up to LENGTH bytes of alignment. */
+ *loop_mem = change_address (mem, BLKmode, *loop_reg);
+ set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
+}
+
+/* Move LENGTH bytes from SRC to DEST using a loop that moves BYTES_PER_ITER
+ bytes at a time. LENGTH must be at least BYTES_PER_ITER. Assume that
+ the memory regions do not overlap. */
+
+static void
+riscv_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length,
+ HOST_WIDE_INT bytes_per_iter)
+{
+ rtx label, src_reg, dest_reg, final_src, test;
+ HOST_WIDE_INT leftover;
+
+ leftover = length % bytes_per_iter;
+ length -= leftover;
+
+ /* Create registers and memory references for use within the loop. */
+ riscv_adjust_block_mem (src, bytes_per_iter, &src_reg, &src);
+ riscv_adjust_block_mem (dest, bytes_per_iter, &dest_reg, &dest);
+
+ /* Calculate the value that SRC_REG should have after the last iteration
+ of the loop. */
+ final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
+ 0, 0, OPTAB_WIDEN);
+
+ /* Emit the start of the loop. */
+ label = gen_label_rtx ();
+ emit_label (label);
+
+ /* Emit the loop body. */
+ riscv_block_move_straight (dest, src, bytes_per_iter);
+
+ /* Move on to the next block. */
+ riscv_emit_move (src_reg, plus_constant (Pmode, src_reg, bytes_per_iter));
+ riscv_emit_move (dest_reg, plus_constant (Pmode, dest_reg, bytes_per_iter));
+
+ /* Emit the loop condition. */
+ test = gen_rtx_NE (VOIDmode, src_reg, final_src);
+ if (Pmode == DImode)
+ emit_jump_insn (gen_cbranchdi4 (test, src_reg, final_src, label));
+ else
+ emit_jump_insn (gen_cbranchsi4 (test, src_reg, final_src, label));
+
+ /* Mop up any left-over bytes. */
+ if (leftover)
+ riscv_block_move_straight (dest, src, leftover);
+ else
+ emit_insn(gen_nop ());
+}
+
+/* Expand a movmemsi instruction, which copies LENGTH bytes from
+ memory reference SRC to memory reference DEST. */
+
+bool
+riscv_expand_block_move (rtx dest, rtx src, rtx length)
+{
+ if (CONST_INT_P (length))
+ {
+ HOST_WIDE_INT factor, align;
+
+ align = MIN (MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), BITS_PER_WORD);
+ factor = BITS_PER_WORD / align;
+
+ if (optimize_function_for_size_p (cfun)
+ && INTVAL (length) * factor * UNITS_PER_WORD > MOVE_RATIO (false))
+ return false;
+
+ if (INTVAL (length) <= RISCV_MAX_MOVE_BYTES_STRAIGHT / factor)
+ {
+ riscv_block_move_straight (dest, src, INTVAL (length));
+ return true;
+ }
+ else if (optimize && align >= BITS_PER_WORD)
+ {
+ unsigned min_iter_words
+ = RISCV_MAX_MOVE_BYTES_PER_LOOP_ITER / UNITS_PER_WORD;
+ unsigned iter_words = min_iter_words;
+ HOST_WIDE_INT bytes = INTVAL (length), words = bytes / UNITS_PER_WORD;
+
+ /* Lengthen the loop body if it shortens the tail. */
+ for (unsigned i = min_iter_words; i < min_iter_words * 2 - 1; i++)
+ {
+ unsigned cur_cost = iter_words + words % iter_words;
+ unsigned new_cost = i + words % i;
+ if (new_cost <= cur_cost)
+ iter_words = i;
+ }
+
+ riscv_block_move_loop (dest, src, bytes, iter_words * UNITS_PER_WORD);
+ return true;
+ }
+ }
+ return false;
+}
+
/* Print symbolic operand OP, which is part of a HIGH or LO_SUM
in context CONTEXT. HI_RELOC indicates a high-part reloc. */
@@ -2735,7 +2892,8 @@ riscv_memmodel_needs_release_fence (enum memmodel model)
'C' Print the integer branch condition for comparison OP.
'A' Print the atomic operation suffix for memory model OP.
'F' Print a FENCE if the memory model requires a release.
- 'z' Print x0 if OP is zero, otherwise print OP normally. */
+ 'z' Print x0 if OP is zero, otherwise print OP normally.
+ 'i' Print i if the operand is not a register. */
static void
riscv_print_operand (FILE *file, rtx op, int letter)
@@ -2770,6 +2928,11 @@ riscv_print_operand (FILE *file, rtx op, int letter)
fputs ("fence iorw,ow; ", file);
break;
+ case 'i':
+ if (code != REG)
+ fputs ("i", file);
+ break;
+
default:
switch (code)
{
@@ -3774,9 +3937,13 @@ riscv_option_override (void)
/* Use -mtune's setting for slow_unaligned_access, even when optimizing
for size. For architectures that trap and emulate unaligned accesses,
- the performance cost is too great, even for -Os. */
+ the performance cost is too great, even for -Os. Similarly, if
+ -m[no-]strict-align is left unspecified, heed -mtune's advice. */
riscv_slow_unaligned_access_p = (cpu->tune_info->slow_unaligned_access
|| TARGET_STRICT_ALIGN);
+ if ((target_flags_explicit & MASK_STRICT_ALIGN) == 0
+ && cpu->tune_info->slow_unaligned_access)
+ target_flags |= MASK_STRICT_ALIGN;
/* If the user hasn't specified a branch cost, use the processor's
default. */
diff --git a/gcc/config/riscv/riscv.h b/gcc/config/riscv/riscv.h
index e53555efe82..fe09e84e895 100644
--- a/gcc/config/riscv/riscv.h
+++ b/gcc/config/riscv/riscv.h
@@ -585,12 +585,15 @@ typedef struct {
/* This handles the magic '..CURRENT_FUNCTION' symbol, which means
'the start of the function that this code is output in'. */
-#define ASM_OUTPUT_LABELREF(FILE,NAME) \
- if (strcmp (NAME, "..CURRENT_FUNCTION") == 0) \
- asm_fprintf ((FILE), "%U%s", \
- XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0)); \
- else \
- asm_fprintf ((FILE), "%U%s", (NAME))
+#define ASM_OUTPUT_LABELREF(FILE,NAME) \
+ do { \
+ if (strcmp (NAME, "..CURRENT_FUNCTION") == 0) \
+ asm_fprintf ((FILE), "%U%s", \
+ XSTR (XEXP (DECL_RTL (current_function_decl), \
+ 0), 0)); \
+ else \
+ asm_fprintf ((FILE), "%U%s", (NAME)); \
+ } while (0)
#define JUMP_TABLES_IN_TEXT_SECTION 0
#define CASE_VECTOR_MODE SImode
@@ -615,7 +618,12 @@ typedef struct {
#define MOVE_MAX UNITS_PER_WORD
#define MAX_MOVE_MAX 8
-#define SLOW_BYTE_ACCESS 0
+/* The SPARC port says:
+ Nonzero if access to memory by bytes is slow and undesirable.
+ For RISC chips, it means that access to memory by bytes is no
+ better than access by words when possible, so grab a whole word
+ and maybe make use of that. */
+#define SLOW_BYTE_ACCESS 1
#define SHIFT_COUNT_TRUNCATED 1
@@ -803,10 +811,25 @@ while (0)
#undef PTRDIFF_TYPE
#define PTRDIFF_TYPE (POINTER_SIZE == 64 ? "long int" : "int")
+/* The maximum number of bytes copied by one iteration of a movmemsi loop. */
+
+#define RISCV_MAX_MOVE_BYTES_PER_LOOP_ITER (UNITS_PER_WORD * 4)
+
+/* The maximum number of bytes that can be copied by a straight-line
+ movmemsi implementation. */
+
+#define RISCV_MAX_MOVE_BYTES_STRAIGHT (RISCV_MAX_MOVE_BYTES_PER_LOOP_ITER * 3)
+
/* If a memory-to-memory move would take MOVE_RATIO or more simple
- move-instruction pairs, we will do a movmem or libcall instead. */
+ move-instruction pairs, we will do a movmem or libcall instead.
+ Do not use move_by_pieces at all when strict alignment is not
+ in effect but the target has slow unaligned accesses; in this
+ case, movmem or libcall is more efficient. */
-#define MOVE_RATIO(speed) (CLEAR_RATIO (speed) / 2)
+#define MOVE_RATIO(speed) \
+ (!STRICT_ALIGNMENT && riscv_slow_unaligned_access_p ? 1 : \
+ (speed) ? RISCV_MAX_MOVE_BYTES_PER_LOOP_ITER / UNITS_PER_WORD : \
+ CLEAR_RATIO (speed) / 2)
/* For CLEAR_RATIO, when optimizing for size, give a better estimate
of the length of a memset call, but use the default otherwise. */
@@ -821,6 +844,8 @@ while (0)
#ifndef USED_FOR_TARGET
extern const enum reg_class riscv_regno_to_class[];
+extern bool riscv_slow_unaligned_access_p;
+extern unsigned riscv_stack_boundary;
#endif
#define ASM_PREFERRED_EH_DATA_FORMAT(CODE,GLOBAL) \
diff --git a/gcc/config/riscv/riscv.md b/gcc/config/riscv/riscv.md
index 9f056bbcda4..814ff6ec6ad 100644
--- a/gcc/config/riscv/riscv.md
+++ b/gcc/config/riscv/riscv.md
@@ -414,7 +414,7 @@
(plus:SI (match_operand:SI 1 "register_operand" " r,r")
(match_operand:SI 2 "arith_operand" " r,I")))]
""
- { return TARGET_64BIT ? "addw\t%0,%1,%2" : "add\t%0,%1,%2"; }
+ { return TARGET_64BIT ? "add%i2w\t%0,%1,%2" : "add%i2\t%0,%1,%2"; }
[(set_attr "type" "arith")
(set_attr "mode" "SI")])
@@ -423,7 +423,7 @@
(plus:DI (match_operand:DI 1 "register_operand" " r,r")
(match_operand:DI 2 "arith_operand" " r,I")))]
"TARGET_64BIT"
- "add\t%0,%1,%2"
+ "add%i2\t%0,%1,%2"
[(set_attr "type" "arith")
(set_attr "mode" "DI")])
@@ -433,7 +433,7 @@
(plus:SI (match_operand:SI 1 "register_operand" " r,r")
(match_operand:SI 2 "arith_operand" " r,I"))))]
"TARGET_64BIT"
- "addw\t%0,%1,%2"
+ "add%i2w\t%0,%1,%2"
[(set_attr "type" "arith")
(set_attr "mode" "SI")])
@@ -444,7 +444,7 @@
(match_operand:DI 2 "arith_operand" " r,I"))
0)))]
"TARGET_64BIT"
- "addw\t%0,%1,%2"
+ "add%i2w\t%0,%1,%2"
[(set_attr "type" "arith")
(set_attr "mode" "SI")])
@@ -705,7 +705,7 @@
(any_div:SI (match_operand:SI 1 "register_operand" " r")
(match_operand:SI 2 "register_operand" " r")))]
"TARGET_DIV"
- { return TARGET_64BIT ? "<insn>w\t%0,%1,%2" : "<insn>\t%0,%1,%2"; }
+ { return TARGET_64BIT ? "<insn>%i2w\t%0,%1,%2" : "<insn>%i2\t%0,%1,%2"; }
[(set_attr "type" "idiv")
(set_attr "mode" "SI")])
@@ -714,7 +714,7 @@
(any_div:DI (match_operand:DI 1 "register_operand" " r")
(match_operand:DI 2 "register_operand" " r")))]
"TARGET_DIV && TARGET_64BIT"
- "<insn>\t%0,%1,%2"
+ "<insn>%i2\t%0,%1,%2"
[(set_attr "type" "idiv")
(set_attr "mode" "DI")])
@@ -724,7 +724,7 @@
(any_div:SI (match_operand:SI 1 "register_operand" " r")
(match_operand:SI 2 "register_operand" " r"))))]
"TARGET_DIV && TARGET_64BIT"
- "<insn>w\t%0,%1,%2"
+ "<insn>%i2w\t%0,%1,%2"
[(set_attr "type" "idiv")
(set_attr "mode" "DI")])
@@ -928,7 +928,7 @@
(any_bitwise:X (match_operand:X 1 "register_operand" "%r,r")
(match_operand:X 2 "arith_operand" " r,I")))]
""
- "<insn>\t%0,%1,%2"
+ "<insn>%i2\t%0,%1,%2"
[(set_attr "type" "logical")
(set_attr "mode" "<MODE>")])
@@ -937,7 +937,7 @@
(any_bitwise:SI (match_operand:SI 1 "register_operand" "%r,r")
(match_operand:SI 2 "arith_operand" " r,I")))]
"TARGET_64BIT"
- "<insn>\t%0,%1,%2"
+ "<insn>%i2\t%0,%1,%2"
[(set_attr "type" "logical")
(set_attr "mode" "SI")])
@@ -1025,7 +1025,7 @@
(match_operand:QI 1 "nonimmediate_operand" " r,m")))]
""
"@
- and\t%0,%1,0xff
+ andi\t%0,%1,0xff
lbu\t%0,%1"
[(set_attr "move_type" "andi,load")
(set_attr "mode" "<SUPERQI:MODE>")])
@@ -1318,7 +1318,7 @@
(plus:HI (match_operand:HISI 1 "register_operand" " r,r")
(match_operand:HISI 2 "arith_operand" " r,I")))]
""
- { return TARGET_64BIT ? "addw\t%0,%1,%2" : "add\t%0,%1,%2"; }
+ { return TARGET_64BIT ? "add%i2w\t%0,%1,%2" : "add%i2\t%0,%1,%2"; }
[(set_attr "type" "arith")
(set_attr "mode" "HI")])
@@ -1327,7 +1327,7 @@
(xor:HI (match_operand:HISI 1 "register_operand" " r,r")
(match_operand:HISI 2 "arith_operand" " r,I")))]
""
- "xor\t%0,%1,%2"
+ "xor%i2\t%0,%1,%2"
[(set_attr "type" "logical")
(set_attr "mode" "HI")])
@@ -1436,6 +1436,19 @@
DONE;
})
+(define_expand "movmemsi"
+ [(parallel [(set (match_operand:BLK 0 "general_operand")
+ (match_operand:BLK 1 "general_operand"))
+ (use (match_operand:SI 2 ""))
+ (use (match_operand:SI 3 "const_int_operand"))])]
+ ""
+{
+ if (riscv_expand_block_move (operands[0], operands[1], operands[2]))
+ DONE;
+ else
+ FAIL;
+})
+
;; Expand in-line code to clear the instruction cache between operand[0] and
;; operand[1].
(define_expand "clear_cache"
@@ -1475,7 +1488,7 @@
operands[2] = GEN_INT (INTVAL (operands[2])
& (GET_MODE_BITSIZE (SImode) - 1));
- return TARGET_64BIT ? "<insn>w\t%0,%1,%2" : "<insn>\t%0,%1,%2";
+ return TARGET_64BIT ? "<insn>%i2w\t%0,%1,%2" : "<insn>%i2\t%0,%1,%2";
}
[(set_attr "type" "shift")
(set_attr "mode" "SI")])
@@ -1491,7 +1504,7 @@
operands[2] = GEN_INT (INTVAL (operands[2])
& (GET_MODE_BITSIZE (DImode) - 1));
- return "<insn>\t%0,%1,%2";
+ return "<insn>%i2\t%0,%1,%2";
}
[(set_attr "type" "shift")
(set_attr "mode" "DI")])
@@ -1506,7 +1519,7 @@
if (GET_CODE (operands[2]) == CONST_INT)
operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f);
- return "<insn>w\t%0,%1,%2";
+ return "<insn>%i2w\t%0,%1,%2";
}
[(set_attr "type" "shift")
(set_attr "mode" "SI")])
@@ -1725,7 +1738,7 @@
(any_ge:GPR (match_operand:X 1 "register_operand" " r")
(const_int 1)))]
""
- "slt<u>\t%0,zero,%1"
+ "slt%i2<u>\t%0,zero,%1"
[(set_attr "type" "slt")
(set_attr "mode" "<MODE>")])
@@ -1734,7 +1747,7 @@
(any_lt:GPR (match_operand:X 1 "register_operand" " r")
(match_operand:X 2 "arith_operand" " rI")))]
""
- "slt<u>\t%0,%1,%2"
+ "slt%i2<u>\t%0,%1,%2"
[(set_attr "type" "slt")
(set_attr "mode" "<MODE>")])
@@ -1745,7 +1758,7 @@
""
{
operands[2] = GEN_INT (INTVAL (operands[2]) + 1);
- return "slt<u>\t%0,%1,%2";
+ return "slt%i2<u>\t%0,%1,%2";
}
[(set_attr "type" "slt")
(set_attr "mode" "<MODE>")])
diff --git a/gcc/config/rs6000/aix43.h b/gcc/config/rs6000/aix43.h
index d61956d3b28..bd8a2c08da2 100644
--- a/gcc/config/rs6000/aix43.h
+++ b/gcc/config/rs6000/aix43.h
@@ -39,7 +39,7 @@ do { \
{ \
error ("-maix64 required: 64-bit computation with 32-bit addressing not yet supported"); \
} \
-} while (0);
+} while (0)
#undef ASM_SPEC
#define ASM_SPEC "-u %{maix64:-a64 %{!mcpu*:-mppc64}} %(asm_cpu)"
diff --git a/gcc/config/rs6000/aix51.h b/gcc/config/rs6000/aix51.h
index af7e38e1775..e9f88df23da 100644
--- a/gcc/config/rs6000/aix51.h
+++ b/gcc/config/rs6000/aix51.h
@@ -33,7 +33,7 @@ do { \
{ \
error ("-maix64 required: 64-bit computation with 32-bit addressing not yet supported"); \
} \
-} while (0);
+} while (0)
#undef ASM_SPEC
#define ASM_SPEC "-u %{maix64:-a64 %{!mcpu*:-mppc64}} %(asm_cpu)"
diff --git a/gcc/config/rs6000/aix52.h b/gcc/config/rs6000/aix52.h
index 35d2286e5b3..eade64c5f64 100644
--- a/gcc/config/rs6000/aix52.h
+++ b/gcc/config/rs6000/aix52.h
@@ -39,7 +39,7 @@ do { \
{ \
error ("-maix64 required: 64-bit computation with 32-bit addressing not yet supported"); \
} \
-} while (0);
+} while (0)
#undef ASM_SPEC
#define ASM_SPEC "-u %{maix64:-a64 %{!mcpu*:-mppc64}} %(asm_cpu)"
diff --git a/gcc/config/rs6000/aix53.h b/gcc/config/rs6000/aix53.h
index a1fbd834d47..b16488b3ba3 100644
--- a/gcc/config/rs6000/aix53.h
+++ b/gcc/config/rs6000/aix53.h
@@ -39,7 +39,7 @@ do { \
{ \
error ("-maix64 required: 64-bit computation with 32-bit addressing not yet supported"); \
} \
-} while (0);
+} while (0)
#undef ASM_SPEC
#define ASM_SPEC "-u %{maix64:-a64 %{!mcpu*:-mppc64}} %(asm_cpu)"
diff --git a/gcc/config/rs6000/aix61.h b/gcc/config/rs6000/aix61.h
index 0b615057087..25fcb62ea95 100644
--- a/gcc/config/rs6000/aix61.h
+++ b/gcc/config/rs6000/aix61.h
@@ -56,7 +56,7 @@ do { \
{ \
rs6000_current_cmodel = CMODEL_LARGE; \
} \
-} while (0);
+} while (0)
#undef ASM_SPEC
#define ASM_SPEC "-u %{maix64:-a64 %{!mcpu*:-mppc64}} %(asm_cpu)"
diff --git a/gcc/config/rs6000/aix71.h b/gcc/config/rs6000/aix71.h
index 4b986d6a818..3b8de897ede 100644
--- a/gcc/config/rs6000/aix71.h
+++ b/gcc/config/rs6000/aix71.h
@@ -56,7 +56,7 @@ do { \
{ \
rs6000_current_cmodel = CMODEL_LARGE; \
} \
-} while (0);
+} while (0)
#undef ASM_SPEC
#define ASM_SPEC "-u %{maix64:-a64 %{!mcpu*:-mppc64}} %(asm_cpu)"
diff --git a/gcc/config/rs6000/altivec.h b/gcc/config/rs6000/altivec.h
index 94a4db24a78..068dfef2e00 100644
--- a/gcc/config/rs6000/altivec.h
+++ b/gcc/config/rs6000/altivec.h
@@ -357,6 +357,7 @@
#define vec_xl __builtin_vec_vsx_ld
#define vec_xl_be __builtin_vec_xl_be
#define vec_xst __builtin_vec_vsx_st
+#define vec_xst_be __builtin_vec_xst_be
/* Note, xxsldi and xxpermdi were added as __builtin_vsx_<xxx> functions
instead of __builtin_vec_<xxx> */
@@ -415,10 +416,15 @@
#define vec_vsubuqm __builtin_vec_vsubuqm
#define vec_vupkhsw __builtin_vec_vupkhsw
#define vec_vupklsw __builtin_vec_vupklsw
+#define vec_revb __builtin_vec_revb
#endif
#ifdef __POWER9_VECTOR__
/* Vector additions added in ISA 3.0. */
+#define vec_first_match_index __builtin_vec_first_match_index
+#define vec_first_match_or_eos_index __builtin_vec_first_match_or_eos_index
+#define vec_first_mismatch_index __builtin_vec_first_mismatch_index
+#define vec_first_mismatch_or_eos_index __builtin_vec_first_mismatch_or_eos_index
#define vec_pack_to_short_fp32 __builtin_vec_convert_4f32_8i16
#define vec_parity_lsbb __builtin_vec_vparity_lsbb
#define vec_vctz __builtin_vec_vctz
@@ -478,8 +484,6 @@
#define vec_xlx __builtin_vec_vextulx
#define vec_xrx __builtin_vec_vexturx
-
-#define vec_revb __builtin_vec_revb
#endif
/* Predicates.
diff --git a/gcc/config/rs6000/altivec.md b/gcc/config/rs6000/altivec.md
index d0fcd1c3d8a..7122f99bffd 100644
--- a/gcc/config/rs6000/altivec.md
+++ b/gcc/config/rs6000/altivec.md
@@ -2130,7 +2130,7 @@
})
;; Slightly prefer vperm, since the target does not overlap the source
-(define_insn "*altivec_vperm_<mode>_internal"
+(define_insn "altivec_vperm_<mode>_direct"
[(set (match_operand:VM 0 "register_operand" "=v,?wo")
(unspec:VM [(match_operand:VM 1 "register_operand" "v,wo")
(match_operand:VM 2 "register_operand" "v,0")
@@ -4020,7 +4020,7 @@
"TARGET_P9_VECTOR")
;; Vector absolute difference unsigned
-(define_insn "*p9_vadu<mode>3"
+(define_insn "p9_vadu<mode>3"
[(set (match_operand:VI 0 "register_operand" "=v")
(unspec:VI [(match_operand:VI 1 "register_operand" "v")
(match_operand:VI 2 "register_operand" "v")]
@@ -4184,6 +4184,49 @@
"vbpermd %0,%1,%2"
[(set_attr "type" "vecsimple")])
+;; Support for SAD (sum of absolute differences).
+
+;; Due to saturating semantics, we can't combine the sum-across
+;; with the vector accumulate in vsum4ubs. A vadduwm is needed.
+(define_expand "usadv16qi"
+ [(use (match_operand:V4SI 0 "register_operand"))
+ (use (match_operand:V16QI 1 "register_operand"))
+ (use (match_operand:V16QI 2 "register_operand"))
+ (use (match_operand:V4SI 3 "register_operand"))]
+ "TARGET_P9_VECTOR"
+{
+ rtx absd = gen_reg_rtx (V16QImode);
+ rtx zero = gen_reg_rtx (V4SImode);
+ rtx psum = gen_reg_rtx (V4SImode);
+
+ emit_insn (gen_p9_vaduv16qi3 (absd, operands[1], operands[2]));
+ emit_insn (gen_altivec_vspltisw (zero, const0_rtx));
+ emit_insn (gen_altivec_vsum4ubs (psum, absd, zero));
+ emit_insn (gen_addv4si3 (operands[0], psum, operands[3]));
+ DONE;
+})
+
+;; Since vsum4shs is saturating and further performs signed
+;; arithmetic, we can't combine the sum-across with the vector
+;; accumulate in vsum4shs. A vadduwm is needed.
+(define_expand "usadv8hi"
+ [(use (match_operand:V4SI 0 "register_operand"))
+ (use (match_operand:V8HI 1 "register_operand"))
+ (use (match_operand:V8HI 2 "register_operand"))
+ (use (match_operand:V4SI 3 "register_operand"))]
+ "TARGET_P9_VECTOR"
+{
+ rtx absd = gen_reg_rtx (V8HImode);
+ rtx zero = gen_reg_rtx (V4SImode);
+ rtx psum = gen_reg_rtx (V4SImode);
+
+ emit_insn (gen_p9_vaduv8hi3 (absd, operands[1], operands[2]));
+ emit_insn (gen_altivec_vspltisw (zero, const0_rtx));
+ emit_insn (gen_altivec_vsum4shs (psum, absd, zero));
+ emit_insn (gen_addv4si3 (operands[0], psum, operands[3]));
+ DONE;
+})
+
;; Decimal Integer operations
(define_int_iterator UNSPEC_BCD_ADD_SUB [UNSPEC_BCDADD UNSPEC_BCDSUB])
diff --git a/gcc/config/rs6000/power9.md b/gcc/config/rs6000/power9.md
index 217864faaed..82e4b1cf65c 100644
--- a/gcc/config/rs6000/power9.md
+++ b/gcc/config/rs6000/power9.md
@@ -434,7 +434,13 @@
(and (eq_attr "type" "vecdiv")
(eq_attr "size" "128")
(eq_attr "cpu" "power9"))
- "DU_super_power9,dfu_power9")
+ "DU_super_power9,dfu_power9*44")
+
+(define_insn_reservation "power9-qpmul" 24
+ (and (eq_attr "type" "qmul")
+ (eq_attr "size" "128")
+ (eq_attr "cpu" "power9"))
+ "DU_super_power9,dfu_power9*12")
(define_insn_reservation "power9-mffgpr" 2
(and (eq_attr "type" "mffgpr")
diff --git a/gcc/config/rs6000/rs6000-builtin.def b/gcc/config/rs6000/rs6000-builtin.def
index c8a425cba7e..cfb6e55edc0 100644
--- a/gcc/config/rs6000/rs6000-builtin.def
+++ b/gcc/config/rs6000/rs6000-builtin.def
@@ -660,48 +660,6 @@
| RS6000_BTC_BINARY), \
CODE_FOR_ ## ICODE) /* ICODE */
-/* IEEE 128-bit floating-point builtins. */
-#define BU_FLOAT128_2(ENUM, NAME, ATTR, ICODE) \
- RS6000_BUILTIN_2 (MISC_BUILTIN_ ## ENUM, /* ENUM */ \
- "__builtin_" NAME, /* NAME */ \
- RS6000_BTM_FLOAT128, /* MASK */ \
- (RS6000_BTC_ ## ATTR /* ATTR */ \
- | RS6000_BTC_BINARY), \
- CODE_FOR_ ## ICODE) /* ICODE */
-
-#define BU_FLOAT128_1(ENUM, NAME, ATTR, ICODE) \
- RS6000_BUILTIN_1 (MISC_BUILTIN_ ## ENUM, /* ENUM */ \
- "__builtin_" NAME, /* NAME */ \
- RS6000_BTM_FLOAT128, /* MASK */ \
- (RS6000_BTC_ ## ATTR /* ATTR */ \
- | RS6000_BTC_UNARY), \
- CODE_FOR_ ## ICODE) /* ICODE */
-
-/* IEEE 128-bit floating-point builtins that need the ISA 3.0 hardware. */
-#define BU_FLOAT128_1_HW(ENUM, NAME, ATTR, ICODE) \
- RS6000_BUILTIN_1 (MISC_BUILTIN_ ## ENUM, /* ENUM */ \
- "__builtin_" NAME, /* NAME */ \
- RS6000_BTM_FLOAT128_HW, /* MASK */ \
- (RS6000_BTC_ ## ATTR /* ATTR */ \
- | RS6000_BTC_UNARY), \
- CODE_FOR_ ## ICODE) /* ICODE */
-
-#define BU_FLOAT128_2_HW(ENUM, NAME, ATTR, ICODE) \
- RS6000_BUILTIN_2 (MISC_BUILTIN_ ## ENUM, /* ENUM */ \
- "__builtin_" NAME, /* NAME */ \
- RS6000_BTM_FLOAT128_HW, /* MASK */ \
- (RS6000_BTC_ ## ATTR /* ATTR */ \
- | RS6000_BTC_BINARY), \
- CODE_FOR_ ## ICODE) /* ICODE */
-
-#define BU_FLOAT128_3_HW(ENUM, NAME, ATTR, ICODE) \
- RS6000_BUILTIN_3 (MISC_BUILTIN_ ## ENUM, /* ENUM */ \
- "__builtin_" NAME, /* NAME */ \
- RS6000_BTM_FLOAT128_HW, /* MASK */ \
- (RS6000_BTC_ ## ATTR /* ATTR */ \
- | RS6000_BTC_TERNARY), \
- CODE_FOR_ ## ICODE) /* ICODE */
-
/* Miscellaneous builtins for instructions added in ISA 3.0. These
instructions don't require either the DFP or VSX options, just the basic
ISA 3.0 enablement since they operate on general purpose registers. */
@@ -951,6 +909,51 @@
| RS6000_BTC_BINARY), \
CODE_FOR_nothing) /* ICODE */
+/* Built-in functions for IEEE 128-bit hardware floating point. IEEE 128-bit
+ hardware requires p9-vector and 64-bit operation. These functions use just
+ __builtin_ as the prefix. */
+#define BU_FLOAT128_HW_1(ENUM, NAME, ATTR, ICODE) \
+ RS6000_BUILTIN_1 (FLOAT128_BUILTIN_ ## ENUM, /* ENUM */ \
+ "__builtin_" NAME, /* NAME */ \
+ RS6000_BTM_FLOAT128_HW, /* MASK */ \
+ (RS6000_BTC_ ## ATTR /* ATTR */ \
+ | RS6000_BTC_UNARY), \
+ CODE_FOR_ ## ICODE) /* ICODE */
+
+#define BU_FLOAT128_HW_2(ENUM, NAME, ATTR, ICODE) \
+ RS6000_BUILTIN_2 (FLOAT128_BUILTIN_ ## ENUM, /* ENUM */ \
+ "__builtin_" NAME, /* NAME */ \
+ RS6000_BTM_FLOAT128_HW, /* MASK */ \
+ (RS6000_BTC_ ## ATTR /* ATTR */ \
+ | RS6000_BTC_BINARY), \
+ CODE_FOR_ ## ICODE) /* ICODE */
+
+#define BU_FLOAT128_HW_3(ENUM, NAME, ATTR, ICODE) \
+ RS6000_BUILTIN_3 (FLOAT128_BUILTIN_ ## ENUM, /* ENUM */ \
+ "__builtin_" NAME, /* NAME */ \
+ RS6000_BTM_FLOAT128_HW, /* MASK */ \
+ (RS6000_BTC_ ## ATTR /* ATTR */ \
+ | RS6000_BTC_TERNARY), \
+ CODE_FOR_ ## ICODE) /* ICODE */
+
+/* Built-in functions for IEEE 128-bit hardware floating point. These
+ functions use __builtin_vsx_ as the prefix. */
+#define BU_FLOAT128_HW_VSX_1(ENUM, NAME, ATTR, ICODE) \
+ RS6000_BUILTIN_1 (P9V_BUILTIN_ ## ENUM, /* ENUM */ \
+ "__builtin_vsx_" NAME, /* NAME */ \
+ RS6000_BTM_FLOAT128_HW, /* MASK */ \
+ (RS6000_BTC_ ## ATTR /* ATTR */ \
+ | RS6000_BTC_UNARY), \
+ CODE_FOR_ ## ICODE) /* ICODE */
+
+#define BU_FLOAT128_HW_VSX_2(ENUM, NAME, ATTR, ICODE) \
+ RS6000_BUILTIN_2 (P9V_BUILTIN_ ## ENUM, /* ENUM */ \
+ "__builtin_vsx_" NAME, /* NAME */ \
+ RS6000_BTM_FLOAT128_HW, /* MASK */ \
+ (RS6000_BTC_ ## ATTR /* ATTR */ \
+ | RS6000_BTC_BINARY), \
+ CODE_FOR_ ## ICODE) /* ICODE */
+
#endif
@@ -1121,6 +1124,7 @@ BU_ALTIVEC_2 (VSUM4SBS, "vsum4sbs", CONST, altivec_vsum4sbs)
BU_ALTIVEC_2 (VSUM4SHS, "vsum4shs", CONST, altivec_vsum4shs)
BU_ALTIVEC_2 (VSUM2SWS, "vsum2sws", CONST, altivec_vsum2sws)
BU_ALTIVEC_2 (VSUMSWS, "vsumsws", CONST, altivec_vsumsws)
+BU_ALTIVEC_2 (VSUMSWS_BE, "vsumsws_be", CONST, altivec_vsumsws_direct)
BU_ALTIVEC_2 (VXOR, "vxor", CONST, xorv4si3)
BU_ALTIVEC_2 (COPYSIGN_V4SF, "copysignfp", CONST, vector_copysignv4sf3)
@@ -1770,14 +1774,6 @@ BU_VSX_X (LXVW4X_V4SF, "lxvw4x_v4sf", MEM)
BU_VSX_X (LXVW4X_V4SI, "lxvw4x_v4si", MEM)
BU_VSX_X (LXVW4X_V8HI, "lxvw4x_v8hi", MEM)
BU_VSX_X (LXVW4X_V16QI, "lxvw4x_v16qi", MEM)
-
-BU_VSX_X (XL_BE_V16QI, "xl_be_v16qi", MEM)
-BU_VSX_X (XL_BE_V8HI, "xl_be_v8hi", MEM)
-BU_VSX_X (XL_BE_V4SI, "xl_be_v4si", MEM)
-BU_VSX_X (XL_BE_V2DI, "xl_be_v2di", MEM)
-BU_VSX_X (XL_BE_V4SF, "xl_be_v4sf", MEM)
-BU_VSX_X (XL_BE_V2DF, "xl_be_v2df", MEM)
-
BU_VSX_X (STXSDX, "stxsdx", MEM)
BU_VSX_X (STXVD2X_V1TI, "stxvd2x_v1ti", MEM)
BU_VSX_X (STXVD2X_V2DF, "stxvd2x_v2df", MEM)
@@ -1880,6 +1876,7 @@ BU_VSX_OVERLOAD_X (ST, "st")
BU_VSX_OVERLOAD_X (XL, "xl")
BU_VSX_OVERLOAD_X (XL_BE, "xl_be")
BU_VSX_OVERLOAD_X (XST, "xst")
+BU_VSX_OVERLOAD_X (XST_BE, "xst_be")
/* 1 argument builtins pre ISA 2.04. */
BU_FP_MISC_1 (FCTID, "fctid", CONST, lrintdfdi2)
@@ -1892,6 +1889,13 @@ BU_P6_64BIT_2 (CMPB, "cmpb", CONST, cmpbdi3)
/* 1 argument VSX instructions added in ISA 2.07. */
BU_P8V_VSX_1 (XSCVSPDPN, "xscvspdpn", CONST, vsx_xscvspdpn)
BU_P8V_VSX_1 (XSCVDPSPN, "xscvdpspn", CONST, vsx_xscvdpspn)
+BU_P8V_VSX_1 (REVB_V1TI, "revb_v1ti", CONST, revb_v1ti)
+BU_P8V_VSX_1 (REVB_V2DI, "revb_v2di", CONST, revb_v2di)
+BU_P8V_VSX_1 (REVB_V4SI, "revb_v4si", CONST, revb_v4si)
+BU_P8V_VSX_1 (REVB_V8HI, "revb_v8hi", CONST, revb_v8hi)
+BU_P8V_VSX_1 (REVB_V16QI, "revb_v16qi", CONST, revb_v16qi)
+BU_P8V_VSX_1 (REVB_V2DF, "revb_v2df", CONST, revb_v2df)
+BU_P8V_VSX_1 (REVB_V4SF, "revb_v4sf", CONST, revb_v4sf)
/* 1 argument altivec instructions added in ISA 2.07. */
BU_P8V_AV_1 (ABS_V2DI, "abs_v2di", CONST, absv2di2)
@@ -2001,6 +2005,7 @@ BU_P8V_OVERLOAD_1 (VPOPCNTUH, "vpopcntuh")
BU_P8V_OVERLOAD_1 (VPOPCNTUW, "vpopcntuw")
BU_P8V_OVERLOAD_1 (VPOPCNTUD, "vpopcntud")
BU_P8V_OVERLOAD_1 (VGBBD, "vgbbd")
+BU_P8V_OVERLOAD_1 (REVB, "revb")
/* ISA 2.07 vector overloaded 2 argument functions. */
BU_P8V_OVERLOAD_2 (EQV, "eqv")
@@ -2040,6 +2045,31 @@ BU_P9V_AV_2 (VSLV, "vslv", CONST, vslv)
BU_P9V_AV_2 (VSRV, "vsrv", CONST, vsrv)
BU_P9V_AV_2 (CONVERT_4F32_8I16, "convert_4f32_8i16", CONST, convert_4f32_8i16)
+BU_P9V_AV_2 (VFIRSTMATCHINDEX_V16QI, "first_match_index_v16qi",
+ CONST, first_match_index_v16qi)
+BU_P9V_AV_2 (VFIRSTMATCHINDEX_V8HI, "first_match_index_v8hi",
+ CONST, first_match_index_v8hi)
+BU_P9V_AV_2 (VFIRSTMATCHINDEX_V4SI, "first_match_index_v4si",
+ CONST, first_match_index_v4si)
+BU_P9V_AV_2 (VFIRSTMATCHOREOSINDEX_V16QI, "first_match_or_eos_index_v16qi",
+ CONST, first_match_or_eos_index_v16qi)
+BU_P9V_AV_2 (VFIRSTMATCHOREOSINDEX_V8HI, "first_match_or_eos_index_v8hi",
+ CONST, first_match_or_eos_index_v8hi)
+BU_P9V_AV_2 (VFIRSTMATCHOREOSINDEX_V4SI, "first_match_or_eos_index_v4si",
+ CONST, first_match_or_eos_index_v4si)
+BU_P9V_AV_2 (VFIRSTMISMATCHINDEX_V16QI, "first_mismatch_index_v16qi",
+ CONST, first_mismatch_index_v16qi)
+BU_P9V_AV_2 (VFIRSTMISMATCHINDEX_V8HI, "first_mismatch_index_v8hi",
+ CONST, first_mismatch_index_v8hi)
+BU_P9V_AV_2 (VFIRSTMISMATCHINDEX_V4SI, "first_mismatch_index_v4si",
+ CONST, first_mismatch_index_v4si)
+BU_P9V_AV_2 (VFIRSTMISMATCHOREOSINDEX_V16QI, "first_mismatch_or_eos_index_v16qi",
+ CONST, first_mismatch_or_eos_index_v16qi)
+BU_P9V_AV_2 (VFIRSTMISMATCHOREOSINDEX_V8HI, "first_mismatch_or_eos_index_v8hi",
+ CONST, first_mismatch_or_eos_index_v8hi)
+BU_P9V_AV_2 (VFIRSTMISMATCHOREOSINDEX_V4SI, "first_mismatch_or_eos_index_v4si",
+ CONST, first_mismatch_or_eos_index_v4si)
+
/* ISA 3.0 vector overloaded 2-argument functions. */
BU_P9V_OVERLOAD_2 (VSLV, "vslv")
BU_P9V_OVERLOAD_2 (VSRV, "vsrv")
@@ -2072,10 +2102,10 @@ BU_P9V_OVERLOAD_3 (RLMI, "rlmi")
BU_P9V_64BIT_VSX_1 (VSEEDP, "scalar_extract_exp", CONST, xsxexpdp)
BU_P9V_64BIT_VSX_1 (VSESDP, "scalar_extract_sig", CONST, xsxsigdp)
-BU_P9V_64BIT_VSX_1 (VSEEQP, "scalar_extract_expq", CONST, xsxexpqp)
-BU_P9V_64BIT_VSX_1 (VSESQP, "scalar_extract_sigq", CONST, xsxsigqp)
+BU_FLOAT128_HW_VSX_1 (VSEEQP, "scalar_extract_expq", CONST, xsxexpqp_kf)
+BU_FLOAT128_HW_VSX_1 (VSESQP, "scalar_extract_sigq", CONST, xsxsigqp_kf)
-BU_P9V_VSX_1 (VSTDCNQP, "scalar_test_neg_qp", CONST, xststdcnegqp)
+BU_FLOAT128_HW_VSX_1 (VSTDCNQP, "scalar_test_neg_qp", CONST, xststdcnegqp_kf)
BU_P9V_VSX_1 (VSTDCNDP, "scalar_test_neg_dp", CONST, xststdcnegdp)
BU_P9V_VSX_1 (VSTDCNSP, "scalar_test_neg_sp", CONST, xststdcnegsp)
@@ -2091,15 +2121,15 @@ BU_P9V_VSX_1 (XXBRH_V8HI, "xxbrh_v8hi", CONST, p9_xxbrh_v8hi)
BU_P9V_64BIT_VSX_2 (VSIEDP, "scalar_insert_exp", CONST, xsiexpdp)
BU_P9V_64BIT_VSX_2 (VSIEDPF, "scalar_insert_exp_dp", CONST, xsiexpdpf)
-BU_P9V_64BIT_VSX_2 (VSIEQP, "scalar_insert_exp_q", CONST, xsiexpqp)
-BU_P9V_64BIT_VSX_2 (VSIEQPF, "scalar_insert_exp_qp", CONST, xsiexpqpf)
+BU_FLOAT128_HW_VSX_2 (VSIEQP, "scalar_insert_exp_q", CONST, xsiexpqp_kf)
+BU_FLOAT128_HW_VSX_2 (VSIEQPF, "scalar_insert_exp_qp", CONST, xsiexpqpf_kf)
BU_P9V_VSX_2 (VSCEDPGT, "scalar_cmp_exp_dp_gt", CONST, xscmpexpdp_gt)
BU_P9V_VSX_2 (VSCEDPLT, "scalar_cmp_exp_dp_lt", CONST, xscmpexpdp_lt)
BU_P9V_VSX_2 (VSCEDPEQ, "scalar_cmp_exp_dp_eq", CONST, xscmpexpdp_eq)
BU_P9V_VSX_2 (VSCEDPUO, "scalar_cmp_exp_dp_unordered", CONST, xscmpexpdp_unordered)
-BU_P9V_VSX_2 (VSTDCQP, "scalar_test_data_class_qp", CONST, xststdcqp)
+BU_FLOAT128_HW_VSX_2 (VSTDCQP, "scalar_test_data_class_qp", CONST, xststdcqp_kf)
BU_P9V_VSX_2 (VSTDCDP, "scalar_test_data_class_dp", CONST, xststdcdp)
BU_P9V_VSX_2 (VSTDCSP, "scalar_test_data_class_sp", CONST, xststdcsp)
@@ -2112,12 +2142,15 @@ BU_P9V_OVERLOAD_1 (VSTDCNQP, "scalar_test_neg_qp")
BU_P9V_OVERLOAD_1 (VSTDCNDP, "scalar_test_neg_dp")
BU_P9V_OVERLOAD_1 (VSTDCNSP, "scalar_test_neg_sp")
-BU_P9V_OVERLOAD_1 (REVB, "revb")
-
BU_P9V_OVERLOAD_1 (VEXTRACT_FP_FROM_SHORTH, "vextract_fp_from_shorth")
BU_P9V_OVERLOAD_1 (VEXTRACT_FP_FROM_SHORTL, "vextract_fp_from_shortl")
/* ISA 3.0 vector scalar overloaded 2 argument functions. */
+BU_P9V_OVERLOAD_2 (VFIRSTMATCHINDEX, "first_match_index")
+BU_P9V_OVERLOAD_2 (VFIRSTMISMATCHINDEX, "first_mismatch_index")
+BU_P9V_OVERLOAD_2 (VFIRSTMATCHOREOSINDEX, "first_match_or_eos_index")
+BU_P9V_OVERLOAD_2 (VFIRSTMISMATCHOREOSINDEX, "first_mismatch_or_eos_index")
+
BU_P9V_OVERLOAD_2 (VSIEDP, "scalar_insert_exp")
BU_P9V_OVERLOAD_2 (VSTDC, "scalar_test_data_class")
@@ -2178,6 +2211,16 @@ BU_P9V_VSX_2 (VEXTRACT4B, "vextract4b", CONST, vextract4b)
BU_P9V_VSX_3 (VINSERT4B, "vinsert4b", CONST, vinsert4b)
BU_P9V_VSX_3 (VINSERT4B_DI, "vinsert4b_di", CONST, vinsert4b_di)
+/* Hardware IEEE 128-bit floating point round to odd instrucitons added in ISA
+ 3.0 (power9). */
+BU_FLOAT128_HW_1 (SQRTF128_ODD, "sqrtf128_round_to_odd", FP, sqrtkf2_odd)
+BU_FLOAT128_HW_1 (TRUNCF128_ODD, "truncf128_round_to_odd", FP, trunckfdf2_odd)
+BU_FLOAT128_HW_2 (ADDF128_ODD, "addf128_round_to_odd", FP, addkf3_odd)
+BU_FLOAT128_HW_2 (SUBF128_ODD, "subf128_round_to_odd", FP, subkf3_odd)
+BU_FLOAT128_HW_2 (MULF128_ODD, "mulf128_round_to_odd", FP, mulkf3_odd)
+BU_FLOAT128_HW_2 (DIVF128_ODD, "divf128_round_to_odd", FP, divkf3_odd)
+BU_FLOAT128_HW_3 (FMAF128_ODD, "fmaf128_round_to_odd", FP, fmakf4_odd)
+
/* 3 argument vector functions returning void, treated as SPECIAL,
added in ISA 3.0 (power9). */
BU_P9V_64BIT_AV_X (STXVL, "stxvl", MISC)
@@ -2185,7 +2228,9 @@ BU_P9V_64BIT_AV_X (XST_LEN_R, "xst_len_r", MISC)
/* 1 argument vector functions added in ISA 3.0 (power9). */
BU_P9V_AV_1 (VCLZLSBB, "vclzlsbb", CONST, vclzlsbb)
-BU_P9V_AV_1 (VCTZLSBB, "vctzlsbb", CONST, vctzlsbb)
+BU_P9V_AV_1 (VCTZLSBB_V16QI, "vctzlsbb_v16qi", CONST, vctzlsbb_v16qi)
+BU_P9V_AV_1 (VCTZLSBB_V8HI, "vctzlsbb_v8hi", CONST, vctzlsbb_v8hi)
+BU_P9V_AV_1 (VCTZLSBB_V4SI, "vctzlsbb_v4si", CONST, vctzlsbb_v4si)
/* Built-in support for Power9 "VSU option" string operations includes
new awareness of the "vector compare not equal" (vcmpneb, vcmpneb.,
@@ -2365,23 +2410,6 @@ BU_P9_64BIT_2 (CMPEQB, "byte_in_set", CONST, cmpeqb)
BU_P9_OVERLOAD_2 (CMPRB, "byte_in_range")
BU_P9_OVERLOAD_2 (CMPRB2, "byte_in_either_range")
BU_P9_OVERLOAD_2 (CMPEQB, "byte_in_set")
-
-/* 1 and 2 argument IEEE 128-bit floating-point functions. These functions use
- the older 'q' suffix from libquadmath. The standard built-in functions
- support fabsf128 and copysignf128, but older code used these 'q' versions,
- so keep them around. */
-BU_FLOAT128_1 (FABSQ, "fabsq", CONST, abskf2)
-BU_FLOAT128_2 (COPYSIGNQ, "copysignq", CONST, copysignkf3)
-
-/* 1, 2, and 3 argument IEEE 128-bit floating point functions that require ISA
- 3.0 hardware. These functions use the new 'f128' suffix. */
-BU_FLOAT128_1_HW (SQRTF128_ODD, "sqrtf128_round_to_odd", CONST, sqrtkf2_odd)
-BU_FLOAT128_1_HW (TRUNCF128_ODD, "truncf128_round_to_odd", CONST, trunckfdf2_odd)
-BU_FLOAT128_2_HW (ADDF128_ODD, "addf128_round_to_odd", CONST, addkf3_odd)
-BU_FLOAT128_2_HW (SUBF128_ODD, "subf128_round_to_odd", CONST, subkf3_odd)
-BU_FLOAT128_2_HW (MULF128_ODD, "mulf128_round_to_odd", CONST, mulkf3_odd)
-BU_FLOAT128_2_HW (DIVF128_ODD, "divf128_round_to_odd", CONST, divkf3_odd)
-BU_FLOAT128_3_HW (FMAF128_ODD, "fmaf128_round_to_odd", CONST, fmakf4_odd)
/* 1 argument crypto functions. */
BU_CRYPTO_1 (VSBOX, "vsbox", CONST, crypto_vsbox)
@@ -2517,18 +2545,6 @@ BU_SPECIAL_X (RS6000_BUILTIN_CPU_IS, "__builtin_cpu_is",
BU_SPECIAL_X (RS6000_BUILTIN_CPU_SUPPORTS, "__builtin_cpu_supports",
RS6000_BTM_ALWAYS, RS6000_BTC_MISC)
-BU_SPECIAL_X (RS6000_BUILTIN_NANQ, "__builtin_nanq",
- RS6000_BTM_FLOAT128, RS6000_BTC_CONST)
-
-BU_SPECIAL_X (RS6000_BUILTIN_NANSQ, "__builtin_nansq",
- RS6000_BTM_FLOAT128, RS6000_BTC_CONST)
-
-BU_SPECIAL_X (RS6000_BUILTIN_INFQ, "__builtin_infq",
- RS6000_BTM_FLOAT128, RS6000_BTC_CONST)
-
-BU_SPECIAL_X (RS6000_BUILTIN_HUGE_VALQ, "__builtin_huge_valq",
- RS6000_BTM_FLOAT128, RS6000_BTC_CONST)
-
/* Darwin CfString builtin. */
BU_SPECIAL_X (RS6000_BUILTIN_CFSTRING, "__builtin_cfstring", RS6000_BTM_ALWAYS,
RS6000_BTC_MISC)
diff --git a/gcc/config/rs6000/rs6000-c.c b/gcc/config/rs6000/rs6000-c.c
index 5c4b7664430..301ca172207 100644
--- a/gcc/config/rs6000/rs6000-c.c
+++ b/gcc/config/rs6000/rs6000-c.c
@@ -685,6 +685,17 @@ rs6000_cpu_cpp_builtins (cpp_reader *pfile)
builtin_define ("__builtin_vsx_xvnmsubmsp=__builtin_vsx_xvnmsubsp");
}
+ /* Map the old _Float128 'q' builtins into the new 'f128' builtins. */
+ if (TARGET_FLOAT128_TYPE)
+ {
+ builtin_define ("__builtin_fabsq=__builtin_fabsf128");
+ builtin_define ("__builtin_copysignq=__builtin_copysignf128");
+ builtin_define ("__builtin_nanq=__builtin_nanf128");
+ builtin_define ("__builtin_nansq=__builtin_nansf128");
+ builtin_define ("__builtin_infq=__builtin_inff128");
+ builtin_define ("__builtin_huge_valq=__builtin_huge_valf128");
+ }
+
/* Tell users they can use __builtin_bswap{16,64}. */
builtin_define ("__HAVE_BSWAP__");
@@ -2402,6 +2413,62 @@ const struct altivec_builtin_types altivec_overloaded_builtins[] = {
{ P9V_BUILTIN_VEC_CONVERT_4F32_8I16, P9V_BUILTIN_CONVERT_4F32_8I16,
RS6000_BTI_unsigned_V8HI, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { P9V_BUILTIN_VEC_VFIRSTMATCHINDEX, P9V_BUILTIN_VFIRSTMATCHINDEX_V16QI,
+ RS6000_BTI_UINTSI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { P9V_BUILTIN_VEC_VFIRSTMATCHINDEX, P9V_BUILTIN_VFIRSTMATCHINDEX_V16QI,
+ RS6000_BTI_UINTSI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { P9V_BUILTIN_VEC_VFIRSTMATCHINDEX, P9V_BUILTIN_VFIRSTMATCHINDEX_V8HI,
+ RS6000_BTI_UINTSI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { P9V_BUILTIN_VEC_VFIRSTMATCHINDEX, P9V_BUILTIN_VFIRSTMATCHINDEX_V8HI,
+ RS6000_BTI_UINTSI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { P9V_BUILTIN_VEC_VFIRSTMATCHINDEX, P9V_BUILTIN_VFIRSTMATCHINDEX_V4SI,
+ RS6000_BTI_UINTSI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { P9V_BUILTIN_VEC_VFIRSTMATCHINDEX, P9V_BUILTIN_VFIRSTMATCHINDEX_V4SI,
+ RS6000_BTI_UINTSI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { P9V_BUILTIN_VEC_VFIRSTMATCHOREOSINDEX, P9V_BUILTIN_VFIRSTMATCHOREOSINDEX_V16QI,
+ RS6000_BTI_UINTSI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { P9V_BUILTIN_VEC_VFIRSTMATCHOREOSINDEX, P9V_BUILTIN_VFIRSTMATCHOREOSINDEX_V16QI,
+ RS6000_BTI_UINTSI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { P9V_BUILTIN_VEC_VFIRSTMATCHOREOSINDEX, P9V_BUILTIN_VFIRSTMATCHOREOSINDEX_V8HI,
+ RS6000_BTI_UINTSI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { P9V_BUILTIN_VEC_VFIRSTMATCHOREOSINDEX, P9V_BUILTIN_VFIRSTMATCHOREOSINDEX_V8HI,
+ RS6000_BTI_UINTSI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { P9V_BUILTIN_VEC_VFIRSTMATCHOREOSINDEX, P9V_BUILTIN_VFIRSTMATCHOREOSINDEX_V4SI,
+ RS6000_BTI_UINTSI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { P9V_BUILTIN_VEC_VFIRSTMATCHOREOSINDEX, P9V_BUILTIN_VFIRSTMATCHOREOSINDEX_V4SI,
+ RS6000_BTI_UINTSI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { P9V_BUILTIN_VEC_VFIRSTMISMATCHINDEX, P9V_BUILTIN_VFIRSTMISMATCHINDEX_V16QI,
+ RS6000_BTI_UINTSI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { P9V_BUILTIN_VEC_VFIRSTMISMATCHINDEX, P9V_BUILTIN_VFIRSTMISMATCHINDEX_V16QI,
+ RS6000_BTI_UINTSI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { P9V_BUILTIN_VEC_VFIRSTMISMATCHINDEX, P9V_BUILTIN_VFIRSTMISMATCHINDEX_V8HI,
+ RS6000_BTI_UINTSI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { P9V_BUILTIN_VEC_VFIRSTMISMATCHINDEX, P9V_BUILTIN_VFIRSTMISMATCHINDEX_V8HI,
+ RS6000_BTI_UINTSI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { P9V_BUILTIN_VEC_VFIRSTMISMATCHINDEX, P9V_BUILTIN_VFIRSTMISMATCHINDEX_V4SI,
+ RS6000_BTI_UINTSI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { P9V_BUILTIN_VEC_VFIRSTMISMATCHINDEX, P9V_BUILTIN_VFIRSTMISMATCHINDEX_V4SI,
+ RS6000_BTI_UINTSI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+
+ { P9V_BUILTIN_VEC_VFIRSTMISMATCHOREOSINDEX,
+ P9V_BUILTIN_VFIRSTMISMATCHOREOSINDEX_V16QI,
+ RS6000_BTI_UINTSI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { P9V_BUILTIN_VEC_VFIRSTMISMATCHOREOSINDEX,
+ P9V_BUILTIN_VFIRSTMISMATCHOREOSINDEX_V16QI, RS6000_BTI_UINTSI,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { P9V_BUILTIN_VEC_VFIRSTMISMATCHOREOSINDEX,
+ P9V_BUILTIN_VFIRSTMISMATCHOREOSINDEX_V8HI,
+ RS6000_BTI_UINTSI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { P9V_BUILTIN_VEC_VFIRSTMISMATCHOREOSINDEX,
+ P9V_BUILTIN_VFIRSTMISMATCHOREOSINDEX_V8HI,
+ RS6000_BTI_UINTSI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { P9V_BUILTIN_VEC_VFIRSTMISMATCHOREOSINDEX,
+ P9V_BUILTIN_VFIRSTMISMATCHOREOSINDEX_V4SI,
+ RS6000_BTI_UINTSI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { P9V_BUILTIN_VEC_VFIRSTMISMATCHOREOSINDEX,
+ P9V_BUILTIN_VFIRSTMISMATCHOREOSINDEX_V4SI,
+ RS6000_BTI_UINTSI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+
{ ALTIVEC_BUILTIN_VEC_VPKUWUM, ALTIVEC_BUILTIN_VPKUWUM,
RS6000_BTI_V8HI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
{ ALTIVEC_BUILTIN_VEC_VPKUWUM, ALTIVEC_BUILTIN_VPKUWUM,
@@ -3046,69 +3113,94 @@ const struct altivec_builtin_types altivec_overloaded_builtins[] = {
RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
{ ALTIVEC_BUILTIN_VEC_SUMS, ALTIVEC_BUILTIN_VSUMSWS,
RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
- { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LD_ELEMREV_V2DF,
+
+ { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LXVD2X_V2DF,
RS6000_BTI_V2DF, RS6000_BTI_INTSI, ~RS6000_BTI_V2DF, 0 },
- { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LD_ELEMREV_V2DF,
+ { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LXVD2X_V2DF,
RS6000_BTI_V2DF, RS6000_BTI_INTSI, ~RS6000_BTI_double, 0 },
- { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LD_ELEMREV_V2DI,
+ { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LXVD2X_V2DI,
RS6000_BTI_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_V2DI, 0 },
- { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LD_ELEMREV_V2DI,
+ { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LXVD2X_V2DI,
RS6000_BTI_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_long_long, 0 },
- { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LD_ELEMREV_V2DI,
+ { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LXVD2X_V2DI,
RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI,
~RS6000_BTI_unsigned_V2DI, 0 },
- { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LD_ELEMREV_V2DI,
+ { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LXVD2X_V2DI,
RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI,
~RS6000_BTI_unsigned_long_long, 0 },
- { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LD_ELEMREV_V4SF,
+ { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LXVW4X_V4SF,
RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF, 0 },
- { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LD_ELEMREV_V4SF,
+ { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LXVW4X_V4SF,
RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 },
- { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LD_ELEMREV_V4SI,
+ { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LXVW4X_V4SI,
RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI, 0 },
- { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LD_ELEMREV_V4SI,
+ { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LXVW4X_V4SI,
RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 },
- { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LD_ELEMREV_V4SI,
+ { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LXVW4X_V4SI,
RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI, 0 },
- { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LD_ELEMREV_V4SI,
+ { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LXVW4X_V4SI,
RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 },
- { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LD_ELEMREV_V8HI,
+ { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LXVW4X_V8HI,
RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI, 0 },
- { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LD_ELEMREV_V8HI,
+ { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LXVW4X_V8HI,
RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 },
- { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LD_ELEMREV_V8HI,
+ { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LXVW4X_V8HI,
RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI, 0 },
- { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LD_ELEMREV_V8HI,
+ { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LXVW4X_V8HI,
RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 },
- { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LD_ELEMREV_V16QI,
+ { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LXVW4X_V16QI,
RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI, 0 },
- { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LD_ELEMREV_V16QI,
+ { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LXVW4X_V16QI,
RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 },
- { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LD_ELEMREV_V16QI,
+ { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LXVW4X_V16QI,
RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI,
~RS6000_BTI_unsigned_V16QI, 0 },
- { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LD_ELEMREV_V16QI,
+ { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LXVW4X_V16QI,
RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 },
- { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_XL_BE_V16QI,
- RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 },
- { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_XL_BE_V16QI,
- RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 },
- { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_XL_BE_V8HI,
- RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 },
- { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_XL_BE_V8HI,
- RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 },
- { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_XL_BE_V4SI,
- RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 },
- { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_XL_BE_V4SI,
- RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 },
- { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_XL_BE_V2DI,
+
+ { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_LD_ELEMREV_V2DF,
+ RS6000_BTI_V2DF, RS6000_BTI_INTSI, ~RS6000_BTI_V2DF, 0 },
+ { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_LD_ELEMREV_V2DF,
+ RS6000_BTI_V2DF, RS6000_BTI_INTSI, ~RS6000_BTI_double, 0 },
+ { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_LD_ELEMREV_V2DI,
+ RS6000_BTI_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_V2DI, 0 },
+ { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_LD_ELEMREV_V2DI,
RS6000_BTI_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_long_long, 0 },
- { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_XL_BE_V2DI,
- RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_long_long, 0 },
- { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_XL_BE_V4SF,
+ { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_LD_ELEMREV_V2DI,
+ RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI,
+ ~RS6000_BTI_unsigned_V2DI, 0 },
+ { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_LD_ELEMREV_V2DI,
+ RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI,
+ ~RS6000_BTI_unsigned_long_long, 0 },
+ { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_LD_ELEMREV_V4SF,
+ RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF, 0 },
+ { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_LD_ELEMREV_V4SF,
RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 },
- { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_XL_BE_V2DF,
- RS6000_BTI_V2DF, RS6000_BTI_INTSI, ~RS6000_BTI_double, 0 },
+ { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_LD_ELEMREV_V4SI,
+ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI, 0 },
+ { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_LD_ELEMREV_V4SI,
+ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 },
+ { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_LD_ELEMREV_V4SI,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI, 0 },
+ { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_LD_ELEMREV_V4SI,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 },
+ { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_LD_ELEMREV_V8HI,
+ RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI, 0 },
+ { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_LD_ELEMREV_V8HI,
+ RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 },
+ { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_LD_ELEMREV_V8HI,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI, 0 },
+ { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_LD_ELEMREV_V8HI,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 },
+ { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_LD_ELEMREV_V16QI,
+ RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI, 0 },
+ { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_LD_ELEMREV_V16QI,
+ RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 },
+ { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_LD_ELEMREV_V16QI,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI,
+ ~RS6000_BTI_unsigned_V16QI, 0 },
+ { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_LD_ELEMREV_V16QI,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 },
{ ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
{ ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
@@ -3884,53 +3976,111 @@ const struct altivec_builtin_types altivec_overloaded_builtins[] = {
RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI },
{ ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL,
RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI },
- { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_ST_ELEMREV_V2DF,
+ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVD2X_V2DF,
+ RS6000_BTI_void, RS6000_BTI_V2DF, RS6000_BTI_INTSI, ~RS6000_BTI_V2DF },
+ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVD2X_V2DI,
+ RS6000_BTI_void, RS6000_BTI_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_V2DI },
+ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVD2X_V2DI,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI,
+ ~RS6000_BTI_unsigned_V2DI },
+ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVD2X_V2DI,
+ RS6000_BTI_void, RS6000_BTI_bool_V2DI, RS6000_BTI_INTSI,
+ ~RS6000_BTI_bool_V2DI },
+ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V4SF,
+ RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF },
+ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V4SF,
+ RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float },
+ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V4SI,
+ RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI },
+ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V4SI,
+ RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI },
+ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V4SI,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI },
+ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V4SI,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI },
+ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V4SI,
+ RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI },
+ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V4SI,
+ RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI },
+ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V4SI,
+ RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI },
+ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V8HI,
+ RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI },
+ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V8HI,
+ RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI },
+ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V8HI,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI },
+ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V8HI,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI },
+ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V8HI,
+ RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI },
+ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V8HI,
+ RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI },
+ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V8HI,
+ RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI },
+ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V16QI,
+ RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI },
+ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V16QI,
+ RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI },
+ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V16QI,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI },
+ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V16QI,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI },
+ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V16QI,
+ RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI },
+ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V16QI,
+ RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI },
+ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V16QI,
+ RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI },
+ { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V8HI,
+ RS6000_BTI_void, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI },
+ { VSX_BUILTIN_VEC_XST_BE, VSX_BUILTIN_ST_ELEMREV_V2DF,
RS6000_BTI_void, RS6000_BTI_V2DF, RS6000_BTI_INTSI, ~RS6000_BTI_V2DF },
- { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_ST_ELEMREV_V2DF,
+ { VSX_BUILTIN_VEC_XST_BE, VSX_BUILTIN_ST_ELEMREV_V2DF,
RS6000_BTI_void, RS6000_BTI_V2DF, RS6000_BTI_INTSI, ~RS6000_BTI_double },
- { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_ST_ELEMREV_V2DI,
+ { VSX_BUILTIN_VEC_XST_BE, VSX_BUILTIN_ST_ELEMREV_V2DI,
RS6000_BTI_void, RS6000_BTI_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_V2DI },
- { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_ST_ELEMREV_V2DI,
+ { VSX_BUILTIN_VEC_XST_BE, VSX_BUILTIN_ST_ELEMREV_V2DI,
RS6000_BTI_void, RS6000_BTI_V2DI, RS6000_BTI_INTSI,
~RS6000_BTI_long_long },
- { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_ST_ELEMREV_V2DI,
+ { VSX_BUILTIN_VEC_XST_BE, VSX_BUILTIN_ST_ELEMREV_V2DI,
RS6000_BTI_void, RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI,
~RS6000_BTI_unsigned_V2DI },
- { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_ST_ELEMREV_V2DI,
+ { VSX_BUILTIN_VEC_XST_BE, VSX_BUILTIN_ST_ELEMREV_V2DI,
RS6000_BTI_void, RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI,
~RS6000_BTI_unsigned_long_long },
- { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_ST_ELEMREV_V4SF,
+ { VSX_BUILTIN_VEC_XST_BE, VSX_BUILTIN_ST_ELEMREV_V4SF,
RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF },
- { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_ST_ELEMREV_V4SF,
+ { VSX_BUILTIN_VEC_XST_BE, VSX_BUILTIN_ST_ELEMREV_V4SF,
RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float },
- { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_ST_ELEMREV_V4SI,
+ { VSX_BUILTIN_VEC_XST_BE, VSX_BUILTIN_ST_ELEMREV_V4SI,
RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI },
- { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_ST_ELEMREV_V4SI,
+ { VSX_BUILTIN_VEC_XST_BE, VSX_BUILTIN_ST_ELEMREV_V4SI,
RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI },
- { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_ST_ELEMREV_V4SI,
+ { VSX_BUILTIN_VEC_XST_BE, VSX_BUILTIN_ST_ELEMREV_V4SI,
RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI,
~RS6000_BTI_unsigned_V4SI },
- { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_ST_ELEMREV_V4SI,
+ { VSX_BUILTIN_VEC_XST_BE, VSX_BUILTIN_ST_ELEMREV_V4SI,
RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI,
~RS6000_BTI_UINTSI },
- { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_ST_ELEMREV_V8HI,
+ { VSX_BUILTIN_VEC_XST_BE, VSX_BUILTIN_ST_ELEMREV_V8HI,
RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI },
- { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_ST_ELEMREV_V8HI,
+ { VSX_BUILTIN_VEC_XST_BE, VSX_BUILTIN_ST_ELEMREV_V8HI,
RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI },
- { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_ST_ELEMREV_V8HI,
+ { VSX_BUILTIN_VEC_XST_BE, VSX_BUILTIN_ST_ELEMREV_V8HI,
RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI,
~RS6000_BTI_unsigned_V8HI },
- { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_ST_ELEMREV_V8HI,
+ { VSX_BUILTIN_VEC_XST_BE, VSX_BUILTIN_ST_ELEMREV_V8HI,
RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI,
~RS6000_BTI_UINTHI },
- { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_ST_ELEMREV_V16QI,
+ { VSX_BUILTIN_VEC_XST_BE, VSX_BUILTIN_ST_ELEMREV_V16QI,
RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI },
- { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_ST_ELEMREV_V16QI,
+ { VSX_BUILTIN_VEC_XST_BE, VSX_BUILTIN_ST_ELEMREV_V16QI,
RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI },
- { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_ST_ELEMREV_V16QI,
+ { VSX_BUILTIN_VEC_XST_BE, VSX_BUILTIN_ST_ELEMREV_V16QI,
RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI,
~RS6000_BTI_unsigned_V16QI },
- { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_ST_ELEMREV_V16QI,
+ { VSX_BUILTIN_VEC_XST_BE, VSX_BUILTIN_ST_ELEMREV_V16QI,
RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI,
~RS6000_BTI_UINTQI },
{ VSX_BUILTIN_VEC_XXSLDWI, VSX_BUILTIN_XXSLDWI_16QI,
@@ -5180,10 +5330,14 @@ const struct altivec_builtin_types altivec_overloaded_builtins[] = {
{ P9V_BUILTIN_VEC_VCLZLSBB, P9V_BUILTIN_VCLZLSBB,
RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, 0, 0 },
- { P9V_BUILTIN_VEC_VCTZLSBB, P9V_BUILTIN_VCTZLSBB,
+ { P9V_BUILTIN_VEC_VCTZLSBB, P9V_BUILTIN_VCTZLSBB_V16QI,
RS6000_BTI_INTSI, RS6000_BTI_V16QI, 0, 0 },
- { P9V_BUILTIN_VEC_VCTZLSBB, P9V_BUILTIN_VCTZLSBB,
+ { P9V_BUILTIN_VEC_VCTZLSBB, P9V_BUILTIN_VCTZLSBB_V16QI,
RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, 0, 0 },
+ { P9V_BUILTIN_VEC_VCTZLSBB, P9V_BUILTIN_VCTZLSBB_V8HI,
+ RS6000_BTI_INTSI, RS6000_BTI_V8HI, 0, 0 },
+ { P9V_BUILTIN_VEC_VCTZLSBB, P9V_BUILTIN_VCTZLSBB_V4SI,
+ RS6000_BTI_INTSI, RS6000_BTI_V4SI, 0, 0 },
{ P9V_BUILTIN_VEC_VEXTRACT4B, P9V_BUILTIN_VEXTRACT4B,
RS6000_BTI_INTDI, RS6000_BTI_V16QI, RS6000_BTI_UINTSI, 0 },
@@ -5553,36 +5707,38 @@ const struct altivec_builtin_types altivec_overloaded_builtins[] = {
RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI,
RS6000_BTI_unsigned_V16QI, 0 },
- { P9V_BUILTIN_VEC_REVB, P9V_BUILTIN_XXBRQ_V16QI,
- RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0, 0 },
- { P9V_BUILTIN_VEC_REVB, P9V_BUILTIN_XXBRQ_V16QI,
- RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0, 0 },
- { P9V_BUILTIN_VEC_REVB, P9V_BUILTIN_XXBRQ_V16QI,
- RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0, 0 },
- { P9V_BUILTIN_VEC_REVB, P9V_BUILTIN_XXBRQ_V1TI,
- RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI, 0, 0 },
- { P9V_BUILTIN_VEC_REVB, P9V_BUILTIN_XXBRQ_V1TI,
+ { P8V_BUILTIN_VEC_REVB, P8V_BUILTIN_REVB_V1TI,
RS6000_BTI_V1TI, RS6000_BTI_V1TI, 0, 0 },
- { P9V_BUILTIN_VEC_REVB, P9V_BUILTIN_XXBRD_V2DI,
+ { P8V_BUILTIN_VEC_REVB, P8V_BUILTIN_REVB_V1TI,
+ RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI, 0, 0 },
+ { P8V_BUILTIN_VEC_REVB, P8V_BUILTIN_REVB_V2DI,
+ RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V2DI, 0, 0 },
+ { P8V_BUILTIN_VEC_REVB, P8V_BUILTIN_REVB_V2DI,
RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0, 0 },
- { P9V_BUILTIN_VEC_REVB, P9V_BUILTIN_XXBRD_V2DI,
+ { P8V_BUILTIN_VEC_REVB, P8V_BUILTIN_REVB_V2DI,
RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0, 0 },
- { P9V_BUILTIN_VEC_REVB, P9V_BUILTIN_XXBRD_V2DF,
- RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0, 0 },
- { P9V_BUILTIN_VEC_REVB, P9V_BUILTIN_XXBRW_V4SI,
+ { P8V_BUILTIN_VEC_REVB, P8V_BUILTIN_REVB_V4SI,
RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0, 0 },
- { P9V_BUILTIN_VEC_REVB, P9V_BUILTIN_XXBRW_V4SI,
+ { P8V_BUILTIN_VEC_REVB, P8V_BUILTIN_REVB_V4SI,
RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0, 0 },
- { P9V_BUILTIN_VEC_REVB, P9V_BUILTIN_XXBRW_V4SI,
+ { P8V_BUILTIN_VEC_REVB, P8V_BUILTIN_REVB_V4SI,
RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0, 0 },
- { P9V_BUILTIN_VEC_REVB, P9V_BUILTIN_XXBRW_V4SF,
- RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 },
- { P9V_BUILTIN_VEC_REVB, P9V_BUILTIN_XXBRH_V8HI,
+ { P8V_BUILTIN_VEC_REVB, P8V_BUILTIN_REVB_V8HI,
RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0, 0 },
- { P9V_BUILTIN_VEC_REVB, P9V_BUILTIN_XXBRH_V8HI,
+ { P8V_BUILTIN_VEC_REVB, P8V_BUILTIN_REVB_V8HI,
RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0, 0 },
- { P9V_BUILTIN_VEC_REVB, P9V_BUILTIN_XXBRH_V8HI,
+ { P8V_BUILTIN_VEC_REVB, P8V_BUILTIN_REVB_V8HI,
RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0, 0 },
+ { P8V_BUILTIN_VEC_REVB, P8V_BUILTIN_REVB_V16QI,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0, 0 },
+ { P8V_BUILTIN_VEC_REVB, P8V_BUILTIN_REVB_V16QI,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0, 0 },
+ { P8V_BUILTIN_VEC_REVB, P8V_BUILTIN_REVB_V16QI,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0, 0 },
+ { P8V_BUILTIN_VEC_REVB, P8V_BUILTIN_REVB_V2DF,
+ RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0, 0 },
+ { P8V_BUILTIN_VEC_REVB, P8V_BUILTIN_REVB_V4SF,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 },
{ ALTIVEC_BUILTIN_VEC_VREVE, ALTIVEC_BUILTIN_VREVE_V2DI,
RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0, 0 },
@@ -5688,12 +5844,22 @@ rs6000_builtin_type (int id)
return id < 0 ? build_pointer_type (t) : t;
}
-/* Check whether the type of an argument, T, is compatible with a
- type ID stored into a struct altivec_builtin_types. Integer
- types are considered compatible; otherwise, the language hook
- lang_hooks.types_compatible_p makes the decision. */
+/* Check whether the type of an argument, T, is compatible with a type ID
+ stored into a struct altivec_builtin_types. Integer types are considered
+ compatible; otherwise, the language hook lang_hooks.types_compatible_p makes
+ the decision. Also allow long double and _Float128 to be compatible if
+ -mabi=ieeelongdouble. */
static inline bool
+is_float128_p (tree t)
+{
+ return (t == float128_type_node
+ || (TARGET_IEEEQUAD
+ && TARGET_LONG_DOUBLE_128
+ && t == long_double_type_node));
+}
+
+static inline bool
rs6000_builtin_type_compatible (tree t, int id)
{
tree builtin_type;
@@ -5702,6 +5868,9 @@ rs6000_builtin_type_compatible (tree t, int id)
return false;
if (INTEGRAL_TYPE_P (t) && INTEGRAL_TYPE_P (builtin_type))
return true;
+ else if (TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
+ && is_float128_p (t) && is_float128_p (builtin_type))
+ return true;
else
return lang_hooks.types_compatible_p (t, builtin_type);
}
diff --git a/gcc/config/rs6000/rs6000-protos.h b/gcc/config/rs6000/rs6000-protos.h
index 721b906ee65..07288000705 100644
--- a/gcc/config/rs6000/rs6000-protos.h
+++ b/gcc/config/rs6000/rs6000-protos.h
@@ -136,6 +136,8 @@ extern int rs6000_emit_vector_cond_expr (rtx, rtx, rtx, rtx, rtx, rtx);
extern void rs6000_emit_minmax (rtx, enum rtx_code, rtx, rtx);
extern void rs6000_split_signbit (rtx, rtx);
extern void rs6000_expand_atomic_compare_and_swap (rtx op[]);
+extern rtx swap_endian_selector_for_mode (machine_mode mode);
+
extern void rs6000_expand_atomic_exchange (rtx op[]);
extern void rs6000_expand_atomic_op (enum rtx_code, rtx, rtx, rtx, rtx, rtx);
extern void rs6000_emit_swdiv (rtx, rtx, rtx, bool);
diff --git a/gcc/config/rs6000/rs6000.c b/gcc/config/rs6000/rs6000.c
index 6402c0386a6..2d739fef57c 100644
--- a/gcc/config/rs6000/rs6000.c
+++ b/gcc/config/rs6000/rs6000.c
@@ -173,6 +173,7 @@ typedef struct GTY(()) machine_function
bool gpr_is_wrapped_separately[32];
bool fpr_is_wrapped_separately[32];
bool lr_is_wrapped_separately;
+ bool toc_is_wrapped_separately;
} machine_function;
/* Support targetm.vectorize.builtin_mask_for_load. */
@@ -4428,6 +4429,13 @@ rs6000_option_override_internal (bool global_init_p)
&& ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
+ /* If we can shrink-wrap the TOC register save separately, then use
+ -msave-toc-indirect unless explicitly disabled. */
+ if ((rs6000_isa_flags_explicit & OPTION_MASK_SAVE_TOC_INDIRECT) == 0
+ && flag_shrink_wrap_separate
+ && optimize_function_for_speed_p (cfun))
+ rs6000_isa_flags |= OPTION_MASK_SAVE_TOC_INDIRECT;
+
/* Enable power8 fusion if we are tuning for power8, even if we aren't
generating power8 instructions. */
if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
@@ -4800,10 +4808,7 @@ rs6000_option_override_internal (bool global_init_p)
/* For the E500 family of cores, reset the single/double FP flags to let us
check that they remain constant across attributes or pragmas. Also,
clear a possible request for string instructions, not supported and which
- we might have silently queried above for -Os.
-
- For other families, clear ISEL in case it was set implicitly.
- */
+ we might have silently queried above for -Os. */
switch (rs6000_cpu)
{
@@ -4813,19 +4818,12 @@ rs6000_option_override_internal (bool global_init_p)
case PROCESSOR_PPCE500MC64:
case PROCESSOR_PPCE5500:
case PROCESSOR_PPCE6500:
-
rs6000_single_float = 0;
rs6000_double_float = 0;
-
rs6000_isa_flags &= ~OPTION_MASK_STRING;
-
break;
default:
-
- if (cpu_index >= 0 && !(rs6000_isa_flags_explicit & OPTION_MASK_ISEL))
- rs6000_isa_flags &= ~OPTION_MASK_ISEL;
-
break;
}
@@ -14088,7 +14086,8 @@ rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
return CONST0_RTX (tmode);
}
}
- else if (icode == CODE_FOR_xststdcqp
+ else if (icode == CODE_FOR_xststdcqp_kf
+ || icode == CODE_FOR_xststdcqp_tf
|| icode == CODE_FOR_xststdcdp
|| icode == CODE_FOR_xststdcsp
|| icode == CODE_FOR_xvtstdcdp
@@ -14305,6 +14304,44 @@ swap_selector_for_mode (machine_mode mode)
return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm)));
}
+rtx
+swap_endian_selector_for_mode (machine_mode mode)
+{
+ unsigned int swap1[16] = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+ unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
+ unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
+ unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
+
+ unsigned int *swaparray, i;
+ rtx perm[16];
+
+ switch (mode)
+ {
+ case E_V1TImode:
+ swaparray = swap1;
+ break;
+ case E_V2DFmode:
+ case E_V2DImode:
+ swaparray = swap2;
+ break;
+ case E_V4SFmode:
+ case E_V4SImode:
+ swaparray = swap4;
+ break;
+ case E_V8HImode:
+ swaparray = swap8;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ for (i = 0; i < 16; ++i)
+ perm[i] = GEN_INT (swaparray[i]);
+
+ return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode,
+ gen_rtvec_v (16, perm)));
+}
+
/* Generate code for an "lvxl", or "lve*x" built-in for a little endian target
with -maltivec=be specified. Issue the load followed by an element-
reversing permute. */
@@ -14443,58 +14480,6 @@ altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
}
static rtx
-altivec_expand_xl_be_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
-{
- rtx pat, addr;
- tree arg0 = CALL_EXPR_ARG (exp, 0);
- tree arg1 = CALL_EXPR_ARG (exp, 1);
- machine_mode tmode = insn_data[icode].operand[0].mode;
- machine_mode mode0 = Pmode;
- machine_mode mode1 = Pmode;
- rtx op0 = expand_normal (arg0);
- rtx op1 = expand_normal (arg1);
-
- if (icode == CODE_FOR_nothing)
- /* Builtin not supported on this processor. */
- return 0;
-
- /* If we got invalid arguments bail out before generating bad rtl. */
- if (arg0 == error_mark_node || arg1 == error_mark_node)
- return const0_rtx;
-
- if (target == 0
- || GET_MODE (target) != tmode
- || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
- target = gen_reg_rtx (tmode);
-
- op1 = copy_to_mode_reg (mode1, op1);
-
- if (op0 == const0_rtx)
- addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
- else
- {
- op0 = copy_to_mode_reg (mode0, op0);
- addr = gen_rtx_MEM (blk ? BLKmode : tmode,
- gen_rtx_PLUS (Pmode, op1, op0));
- }
-
- pat = GEN_FCN (icode) (target, addr);
- if (!pat)
- return 0;
-
- emit_insn (pat);
- /* Reverse element order of elements if in LE mode */
- if (!VECTOR_ELT_ORDER_BIG)
- {
- rtx sel = swap_selector_for_mode (tmode);
- rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, target, target, sel),
- UNSPEC_VPERM);
- emit_insn (gen_rtx_SET (target, vperm));
- }
- return target;
-}
-
-static rtx
paired_expand_stv_builtin (enum insn_code icode, tree exp)
{
tree arg0 = CALL_EXPR_ARG (exp, 0);
@@ -15890,50 +15875,6 @@ altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
/* Fall through. */
}
- /* XL_BE We initialized them to always load in big endian order. */
- switch (fcode)
- {
- case VSX_BUILTIN_XL_BE_V2DI:
- {
- enum insn_code code = CODE_FOR_vsx_load_v2di;
- return altivec_expand_xl_be_builtin (code, exp, target, false);
- }
- break;
- case VSX_BUILTIN_XL_BE_V4SI:
- {
- enum insn_code code = CODE_FOR_vsx_load_v4si;
- return altivec_expand_xl_be_builtin (code, exp, target, false);
- }
- break;
- case VSX_BUILTIN_XL_BE_V8HI:
- {
- enum insn_code code = CODE_FOR_vsx_load_v8hi;
- return altivec_expand_xl_be_builtin (code, exp, target, false);
- }
- break;
- case VSX_BUILTIN_XL_BE_V16QI:
- {
- enum insn_code code = CODE_FOR_vsx_load_v16qi;
- return altivec_expand_xl_be_builtin (code, exp, target, false);
- }
- break;
- case VSX_BUILTIN_XL_BE_V2DF:
- {
- enum insn_code code = CODE_FOR_vsx_load_v2df;
- return altivec_expand_xl_be_builtin (code, exp, target, false);
- }
- break;
- case VSX_BUILTIN_XL_BE_V4SF:
- {
- enum insn_code code = CODE_FOR_vsx_load_v4sf;
- return altivec_expand_xl_be_builtin (code, exp, target, false);
- }
- break;
- default:
- break;
- /* Fall through. */
- }
-
*expandedp = false;
return NULL_RTX;
}
@@ -16110,39 +16051,11 @@ rs6000_invalid_builtin (enum rs6000_builtins fncode)
from ia64.c. */
static tree
-rs6000_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
- tree *args, bool ignore ATTRIBUTE_UNUSED)
+rs6000_fold_builtin (tree fndecl ATTRIBUTE_UNUSED,
+ int n_args ATTRIBUTE_UNUSED,
+ tree *args ATTRIBUTE_UNUSED,
+ bool ignore ATTRIBUTE_UNUSED)
{
- if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
- {
- enum rs6000_builtins fn_code
- = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
- switch (fn_code)
- {
- case RS6000_BUILTIN_NANQ:
- case RS6000_BUILTIN_NANSQ:
- {
- tree type = TREE_TYPE (TREE_TYPE (fndecl));
- const char *str = c_getstr (*args);
- int quiet = fn_code == RS6000_BUILTIN_NANQ;
- REAL_VALUE_TYPE real;
-
- if (str && real_nan (&real, str, quiet, TYPE_MODE (type)))
- return build_real (type, real);
- return NULL_TREE;
- }
- case RS6000_BUILTIN_INFQ:
- case RS6000_BUILTIN_HUGE_VALQ:
- {
- tree type = TREE_TYPE (TREE_TYPE (fndecl));
- REAL_VALUE_TYPE inf;
- real_inf (&inf);
- return build_real (type, inf);
- }
- default:
- break;
- }
- }
#ifdef SUBTARGET_FOLD_BUILTIN
return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
#else
@@ -16169,6 +16082,36 @@ rs6000_builtin_valid_without_lhs (enum rs6000_builtins fn_code)
}
}
+/* Helper function to handle the gimple folding of a vector compare
+ operation. This sets up true/false vectors, and uses the
+ VEC_COND_EXPR operation.
+ CODE indicates which comparison is to be made. (EQ, GT, ...).
+ TYPE indicates the type of the result. */
+static tree
+fold_build_vec_cmp (tree_code code, tree type,
+ tree arg0, tree arg1)
+{
+ tree cmp_type = build_same_sized_truth_vector_type (type);
+ tree zero_vec = build_zero_cst (type);
+ tree minus_one_vec = build_minus_one_cst (type);
+ tree cmp = fold_build2 (code, cmp_type, arg0, arg1);
+ return fold_build3 (VEC_COND_EXPR, type, cmp, minus_one_vec, zero_vec);
+}
+
+/* Helper function to handle the in-between steps for the
+ vector compare built-ins. */
+static void
+fold_compare_helper (gimple_stmt_iterator *gsi, tree_code code, gimple *stmt)
+{
+ tree arg0 = gimple_call_arg (stmt, 0);
+ tree arg1 = gimple_call_arg (stmt, 1);
+ tree lhs = gimple_call_lhs (stmt);
+ tree cmp = fold_build_vec_cmp (code, TREE_TYPE (lhs), arg0, arg1);
+ gimple *g = gimple_build_assign (lhs, cmp);
+ gimple_set_location (g, gimple_location (stmt));
+ gsi_replace (gsi, g, true);
+}
+
/* Fold a machine-dependent built-in in GIMPLE. (For folding into
a constant, use rs6000_fold_builtin.) */
@@ -16664,6 +16607,53 @@ rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
return true;
}
+ /* Vector compares; EQ, NE, GE, GT, LE. */
+ case ALTIVEC_BUILTIN_VCMPEQUB:
+ case ALTIVEC_BUILTIN_VCMPEQUH:
+ case ALTIVEC_BUILTIN_VCMPEQUW:
+ case P8V_BUILTIN_VCMPEQUD:
+ fold_compare_helper (gsi, EQ_EXPR, stmt);
+ return true;
+
+ case P9V_BUILTIN_CMPNEB:
+ case P9V_BUILTIN_CMPNEH:
+ case P9V_BUILTIN_CMPNEW:
+ fold_compare_helper (gsi, NE_EXPR, stmt);
+ return true;
+
+ case VSX_BUILTIN_CMPGE_16QI:
+ case VSX_BUILTIN_CMPGE_U16QI:
+ case VSX_BUILTIN_CMPGE_8HI:
+ case VSX_BUILTIN_CMPGE_U8HI:
+ case VSX_BUILTIN_CMPGE_4SI:
+ case VSX_BUILTIN_CMPGE_U4SI:
+ case VSX_BUILTIN_CMPGE_2DI:
+ case VSX_BUILTIN_CMPGE_U2DI:
+ fold_compare_helper (gsi, GE_EXPR, stmt);
+ return true;
+
+ case ALTIVEC_BUILTIN_VCMPGTSB:
+ case ALTIVEC_BUILTIN_VCMPGTUB:
+ case ALTIVEC_BUILTIN_VCMPGTSH:
+ case ALTIVEC_BUILTIN_VCMPGTUH:
+ case ALTIVEC_BUILTIN_VCMPGTSW:
+ case ALTIVEC_BUILTIN_VCMPGTUW:
+ case P8V_BUILTIN_VCMPGTUD:
+ case P8V_BUILTIN_VCMPGTSD:
+ fold_compare_helper (gsi, GT_EXPR, stmt);
+ return true;
+
+ case VSX_BUILTIN_CMPLE_16QI:
+ case VSX_BUILTIN_CMPLE_U16QI:
+ case VSX_BUILTIN_CMPLE_8HI:
+ case VSX_BUILTIN_CMPLE_U8HI:
+ case VSX_BUILTIN_CMPLE_4SI:
+ case VSX_BUILTIN_CMPLE_U4SI:
+ case VSX_BUILTIN_CMPLE_2DI:
+ case VSX_BUILTIN_CMPLE_U2DI:
+ fold_compare_helper (gsi, LE_EXPR, stmt);
+ return true;
+
default:
if (TARGET_DEBUG_BUILTIN)
fprintf (stderr, "gimple builtin intrinsic not matched:%d %s %s\n",
@@ -16695,10 +16685,37 @@ rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
bool success;
HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
+ enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
+
+ /* We have two different modes (KFmode, TFmode) that are the IEEE 128-bit
+ floating point type, depending on whether long double is the IBM extended
+ double (KFmode) or long double is IEEE 128-bit (TFmode). It is simpler if
+ we only define one variant of the built-in function, and switch the code
+ when defining it, rather than defining two built-ins and using the
+ overload table in rs6000-c.c to switch between the two. */
+ if (FLOAT128_IEEE_P (TFmode))
+ switch (icode)
+ {
+ default:
+ break;
+
+ case CODE_FOR_sqrtkf2_odd: icode = CODE_FOR_sqrttf2_odd; break;
+ case CODE_FOR_trunckfdf2_odd: icode = CODE_FOR_trunctfdf2_odd; break;
+ case CODE_FOR_addkf3_odd: icode = CODE_FOR_addtf3_odd; break;
+ case CODE_FOR_subkf3_odd: icode = CODE_FOR_subtf3_odd; break;
+ case CODE_FOR_mulkf3_odd: icode = CODE_FOR_multf3_odd; break;
+ case CODE_FOR_divkf3_odd: icode = CODE_FOR_divtf3_odd; break;
+ case CODE_FOR_fmakf4_odd: icode = CODE_FOR_fmatf4_odd; break;
+ case CODE_FOR_xsxexpqp_kf: icode = CODE_FOR_xsxexpqp_tf; break;
+ case CODE_FOR_xsxsigqp_kf: icode = CODE_FOR_xsxsigqp_tf; break;
+ case CODE_FOR_xststdcnegqp_kf: icode = CODE_FOR_xststdcnegqp_tf; break;
+ case CODE_FOR_xsiexpqp_kf: icode = CODE_FOR_xsiexpqp_tf; break;
+ case CODE_FOR_xsiexpqpf_kf: icode = CODE_FOR_xsiexpqpf_tf; break;
+ case CODE_FOR_xststdcqp_kf: icode = CODE_FOR_xststdcqp_tf; break;
+ }
if (TARGET_DEBUG_BUILTIN)
{
- enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
const char *name1 = rs6000_builtin_info[uns_fcode].name;
const char *name2 = (icode != CODE_FOR_nothing)
? get_insn_name ((int) icode)
@@ -16777,10 +16794,10 @@ rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
case ALTIVEC_BUILTIN_MASK_FOR_STORE:
{
- int icode = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
+ int icode2 = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
: (int) CODE_FOR_altivec_lvsl_direct);
- machine_mode tmode = insn_data[icode].operand[0].mode;
- machine_mode mode = insn_data[icode].operand[1].mode;
+ machine_mode tmode = insn_data[icode2].operand[0].mode;
+ machine_mode mode = insn_data[icode2].operand[1].mode;
tree arg;
rtx op, addr, pat;
@@ -16802,10 +16819,10 @@ rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
if (target == 0
|| GET_MODE (target) != tmode
- || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ || ! (*insn_data[icode2].operand[0].predicate) (target, tmode))
target = gen_reg_rtx (tmode);
- pat = GEN_FCN (icode) (target, op);
+ pat = GEN_FCN (icode2) (target, op);
if (!pat)
return 0;
emit_insn (pat);
@@ -16863,25 +16880,25 @@ rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
d = bdesc_1arg;
for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
if (d->code == fcode)
- return rs6000_expand_unop_builtin (d->icode, exp, target);
+ return rs6000_expand_unop_builtin (icode, exp, target);
/* Handle simple binary operations. */
d = bdesc_2arg;
for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
if (d->code == fcode)
- return rs6000_expand_binop_builtin (d->icode, exp, target);
+ return rs6000_expand_binop_builtin (icode, exp, target);
/* Handle simple ternary operations. */
d = bdesc_3arg;
for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
if (d->code == fcode)
- return rs6000_expand_ternop_builtin (d->icode, exp, target);
+ return rs6000_expand_ternop_builtin (icode, exp, target);
/* Handle simple no-argument operations. */
d = bdesc_0arg;
for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
if (d->code == fcode)
- return rs6000_expand_zeroop_builtin (d->icode, target);
+ return rs6000_expand_zeroop_builtin (icode, target);
gcc_unreachable ();
}
@@ -17103,15 +17120,6 @@ rs6000_init_builtins (void)
if (TARGET_EXTRA_BUILTINS || TARGET_PAIRED_FLOAT)
rs6000_common_init_builtins ();
- ftype = build_function_type_list (ieee128_float_type_node,
- const_str_type_node, NULL_TREE);
- def_builtin ("__builtin_nanq", ftype, RS6000_BUILTIN_NANQ);
- def_builtin ("__builtin_nansq", ftype, RS6000_BUILTIN_NANSQ);
-
- ftype = build_function_type_list (ieee128_float_type_node, NULL_TREE);
- def_builtin ("__builtin_infq", ftype, RS6000_BUILTIN_INFQ);
- def_builtin ("__builtin_huge_valq", ftype, RS6000_BUILTIN_HUGE_VALQ);
-
ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
@@ -17527,6 +17535,10 @@ altivec_init_builtins (void)
VSX_BUILTIN_LD_ELEMREV_V4SF);
def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid,
VSX_BUILTIN_LD_ELEMREV_V4SI);
+ def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
+ VSX_BUILTIN_LD_ELEMREV_V8HI);
+ def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
+ VSX_BUILTIN_LD_ELEMREV_V16QI);
def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid,
VSX_BUILTIN_ST_ELEMREV_V2DF);
def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid,
@@ -17535,42 +17547,10 @@ altivec_init_builtins (void)
VSX_BUILTIN_ST_ELEMREV_V4SF);
def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
VSX_BUILTIN_ST_ELEMREV_V4SI);
-
- def_builtin ("__builtin_vsx_le_be_v8hi", v8hi_ftype_long_pcvoid,
- VSX_BUILTIN_XL_BE_V8HI);
- def_builtin ("__builtin_vsx_le_be_v4si", v4si_ftype_long_pcvoid,
- VSX_BUILTIN_XL_BE_V4SI);
- def_builtin ("__builtin_vsx_le_be_v2di", v2di_ftype_long_pcvoid,
- VSX_BUILTIN_XL_BE_V2DI);
- def_builtin ("__builtin_vsx_le_be_v4sf", v4sf_ftype_long_pcvoid,
- VSX_BUILTIN_XL_BE_V4SF);
- def_builtin ("__builtin_vsx_le_be_v2df", v2df_ftype_long_pcvoid,
- VSX_BUILTIN_XL_BE_V2DF);
- def_builtin ("__builtin_vsx_le_be_v16qi", v16qi_ftype_long_pcvoid,
- VSX_BUILTIN_XL_BE_V16QI);
-
- if (TARGET_P9_VECTOR)
- {
- def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
- VSX_BUILTIN_LD_ELEMREV_V8HI);
- def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
- VSX_BUILTIN_LD_ELEMREV_V16QI);
- def_builtin ("__builtin_vsx_st_elemrev_v8hi",
- void_ftype_v8hi_long_pvoid, VSX_BUILTIN_ST_ELEMREV_V8HI);
- def_builtin ("__builtin_vsx_st_elemrev_v16qi",
- void_ftype_v16qi_long_pvoid, VSX_BUILTIN_ST_ELEMREV_V16QI);
- }
- else
- {
- rs6000_builtin_decls[(int) VSX_BUILTIN_LD_ELEMREV_V8HI]
- = rs6000_builtin_decls[(int) VSX_BUILTIN_LXVW4X_V8HI];
- rs6000_builtin_decls[(int) VSX_BUILTIN_LD_ELEMREV_V16QI]
- = rs6000_builtin_decls[(int) VSX_BUILTIN_LXVW4X_V16QI];
- rs6000_builtin_decls[(int) VSX_BUILTIN_ST_ELEMREV_V8HI]
- = rs6000_builtin_decls[(int) VSX_BUILTIN_STXVW4X_V8HI];
- rs6000_builtin_decls[(int) VSX_BUILTIN_ST_ELEMREV_V16QI]
- = rs6000_builtin_decls[(int) VSX_BUILTIN_STXVW4X_V16QI];
- }
+ def_builtin ("__builtin_vsx_st_elemrev_v8hi", void_ftype_v8hi_long_pvoid,
+ VSX_BUILTIN_ST_ELEMREV_V8HI);
+ def_builtin ("__builtin_vsx_st_elemrev_v16qi", void_ftype_v16qi_long_pvoid,
+ VSX_BUILTIN_ST_ELEMREV_V16QI);
def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
VSX_BUILTIN_VEC_LD);
@@ -17582,6 +17562,8 @@ altivec_init_builtins (void)
VSX_BUILTIN_VEC_XL_BE);
def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
VSX_BUILTIN_VEC_XST);
+ def_builtin ("__builtin_vec_xst_be", void_ftype_opaque_long_pvoid,
+ VSX_BUILTIN_VEC_XST_BE);
def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
@@ -18065,7 +18047,7 @@ builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
are type correct. */
switch (builtin)
{
- /* unsigned 1 argument functions. */
+ /* unsigned 1 argument functions. */
case CRYPTO_BUILTIN_VSBOX:
case P8V_BUILTIN_VGBBD:
case MISC_BUILTIN_CDTBCD:
@@ -18074,7 +18056,7 @@ builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
h.uns_p[1] = 1;
break;
- /* unsigned 2 argument functions. */
+ /* unsigned 2 argument functions. */
case ALTIVEC_BUILTIN_VMULEUB:
case ALTIVEC_BUILTIN_VMULEUH:
case ALTIVEC_BUILTIN_VMULEUW:
@@ -18109,7 +18091,7 @@ builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
h.uns_p[2] = 1;
break;
- /* unsigned 3 argument functions. */
+ /* unsigned 3 argument functions. */
case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
@@ -18140,7 +18122,7 @@ builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
h.uns_p[3] = 1;
break;
- /* signed permute functions with unsigned char mask. */
+ /* signed permute functions with unsigned char mask. */
case ALTIVEC_BUILTIN_VPERM_16QI:
case ALTIVEC_BUILTIN_VPERM_8HI:
case ALTIVEC_BUILTIN_VPERM_4SI:
@@ -18156,14 +18138,14 @@ builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
h.uns_p[3] = 1;
break;
- /* unsigned args, signed return. */
+ /* unsigned args, signed return. */
case VSX_BUILTIN_XVCVUXDSP:
case VSX_BUILTIN_XVCVUXDDP_UNS:
case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
h.uns_p[1] = 1;
break;
- /* signed args, unsigned return. */
+ /* signed args, unsigned return. */
case VSX_BUILTIN_XVCVDPUXDS_UNS:
case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
case MISC_BUILTIN_UNPACK_TD:
@@ -18171,14 +18153,31 @@ builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
h.uns_p[0] = 1;
break;
- /* unsigned arguments for 128-bit pack instructions. */
+ /* unsigned arguments, bool return (compares). */
+ case ALTIVEC_BUILTIN_VCMPEQUB:
+ case ALTIVEC_BUILTIN_VCMPEQUH:
+ case ALTIVEC_BUILTIN_VCMPEQUW:
+ case P8V_BUILTIN_VCMPEQUD:
+ case VSX_BUILTIN_CMPGE_U16QI:
+ case VSX_BUILTIN_CMPGE_U8HI:
+ case VSX_BUILTIN_CMPGE_U4SI:
+ case VSX_BUILTIN_CMPGE_U2DI:
+ case ALTIVEC_BUILTIN_VCMPGTUB:
+ case ALTIVEC_BUILTIN_VCMPGTUH:
+ case ALTIVEC_BUILTIN_VCMPGTUW:
+ case P8V_BUILTIN_VCMPGTUD:
+ h.uns_p[1] = 1;
+ h.uns_p[2] = 1;
+ break;
+
+ /* unsigned arguments for 128-bit pack instructions. */
case MISC_BUILTIN_PACK_TD:
case MISC_BUILTIN_PACK_V1TI:
h.uns_p[1] = 1;
h.uns_p[2] = 1;
break;
- /* unsigned second arguments (vector shift right). */
+ /* unsigned second arguments (vector shift right). */
case ALTIVEC_BUILTIN_VSRB:
case ALTIVEC_BUILTIN_VSRH:
case ALTIVEC_BUILTIN_VSRW:
@@ -26638,6 +26637,7 @@ rs6000_get_separate_components (void)
&& !(info->savres_strategy & REST_MULTIPLE));
/* Component 0 is the save/restore of LR (done via GPR0).
+ Component 2 is the save of the TOC (GPR2).
Components 13..31 are the save/restore of GPR13..GPR31.
Components 46..63 are the save/restore of FPR14..FPR31. */
@@ -26712,6 +26712,10 @@ rs6000_get_separate_components (void)
bitmap_set_bit (components, 0);
}
+ /* Optimize saving the TOC. This is component 2. */
+ if (cfun->machine->save_toc_in_prologue)
+ bitmap_set_bit (components, 2);
+
return components;
}
@@ -26750,6 +26754,12 @@ rs6000_components_for_bb (basic_block bb)
|| bitmap_bit_p (kill, LR_REGNO))
bitmap_set_bit (components, 0);
+ /* The TOC save. */
+ if (bitmap_bit_p (in, TOC_REGNUM)
+ || bitmap_bit_p (gen, TOC_REGNUM)
+ || bitmap_bit_p (kill, TOC_REGNUM))
+ bitmap_set_bit (components, 2);
+
return components;
}
@@ -26804,6 +26814,14 @@ rs6000_emit_prologue_components (sbitmap components)
add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (mem, lr));
}
+ /* Prologue for TOC. */
+ if (bitmap_bit_p (components, 2))
+ {
+ rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
+ rtx sp_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
+ emit_insn (gen_frame_store (reg, sp_reg, RS6000_TOC_SAVE_SLOT));
+ }
+
/* Prologue for the GPRs. */
int offset = info->gp_save_offset;
if (info->push_p)
@@ -26928,6 +26946,9 @@ rs6000_set_handled_components (sbitmap components)
if (bitmap_bit_p (components, 0))
cfun->machine->lr_is_wrapped_separately = true;
+
+ if (bitmap_bit_p (components, 2))
+ cfun->machine->toc_is_wrapped_separately = true;
}
/* VRSAVE is a bit vector representing which AltiVec registers
@@ -27885,7 +27906,8 @@ rs6000_emit_prologue (void)
unwinder to interpret it. R2 changes, apart from the
calls_eh_return case earlier in this function, are handled by
linux-unwind.h frob_update_context. */
- if (rs6000_save_toc_in_prologue_p ())
+ if (rs6000_save_toc_in_prologue_p ()
+ && !cfun->machine->toc_is_wrapped_separately)
{
rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
@@ -35040,6 +35062,8 @@ rs6000_insn_cost (rtx_insn *insn, bool speed)
case TYPE_SYNC:
case TYPE_LOAD_L:
+ case TYPE_MFCR:
+ case TYPE_MFCRF:
cost = COSTS_N_INSNS (n + 2);
break;
diff --git a/gcc/config/rs6000/rs6000.md b/gcc/config/rs6000/rs6000.md
index ed5ff397e07..276ad8a32e8 100644
--- a/gcc/config/rs6000/rs6000.md
+++ b/gcc/config/rs6000/rs6000.md
@@ -182,7 +182,7 @@
cmp,
branch,jmpreg,mfjmpr,mtjmpr,trap,isync,sync,load_l,store_c,
cr_logical,delayed_cr,mfcr,mfcrf,mtcr,
- fpcompare,fp,fpsimple,dmul,sdiv,ddiv,ssqrt,dsqrt,
+ fpcompare,fp,fpsimple,dmul,qmul,sdiv,ddiv,ssqrt,dsqrt,
vecsimple,veccomplex,vecdiv,veccmp,veccmpsimple,vecperm,
vecfloat,vecfdiv,vecdouble,mffgpr,mftgpr,crypto,
veclogical,veccmpfx,vecexts,vecmove,
@@ -323,6 +323,9 @@
; of whole values in GPRs.
(define_mode_iterator GPR [SI (DI "TARGET_POWERPC64")])
+; And again, for patterns that need two (potentially) different integer modes.
+(define_mode_iterator GPR2 [SI (DI "TARGET_POWERPC64")])
+
; Any supported integer mode.
(define_mode_iterator INT [QI HI SI DI TI PTI])
@@ -2429,13 +2432,15 @@
[(set_attr "type" "store")])
(define_insn_and_split "bswaphi2_reg"
- [(set (match_operand:HI 0 "gpc_reg_operand" "=&r")
+ [(set (match_operand:HI 0 "gpc_reg_operand" "=&r,wo")
(bswap:HI
- (match_operand:HI 1 "gpc_reg_operand" "r")))
- (clobber (match_scratch:SI 2 "=&r"))]
+ (match_operand:HI 1 "gpc_reg_operand" "r,wo")))
+ (clobber (match_scratch:SI 2 "=&r,X"))]
""
- "#"
- "reload_completed"
+ "@
+ #
+ xxbrh %x0,%x1"
+ "reload_completed && int_reg_operand (operands[0], HImode)"
[(set (match_dup 3)
(and:SI (lshiftrt:SI (match_dup 4)
(const_int 8))
@@ -2451,18 +2456,20 @@
operands[3] = simplify_gen_subreg (SImode, operands[0], HImode, 0);
operands[4] = simplify_gen_subreg (SImode, operands[1], HImode, 0);
}
- [(set_attr "length" "12")
- (set_attr "type" "*")])
+ [(set_attr "length" "12,4")
+ (set_attr "type" "*,vecperm")])
;; We are always BITS_BIG_ENDIAN, so the bit positions below in
;; zero_extract insns do not change for -mlittle.
(define_insn_and_split "bswapsi2_reg"
- [(set (match_operand:SI 0 "gpc_reg_operand" "=&r")
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=&r,wo")
(bswap:SI
- (match_operand:SI 1 "gpc_reg_operand" "r")))]
+ (match_operand:SI 1 "gpc_reg_operand" "r,wo")))]
""
- "#"
- "reload_completed"
+ "@
+ #
+ xxbrw %x0,%x1"
+ "reload_completed && int_reg_operand (operands[0], SImode)"
[(set (match_dup 0) ; DABC
(rotate:SI (match_dup 1)
(const_int 24)))
@@ -2478,7 +2485,9 @@
(const_int 255))
(and:SI (match_dup 0)
(const_int -256))))]
- "")
+ ""
+ [(set_attr "length" "12,4")
+ (set_attr "type" "*,vecperm")])
;; On systems with LDBRX/STDBRX generate the loads/stores directly, just like
;; we do for L{H,W}BRX and ST{H,W}BRX above. If not, we have to generate more
@@ -2504,6 +2513,8 @@
emit_insn (gen_bswapdi2_load (dest, src));
else if (MEM_P (dest))
emit_insn (gen_bswapdi2_store (dest, src));
+ else if (TARGET_P9_VECTOR)
+ emit_insn (gen_bswapdi2_xxbrd (dest, src));
else
emit_insn (gen_bswapdi2_reg (dest, src));
DONE;
@@ -2534,12 +2545,19 @@
"stdbrx %1,%y0"
[(set_attr "type" "store")])
+(define_insn "bswapdi2_xxbrd"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=wo")
+ (bswap:DI (match_operand:DI 1 "gpc_reg_operand" "wo")))]
+ "TARGET_P9_VECTOR"
+ "xxbrd %x0,%x1"
+ [(set_attr "type" "vecperm")])
+
(define_insn "bswapdi2_reg"
[(set (match_operand:DI 0 "gpc_reg_operand" "=&r")
(bswap:DI (match_operand:DI 1 "gpc_reg_operand" "r")))
(clobber (match_scratch:DI 2 "=&r"))
(clobber (match_scratch:DI 3 "=&r"))]
- "TARGET_POWERPC64 && TARGET_LDBRX"
+ "TARGET_POWERPC64 && TARGET_LDBRX && !TARGET_P9_VECTOR"
"#"
[(set_attr "length" "36")])
@@ -2688,7 +2706,7 @@
(bswap:DI (match_operand:DI 1 "gpc_reg_operand" "")))
(clobber (match_operand:DI 2 "gpc_reg_operand" ""))
(clobber (match_operand:DI 3 "gpc_reg_operand" ""))]
- "TARGET_POWERPC64 && reload_completed"
+ "TARGET_POWERPC64 && !TARGET_P9_VECTOR && reload_completed"
[(const_int 0)]
"
{
@@ -11780,13 +11798,9 @@
(clobber (match_operand:GPR 0 "gpc_reg_operand"))]
""
{
- /* Use ISEL if the user asked for it. */
- if (TARGET_ISEL)
- rs6000_emit_int_cmove (operands[0], operands[1], const1_rtx, const0_rtx);
-
/* Expanding EQ and NE directly to some machine instructions does not help
but does hurt combine. So don't. */
- else if (GET_CODE (operands[1]) == EQ)
+ if (GET_CODE (operands[1]) == EQ)
emit_insn (gen_eq<mode>3 (operands[0], operands[2], operands[3]));
else if (<MODE>mode == Pmode
&& GET_CODE (operands[1]) == NE)
@@ -11798,7 +11812,11 @@
emit_insn (gen_xor<mode>3 (operands[0], tmp, const1_rtx));
}
- /* Expanding the unsigned comparisons however helps a lot: all the neg_ltu
+ /* If ISEL is fast, expand to it. */
+ else if (TARGET_ISEL)
+ rs6000_emit_int_cmove (operands[0], operands[1], const1_rtx, const0_rtx);
+
+ /* Expanding the unsigned comparisons helps a lot: all the neg_ltu
etc. combinations magically work out just right. */
else if (<MODE>mode == Pmode
&& unsigned_comparison_operator (operands[1], VOIDmode))
@@ -12280,18 +12298,102 @@
"")
+(define_code_iterator cmp [eq ne lt ltu gt gtu le leu ge geu])
+(define_code_attr UNS [(eq "CC")
+ (ne "CC")
+ (lt "CC") (ltu "CCUNS")
+ (gt "CC") (gtu "CCUNS")
+ (le "CC") (leu "CCUNS")
+ (ge "CC") (geu "CCUNS")])
+(define_code_attr UNSu_ [(eq "")
+ (ne "")
+ (lt "") (ltu "u_")
+ (gt "") (gtu "u_")
+ (le "") (leu "u_")
+ (ge "") (geu "u_")])
+(define_code_attr UNSIK [(eq "I")
+ (ne "I")
+ (lt "I") (ltu "K")
+ (gt "I") (gtu "K")
+ (le "I") (leu "K")
+ (ge "I") (geu "K")])
+
+(define_insn_and_split "<code><GPR:mode><GPR2:mode>2_isel"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
+ (cmp:GPR (match_operand:GPR2 1 "gpc_reg_operand" "r")
+ (match_operand:GPR2 2 "reg_or_<cmp:UNSu_>short_operand" "r<cmp:UNSIK>")))
+ (clobber (match_scratch:GPR 3 "=r"))
+ (clobber (match_scratch:GPR 4 "=r"))
+ (clobber (match_scratch:<UNS> 5 "=y"))]
+ "TARGET_ISEL
+ && !(<CODE> == EQ && operands[2] == const0_rtx)
+ && !(<CODE> == NE && operands[2] == const0_rtx
+ && <GPR:MODE>mode == Pmode && <GPR2:MODE>mode == Pmode)"
+ "#"
+ "&& 1"
+ [(pc)]
+{
+ if (<CODE> == NE || <CODE> == LE || <CODE> == GE
+ || <CODE> == LEU || <CODE> == GEU)
+ operands[3] = const0_rtx;
+ else
+ {
+ if (GET_CODE (operands[3]) == SCRATCH)
+ operands[3] = gen_reg_rtx (<GPR:MODE>mode);
+ emit_move_insn (operands[3], const0_rtx);
+ }
+
+ if (GET_CODE (operands[4]) == SCRATCH)
+ operands[4] = gen_reg_rtx (<GPR:MODE>mode);
+ emit_move_insn (operands[4], const1_rtx);
+
+ if (GET_CODE (operands[5]) == SCRATCH)
+ operands[5] = gen_reg_rtx (<UNS>mode);
+
+ rtx c1 = gen_rtx_COMPARE (<UNS>mode, operands[1], operands[2]);
+ emit_insn (gen_rtx_SET (operands[5], c1));
+
+ rtx c2 = gen_rtx_fmt_ee (<CODE>, <GPR:MODE>mode, operands[5], const0_rtx);
+ rtx x = gen_rtx_IF_THEN_ELSE (<GPR:MODE>mode, c2, operands[4], operands[3]);
+ emit_move_insn (operands[0], x);
+
+ DONE;
+}
+ [(set (attr "cost")
+ (if_then_else (match_test "<CODE> == NE || <CODE> == LE || <CODE> == GE
+ || <CODE> == LEU || <CODE> == GEU")
+ (const_string "9")
+ (const_string "10")))])
+
(define_mode_attr scc_eq_op2 [(SI "rKLI")
(DI "rKJI")])
-(define_insn_and_split "eq<mode>3"
+(define_expand "eq<mode>3"
+ [(parallel [
+ (set (match_operand:GPR 0 "gpc_reg_operand" "=r")
+ (eq:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")
+ (match_operand:GPR 2 "scc_eq_operand" "<scc_eq_op2>")))
+ (clobber (match_scratch:GPR 3 "=r"))
+ (clobber (match_scratch:GPR 4 "=r"))])]
+ ""
+{
+ if (TARGET_ISEL && operands[2] != const0_rtx)
+ {
+ emit_insn (gen_eq<mode><mode>2_isel (operands[0], operands[1],
+ operands[2]));
+ DONE;
+ }
+})
+
+(define_insn_and_split "*eq<mode>3"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(eq:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")
(match_operand:GPR 2 "scc_eq_operand" "<scc_eq_op2>")))
(clobber (match_scratch:GPR 3 "=r"))
(clobber (match_scratch:GPR 4 "=r"))]
- ""
+ "!(TARGET_ISEL && operands[2] != const0_rtx)"
"#"
- ""
+ "&& 1"
[(set (match_dup 4)
(clz:GPR (match_dup 3)))
(set (match_dup 0)
@@ -12311,16 +12413,34 @@
(const_string "8")
(const_string "12")))])
-(define_insn_and_split "ne<mode>3"
+(define_expand "ne<mode>3"
+ [(parallel [
+ (set (match_operand:P 0 "gpc_reg_operand" "=r")
+ (ne:P (match_operand:P 1 "gpc_reg_operand" "r")
+ (match_operand:P 2 "scc_eq_operand" "<scc_eq_op2>")))
+ (clobber (match_scratch:P 3 "=r"))
+ (clobber (match_scratch:P 4 "=r"))
+ (clobber (reg:P CA_REGNO))])]
+ ""
+{
+ if (TARGET_ISEL && operands[2] != const0_rtx)
+ {
+ emit_insn (gen_ne<mode><mode>2_isel (operands[0], operands[1],
+ operands[2]));
+ DONE;
+ }
+})
+
+(define_insn_and_split "*ne<mode>3"
[(set (match_operand:P 0 "gpc_reg_operand" "=r")
(ne:P (match_operand:P 1 "gpc_reg_operand" "r")
(match_operand:P 2 "scc_eq_operand" "<scc_eq_op2>")))
(clobber (match_scratch:P 3 "=r"))
(clobber (match_scratch:P 4 "=r"))
(clobber (reg:P CA_REGNO))]
- "!TARGET_ISEL"
+ "!(TARGET_ISEL && operands[2] != const0_rtx)"
"#"
- ""
+ "&& 1"
[(parallel [(set (match_dup 4)
(plus:P (match_dup 3)
(const_int -1)))
@@ -12573,9 +12693,9 @@
(clobber (match_scratch:SI 3 "=r"))
(clobber (match_scratch:SI 4 "=r"))
(clobber (match_scratch:EXTSI 5 "=r"))]
- ""
+ "!TARGET_ISEL"
"#"
- ""
+ "&& 1"
[(set (match_dup 4)
(clz:SI (match_dup 3)))
(set (match_dup 5)
@@ -14230,7 +14350,7 @@
(match_operand:IEEE128 2 "altivec_register_operand" "v")))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"xsmulqp %0,%1,%2"
- [(set_attr "type" "vecfloat")
+ [(set_attr "type" "qmul")
(set_attr "size" "128")])
(define_insn "div<mode>3"
@@ -14332,7 +14452,7 @@
(match_operand:IEEE128 3 "altivec_register_operand" "0")))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"xsmaddqp %0,%1,%2"
- [(set_attr "type" "vecfloat")
+ [(set_attr "type" "qmul")
(set_attr "size" "128")])
(define_insn "*fms<mode>4_hw"
@@ -14344,7 +14464,7 @@
(match_operand:IEEE128 3 "altivec_register_operand" "0"))))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"xsmsubqp %0,%1,%2"
- [(set_attr "type" "vecfloat")
+ [(set_attr "type" "qmul")
(set_attr "size" "128")])
(define_insn "*nfma<mode>4_hw"
@@ -14356,7 +14476,7 @@
(match_operand:IEEE128 3 "altivec_register_operand" "0"))))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"xsnmaddqp %0,%1,%2"
- [(set_attr "type" "vecfloat")
+ [(set_attr "type" "qmul")
(set_attr "size" "128")])
(define_insn "*nfms<mode>4_hw"
@@ -14369,7 +14489,7 @@
(match_operand:IEEE128 3 "altivec_register_operand" "0")))))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"xsnmsubqp %0,%1,%2"
- [(set_attr "type" "vecfloat")
+ [(set_attr "type" "qmul")
(set_attr "size" "128")])
(define_insn "extend<SFDF:mode><IEEE128:mode>2_hw"
@@ -14644,7 +14764,7 @@
UNSPEC_MUL_ROUND_TO_ODD))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"xsmulqpo %0,%1,%2"
- [(set_attr "type" "vecfloat")
+ [(set_attr "type" "qmul")
(set_attr "size" "128")])
(define_insn "div<mode>3_odd"
@@ -14677,7 +14797,7 @@
UNSPEC_FMA_ROUND_TO_ODD))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"xsmaddqpo %0,%1,%2"
- [(set_attr "type" "vecfloat")
+ [(set_attr "type" "qmul")
(set_attr "size" "128")])
(define_insn "*fms<mode>4_odd"
@@ -14690,7 +14810,7 @@
UNSPEC_FMA_ROUND_TO_ODD))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"xsmsubqpo %0,%1,%2"
- [(set_attr "type" "vecfloat")
+ [(set_attr "type" "qmul")
(set_attr "size" "128")])
(define_insn "*nfma<mode>4_odd"
@@ -14703,7 +14823,7 @@
UNSPEC_FMA_ROUND_TO_ODD)))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"xsnmaddqpo %0,%1,%2"
- [(set_attr "type" "vecfloat")
+ [(set_attr "type" "qmul")
(set_attr "size" "128")])
(define_insn "*nfms<mode>4_odd"
@@ -14717,7 +14837,7 @@
UNSPEC_FMA_ROUND_TO_ODD)))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"xsnmsubqpo %0,%1,%2"
- [(set_attr "type" "vecfloat")
+ [(set_attr "type" "qmul")
(set_attr "size" "128")])
(define_insn "trunc<mode>df2_odd"
diff --git a/gcc/config/rs6000/vsx.md b/gcc/config/rs6000/vsx.md
index 35be5dead64..00d76563f37 100644
--- a/gcc/config/rs6000/vsx.md
+++ b/gcc/config/rs6000/vsx.md
@@ -73,6 +73,13 @@
(TF "FLOAT128_VECTOR_P (TFmode)")
TI])
+(define_mode_attr VSX_XXBR [(V8HI "h")
+ (V4SI "w")
+ (V4SF "w")
+ (V2DF "d")
+ (V2DI "d")
+ (V1TI "q")])
+
;; Map into the appropriate load/store name based on the type
(define_mode_attr VSm [(V16QI "vw4")
(V8HI "vw4")
@@ -273,6 +280,9 @@
(define_mode_iterator VSINT_84 [V4SI V2DI DI SI])
(define_mode_iterator VSINT_842 [V8HI V4SI V2DI])
+;; Vector reverse byte modes
+(define_mode_iterator VEC_REVB [V8HI V4SI V2DI V4SF V2DF V1TI])
+
;; Iterator for ISA 3.0 vector extract/insert of small integer vectors.
;; VSX_EXTRACT_I2 doesn't include V4SImode because SI extracts can be
;; done on ISA 2.07 and not just ISA 3.0.
@@ -408,6 +418,10 @@
UNSPEC_VCMPNEZW
UNSPEC_XXEXTRACTUW
UNSPEC_XXINSERTW
+ UNSPEC_VSX_FIRST_MATCH_INDEX
+ UNSPEC_VSX_FIRST_MATCH_EOS_INDEX
+ UNSPEC_VSX_FIRST_MISMATCH_INDEX
+ UNSPEC_VSX_FIRST_MISMATCH_EOS_INDEX
])
;; VSX moves
@@ -1108,7 +1122,7 @@
"lxvw4x %x0,%y1"
[(set_attr "type" "vecload")])
-(define_insn "vsx_ld_elemrev_v8hi"
+(define_expand "vsx_ld_elemrev_v8hi"
[(set (match_operand:V8HI 0 "vsx_register_operand" "=wa")
(vec_select:V8HI
(match_operand:V8HI 1 "memory_operand" "Z")
@@ -1116,22 +1130,94 @@
(const_int 5) (const_int 4)
(const_int 3) (const_int 2)
(const_int 1) (const_int 0)])))]
+ "VECTOR_MEM_VSX_P (V8HImode) && !BYTES_BIG_ENDIAN"
+{
+ if (!TARGET_P9_VECTOR)
+ {
+ rtx tmp = gen_reg_rtx (V4SImode);
+ rtx subreg, subreg2, perm[16], pcv;
+ /* 2 is leftmost element in register */
+ unsigned int reorder[16] = {13,12,15,14,9,8,11,10,5,4,7,6,1,0,3,2};
+ int i;
+
+ subreg = simplify_gen_subreg (V4SImode, operands[1], V8HImode, 0);
+ emit_insn (gen_vsx_ld_elemrev_v4si (tmp, subreg));
+ subreg2 = simplify_gen_subreg (V8HImode, tmp, V4SImode, 0);
+
+ for (i = 0; i < 16; ++i)
+ perm[i] = GEN_INT (reorder[i]);
+
+ pcv = force_reg (V16QImode,
+ gen_rtx_CONST_VECTOR (V16QImode,
+ gen_rtvec_v (16, perm)));
+ emit_insn (gen_altivec_vperm_v8hi_direct (operands[0], subreg2,
+ subreg2, pcv));
+ DONE;
+ }
+})
+
+(define_insn "*vsx_ld_elemrev_v8hi_internal"
+ [(set (match_operand:V8HI 0 "vsx_register_operand" "=wa")
+ (vec_select:V8HI
+ (match_operand:V8HI 1 "memory_operand" "Z")
+ (parallel [(const_int 7) (const_int 6)
+ (const_int 5) (const_int 4)
+ (const_int 3) (const_int 2)
+ (const_int 1) (const_int 0)])))]
"VECTOR_MEM_VSX_P (V8HImode) && !BYTES_BIG_ENDIAN && TARGET_P9_VECTOR"
"lxvh8x %x0,%y1"
[(set_attr "type" "vecload")])
-(define_insn "vsx_ld_elemrev_v16qi"
+(define_expand "vsx_ld_elemrev_v16qi"
+ [(set (match_operand:V16QI 0 "vsx_register_operand" "=wa")
+ (vec_select:V16QI
+ (match_operand:V16QI 1 "memory_operand" "Z")
+ (parallel [(const_int 15) (const_int 14)
+ (const_int 13) (const_int 12)
+ (const_int 11) (const_int 10)
+ (const_int 9) (const_int 8)
+ (const_int 7) (const_int 6)
+ (const_int 5) (const_int 4)
+ (const_int 3) (const_int 2)
+ (const_int 1) (const_int 0)])))]
+ "VECTOR_MEM_VSX_P (V16QImode) && !BYTES_BIG_ENDIAN"
+{
+ if (!TARGET_P9_VECTOR)
+ {
+ rtx tmp = gen_reg_rtx (V4SImode);
+ rtx subreg, subreg2, perm[16], pcv;
+ /* 3 is leftmost element in register */
+ unsigned int reorder[16] = {12,13,14,15,8,9,10,11,4,5,6,7,0,1,2,3};
+ int i;
+
+ subreg = simplify_gen_subreg (V4SImode, operands[1], V16QImode, 0);
+ emit_insn (gen_vsx_ld_elemrev_v4si (tmp, subreg));
+ subreg2 = simplify_gen_subreg (V16QImode, tmp, V4SImode, 0);
+
+ for (i = 0; i < 16; ++i)
+ perm[i] = GEN_INT (reorder[i]);
+
+ pcv = force_reg (V16QImode,
+ gen_rtx_CONST_VECTOR (V16QImode,
+ gen_rtvec_v (16, perm)));
+ emit_insn (gen_altivec_vperm_v16qi_direct (operands[0], subreg2,
+ subreg2, pcv));
+ DONE;
+ }
+})
+
+(define_insn "*vsx_ld_elemrev_v16qi_internal"
[(set (match_operand:V16QI 0 "vsx_register_operand" "=wa")
(vec_select:V16QI
- (match_operand:V16QI 1 "memory_operand" "Z")
- (parallel [(const_int 15) (const_int 14)
- (const_int 13) (const_int 12)
- (const_int 11) (const_int 10)
- (const_int 9) (const_int 8)
- (const_int 7) (const_int 6)
- (const_int 5) (const_int 4)
- (const_int 3) (const_int 2)
- (const_int 1) (const_int 0)])))]
+ (match_operand:V16QI 1 "memory_operand" "Z")
+ (parallel [(const_int 15) (const_int 14)
+ (const_int 13) (const_int 12)
+ (const_int 11) (const_int 10)
+ (const_int 9) (const_int 8)
+ (const_int 7) (const_int 6)
+ (const_int 5) (const_int 4)
+ (const_int 3) (const_int 2)
+ (const_int 1) (const_int 0)])))]
"VECTOR_MEM_VSX_P (V16QImode) && !BYTES_BIG_ENDIAN && TARGET_P9_VECTOR"
"lxvb16x %x0,%y1"
[(set_attr "type" "vecload")])
@@ -1139,8 +1225,8 @@
(define_insn "vsx_st_elemrev_v2df"
[(set (match_operand:V2DF 0 "memory_operand" "=Z")
(vec_select:V2DF
- (match_operand:V2DF 1 "vsx_register_operand" "wa")
- (parallel [(const_int 1) (const_int 0)])))]
+ (match_operand:V2DF 1 "vsx_register_operand" "wa")
+ (parallel [(const_int 1) (const_int 0)])))]
"VECTOR_MEM_VSX_P (V2DFmode) && !BYTES_BIG_ENDIAN"
"stxvd2x %x1,%y0"
[(set_attr "type" "vecstore")])
@@ -1148,8 +1234,8 @@
(define_insn "vsx_st_elemrev_v2di"
[(set (match_operand:V2DI 0 "memory_operand" "=Z")
(vec_select:V2DI
- (match_operand:V2DI 1 "vsx_register_operand" "wa")
- (parallel [(const_int 1) (const_int 0)])))]
+ (match_operand:V2DI 1 "vsx_register_operand" "wa")
+ (parallel [(const_int 1) (const_int 0)])))]
"VECTOR_MEM_VSX_P (V2DImode) && !BYTES_BIG_ENDIAN"
"stxvd2x %x1,%y0"
[(set_attr "type" "vecstore")])
@@ -1157,9 +1243,9 @@
(define_insn "vsx_st_elemrev_v4sf"
[(set (match_operand:V4SF 0 "memory_operand" "=Z")
(vec_select:V4SF
- (match_operand:V4SF 1 "vsx_register_operand" "wa")
- (parallel [(const_int 3) (const_int 2)
- (const_int 1) (const_int 0)])))]
+ (match_operand:V4SF 1 "vsx_register_operand" "wa")
+ (parallel [(const_int 3) (const_int 2)
+ (const_int 1) (const_int 0)])))]
"VECTOR_MEM_VSX_P (V4SFmode) && !BYTES_BIG_ENDIAN"
"stxvw4x %x1,%y0"
[(set_attr "type" "vecstore")])
@@ -1174,30 +1260,98 @@
"stxvw4x %x1,%y0"
[(set_attr "type" "vecstore")])
-(define_insn "vsx_st_elemrev_v8hi"
+(define_expand "vsx_st_elemrev_v8hi"
[(set (match_operand:V8HI 0 "memory_operand" "=Z")
(vec_select:V8HI
- (match_operand:V8HI 1 "vsx_register_operand" "wa")
- (parallel [(const_int 7) (const_int 6)
- (const_int 5) (const_int 4)
- (const_int 3) (const_int 2)
- (const_int 1) (const_int 0)])))]
+ (match_operand:V8HI 1 "vsx_register_operand" "wa")
+ (parallel [(const_int 7) (const_int 6)
+ (const_int 5) (const_int 4)
+ (const_int 3) (const_int 2)
+ (const_int 1) (const_int 0)])))]
+ "VECTOR_MEM_VSX_P (V8HImode) && !BYTES_BIG_ENDIAN"
+{
+ if (!TARGET_P9_VECTOR)
+ {
+ rtx subreg, perm[16], pcv;
+ rtx tmp = gen_reg_rtx (V8HImode);
+ /* 2 is leftmost element in register */
+ unsigned int reorder[16] = {13,12,15,14,9,8,11,10,5,4,7,6,1,0,3,2};
+ int i;
+
+ for (i = 0; i < 16; ++i)
+ perm[i] = GEN_INT (reorder[i]);
+
+ pcv = force_reg (V16QImode,
+ gen_rtx_CONST_VECTOR (V16QImode,
+ gen_rtvec_v (16, perm)));
+ emit_insn (gen_altivec_vperm_v8hi_direct (tmp, operands[1],
+ operands[1], pcv));
+ subreg = simplify_gen_subreg (V4SImode, tmp, V8HImode, 0);
+ emit_insn (gen_vsx_st_elemrev_v4si (subreg, operands[0]));
+ DONE;
+ }
+})
+
+(define_insn "*vsx_st_elemrev_v8hi_internal"
+ [(set (match_operand:V8HI 0 "memory_operand" "=Z")
+ (vec_select:V8HI
+ (match_operand:V8HI 1 "vsx_register_operand" "wa")
+ (parallel [(const_int 7) (const_int 6)
+ (const_int 5) (const_int 4)
+ (const_int 3) (const_int 2)
+ (const_int 1) (const_int 0)])))]
"VECTOR_MEM_VSX_P (V8HImode) && !BYTES_BIG_ENDIAN && TARGET_P9_VECTOR"
"stxvh8x %x1,%y0"
[(set_attr "type" "vecstore")])
-(define_insn "vsx_st_elemrev_v16qi"
+(define_expand "vsx_st_elemrev_v16qi"
[(set (match_operand:V16QI 0 "memory_operand" "=Z")
(vec_select:V16QI
- (match_operand:V16QI 1 "vsx_register_operand" "wa")
- (parallel [(const_int 15) (const_int 14)
- (const_int 13) (const_int 12)
- (const_int 11) (const_int 10)
- (const_int 9) (const_int 8)
- (const_int 7) (const_int 6)
- (const_int 5) (const_int 4)
- (const_int 3) (const_int 2)
- (const_int 1) (const_int 0)])))]
+ (match_operand:V16QI 1 "vsx_register_operand" "wa")
+ (parallel [(const_int 15) (const_int 14)
+ (const_int 13) (const_int 12)
+ (const_int 11) (const_int 10)
+ (const_int 9) (const_int 8)
+ (const_int 7) (const_int 6)
+ (const_int 5) (const_int 4)
+ (const_int 3) (const_int 2)
+ (const_int 1) (const_int 0)])))]
+ "VECTOR_MEM_VSX_P (V16QImode) && !BYTES_BIG_ENDIAN"
+{
+ if (!TARGET_P9_VECTOR)
+ {
+ rtx subreg, perm[16], pcv;
+ rtx tmp = gen_reg_rtx (V16QImode);
+ /* 3 is leftmost element in register */
+ unsigned int reorder[16] = {12,13,14,15,8,9,10,11,4,5,6,7,0,1,2,3};
+ int i;
+
+ for (i = 0; i < 16; ++i)
+ perm[i] = GEN_INT (reorder[i]);
+
+ pcv = force_reg (V16QImode,
+ gen_rtx_CONST_VECTOR (V16QImode,
+ gen_rtvec_v (16, perm)));
+ emit_insn (gen_altivec_vperm_v16qi_direct (tmp, operands[1],
+ operands[1], pcv));
+ subreg = simplify_gen_subreg (V4SImode, tmp, V16QImode, 0);
+ emit_insn (gen_vsx_st_elemrev_v4si (subreg, operands[0]));
+ DONE;
+ }
+})
+
+(define_insn "*vsx_st_elemrev_v16qi_internal"
+ [(set (match_operand:V16QI 0 "memory_operand" "=Z")
+ (vec_select:V16QI
+ (match_operand:V16QI 1 "vsx_register_operand" "wa")
+ (parallel [(const_int 15) (const_int 14)
+ (const_int 13) (const_int 12)
+ (const_int 11) (const_int 10)
+ (const_int 9) (const_int 8)
+ (const_int 7) (const_int 6)
+ (const_int 5) (const_int 4)
+ (const_int 3) (const_int 2)
+ (const_int 1) (const_int 0)])))]
"VECTOR_MEM_VSX_P (V16QImode) && !BYTES_BIG_ENDIAN && TARGET_P9_VECTOR"
"stxvb16x %x1,%y0"
[(set_attr "type" "vecstore")])
@@ -4054,9 +4208,9 @@
;; ISA 3.0 Binary Floating-Point Support
;; VSX Scalar Extract Exponent Quad-Precision
-(define_insn "xsxexpqp"
+(define_insn "xsxexpqp_<mode>"
[(set (match_operand:DI 0 "altivec_register_operand" "=v")
- (unspec:DI [(match_operand:KF 1 "altivec_register_operand" "v")]
+ (unspec:DI [(match_operand:IEEE128 1 "altivec_register_operand" "v")]
UNSPEC_VSX_SXEXPDP))]
"TARGET_P9_VECTOR"
"xsxexpqp %0,%1"
@@ -4072,9 +4226,9 @@
[(set_attr "type" "integer")])
;; VSX Scalar Extract Significand Quad-Precision
-(define_insn "xsxsigqp"
+(define_insn "xsxsigqp_<mode>"
[(set (match_operand:TI 0 "altivec_register_operand" "=v")
- (unspec:TI [(match_operand:KF 1 "altivec_register_operand" "v")]
+ (unspec:TI [(match_operand:IEEE128 1 "altivec_register_operand" "v")]
UNSPEC_VSX_SXSIG))]
"TARGET_P9_VECTOR"
"xsxsigqp %0,%1"
@@ -4090,20 +4244,21 @@
[(set_attr "type" "integer")])
;; VSX Scalar Insert Exponent Quad-Precision Floating Point Argument
-(define_insn "xsiexpqpf"
- [(set (match_operand:KF 0 "altivec_register_operand" "=v")
- (unspec:KF [(match_operand:KF 1 "altivec_register_operand" "v")
- (match_operand:DI 2 "altivec_register_operand" "v")]
+(define_insn "xsiexpqpf_<mode>"
+ [(set (match_operand:IEEE128 0 "altivec_register_operand" "=v")
+ (unspec:IEEE128
+ [(match_operand:IEEE128 1 "altivec_register_operand" "v")
+ (match_operand:DI 2 "altivec_register_operand" "v")]
UNSPEC_VSX_SIEXPQP))]
"TARGET_P9_VECTOR"
"xsiexpqp %0,%1,%2"
[(set_attr "type" "vecmove")])
;; VSX Scalar Insert Exponent Quad-Precision
-(define_insn "xsiexpqp"
- [(set (match_operand:KF 0 "altivec_register_operand" "=v")
- (unspec:KF [(match_operand:TI 1 "altivec_register_operand" "v")
- (match_operand:DI 2 "altivec_register_operand" "v")]
+(define_insn "xsiexpqp_<mode>"
+ [(set (match_operand:IEEE128 0 "altivec_register_operand" "=v")
+ (unspec:IEEE128 [(match_operand:TI 1 "altivec_register_operand" "v")
+ (match_operand:DI 2 "altivec_register_operand" "v")]
UNSPEC_VSX_SIEXPQP))]
"TARGET_P9_VECTOR"
"xsiexpqp %0,%1,%2"
@@ -4162,11 +4317,11 @@
;; (Has side effect of setting the lt bit if operand 1 is negative,
;; setting the eq bit if any of the conditions tested by operand 2
;; are satisfied, and clearing the gt and undordered bits to zero.)
-(define_expand "xststdcqp"
+(define_expand "xststdcqp_<mode>"
[(set (match_dup 3)
(compare:CCFP
- (unspec:KF
- [(match_operand:KF 1 "altivec_register_operand" "v")
+ (unspec:IEEE128
+ [(match_operand:IEEE128 1 "altivec_register_operand" "v")
(match_operand:SI 2 "u7bit_cint_operand" "n")]
UNSPEC_VSX_STSTDC)
(const_int 0)))
@@ -4200,11 +4355,11 @@
})
;; The VSX Scalar Test Negative Quad-Precision
-(define_expand "xststdcnegqp"
+(define_expand "xststdcnegqp_<mode>"
[(set (match_dup 2)
(compare:CCFP
- (unspec:KF
- [(match_operand:KF 1 "altivec_register_operand" "v")
+ (unspec:IEEE128
+ [(match_operand:IEEE128 1 "altivec_register_operand" "v")
(const_int 0)]
UNSPEC_VSX_STSTDC)
(const_int 0)))
@@ -4234,11 +4389,12 @@
operands[3] = CONST0_RTX (SImode);
})
-(define_insn "*xststdcqp"
+(define_insn "*xststdcqp_<mode>"
[(set (match_operand:CCFP 0 "" "=y")
(compare:CCFP
- (unspec:KF [(match_operand:KF 1 "altivec_register_operand" "v")
- (match_operand:SI 2 "u7bit_cint_operand" "n")]
+ (unspec:IEEE128
+ [(match_operand:IEEE128 1 "altivec_register_operand" "v")
+ (match_operand:SI 2 "u7bit_cint_operand" "n")]
UNSPEC_VSX_STSTDC)
(const_int 0)))]
"TARGET_P9_VECTOR"
@@ -4335,6 +4491,149 @@
"vcmpnez<VSX_EXTRACT_WIDTH>. %0,%1,%2"
[(set_attr "type" "vecsimple")])
+;; Return first position of match between vectors
+(define_expand "first_match_index_<mode>"
+ [(match_operand:SI 0 "register_operand")
+ (unspec:SI [(match_operand:VSX_EXTRACT_I 1 "register_operand")
+ (match_operand:VSX_EXTRACT_I 2 "register_operand")]
+ UNSPEC_VSX_FIRST_MATCH_INDEX)]
+ "TARGET_P9_VECTOR"
+{
+ int sh;
+
+ rtx cmp_result = gen_reg_rtx (<MODE>mode);
+ rtx not_result = gen_reg_rtx (<MODE>mode);
+
+ emit_insn (gen_vcmpnez<VSX_EXTRACT_WIDTH> (cmp_result, operands[1],
+ operands[2]));
+ emit_insn (gen_one_cmpl<mode>2 (not_result, cmp_result));
+
+ sh = GET_MODE_SIZE (GET_MODE_INNER (<MODE>mode)) / 2;
+
+ if (<MODE>mode == V16QImode)
+ emit_insn (gen_vctzlsbb_<mode> (operands[0], not_result));
+ else
+ {
+ rtx tmp = gen_reg_rtx (SImode);
+ emit_insn (gen_vctzlsbb_<mode> (tmp, not_result));
+ emit_insn (gen_ashrsi3 (operands[0], tmp, GEN_INT (sh)));
+ }
+ DONE;
+})
+
+;; Return first position of match between vectors or end of string (EOS)
+(define_expand "first_match_or_eos_index_<mode>"
+ [(match_operand:SI 0 "register_operand")
+ (unspec: SI [(match_operand:VSX_EXTRACT_I 1 "register_operand")
+ (match_operand:VSX_EXTRACT_I 2 "register_operand")]
+ UNSPEC_VSX_FIRST_MATCH_EOS_INDEX)]
+ "TARGET_P9_VECTOR"
+{
+ int sh;
+ rtx cmpz1_result = gen_reg_rtx (<MODE>mode);
+ rtx cmpz2_result = gen_reg_rtx (<MODE>mode);
+ rtx cmpz_result = gen_reg_rtx (<MODE>mode);
+ rtx and_result = gen_reg_rtx (<MODE>mode);
+ rtx result = gen_reg_rtx (<MODE>mode);
+ rtx vzero = gen_reg_rtx (<MODE>mode);
+
+ /* Vector with zeros in elements that correspond to zeros in operands. */
+ emit_move_insn (vzero, CONST0_RTX (<MODE>mode));
+ emit_insn (gen_vcmpne<VSX_EXTRACT_WIDTH> (cmpz1_result, operands[1], vzero));
+ emit_insn (gen_vcmpne<VSX_EXTRACT_WIDTH> (cmpz2_result, operands[2], vzero));
+ emit_insn (gen_and<mode>3 (and_result, cmpz1_result, cmpz2_result));
+
+ /* Vector with ones in elments that do not match. */
+ emit_insn (gen_vcmpnez<VSX_EXTRACT_WIDTH> (cmpz_result, operands[1],
+ operands[2]));
+
+ /* Create vector with ones in elements where there was a zero in one of
+ the source elements or the elements that match. */
+ emit_insn (gen_nand<mode>3 (result, and_result, cmpz_result));
+ sh = GET_MODE_SIZE (GET_MODE_INNER (<MODE>mode)) / 2;
+
+ if (<MODE>mode == V16QImode)
+ emit_insn (gen_vctzlsbb_<mode> (operands[0], result));
+ else
+ {
+ rtx tmp = gen_reg_rtx (SImode);
+ emit_insn (gen_vctzlsbb_<mode> (tmp, result));
+ emit_insn (gen_ashrsi3 (operands[0], tmp, GEN_INT (sh)));
+ }
+ DONE;
+})
+
+;; Return first position of mismatch between vectors
+(define_expand "first_mismatch_index_<mode>"
+ [(match_operand:SI 0 "register_operand")
+ (unspec: SI [(match_operand:VSX_EXTRACT_I 1 "register_operand")
+ (match_operand:VSX_EXTRACT_I 2 "register_operand")]
+ UNSPEC_VSX_FIRST_MISMATCH_INDEX)]
+ "TARGET_P9_VECTOR"
+{
+ int sh;
+ rtx cmp_result = gen_reg_rtx (<MODE>mode);
+
+ emit_insn (gen_vcmpne<VSX_EXTRACT_WIDTH> (cmp_result, operands[1],
+ operands[2]));
+ sh = GET_MODE_SIZE (GET_MODE_INNER (<MODE>mode)) / 2;
+
+ if (<MODE>mode == V16QImode)
+ emit_insn (gen_vctzlsbb_<mode> (operands[0], cmp_result));
+ else
+ {
+ rtx tmp = gen_reg_rtx (SImode);
+ emit_insn (gen_vctzlsbb_<mode> (tmp, cmp_result));
+ emit_insn (gen_ashrsi3 (operands[0], tmp, GEN_INT (sh)));
+ }
+ DONE;
+})
+
+;; Return first position of mismatch between vectors or end of string (EOS)
+(define_expand "first_mismatch_or_eos_index_<mode>"
+ [(match_operand:SI 0 "register_operand")
+ (unspec: SI [(match_operand:VSX_EXTRACT_I 1 "register_operand")
+ (match_operand:VSX_EXTRACT_I 2 "register_operand")]
+ UNSPEC_VSX_FIRST_MISMATCH_EOS_INDEX)]
+ "TARGET_P9_VECTOR"
+{
+ int sh;
+ rtx cmpz1_result = gen_reg_rtx (<MODE>mode);
+ rtx cmpz2_result = gen_reg_rtx (<MODE>mode);
+ rtx cmpz_result = gen_reg_rtx (<MODE>mode);
+ rtx not_cmpz_result = gen_reg_rtx (<MODE>mode);
+ rtx and_result = gen_reg_rtx (<MODE>mode);
+ rtx result = gen_reg_rtx (<MODE>mode);
+ rtx vzero = gen_reg_rtx (<MODE>mode);
+
+ /* Vector with zeros in elements that correspond to zeros in operands. */
+ emit_move_insn (vzero, CONST0_RTX (<MODE>mode));
+
+ emit_insn (gen_vcmpne<VSX_EXTRACT_WIDTH> (cmpz1_result, operands[1], vzero));
+ emit_insn (gen_vcmpne<VSX_EXTRACT_WIDTH> (cmpz2_result, operands[2], vzero));
+ emit_insn (gen_and<mode>3 (and_result, cmpz1_result, cmpz2_result));
+
+ /* Vector with ones in elments that match. */
+ emit_insn (gen_vcmpnez<VSX_EXTRACT_WIDTH> (cmpz_result, operands[1],
+ operands[2]));
+ emit_insn (gen_one_cmpl<mode>2 (not_cmpz_result, cmpz_result));
+
+ /* Create vector with ones in elements where there was a zero in one of
+ the source elements or the elements did not match. */
+ emit_insn (gen_nand<mode>3 (result, and_result, not_cmpz_result));
+ sh = GET_MODE_SIZE (GET_MODE_INNER (<MODE>mode)) / 2;
+
+ if (<MODE>mode == V16QImode)
+ emit_insn (gen_vctzlsbb_<mode> (operands[0], result));
+ else
+ {
+ rtx tmp = gen_reg_rtx (SImode);
+ emit_insn (gen_vctzlsbb_<mode> (tmp, result));
+ emit_insn (gen_ashrsi3 (operands[0], tmp, GEN_INT (sh)));
+ }
+ DONE;
+})
+
;; Load VSX Vector with Length
(define_expand "lxvl"
[(set (match_dup 3)
@@ -4441,12 +4740,12 @@
DONE;
})
-;; Vector Compare Not Equal Byte
+;; Vector Compare Not Equal Byte (specified/not+eq:)
(define_insn "vcmpneb"
[(set (match_operand:V16QI 0 "altivec_register_operand" "=v")
- (unspec:V16QI [(match_operand:V16QI 1 "altivec_register_operand" "v")
- (match_operand:V16QI 2 "altivec_register_operand" "v")]
- UNSPEC_VCMPNEB))]
+ (not:V16QI
+ (eq:V16QI (match_operand:V16QI 1 "altivec_register_operand" "v")
+ (match_operand:V16QI 2 "altivec_register_operand" "v"))))]
"TARGET_P9_VECTOR"
"vcmpneb %0,%1,%2"
[(set_attr "type" "vecsimple")])
@@ -4462,12 +4761,12 @@
"vcmpnezb %0,%1,%2"
[(set_attr "type" "vecsimple")])
-;; Vector Compare Not Equal Half Word
+;; Vector Compare Not Equal Half Word (specified/not+eq:)
(define_insn "vcmpneh"
[(set (match_operand:V8HI 0 "altivec_register_operand" "=v")
- (unspec:V8HI [(match_operand:V8HI 1 "altivec_register_operand" "v")
- (match_operand:V8HI 2 "altivec_register_operand" "v")]
- UNSPEC_VCMPNEH))]
+ (not:V8HI
+ (eq:V8HI (match_operand:V8HI 1 "altivec_register_operand" "v")
+ (match_operand:V8HI 2 "altivec_register_operand" "v"))))]
"TARGET_P9_VECTOR"
"vcmpneh %0,%1,%2"
[(set_attr "type" "vecsimple")])
@@ -4482,13 +4781,12 @@
"vcmpnezh %0,%1,%2"
[(set_attr "type" "vecsimple")])
-;; Vector Compare Not Equal Word
+;; Vector Compare Not Equal Word (specified/not+eq:)
(define_insn "vcmpnew"
[(set (match_operand:V4SI 0 "altivec_register_operand" "=v")
- (unspec:V4SI
- [(match_operand:V4SI 1 "altivec_register_operand" "v")
- (match_operand:V4SI 2 "altivec_register_operand" "v")]
- UNSPEC_VCMPNEH))]
+ (not:V4SI
+ (eq:V4SI (match_operand:V4SI 1 "altivec_register_operand" "v")
+ (match_operand:V4SI 2 "altivec_register_operand" "v"))))]
"TARGET_P9_VECTOR"
"vcmpnew %0,%1,%2"
[(set_attr "type" "vecsimple")])
@@ -4514,10 +4812,10 @@
[(set_attr "type" "vecsimple")])
;; Vector Count Trailing Zero Least-Significant Bits Byte
-(define_insn "vctzlsbb"
+(define_insn "vctzlsbb_<mode>"
[(set (match_operand:SI 0 "register_operand" "=r")
(unspec:SI
- [(match_operand:V16QI 1 "altivec_register_operand" "v")]
+ [(match_operand:VSX_EXTRACT_I 1 "altivec_register_operand" "v")]
UNSPEC_VCTZLSBB))]
"TARGET_P9_VECTOR"
"vctzlsbb %0,%1"
@@ -4776,6 +5074,37 @@
"xxbrw %x0,%x1"
[(set_attr "type" "vecperm")])
+;; Swap all bytes in each element of vector
+(define_expand "revb_<mode>"
+ [(set (match_operand:VEC_REVB 0 "vsx_register_operand")
+ (bswap:VEC_REVB (match_operand:VEC_REVB 1 "vsx_register_operand")))]
+ ""
+{
+ if (TARGET_P9_VECTOR)
+ emit_insn (gen_p9_xxbr<VSX_XXBR>_<mode> (operands[0], operands[1]));
+ else
+ {
+ /* Want to have the elements in reverse order relative
+ to the endian mode in use, i.e. in LE mode, put elements
+ in BE order. */
+ rtx sel = swap_endian_selector_for_mode(<MODE>mode);
+ emit_insn (gen_altivec_vperm_<mode> (operands[0], operands[1],
+ operands[1], sel));
+ }
+
+ DONE;
+})
+
+;; Reversing bytes in vector char is just a NOP.
+(define_expand "revb_v16qi"
+ [(set (match_operand:V16QI 0 "vsx_register_operand")
+ (bswap:V16QI (match_operand:V16QI 1 "vsx_register_operand")))]
+ ""
+{
+ emit_move_insn (operands[0], operands[1]);
+ DONE;
+})
+
;; Swap all bytes in each 16-bit element
(define_insn "p9_xxbrh_v8hi"
[(set (match_operand:V8HI 0 "vsx_register_operand" "=wa")
diff --git a/gcc/config/rs6000/xcoff.h b/gcc/config/rs6000/xcoff.h
index 36f40f4b11e..1eeb75c3e6c 100644
--- a/gcc/config/rs6000/xcoff.h
+++ b/gcc/config/rs6000/xcoff.h
@@ -179,7 +179,7 @@
`assemble_name' uses this. */
#define ASM_OUTPUT_LABELREF(FILE,NAME) \
- asm_fprintf ((FILE), "%U%s", rs6000_xcoff_strip_dollar (NAME));
+ asm_fprintf ((FILE), "%U%s", rs6000_xcoff_strip_dollar (NAME))
/* This is how to output an internal label prefix. rs6000.c uses this
when generating traceback tables. */
diff --git a/gcc/config/sh/sh-mem.cc b/gcc/config/sh/sh-mem.cc
index 1a7650d25ae..c5a9a1e53e6 100644
--- a/gcc/config/sh/sh-mem.cc
+++ b/gcc/config/sh/sh-mem.cc
@@ -185,8 +185,12 @@ expand_block_move (rtx *operands)
return false;
}
-static const int prob_unlikely = REG_BR_PROB_BASE / 10;
-static const int prob_likely = REG_BR_PROB_BASE / 4;
+static const int prob_unlikely
+ = profile_probability::from_reg_br_prob_base (REG_BR_PROB_BASE / 10)
+ .to_reg_br_prob_note ();
+static const int prob_likely
+ = profile_probability::from_reg_br_prob_base (REG_BR_PROB_BASE / 4)
+ .to_reg_br_prob_note ();
/* Emit code to perform a strcmp.
diff --git a/gcc/config/sol2.h b/gcc/config/sol2.h
index f6c2fefbbc1..8174cebd104 100644
--- a/gcc/config/sol2.h
+++ b/gcc/config/sol2.h
@@ -205,8 +205,8 @@ along with GCC; see the file COPYING3. If not see
/* We don't use the standard svr4 STARTFILE_SPEC because it's wrong for us. */
#undef STARTFILE_SPEC
#ifdef HAVE_SOLARIS_CRTS
-/* Since Solaris 11.x and Solaris 12, the OS delivers crt1.o, crti.o, and
- crtn.o, with a hook for compiler-dependent stuff like profile handling. */
+/* Since Solaris 11.4, the OS delivers crt1.o, crti.o, and crtn.o, with a hook
+ for compiler-dependent stuff like profile handling. */
#define STARTFILE_SPEC "%{!shared:%{!symbolic: \
crt1.o%s \
%{p:%e-p is not supported; \
diff --git a/gcc/config/v850/v850.h b/gcc/config/v850/v850.h
index 5eb2e8828fa..54dcc097bc4 100644
--- a/gcc/config/v850/v850.h
+++ b/gcc/config/v850/v850.h
@@ -743,7 +743,7 @@ typedef enum
#undef ASM_OUTPUT_BEFORE_CASE_LABEL
#define ASM_OUTPUT_BEFORE_CASE_LABEL(FILE,PREFIX,NUM,TABLE) \
- ASM_OUTPUT_ALIGN ((FILE), (TARGET_BIG_SWITCH ? 2 : 1));
+ ASM_OUTPUT_ALIGN ((FILE), (TARGET_BIG_SWITCH ? 2 : 1))
#define WORD_REGISTER_OPERATIONS 1
diff --git a/gcc/configure b/gcc/configure
index fb40ead9204..d4461e2fdd2 100755
--- a/gcc/configure
+++ b/gcc/configure
@@ -7323,10 +7323,10 @@ fi
if test "${enable_coverage+set}" = set; then :
enableval=$enable_coverage; case "${enableval}" in
yes|noopt)
- coverage_flags="-fprofile-arcs -ftest-coverage -frandom-seed=\$@ -O0 -fkeep-inline-functions -fkeep-static-functions"
+ coverage_flags="-fprofile-arcs -ftest-coverage -frandom-seed=\$@ -O0 -fkeep-static-functions"
;;
opt)
- coverage_flags="-fprofile-arcs -ftest-coverage -frandom-seed=\$@ -O2 -fkeep-inline-functions -fkeep-static-functions"
+ coverage_flags="-fprofile-arcs -ftest-coverage -frandom-seed=\$@ -O2 -fkeep-static-functions"
;;
no)
# a.k.a. --disable-coverage
@@ -28246,7 +28246,7 @@ elif test x$gcc_cv_ld != x; then
else
case "$target" in
*-*-solaris2.1[1-9]*)
- # Solaris 11.x and Solaris 12 added PIE support.
+ # Solaris 11.3 added PIE support.
if $gcc_cv_ld -z help 2>&1 | grep -- type.*pie > /dev/null; then
gcc_cv_ld_pie=yes
fi
diff --git a/gcc/configure.ac b/gcc/configure.ac
index 0e5167695a2..aec8df928bf 100644
--- a/gcc/configure.ac
+++ b/gcc/configure.ac
@@ -728,10 +728,10 @@ AC_ARG_ENABLE(coverage,
default is noopt])],
[case "${enableval}" in
yes|noopt)
- coverage_flags="-fprofile-arcs -ftest-coverage -frandom-seed=\$@ -O0 -fkeep-inline-functions -fkeep-static-functions"
+ coverage_flags="-fprofile-arcs -ftest-coverage -frandom-seed=\$@ -O0 -fkeep-static-functions"
;;
opt)
- coverage_flags="-fprofile-arcs -ftest-coverage -frandom-seed=\$@ -O2 -fkeep-inline-functions -fkeep-static-functions"
+ coverage_flags="-fprofile-arcs -ftest-coverage -frandom-seed=\$@ -O2 -fkeep-static-functions"
;;
no)
# a.k.a. --disable-coverage
@@ -5106,7 +5106,7 @@ elif test x$gcc_cv_ld != x; then
else
case "$target" in
*-*-solaris2.1[[1-9]]*)
- # Solaris 11.x and Solaris 12 added PIE support.
+ # Solaris 11.3 added PIE support.
if $gcc_cv_ld -z help 2>&1 | grep -- type.*pie > /dev/null; then
gcc_cv_ld_pie=yes
fi
diff --git a/gcc/coverage.c b/gcc/coverage.c
index 8a56a677f15..ea05d94f441 100644
--- a/gcc/coverage.c
+++ b/gcc/coverage.c
@@ -663,8 +663,15 @@ coverage_begin_function (unsigned lineno_checksum, unsigned cfg_checksum)
gcov_write_unsigned (cfg_checksum);
gcov_write_string (IDENTIFIER_POINTER
(DECL_ASSEMBLER_NAME (current_function_decl)));
+ gcov_write_unsigned (DECL_ARTIFICIAL (current_function_decl));
gcov_write_filename (xloc.file);
gcov_write_unsigned (xloc.line);
+ gcov_write_unsigned (xloc.column);
+
+ expanded_location endloc = expand_location (cfun->function_end_locus);
+
+ /* Function can start in a single file and end in another one. */
+ gcov_write_unsigned (endloc.file == xloc.file ? endloc.line : xloc.line);
gcov_write_length (offset);
return !gcov_is_error ();
@@ -1262,6 +1269,9 @@ coverage_init (const char *filename)
gcov_write_unsigned (GCOV_NOTE_MAGIC);
gcov_write_unsigned (GCOV_VERSION);
gcov_write_unsigned (bbg_file_stamp);
+
+ /* Do not support has_unexecuted_blocks for Ada. */
+ gcov_write_unsigned (strcmp (lang_hooks.name, "GNU Ada") != 0);
}
}
diff --git a/gcc/cp/ChangeLog b/gcc/cp/ChangeLog
index 590e3221c8c..e8c882f2253 100644
--- a/gcc/cp/ChangeLog
+++ b/gcc/cp/ChangeLog
@@ -1,3 +1,106 @@
+2017-11-15 Nathan Sidwell <nathan@acm.org>
+
+ PR c++/81574
+ * lambda.c (lambda_capture_field_type): Function references are
+ always catured by reference.
+
+2017-11-15 Martin Liska <mliska@suse.cz>
+
+ * decl.c (begin_destructor_body): Use cp_build_fold_indirect_ref
+ instead of cp_build_indirect_ref.
+
+2017-11-15 Martin Liska <mliska@suse.cz>
+
+ * decl.c (begin_destructor_body): In case of VPTR sanitization
+ (with disabled recovery), zero vptr in order to catch virtual calls
+ after lifetime of an object.
+
+2017-11-14 Jason Merrill <jason@redhat.com>
+
+ Use GTY((cache)) on some hash tables.
+ * decl.c (decomp_type_table): Use tree_cache_map.
+ * init.c (nsdmi_inst): Likewise.
+ * pt.c (defarg_ints): Likewise.
+ * cp-objcp-common.c (cp_get_debug_type): Likewise.
+
+2017-11-13 Jason Merrill <jason@redhat.com>
+
+ Capture adjustments for P0588R1.
+ * semantics.c (process_outer_var_ref): Capture variables when
+ they are named; complain about non-capture uses when odr-used.
+ * expr.c (mark_use): Rvalue use looks through capture proxy.
+ * constexpr.c (potential_constant_expression_1): Improve error about
+ use of captured variable.
+ * lambda.c (need_generic_capture, dependent_capture_r)
+ (do_dependent_capture, processing_nonlambda_template): Remove.
+ * call.c (build_this): Remove uses of the above.
+ * decl.c (cp_finish_decl): Likewise.
+ * semantics.c (maybe_cleanup_point_expr)
+ (maybe_cleanup_point_expr_void, finish_goto_stmt)
+ (maybe_convert_cond): Likewise.
+ * typeck.c (check_return_expr): Likewise.
+
+ Defer folding of *&.
+ * typeck.c (cp_build_fold_indirect_ref): New.
+ (cp_build_indirect_ref_1): Split out from cp_build_indirect_ref.
+ Add 'fold' parameter.
+ * cp-tree.h: Declare cp_build_fold_indirect_ref.
+ * call.c, class.c, cp-ubsan.c, decl.c, except.c, init.c, lambda.c,
+ parser.c, rtti.c, tree.c, typeck.c, typeck2.c: Use it.
+ * parser.c (do_range_for_auto_deduction): Use RO_UNARY_STAR.
+ (cp_convert_range_for): Likewise.
+ * typeck2.c (build_x_arrow): Use RO_ARROW.
+
+ * cp-ubsan.c (cp_ubsan_check_member_access_r): Fix handling of
+ INDIRECT_REF of ADDR_EXPR.
+
+ PR c++/82360 - ICE with static_cast in template.
+ * call.c (perform_direct_initialization_if_possible): Check
+ processing_template_decl.
+ * typeck.c (build_static_cast_1): Likewise.
+
+2017-11-13 Ville Voutilainen <ville.voutilainen@gmail.com>
+
+ Remove the null check from placement new in all modes
+ * init.c (build_new_1): Don't do a null check for
+ a namespace-scope non-replaceable placement new
+ in any mode unless -fcheck-new is provided.
+
+2017-11-07 Boris Kolpackov <boris@codesynthesis.com>
+
+ * Make-lang.in (CP_PLUGIN_HEADERS): Add operators.def since included
+ in cp-tree.h.
+
+2017-11-07 Jakub Jelinek <jakub@redhat.com>
+
+ PR c++/82835
+ * cp-gimplify.c (cxx_omp_clause_apply_fn): For methods pass i - 1 to
+ convert_default_arg instead of i.
+
+2017-11-06 Jason Merrill <jason@redhat.com>
+
+ P0704R1 - fixing const-qualified pointers to members
+ * typeck2.c (build_m_component_ref): Also accept in lower stds with
+ a pedwarn.
+
+2017-11-06 Paolo Carlini <paolo.carlini@oracle.com>
+
+ PR c++/65579
+ * decl2.c (finish_static_data_member_decl): If there's an initializer,
+ complete the type and re-apply the quals.
+
+2017-11-06 Martin Liska <mliska@suse.cz>
+
+ PR middle-end/82404
+ * constexpr.c (cxx_eval_builtin_function_call): Handle
+ __builtin_unreachable call.
+ (get_function_named_in_call): Declare function earlier.
+ (constexpr_fn_retval): Skip __builtin_unreachable.
+ * cp-gimplify.c (cp_ubsan_maybe_instrument_return): Rename to
+ ...
+ (cp_maybe_instrument_return): ... this.
+ (cp_genericize): Call the function unconditionally.
+
2017-11-03 Nathan Sidwell <nathan@acm.org>
PR c++/82710
diff --git a/gcc/cp/Make-lang.in b/gcc/cp/Make-lang.in
index a46845c0c53..c852f6a38b4 100644
--- a/gcc/cp/Make-lang.in
+++ b/gcc/cp/Make-lang.in
@@ -39,7 +39,7 @@ CXX_INSTALL_NAME := $(shell echo c++|sed '$(program_transform_name)')
GXX_INSTALL_NAME := $(shell echo g++|sed '$(program_transform_name)')
CXX_TARGET_INSTALL_NAME := $(target_noncanonical)-$(shell echo c++|sed '$(program_transform_name)')
GXX_TARGET_INSTALL_NAME := $(target_noncanonical)-$(shell echo g++|sed '$(program_transform_name)')
-CP_PLUGIN_HEADERS := cp-tree.h cxx-pretty-print.h name-lookup.h type-utils.h
+CP_PLUGIN_HEADERS := cp-tree.h cxx-pretty-print.h name-lookup.h type-utils.h operators.def
#
# Define the names for selecting c++ in LANGUAGES.
diff --git a/gcc/cp/call.c b/gcc/cp/call.c
index 49cda986f44..c357af4966a 100644
--- a/gcc/cp/call.c
+++ b/gcc/cp/call.c
@@ -3365,7 +3365,7 @@ build_this (tree obj)
{
/* In a template, we are only concerned about the type of the
expression, so we can take a shortcut. */
- if (processing_nonlambda_template ())
+ if (processing_template_decl)
return build_address (obj);
return cp_build_addr_expr (obj, tf_warning_or_error);
@@ -8063,7 +8063,7 @@ build_over_call (struct z_candidate *cand, int flags, tsubst_flags_t complain)
if (targ)
arg = targ;
else
- arg = cp_build_indirect_ref (arg, RO_NULL, complain);
+ arg = cp_build_fold_indirect_ref (arg);
/* In C++17 we shouldn't be copying a TARGET_EXPR except into a base
subobject. */
@@ -8100,9 +8100,7 @@ build_over_call (struct z_candidate *cand, int flags, tsubst_flags_t complain)
else if ((trivial || TREE_CODE (arg) == TARGET_EXPR)
&& !unsafe_copy_elision_p (fa, arg))
{
- tree to = cp_stabilize_reference (cp_build_indirect_ref (fa,
- RO_NULL,
- complain));
+ tree to = cp_stabilize_reference (cp_build_fold_indirect_ref (fa));
val = build2 (INIT_EXPR, DECL_CONTEXT (fn), to, arg);
return val;
@@ -8114,7 +8112,7 @@ build_over_call (struct z_candidate *cand, int flags, tsubst_flags_t complain)
&& !DECL_DELETED_FN (fn))
{
tree to = cp_stabilize_reference
- (cp_build_indirect_ref (argarray[0], RO_NULL, complain));
+ (cp_build_fold_indirect_ref (argarray[0]));
tree type = TREE_TYPE (to);
tree as_base = CLASSTYPE_AS_BASE (type);
tree arg = argarray[1];
@@ -8127,7 +8125,7 @@ build_over_call (struct z_candidate *cand, int flags, tsubst_flags_t complain)
}
else if (tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (as_base)))
{
- arg = cp_build_indirect_ref (arg, RO_NULL, complain);
+ arg = cp_build_fold_indirect_ref (arg);
val = build2 (MODIFY_EXPR, TREE_TYPE (to), to, arg);
/* Handle NSDMI that refer to the object being initialized. */
replace_placeholders (arg, to);
@@ -8166,7 +8164,7 @@ build_over_call (struct z_candidate *cand, int flags, tsubst_flags_t complain)
return force_target_expr (DECL_CONTEXT (fn), void_node,
no_cleanup_complain);
else
- return cp_build_indirect_ref (argarray[0], RO_NULL, complain);
+ return cp_build_fold_indirect_ref (argarray[0]);
}
}
@@ -10647,6 +10645,16 @@ perform_direct_initialization_if_possible (tree type,
LOOKUP_NORMAL, complain);
if (!conv || conv->bad_p)
expr = NULL_TREE;
+ else if (processing_template_decl && conv->kind != ck_identity)
+ {
+ /* In a template, we are only concerned about determining the
+ type of non-dependent expressions, so we do not have to
+ perform the actual conversion. But for initializers, we
+ need to be able to perform it at instantiation
+ (or instantiate_non_dependent_expr) time. */
+ expr = build1 (IMPLICIT_CONV_EXPR, type, expr);
+ IMPLICIT_CONV_EXPR_DIRECT_INIT (expr) = true;
+ }
else
expr = convert_like_real (conv, expr, NULL_TREE, 0,
/*issue_conversion_warnings=*/false,
diff --git a/gcc/cp/class.c b/gcc/cp/class.c
index 98e62c6ad45..586a32c436f 100644
--- a/gcc/cp/class.c
+++ b/gcc/cp/class.c
@@ -425,7 +425,7 @@ build_base_path (enum tree_code code,
interesting to the optimizers anyway. */
&& !has_empty)
{
- expr = cp_build_indirect_ref (expr, RO_NULL, complain);
+ expr = cp_build_fold_indirect_ref (expr);
expr = build_simple_base_path (expr, binfo);
if (rvalue)
expr = move (expr);
@@ -452,7 +452,7 @@ build_base_path (enum tree_code code,
t = TREE_TYPE (TYPE_VFIELD (current_class_type));
t = build_pointer_type (t);
v_offset = fold_convert (t, current_vtt_parm);
- v_offset = cp_build_indirect_ref (v_offset, RO_NULL, complain);
+ v_offset = cp_build_fold_indirect_ref (v_offset);
}
else
{
@@ -465,8 +465,7 @@ build_base_path (enum tree_code code,
if (t == NULL_TREE)
t = expr;
}
- v_offset = build_vfield_ref (cp_build_indirect_ref (t, RO_NULL,
- complain),
+ v_offset = build_vfield_ref (cp_build_fold_indirect_ref (t),
TREE_TYPE (TREE_TYPE (expr)));
}
@@ -477,7 +476,7 @@ build_base_path (enum tree_code code,
v_offset = build1 (NOP_EXPR,
build_pointer_type (ptrdiff_type_node),
v_offset);
- v_offset = cp_build_indirect_ref (v_offset, RO_NULL, complain);
+ v_offset = cp_build_fold_indirect_ref (v_offset);
TREE_CONSTANT (v_offset) = 1;
offset = convert_to_integer (ptrdiff_type_node,
@@ -516,7 +515,7 @@ build_base_path (enum tree_code code,
indout:
if (!want_pointer)
{
- expr = cp_build_indirect_ref (expr, RO_NULL, complain);
+ expr = cp_build_fold_indirect_ref (expr);
if (rvalue)
expr = move (expr);
}
@@ -552,7 +551,7 @@ build_simple_base_path (tree expr, tree binfo)
in the back end. */
temp = unary_complex_lvalue (ADDR_EXPR, expr);
if (temp)
- expr = cp_build_indirect_ref (temp, RO_NULL, tf_warning_or_error);
+ expr = cp_build_fold_indirect_ref (temp);
return expr;
}
@@ -745,8 +744,7 @@ build_vfn_ref (tree instance_ptr, tree idx)
{
tree aref;
- aref = build_vtbl_ref_1 (cp_build_indirect_ref (instance_ptr, RO_NULL,
- tf_warning_or_error),
+ aref = build_vtbl_ref_1 (cp_build_fold_indirect_ref (instance_ptr),
idx);
/* When using function descriptors, the address of the
diff --git a/gcc/cp/constexpr.c b/gcc/cp/constexpr.c
index fdc296908af..bf8ee003419 100644
--- a/gcc/cp/constexpr.c
+++ b/gcc/cp/constexpr.c
@@ -628,6 +628,20 @@ build_constexpr_constructor_member_initializers (tree type, tree body)
return error_mark_node;
}
+/* We have an expression tree T that represents a call, either CALL_EXPR
+ or AGGR_INIT_EXPR. If the call is lexically to a named function,
+ retrun the _DECL for that function. */
+
+static tree
+get_function_named_in_call (tree t)
+{
+ tree fun = cp_get_callee (t);
+ if (fun && TREE_CODE (fun) == ADDR_EXPR
+ && TREE_CODE (TREE_OPERAND (fun, 0)) == FUNCTION_DECL)
+ fun = TREE_OPERAND (fun, 0);
+ return fun;
+}
+
/* Subroutine of register_constexpr_fundef. BODY is the body of a function
declared to be constexpr, or a sub-statement thereof. Returns the
return value if suitable, error_mark_node for a statement not allowed in
@@ -682,6 +696,15 @@ constexpr_fn_retval (tree body)
case USING_STMT:
return NULL_TREE;
+ case CALL_EXPR:
+ {
+ tree fun = get_function_named_in_call (body);
+ if (fun != NULL_TREE
+ && DECL_FUNCTION_CODE (fun) == BUILT_IN_UNREACHABLE)
+ return NULL_TREE;
+ }
+ /* Fallthru. */
+
default:
return error_mark_node;
}
@@ -1098,20 +1121,6 @@ save_fundef_copy (tree fun, tree copy)
}
/* We have an expression tree T that represents a call, either CALL_EXPR
- or AGGR_INIT_EXPR. If the call is lexically to a named function,
- retrun the _DECL for that function. */
-
-static tree
-get_function_named_in_call (tree t)
-{
- tree fun = cp_get_callee (t);
- if (fun && TREE_CODE (fun) == ADDR_EXPR
- && TREE_CODE (TREE_OPERAND (fun, 0)) == FUNCTION_DECL)
- fun = TREE_OPERAND (fun, 0);
- return fun;
-}
-
-/* We have an expression tree T that represents a call, either CALL_EXPR
or AGGR_INIT_EXPR. Return the Nth argument. */
static inline tree
@@ -1180,9 +1189,18 @@ cxx_eval_builtin_function_call (const constexpr_ctx *ctx, tree t, tree fun,
{
if (!*non_constant_p && !ctx->quiet)
{
- new_call = build_call_array_loc (EXPR_LOCATION (t), TREE_TYPE (t),
- CALL_EXPR_FN (t), nargs, args);
- error ("%q+E is not a constant expression", new_call);
+ /* Do not allow__builtin_unreachable in constexpr function.
+ The __builtin_unreachable call with BUILTINS_LOCATION
+ comes from cp_maybe_instrument_return. */
+ if (DECL_FUNCTION_CODE (fun) == BUILT_IN_UNREACHABLE
+ && EXPR_LOCATION (t) == BUILTINS_LOCATION)
+ error ("constexpr call flows off the end of the function");
+ else
+ {
+ new_call = build_call_array_loc (EXPR_LOCATION (t), TREE_TYPE (t),
+ CALL_EXPR_FN (t), nargs, args);
+ error ("%q+E is not a constant expression", new_call);
+ }
}
*non_constant_p = true;
return t;
@@ -1268,8 +1286,6 @@ cxx_bind_parameters_in_call (const constexpr_ctx *ctx, tree t,
&& is_dummy_object (x))
{
x = ctx->object;
- /* We don't use cp_build_addr_expr here because we don't want to
- capture the object argument during constexpr evaluation. */
x = build_address (x);
}
bool lval = false;
@@ -5271,7 +5287,25 @@ potential_constant_expression_1 (tree t, bool want_rval, bool strict, bool now,
case VAR_DECL:
if (DECL_HAS_VALUE_EXPR_P (t))
- return RECUR (DECL_VALUE_EXPR (t), rval);
+ {
+ if (now && is_normal_capture_proxy (t))
+ {
+ /* -- in a lambda-expression, a reference to this or to a
+ variable with automatic storage duration defined outside that
+ lambda-expression, where the reference would be an
+ odr-use. */
+ if (flags & tf_error)
+ {
+ tree cap = DECL_CAPTURED_VARIABLE (t);
+ error ("lambda capture of %qE is not a constant expression",
+ cap);
+ if (!want_rval && decl_constant_var_p (cap))
+ inform (input_location, "because it is used as a glvalue");
+ }
+ return false;
+ }
+ return RECUR (DECL_VALUE_EXPR (t), rval);
+ }
if (want_rval
&& !var_in_maybe_constexpr_fn (t)
&& !type_dependent_expression_p (t)
diff --git a/gcc/cp/cp-gimplify.c b/gcc/cp/cp-gimplify.c
index 262485a5c1f..7c7c0409af8 100644
--- a/gcc/cp/cp-gimplify.c
+++ b/gcc/cp/cp-gimplify.c
@@ -1556,10 +1556,11 @@ cp_genericize_tree (tree* t_p, bool handle_invisiref_parm_p)
/* If a function that should end with a return in non-void
function doesn't obviously end with return, add ubsan
- instrumentation code to verify it at runtime. */
+ instrumentation code to verify it at runtime. If -fsanitize=return
+ is not enabled, instrument __builtin_unreachable. */
static void
-cp_ubsan_maybe_instrument_return (tree fndecl)
+cp_maybe_instrument_return (tree fndecl)
{
if (VOID_TYPE_P (TREE_TYPE (TREE_TYPE (fndecl)))
|| DECL_CONSTRUCTOR_P (fndecl)
@@ -1600,7 +1601,16 @@ cp_ubsan_maybe_instrument_return (tree fndecl)
tree *p = &DECL_SAVED_TREE (fndecl);
if (TREE_CODE (*p) == BIND_EXPR)
p = &BIND_EXPR_BODY (*p);
- t = ubsan_instrument_return (DECL_SOURCE_LOCATION (fndecl));
+
+ location_t loc = DECL_SOURCE_LOCATION (fndecl);
+ if (sanitize_flags_p (SANITIZE_RETURN, fndecl))
+ t = ubsan_instrument_return (loc);
+ else
+ {
+ tree fndecl = builtin_decl_explicit (BUILT_IN_UNREACHABLE);
+ t = build_call_expr_loc (BUILTINS_LOCATION, fndecl, 0);
+ }
+
append_to_statement_list (t, p);
}
@@ -1674,9 +1684,7 @@ cp_genericize (tree fndecl)
walk_tree's hash functionality. */
cp_genericize_tree (&DECL_SAVED_TREE (fndecl), true);
- if (sanitize_flags_p (SANITIZE_RETURN)
- && current_function_decl != NULL_TREE)
- cp_ubsan_maybe_instrument_return (fndecl);
+ cp_maybe_instrument_return (fndecl);
/* Do everything else. */
c_genericize (fndecl);
@@ -1709,6 +1717,7 @@ cxx_omp_clause_apply_fn (tree fn, tree arg1, tree arg2)
if (arg2)
defparm = TREE_CHAIN (defparm);
+ bool is_method = TREE_CODE (TREE_TYPE (fn)) == METHOD_TYPE;
if (TREE_CODE (TREE_TYPE (arg1)) == ARRAY_TYPE)
{
tree inner_type = TREE_TYPE (arg1);
@@ -1757,8 +1766,8 @@ cxx_omp_clause_apply_fn (tree fn, tree arg1, tree arg2)
for (parm = defparm; parm && parm != void_list_node;
parm = TREE_CHAIN (parm), i++)
argarray[i] = convert_default_arg (TREE_VALUE (parm),
- TREE_PURPOSE (parm), fn, i,
- tf_warning_or_error);
+ TREE_PURPOSE (parm), fn,
+ i - is_method, tf_warning_or_error);
t = build_call_a (fn, i, argarray);
t = fold_convert (void_type_node, t);
t = fold_build_cleanup_point_expr (TREE_TYPE (t), t);
@@ -1790,8 +1799,8 @@ cxx_omp_clause_apply_fn (tree fn, tree arg1, tree arg2)
for (parm = defparm; parm && parm != void_list_node;
parm = TREE_CHAIN (parm), i++)
argarray[i] = convert_default_arg (TREE_VALUE (parm),
- TREE_PURPOSE (parm),
- fn, i, tf_warning_or_error);
+ TREE_PURPOSE (parm), fn,
+ i - is_method, tf_warning_or_error);
t = build_call_a (fn, i, argarray);
t = fold_convert (void_type_node, t);
return fold_build_cleanup_point_expr (TREE_TYPE (t), t);
diff --git a/gcc/cp/cp-objcp-common.c b/gcc/cp/cp-objcp-common.c
index e051d66b67b..9a398e0218c 100644
--- a/gcc/cp/cp-objcp-common.c
+++ b/gcc/cp/cp-objcp-common.c
@@ -122,19 +122,7 @@ cxx_types_compatible_p (tree x, tree y)
return same_type_ignoring_top_level_qualifiers_p (x, y);
}
-struct debug_type_hasher : ggc_cache_ptr_hash<tree_map>
-{
- static hashval_t hash (tree_map *m) { return tree_map_hash (m); }
- static bool equal (tree_map *a, tree_map *b) { return tree_map_eq (a, b); }
-
- static int
- keep_cache_entry (tree_map *&e)
- {
- return ggc_marked_p (e->base.from);
- }
-};
-
-static GTY((cache)) hash_table<debug_type_hasher> *debug_type_hash;
+static GTY((cache)) tree_cache_map *debug_type_map;
/* Return a type to use in the debug info instead of TYPE, or NULL_TREE to
keep TYPE. */
@@ -142,38 +130,29 @@ static GTY((cache)) hash_table<debug_type_hasher> *debug_type_hash;
tree
cp_get_debug_type (const_tree type)
{
+ tree dtype = NULL_TREE;
+
if (TYPE_PTRMEMFUNC_P (type) && !typedef_variant_p (type))
+ dtype = build_offset_type (TYPE_PTRMEMFUNC_OBJECT_TYPE (type),
+ TREE_TYPE (TYPE_PTRMEMFUNC_FN_TYPE (type)));
+
+ /* We cannot simply return the debug type here because the function uses
+ the type canonicalization hashtable, which is GC-ed, so its behavior
+ depends on the actual collection points. Since we are building these
+ types on the fly for the debug info only, they would not be attached
+ to any GC root and always be swept, so we would make the contents of
+ the debug info depend on the collection points. */
+ if (dtype)
{
- if (debug_type_hash == NULL)
- debug_type_hash = hash_table<debug_type_hasher>::create_ggc (512);
-
- /* We cannot simply use build_offset_type here because the function uses
- the type canonicalization hashtable, which is GC-ed, so its behavior
- depends on the actual collection points. Since we are building these
- types on the fly for the debug info only, they would not be attached
- to any GC root and always be swept, so we would make the contents of
- the debug info depend on the collection points. */
- struct tree_map in, *h, **slot;
-
- in.base.from = CONST_CAST_TREE (type);
- in.hash = htab_hash_pointer (type);
- slot = debug_type_hash->find_slot_with_hash (&in, in.hash, INSERT);
- if (*slot)
- return (*slot)->to;
-
- tree t = build_offset_type (TYPE_PTRMEMFUNC_OBJECT_TYPE (type),
- TREE_TYPE (TYPE_PTRMEMFUNC_FN_TYPE (type)));
-
- h = ggc_alloc<tree_map> ();
- h->base.from = CONST_CAST_TREE (type);
- h->hash = htab_hash_pointer (type);
- h->to = t;
- *slot = h;
-
- return t;
+ tree ktype = CONST_CAST_TREE (type);
+ if (debug_type_map == NULL)
+ debug_type_map = tree_cache_map::create_ggc (512);
+ else if (tree *slot = debug_type_map->get (ktype))
+ return *slot;
+ debug_type_map->put (ktype, dtype);
}
- return NULL_TREE;
+ return dtype;
}
/* Return -1 if dwarf ATTR shouldn't be added for DECL, or the attribute
diff --git a/gcc/cp/cp-tree.h b/gcc/cp/cp-tree.h
index 874cbcbd2bd..6051348048f 100644
--- a/gcc/cp/cp-tree.h
+++ b/gcc/cp/cp-tree.h
@@ -5124,7 +5124,6 @@ extern GTY(()) vec<tree, va_gc> *static_decls;
/* An array of vtable-needing types that have no key function, or have
an emitted key function. */
extern GTY(()) vec<tree, va_gc> *keyed_classes;
-
/* Here's where we control how name mangling takes place. */
@@ -6462,7 +6461,6 @@ extern int uses_template_parms (tree);
extern bool uses_template_parms_level (tree, int);
extern bool in_template_function (void);
extern bool need_generic_capture (void);
-extern bool processing_nonlambda_template (void);
extern tree instantiate_class_template (tree);
extern tree instantiate_template (tree, tree, tsubst_flags_t);
extern tree fn_type_unification (tree, tree, tree,
@@ -7056,6 +7054,7 @@ extern tree build_x_indirect_ref (location_t, tree,
ref_operator, tsubst_flags_t);
extern tree cp_build_indirect_ref (tree, ref_operator,
tsubst_flags_t);
+extern tree cp_build_fold_indirect_ref (tree);
extern tree build_array_ref (location_t, tree, tree);
extern tree cp_build_array_ref (location_t, tree, tree,
tsubst_flags_t);
diff --git a/gcc/cp/cp-ubsan.c b/gcc/cp/cp-ubsan.c
index cd2b60ad488..c87c0303f57 100644
--- a/gcc/cp/cp-ubsan.c
+++ b/gcc/cp/cp-ubsan.c
@@ -205,7 +205,7 @@ cp_ubsan_check_member_access_r (tree *stmt_p, int *walk_subtrees, void *data)
if (TREE_CODE (t) == ADDR_EXPR)
{
*walk_subtrees = 0;
- t = TREE_OPERAND (stmt, 0);
+ t = TREE_OPERAND (t, 0);
cp_walk_tree (&t, cp_ubsan_check_member_access_r, data, ucmd->pset);
}
break;
@@ -298,8 +298,7 @@ cp_ubsan_dfs_initialize_vtbl_ptrs (tree binfo, void *data)
/* Compute the location of the vtpr. */
tree vtbl_ptr
- = build_vfield_ref (cp_build_indirect_ref (base_ptr, RO_NULL,
- tf_warning_or_error),
+ = build_vfield_ref (cp_build_fold_indirect_ref (base_ptr),
TREE_TYPE (binfo));
gcc_assert (vtbl_ptr != error_mark_node);
diff --git a/gcc/cp/decl.c b/gcc/cp/decl.c
index 49b871564d6..cc5c153c12b 100644
--- a/gcc/cp/decl.c
+++ b/gcc/cp/decl.c
@@ -6844,8 +6844,6 @@ cp_finish_decl (tree decl, tree init, bool init_const_expr_p,
DECL_INITIAL (decl) = NULL_TREE;
}
- init = do_dependent_capture (init);
-
/* Generally, initializers in templates are expanded when the
template is instantiated. But, if DECL is a variable constant
then it can be used in future constant expressions, so its value
@@ -7285,12 +7283,13 @@ get_tuple_decomp_init (tree decl, unsigned i)
/* It's impossible to recover the decltype of a tuple decomposition variable
based on the actual type of the variable, so store it in a hash table. */
-static GTY(()) hash_map<tree,tree> *decomp_type_table;
+
+static GTY((cache)) tree_cache_map *decomp_type_table;
static void
store_decomp_type (tree v, tree t)
{
if (!decomp_type_table)
- decomp_type_table = hash_map<tree,tree>::create_ggc (13);
+ decomp_type_table = tree_cache_map::create_ggc (13);
decomp_type_table->put (v, t);
}
@@ -14903,7 +14902,7 @@ start_preparsed_function (tree decl1, tree attrs, int flags)
gcc_assert (TYPE_PTR_P (TREE_TYPE (t)));
cp_function_chain->x_current_class_ref
- = cp_build_indirect_ref (t, RO_NULL, tf_warning_or_error);
+ = cp_build_fold_indirect_ref (t);
/* Set this second to avoid shortcut in cp_build_indirect_ref. */
cp_function_chain->x_current_class_ptr = t;
@@ -15251,7 +15250,25 @@ begin_destructor_body (void)
if (flag_lifetime_dse
/* Clobbering an empty base is harmful if it overlays real data. */
&& !is_empty_class (current_class_type))
- finish_decl_cleanup (NULL_TREE, build_clobber_this ());
+ {
+ if (sanitize_flags_p (SANITIZE_VPTR)
+ && (flag_sanitize_recover & SANITIZE_VPTR) == 0
+ && TYPE_CONTAINS_VPTR_P (current_class_type))
+ {
+ tree binfo = TYPE_BINFO (current_class_type);
+ tree ref
+ = cp_build_fold_indirect_ref (current_class_ptr);
+
+ tree vtbl_ptr = build_vfield_ref (ref, TREE_TYPE (binfo));
+ tree vtbl = build_zero_cst (TREE_TYPE (vtbl_ptr));
+ tree stmt = cp_build_modify_expr (input_location, vtbl_ptr,
+ NOP_EXPR, vtbl,
+ tf_warning_or_error);
+ finish_decl_cleanup (NULL_TREE, stmt);
+ }
+ else
+ finish_decl_cleanup (NULL_TREE, build_clobber_this ());
+ }
/* And insert cleanups for our bases and members so that they
will be properly destroyed if we throw. */
diff --git a/gcc/cp/decl2.c b/gcc/cp/decl2.c
index a23b96c53e7..0b183085e0e 100644
--- a/gcc/cp/decl2.c
+++ b/gcc/cp/decl2.c
@@ -787,6 +787,15 @@ finish_static_data_member_decl (tree decl,
&& TYPE_DOMAIN (TREE_TYPE (decl)) == NULL_TREE)
SET_VAR_HAD_UNKNOWN_BOUND (decl);
+ if (init)
+ {
+ /* Similarly to start_decl_1, we want to complete the type in order
+ to do the right thing in cp_apply_type_quals_to_decl, possibly
+ clear TYPE_QUAL_CONST (c++/65579). */
+ tree type = TREE_TYPE (decl) = complete_type (TREE_TYPE (decl));
+ cp_apply_type_quals_to_decl (cp_type_quals (type), decl);
+ }
+
cp_finish_decl (decl, init, init_const_expr_p, asmspec_tree, flags);
}
diff --git a/gcc/cp/except.c b/gcc/cp/except.c
index ecc8941984b..47f267ffb93 100644
--- a/gcc/cp/except.c
+++ b/gcc/cp/except.c
@@ -664,7 +664,7 @@ build_throw (tree exp)
CLEANUP_EH_ONLY (allocate_expr) = 1;
object = build_nop (build_pointer_type (temp_type), ptr);
- object = cp_build_indirect_ref (object, RO_NULL, tf_warning_or_error);
+ object = cp_build_fold_indirect_ref (object);
/* And initialize the exception object. */
if (CLASS_TYPE_P (temp_type))
diff --git a/gcc/cp/expr.c b/gcc/cp/expr.c
index 23e30cf789c..81b9a5b1dc9 100644
--- a/gcc/cp/expr.c
+++ b/gcc/cp/expr.c
@@ -111,6 +111,14 @@ mark_use (tree expr, bool rvalue_p, bool read_p,
{
case VAR_DECL:
case PARM_DECL:
+ if (rvalue_p && is_normal_capture_proxy (expr))
+ {
+ /* Look through capture by copy. */
+ tree cap = DECL_CAPTURED_VARIABLE (expr);
+ if (TREE_CODE (TREE_TYPE (cap)) == TREE_CODE (TREE_TYPE (expr))
+ && decl_constant_var_p (cap))
+ return RECUR (cap);
+ }
if (outer_automatic_var_p (expr)
&& decl_constant_var_p (expr))
{
@@ -146,6 +154,14 @@ mark_use (tree expr, bool rvalue_p, bool read_p,
{
/* Try to look through the reference. */
tree ref = TREE_OPERAND (expr, 0);
+ if (rvalue_p && is_normal_capture_proxy (ref))
+ {
+ /* Look through capture by reference. */
+ tree cap = DECL_CAPTURED_VARIABLE (ref);
+ if (TREE_CODE (TREE_TYPE (cap)) != REFERENCE_TYPE
+ && decl_constant_var_p (cap))
+ return RECUR (cap);
+ }
tree r = mark_rvalue_use (ref, loc, reject_builtin);
if (r != ref)
expr = convert_from_reference (r);
diff --git a/gcc/cp/init.c b/gcc/cp/init.c
index 9e6e3aff779..c76460d6ee6 100644
--- a/gcc/cp/init.c
+++ b/gcc/cp/init.c
@@ -535,7 +535,7 @@ perform_target_ctor (tree init)
/* Return the non-static data initializer for FIELD_DECL MEMBER. */
-static GTY(()) hash_map<tree, tree> *nsdmi_inst;
+static GTY((cache)) tree_cache_map *nsdmi_inst;
tree
get_nsdmi (tree member, bool in_ctor, tsubst_flags_t complain)
@@ -590,7 +590,7 @@ get_nsdmi (tree member, bool in_ctor, tsubst_flags_t complain)
if (init != error_mark_node)
{
if (!nsdmi_inst)
- nsdmi_inst = hash_map<tree,tree>::create_ggc (37);
+ nsdmi_inst = tree_cache_map::create_ggc (37);
nsdmi_inst->put (member, init);
}
@@ -1260,8 +1260,7 @@ emit_mem_initializers (tree mem_inits)
base_addr = build_base_path (PLUS_EXPR, current_class_ptr,
subobject, 1, tf_warning_or_error);
expand_aggr_init_1 (subobject, NULL_TREE,
- cp_build_indirect_ref (base_addr, RO_NULL,
- tf_warning_or_error),
+ cp_build_fold_indirect_ref (base_addr),
arguments,
flags,
tf_warning_or_error);
@@ -1351,7 +1350,7 @@ expand_virtual_init (tree binfo, tree decl)
/* Compute the value to use, when there's a VTT. */
vtt_parm = current_vtt_parm;
vtbl2 = fold_build_pointer_plus (vtt_parm, vtt_index);
- vtbl2 = cp_build_indirect_ref (vtbl2, RO_NULL, tf_warning_or_error);
+ vtbl2 = cp_build_fold_indirect_ref (vtbl2);
vtbl2 = convert (TREE_TYPE (vtbl), vtbl2);
/* The actual initializer is the VTT value only in the subobject
@@ -1361,8 +1360,7 @@ expand_virtual_init (tree binfo, tree decl)
}
/* Compute the location of the vtpr. */
- vtbl_ptr = build_vfield_ref (cp_build_indirect_ref (decl, RO_NULL,
- tf_warning_or_error),
+ vtbl_ptr = build_vfield_ref (cp_build_fold_indirect_ref (decl),
TREE_TYPE (binfo));
gcc_assert (vtbl_ptr != error_mark_node);
@@ -2758,7 +2756,7 @@ malloc_alignment ()
static bool
std_placement_new_fn_p (tree alloc_fn)
{
- if ((cxx_dialect > cxx14) && DECL_NAMESPACE_SCOPE_P (alloc_fn))
+ if (DECL_NAMESPACE_SCOPE_P (alloc_fn))
{
tree first_arg = TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (alloc_fn)));
if ((TREE_VALUE (first_arg) == ptr_type_node)
@@ -3268,7 +3266,7 @@ build_new_1 (vec<tree, va_gc> **placement, tree type, tree nelts,
alloc_node, cookie_ptr);
size_ptr_type = build_pointer_type (sizetype);
cookie_ptr = fold_convert (size_ptr_type, cookie_ptr);
- cookie = cp_build_indirect_ref (cookie_ptr, RO_NULL, complain);
+ cookie = cp_build_fold_indirect_ref (cookie_ptr);
cookie_expr = build2 (MODIFY_EXPR, sizetype, cookie, nelts);
@@ -3280,7 +3278,7 @@ build_new_1 (vec<tree, va_gc> **placement, tree type, tree nelts,
NEGATE_EXPR, sizetype,
size_in_bytes (sizetype)));
- cookie = cp_build_indirect_ref (cookie_ptr, RO_NULL, complain);
+ cookie = cp_build_fold_indirect_ref (cookie_ptr);
cookie = build2 (MODIFY_EXPR, sizetype, cookie,
size_in_bytes (elt_type));
cookie_expr = build2 (COMPOUND_EXPR, TREE_TYPE (cookie_expr),
@@ -3326,7 +3324,7 @@ build_new_1 (vec<tree, va_gc> **placement, tree type, tree nelts,
the initializer anyway since we're going to throw it away and
rebuild it at instantiation time, so just build up a single
constructor call to get any appropriate diagnostics. */
- init_expr = cp_build_indirect_ref (data_addr, RO_NULL, complain);
+ init_expr = cp_build_fold_indirect_ref (data_addr);
if (type_build_ctor_call (elt_type))
init_expr = build_special_member_call (init_expr,
complete_ctor_identifier,
@@ -3384,7 +3382,7 @@ build_new_1 (vec<tree, va_gc> **placement, tree type, tree nelts,
}
else
{
- init_expr = cp_build_indirect_ref (data_addr, RO_NULL, complain);
+ init_expr = cp_build_fold_indirect_ref (data_addr);
if (type_build_ctor_call (type) && !explicit_value_init_p)
{
@@ -4507,7 +4505,7 @@ build_vec_init (tree base, tree maxindex, tree init,
{
atype = build_pointer_type (atype);
stmt_expr = build1 (NOP_EXPR, atype, stmt_expr);
- stmt_expr = cp_build_indirect_ref (stmt_expr, RO_NULL, complain);
+ stmt_expr = cp_build_fold_indirect_ref (stmt_expr);
TREE_NO_WARNING (stmt_expr) = 1;
}
@@ -4661,8 +4659,7 @@ build_delete (tree otype, tree addr, special_function_kind auto_delete,
/* Make sure the destructor is callable. */
if (type_build_dtor_call (type))
{
- expr = build_dtor_call (cp_build_indirect_ref (addr, RO_NULL,
- complain),
+ expr = build_dtor_call (cp_build_fold_indirect_ref (addr),
sfk_complete_destructor, flags, complain);
if (expr == error_mark_node)
return error_mark_node;
@@ -4738,7 +4735,7 @@ build_delete (tree otype, tree addr, special_function_kind auto_delete,
complain);
}
- expr = build_dtor_call (cp_build_indirect_ref (addr, RO_NULL, complain),
+ expr = build_dtor_call (cp_build_fold_indirect_ref (addr),
auto_delete, flags, complain);
if (expr == error_mark_node)
return error_mark_node;
@@ -4918,7 +4915,7 @@ build_vec_delete (tree base, tree maxindex,
sizetype, TYPE_SIZE_UNIT (sizetype));
cookie_addr = fold_build_pointer_plus (fold_convert (size_ptr_type, base),
cookie_addr);
- maxindex = cp_build_indirect_ref (cookie_addr, RO_NULL, complain);
+ maxindex = cp_build_fold_indirect_ref (cookie_addr);
}
else if (TREE_CODE (type) == ARRAY_TYPE)
{
diff --git a/gcc/cp/lambda.c b/gcc/cp/lambda.c
index bb6c68a100a..4480c67dc5f 100644
--- a/gcc/cp/lambda.c
+++ b/gcc/cp/lambda.c
@@ -245,7 +245,8 @@ lambda_capture_field_type (tree expr, bool explicit_init_p,
{
type = non_reference (unlowered_expr_type (expr));
- if (!is_this && by_reference_p)
+ if (!is_this
+ && (by_reference_p || TREE_CODE (type) == FUNCTION_TYPE))
type = build_reference_type (type);
}
@@ -557,8 +558,7 @@ add_capture (tree lambda, tree id, tree orig_init, bool by_reference_p,
{
gcc_assert (POINTER_TYPE_P (type));
type = TREE_TYPE (type);
- initializer = cp_build_indirect_ref (initializer, RO_NULL,
- tf_warning_or_error);
+ initializer = cp_build_fold_indirect_ref (initializer);
}
if (dependent_type_p (type))
@@ -862,8 +862,7 @@ maybe_resolve_dummy (tree object, bool add_capture_p)
if (tree lam = resolvable_dummy_lambda (object))
if (tree cap = lambda_expr_this_capture (lam, add_capture_p))
if (cap != error_mark_node)
- object = build_x_indirect_ref (EXPR_LOCATION (object), cap,
- RO_NULL, tf_warning_or_error);
+ object = build_fold_indirect_ref (cap);
return object;
}
@@ -987,121 +986,6 @@ generic_lambda_fn_p (tree callop)
&& PRIMARY_TEMPLATE_P (DECL_TI_TEMPLATE (callop)));
}
-/* Returns true iff we need to consider default capture for an enclosing
- generic lambda. */
-
-bool
-need_generic_capture (void)
-{
- if (!processing_template_decl)
- return false;
-
- tree outer_closure = NULL_TREE;
- for (tree t = current_class_type; t;
- t = decl_type_context (TYPE_MAIN_DECL (t)))
- {
- tree lam = CLASSTYPE_LAMBDA_EXPR (t);
- if (!lam || LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lam) == CPLD_NONE)
- /* No default capture. */
- break;
- outer_closure = t;
- }
-
- if (!outer_closure)
- /* No lambda. */
- return false;
- else if (dependent_type_p (outer_closure))
- /* The enclosing context isn't instantiated. */
- return false;
- else
- return true;
-}
-
-/* A lambda-expression...is said to implicitly capture the entity...if the
- compound-statement...names the entity in a potentially-evaluated
- expression where the enclosing full-expression depends on a generic lambda
- parameter declared within the reaching scope of the lambda-expression. */
-
-static tree
-dependent_capture_r (tree *tp, int *walk_subtrees, void *data)
-{
- hash_set<tree> *pset = (hash_set<tree> *)data;
-
- if (TYPE_P (*tp))
- *walk_subtrees = 0;
-
- if (outer_automatic_var_p (*tp))
- {
- tree t = process_outer_var_ref (*tp, tf_warning_or_error, /*force*/true);
- if (t != *tp
- && TREE_CODE (TREE_TYPE (t)) == REFERENCE_TYPE
- && TREE_CODE (TREE_TYPE (*tp)) != REFERENCE_TYPE)
- t = convert_from_reference (t);
- *tp = t;
- }
-
- if (pset->add (*tp))
- *walk_subtrees = 0;
-
- switch (TREE_CODE (*tp))
- {
- /* Don't walk into unevaluated context or another lambda. */
- case SIZEOF_EXPR:
- case ALIGNOF_EXPR:
- case TYPEID_EXPR:
- case NOEXCEPT_EXPR:
- case LAMBDA_EXPR:
- *walk_subtrees = 0;
- break;
-
- /* Don't walk into statements whose subexpressions we already
- handled. */
- case TRY_BLOCK:
- case EH_SPEC_BLOCK:
- case HANDLER:
- case IF_STMT:
- case FOR_STMT:
- case RANGE_FOR_STMT:
- case WHILE_STMT:
- case DO_STMT:
- case SWITCH_STMT:
- case STATEMENT_LIST:
- case RETURN_EXPR:
- *walk_subtrees = 0;
- break;
-
- case DECL_EXPR:
- {
- tree decl = DECL_EXPR_DECL (*tp);
- if (VAR_P (decl))
- {
- /* walk_tree_1 won't step in here. */
- cp_walk_tree (&DECL_INITIAL (decl),
- dependent_capture_r, &pset, NULL);
- *walk_subtrees = 0;
- }
- }
- break;
-
- default:
- break;
- }
-
- return NULL_TREE;
-}
-
-tree
-do_dependent_capture (tree expr, bool force)
-{
- if (!need_generic_capture ()
- || (!force && !instantiation_dependent_expression_p (expr)))
- return expr;
-
- hash_set<tree> pset;
- cp_walk_tree (&expr, dependent_capture_r, &pset, NULL);
- return expr;
-}
-
/* If the closure TYPE has a static op(), also add a conversion to function
pointer. */
@@ -1154,8 +1038,7 @@ maybe_add_lambda_conv_op (tree type)
return expression for a deduced return call op to allow for simple
implementation of the conversion operator. */
- tree instance = cp_build_indirect_ref (thisarg, RO_NULL,
- tf_warning_or_error);
+ tree instance = cp_build_fold_indirect_ref (thisarg);
tree objfn = build_min (COMPONENT_REF, NULL_TREE,
instance, DECL_NAME (callop), NULL_TREE);
int nargs = list_length (DECL_ARGUMENTS (callop)) - 1;
diff --git a/gcc/cp/parser.c b/gcc/cp/parser.c
index 77b96376e13..1860bf0f175 100644
--- a/gcc/cp/parser.c
+++ b/gcc/cp/parser.c
@@ -11931,7 +11931,8 @@ do_range_for_auto_deduction (tree decl, tree range_expr)
{
iter_decl = build_decl (input_location, VAR_DECL, NULL_TREE,
iter_type);
- iter_decl = build_x_indirect_ref (input_location, iter_decl, RO_NULL,
+ iter_decl = build_x_indirect_ref (input_location, iter_decl,
+ RO_UNARY_STAR,
tf_warning_or_error);
TREE_TYPE (decl) = do_auto_deduction (TREE_TYPE (decl),
iter_decl, auto_node);
@@ -12048,7 +12049,7 @@ cp_convert_range_for (tree statement, tree range_decl, tree range_expr,
/* The declaration is initialized with *__begin inside the loop body. */
cp_finish_decl (range_decl,
- build_x_indirect_ref (input_location, begin, RO_NULL,
+ build_x_indirect_ref (input_location, begin, RO_UNARY_STAR,
tf_warning_or_error),
/*is_constant_init*/false, NULL_TREE,
LOOKUP_ONLYCONVERTING);
@@ -20843,7 +20844,7 @@ inject_this_parameter (tree ctype, cp_cv_quals quals)
/* Clear this first to avoid shortcut in cp_build_indirect_ref. */
current_class_ptr = NULL_TREE;
current_class_ref
- = cp_build_indirect_ref (this_parm, RO_NULL, tf_warning_or_error);
+ = cp_build_fold_indirect_ref (this_parm);
current_class_ptr = this_parm;
}
diff --git a/gcc/cp/pt.c b/gcc/cp/pt.c
index 710333ddaba..562b9272596 100644
--- a/gcc/cp/pt.c
+++ b/gcc/cp/pt.c
@@ -9500,16 +9500,6 @@ in_template_function (void)
return ret;
}
-/* Returns true iff we are currently within a template other than a
- default-capturing generic lambda, so we don't need to worry about semantic
- processing. */
-
-bool
-processing_nonlambda_template (void)
-{
- return processing_template_decl && !need_generic_capture ();
-}
-
/* Returns true if T depends on any template parameter with level LEVEL. */
bool
@@ -12024,7 +12014,7 @@ tsubst_aggr_type (tree t,
}
}
-static GTY(()) hash_map<tree, tree> *defarg_inst;
+static GTY((cache)) tree_cache_map *defarg_inst;
/* Substitute into the default argument ARG (a default argument for
FN), which has the indicated TYPE. */
@@ -12111,7 +12101,7 @@ tsubst_default_argument (tree fn, int parmnum, tree type, tree arg,
if (arg != error_mark_node && !cp_unevaluated_operand)
{
if (!defarg_inst)
- defarg_inst = hash_map<tree,tree>::create_ggc (37);
+ defarg_inst = tree_cache_map::create_ggc (37);
defarg_inst->put (parm, arg);
}
diff --git a/gcc/cp/rtti.c b/gcc/cp/rtti.c
index 10ecbfd9589..b158507d7a8 100644
--- a/gcc/cp/rtti.c
+++ b/gcc/cp/rtti.c
@@ -206,8 +206,7 @@ build_headof (tree exp)
index = build_int_cst (NULL_TREE,
-2 * TARGET_VTABLE_DATA_ENTRY_DISTANCE);
- offset = build_vtbl_ref (cp_build_indirect_ref (exp, RO_NULL,
- tf_warning_or_error),
+ offset = build_vtbl_ref (cp_build_fold_indirect_ref (exp),
index);
type = cp_build_qualified_type (ptr_type_node,
@@ -303,7 +302,7 @@ get_tinfo_decl_dynamic (tree exp, tsubst_flags_t complain)
/* Otherwise return the type_info for the static type of the expr. */
t = get_tinfo_ptr (TYPE_MAIN_VARIANT (type));
- return cp_build_indirect_ref (t, RO_NULL, complain);
+ return cp_build_fold_indirect_ref (t);
}
static bool
@@ -365,7 +364,7 @@ build_typeid (tree exp, tsubst_flags_t complain)
exp = cp_build_addr_expr (exp, complain);
exp = save_expr (exp);
cond = cp_convert (boolean_type_node, exp, complain);
- exp = cp_build_indirect_ref (exp, RO_NULL, complain);
+ exp = cp_build_fold_indirect_ref (exp);
}
exp = get_tinfo_decl_dynamic (exp, complain);
@@ -529,7 +528,7 @@ get_typeid (tree type, tsubst_flags_t complain)
if (!type)
return error_mark_node;
- return cp_build_indirect_ref (get_tinfo_ptr (type), RO_NULL, complain);
+ return cp_build_fold_indirect_ref (get_tinfo_ptr (type));
}
/* Check whether TEST is null before returning RESULT. If TEST is used in
diff --git a/gcc/cp/semantics.c b/gcc/cp/semantics.c
index 664952e749c..51489d17ad5 100644
--- a/gcc/cp/semantics.c
+++ b/gcc/cp/semantics.c
@@ -410,8 +410,6 @@ maybe_cleanup_point_expr (tree expr)
{
if (!processing_template_decl && stmts_are_full_exprs_p ())
expr = fold_build_cleanup_point_expr (TREE_TYPE (expr), expr);
- else
- expr = do_dependent_capture (expr);
return expr;
}
@@ -425,8 +423,6 @@ maybe_cleanup_point_expr_void (tree expr)
{
if (!processing_template_decl && stmts_are_full_exprs_p ())
expr = fold_build_cleanup_point_expr (void_type_node, expr);
- else
- expr = do_dependent_capture (expr);
return expr;
}
@@ -633,8 +629,6 @@ finish_goto_stmt (tree destination)
= fold_build_cleanup_point_expr (TREE_TYPE (destination),
destination);
}
- else
- destination = do_dependent_capture (destination);
}
check_goto (destination);
@@ -656,7 +650,7 @@ maybe_convert_cond (tree cond)
/* Wait until we instantiate templates before doing conversion. */
if (processing_template_decl)
- return do_dependent_capture (cond);
+ return cond;
if (warn_sequence_point)
verify_sequence_points (cond);
@@ -3291,10 +3285,14 @@ outer_automatic_var_p (tree decl)
}
/* DECL satisfies outer_automatic_var_p. Possibly complain about it or
- rewrite it for lambda capture. */
+ rewrite it for lambda capture.
+
+ If ODR_USE is true, we're being called from mark_use, and we complain about
+ use of constant variables. If ODR_USE is false, we're being called for the
+ id-expression, and we do lambda capture. */
tree
-process_outer_var_ref (tree decl, tsubst_flags_t complain, bool force_use)
+process_outer_var_ref (tree decl, tsubst_flags_t complain, bool odr_use)
{
if (cp_unevaluated_operand)
/* It's not a use (3.2) if we're in an unevaluated context. */
@@ -3315,12 +3313,6 @@ process_outer_var_ref (tree decl, tsubst_flags_t complain, bool force_use)
if (parsing_nsdmi ())
containing_function = NULL_TREE;
- /* Core issue 696: Only an odr-use of an outer automatic variable causes a
- capture (or error), and a constant variable can decay to a prvalue
- constant without odr-use. So don't capture yet. */
- if (decl_constant_var_p (decl) && !force_use)
- return decl;
-
if (containing_function && LAMBDA_FUNCTION_P (containing_function))
{
/* Check whether we've already built a proxy. */
@@ -3336,7 +3328,7 @@ process_outer_var_ref (tree decl, tsubst_flags_t complain, bool force_use)
return d;
else
/* We need to capture an outer proxy. */
- return process_outer_var_ref (d, complain, force_use);
+ return process_outer_var_ref (d, complain, odr_use);
}
}
@@ -3382,12 +3374,19 @@ process_outer_var_ref (tree decl, tsubst_flags_t complain, bool force_use)
error ("cannot capture member %qD of anonymous union", decl);
return error_mark_node;
}
- if (context == containing_function)
+ /* Do lambda capture when processing the id-expression, not when
+ odr-using a variable. */
+ if (!odr_use && context == containing_function)
{
decl = add_default_capture (lambda_stack,
/*id=*/DECL_NAME (decl),
initializer);
}
+ /* Only an odr-use of an outer automatic variable causes an
+ error, and a constant variable can decay to a prvalue
+ constant without odr-use. So don't complain yet. */
+ else if (!odr_use && decl_constant_var_p (decl))
+ return decl;
else if (lambda_expr)
{
if (complain & tf_error)
diff --git a/gcc/cp/tree.c b/gcc/cp/tree.c
index b63f2ae4c5d..c60d54ab01f 100644
--- a/gcc/cp/tree.c
+++ b/gcc/cp/tree.c
@@ -3841,7 +3841,7 @@ tree
build_dummy_object (tree type)
{
tree decl = build1 (CONVERT_EXPR, build_pointer_type (type), void_node);
- return cp_build_indirect_ref (decl, RO_NULL, tf_warning_or_error);
+ return cp_build_fold_indirect_ref (decl);
}
/* We've gotten a reference to a member of TYPE. Return *this if appropriate,
@@ -5011,7 +5011,7 @@ stabilize_expr (tree exp, tree* initp)
exp = cp_build_addr_expr (exp, tf_warning_or_error);
init_expr = get_target_expr (exp);
exp = TARGET_EXPR_SLOT (init_expr);
- exp = cp_build_indirect_ref (exp, RO_NULL, tf_warning_or_error);
+ exp = cp_build_fold_indirect_ref (exp);
if (xval)
exp = move (exp);
}
diff --git a/gcc/cp/typeck.c b/gcc/cp/typeck.c
index 7db8719d50d..cfbeaddd030 100644
--- a/gcc/cp/typeck.c
+++ b/gcc/cp/typeck.c
@@ -2356,7 +2356,7 @@ build_class_member_access_expr (cp_expr object, tree member,
{
tree temp = unary_complex_lvalue (ADDR_EXPR, object);
if (temp)
- object = cp_build_indirect_ref (temp, RO_NULL, complain);
+ object = cp_build_fold_indirect_ref (temp);
}
/* In [expr.ref], there is an explicit list of the valid choices for
@@ -3035,20 +3035,19 @@ build_x_indirect_ref (location_t loc, tree expr, ref_operator errorstring,
return rval;
}
-/* Helper function called from c-common. */
-tree
-build_indirect_ref (location_t /*loc*/,
- tree ptr, ref_operator errorstring)
-{
- return cp_build_indirect_ref (ptr, errorstring, tf_warning_or_error);
-}
+/* The implementation of the above, and of indirection implied by other
+ constructs. If DO_FOLD is true, fold away INDIRECT_REF of ADDR_EXPR. */
-tree
-cp_build_indirect_ref (tree ptr, ref_operator errorstring,
- tsubst_flags_t complain)
+static tree
+cp_build_indirect_ref_1 (tree ptr, ref_operator errorstring,
+ tsubst_flags_t complain, bool do_fold)
{
tree pointer, type;
+ /* RO_NULL should only be used with the folding entry points below, not
+ cp_build_indirect_ref. */
+ gcc_checking_assert (errorstring != RO_NULL || do_fold);
+
if (ptr == current_class_ptr
|| (TREE_CODE (ptr) == NOP_EXPR
&& TREE_OPERAND (ptr, 0) == current_class_ptr
@@ -3092,7 +3091,7 @@ cp_build_indirect_ref (tree ptr, ref_operator errorstring,
error ("%qT is not a pointer-to-object type", type);
return error_mark_node;
}
- else if (TREE_CODE (pointer) == ADDR_EXPR
+ else if (do_fold && TREE_CODE (pointer) == ADDR_EXPR
&& same_type_p (t, TREE_TYPE (TREE_OPERAND (pointer, 0))))
/* The POINTER was something like `&x'. We simplify `*&x' to
`x'. */
@@ -3141,6 +3140,34 @@ cp_build_indirect_ref (tree ptr, ref_operator errorstring,
return error_mark_node;
}
+/* Entry point used by c-common, which expects folding. */
+
+tree
+build_indirect_ref (location_t /*loc*/,
+ tree ptr, ref_operator errorstring)
+{
+ return cp_build_indirect_ref_1 (ptr, errorstring, tf_warning_or_error, true);
+}
+
+/* Entry point used by internal indirection needs that don't correspond to any
+ syntactic construct. */
+
+tree
+cp_build_fold_indirect_ref (tree pointer)
+{
+ return cp_build_indirect_ref_1 (pointer, RO_NULL, tf_warning_or_error, true);
+}
+
+/* Entry point used by indirection needs that correspond to some syntactic
+ construct. */
+
+tree
+cp_build_indirect_ref (tree ptr, ref_operator errorstring,
+ tsubst_flags_t complain)
+{
+ return cp_build_indirect_ref_1 (ptr, errorstring, complain, false);
+}
+
/* This handles expressions of the form "a[i]", which denotes
an array reference.
@@ -3477,13 +3504,13 @@ get_member_function_from_ptrfunc (tree *instance_ptrptr, tree function,
/* Next extract the vtable pointer from the object. */
vtbl = build1 (NOP_EXPR, build_pointer_type (vtbl_ptr_type_node),
instance_ptr);
- vtbl = cp_build_indirect_ref (vtbl, RO_NULL, complain);
+ vtbl = cp_build_fold_indirect_ref (vtbl);
if (vtbl == error_mark_node)
return error_mark_node;
/* Finally, extract the function pointer from the vtable. */
e2 = fold_build_pointer_plus_loc (input_location, vtbl, idx);
- e2 = cp_build_indirect_ref (e2, RO_NULL, complain);
+ e2 = cp_build_fold_indirect_ref (e2);
if (e2 == error_mark_node)
return error_mark_node;
TREE_CONSTANT (e2) = 1;
@@ -6823,6 +6850,9 @@ build_static_cast_1 (tree type, tree expr, bool c_cast_p,
{
tree base;
+ if (processing_template_decl)
+ return expr;
+
/* There is a standard conversion from "D*" to "B*" even if "B"
is ambiguous or inaccessible. If this is really a
static_cast, then we check both for inaccessibility and
@@ -6867,6 +6897,8 @@ build_static_cast_1 (tree type, tree expr, bool c_cast_p,
&& reference_related_p (TREE_TYPE (type), intype)
&& (c_cast_p || at_least_as_qualified_p (TREE_TYPE (type), intype)))
{
+ if (processing_template_decl)
+ return expr;
if (clk == clk_ordinary)
{
/* Handle the (non-bit-field) lvalue case here by casting to
@@ -6914,6 +6946,9 @@ build_static_cast_1 (tree type, tree expr, bool c_cast_p,
c_cast_p, complain);
if (result)
{
+ if (processing_template_decl)
+ return expr;
+
result = convert_from_reference (result);
/* [expr.static.cast]
@@ -6955,7 +6990,11 @@ build_static_cast_1 (tree type, tree expr, bool c_cast_p,
|| SCALAR_FLOAT_TYPE_P (type))
&& (INTEGRAL_OR_ENUMERATION_TYPE_P (intype)
|| SCALAR_FLOAT_TYPE_P (intype)))
- return ocp_convert (type, expr, CONV_C_CAST, LOOKUP_NORMAL, complain);
+ {
+ if (processing_template_decl)
+ return expr;
+ return ocp_convert (type, expr, CONV_C_CAST, LOOKUP_NORMAL, complain);
+ }
if (TYPE_PTR_P (type) && TYPE_PTR_P (intype)
&& CLASS_TYPE_P (TREE_TYPE (type))
@@ -6968,6 +7007,9 @@ build_static_cast_1 (tree type, tree expr, bool c_cast_p,
{
tree base;
+ if (processing_template_decl)
+ return expr;
+
if (!c_cast_p
&& check_for_casting_away_constness (intype, type, STATIC_CAST_EXPR,
complain))
@@ -7022,6 +7064,8 @@ build_static_cast_1 (tree type, tree expr, bool c_cast_p,
STATIC_CAST_EXPR,
complain))
return error_mark_node;
+ if (processing_template_decl)
+ return expr;
return convert_ptrmem (type, expr, /*allow_inverse_p=*/1,
c_cast_p, complain);
}
@@ -7041,6 +7085,8 @@ build_static_cast_1 (tree type, tree expr, bool c_cast_p,
&& check_for_casting_away_constness (intype, type, STATIC_CAST_EXPR,
complain))
return error_mark_node;
+ if (processing_template_decl)
+ return expr;
return build_nop (type, expr);
}
@@ -9115,7 +9161,7 @@ check_return_expr (tree retval, bool *no_warning)
dependent:
/* We should not have changed the return value. */
gcc_assert (retval == saved_retval);
- return do_dependent_capture (retval, /*force*/true);
+ return retval;
}
/* The fabled Named Return Value optimization, as per [class.copy]/15:
diff --git a/gcc/cp/typeck2.c b/gcc/cp/typeck2.c
index 190cfd4535b..76c2392209a 100644
--- a/gcc/cp/typeck2.c
+++ b/gcc/cp/typeck2.c
@@ -1792,7 +1792,7 @@ build_x_arrow (location_t loc, tree expr, tsubst_flags_t complain)
return expr;
}
- return cp_build_indirect_ref (last_rval, RO_NULL, complain);
+ return cp_build_indirect_ref (last_rval, RO_ARROW, complain);
}
if (complain & tf_error)
@@ -1893,7 +1893,7 @@ build_m_component_ref (tree datum, tree component, tsubst_flags_t complain)
value stored in the pointer-to-data-member. */
ptype = build_pointer_type (type);
datum = fold_build_pointer_plus (fold_convert (ptype, datum), component);
- datum = cp_build_indirect_ref (datum, RO_NULL, complain);
+ datum = cp_build_fold_indirect_ref (datum);
if (datum == error_mark_node)
return error_mark_node;
@@ -1922,17 +1922,26 @@ build_m_component_ref (tree datum, tree component, tsubst_flags_t complain)
ptrmem_type);
return error_mark_node;
}
- else if (!lval
- && !FUNCTION_RVALUE_QUALIFIED (type)
- && (cxx_dialect < cxx2a
- || ((type_memfn_quals (type)
- & (TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE))
- != TYPE_QUAL_CONST)))
+ else if (!lval && !FUNCTION_RVALUE_QUALIFIED (type))
{
- if (complain & tf_error)
- error ("pointer-to-member-function type %qT requires an lvalue",
- ptrmem_type);
- return error_mark_node;
+ if ((type_memfn_quals (type)
+ & (TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE))
+ != TYPE_QUAL_CONST)
+ {
+ if (complain & tf_error)
+ error ("pointer-to-member-function type %qT requires "
+ "an lvalue", ptrmem_type);
+ return error_mark_node;
+ }
+ else if (cxx_dialect < cxx2a)
+ {
+ if (complain & tf_warning_or_error)
+ pedwarn (input_location, OPT_Wpedantic,
+ "pointer-to-member-function type %qT requires "
+ "an lvalue before C++2a", ptrmem_type);
+ else
+ return error_mark_node;
+ }
}
}
return build2 (OFFSET_REF, type, datum, component);
diff --git a/gcc/cselib.c b/gcc/cselib.c
index 9c8f206c909..c491bded100 100644
--- a/gcc/cselib.c
+++ b/gcc/cselib.c
@@ -1440,15 +1440,8 @@ expand_loc (struct elt_loc_list *p, struct expand_value_data *evd,
for (; p; p = p->next)
{
- /* Return these right away to avoid returning stack pointer based
- expressions for frame pointer and vice versa, which is something
- that would confuse DSE. See the comment in cselib_expand_value_rtx_1
- for more details. */
if (REG_P (p->loc)
- && (REGNO (p->loc) == STACK_POINTER_REGNUM
- || REGNO (p->loc) == FRAME_POINTER_REGNUM
- || REGNO (p->loc) == HARD_FRAME_POINTER_REGNUM
- || REGNO (p->loc) == cfa_base_preserved_regno))
+ && REGNO (p->loc) == cfa_base_preserved_regno)
return p->loc;
/* Avoid infinite recursion trying to expand a reg into a
the same reg. */
@@ -1614,26 +1607,7 @@ cselib_expand_value_rtx_1 (rtx orig, struct expand_value_data *evd,
rtx result;
unsigned regno = REGNO (orig);
- /* The only thing that we are not willing to do (this
- is requirement of dse and if others potential uses
- need this function we should add a parm to control
- it) is that we will not substitute the
- STACK_POINTER_REGNUM, FRAME_POINTER or the
- HARD_FRAME_POINTER.
-
- These expansions confuses the code that notices that
- stores into the frame go dead at the end of the
- function and that the frame is not effected by calls
- to subroutines. If you allow the
- STACK_POINTER_REGNUM substitution, then dse will
- think that parameter pushing also goes dead which is
- wrong. If you allow the FRAME_POINTER or the
- HARD_FRAME_POINTER then you lose the opportunity to
- make the frame assumptions. */
- if (regno == STACK_POINTER_REGNUM
- || regno == FRAME_POINTER_REGNUM
- || regno == HARD_FRAME_POINTER_REGNUM
- || regno == cfa_base_preserved_regno)
+ if (regno == cfa_base_preserved_regno)
return orig;
bitmap_set_bit (evd->regs_active, regno);
@@ -2389,6 +2363,15 @@ cselib_record_set (rtx dest, cselib_val *src_elt, cselib_val *dest_addr_elt)
if (REG_P (dest))
{
+ /* Do not record equivalences for the frame pointer, since that is
+ ultimately set from the stack pointer. We need to maintain
+ the invariant (relied on by alias.c) that references to a given
+ region of the stack consistently use the frame pointer or
+ consistently use the stack pointer; we cannot mix the two. */
+ if (dest == hard_frame_pointer_rtx
+ || dest == frame_pointer_rtx)
+ return;
+
unsigned int dreg = REGNO (dest);
if (dreg < FIRST_PSEUDO_REGISTER)
{
diff --git a/gcc/debug.h b/gcc/debug.h
index 19b27848ca8..277d990c20f 100644
--- a/gcc/debug.h
+++ b/gcc/debug.h
@@ -241,8 +241,7 @@ extern void dwarf2out_vms_end_prologue (unsigned int, const char *);
extern void dwarf2out_vms_begin_epilogue (unsigned int, const char *);
extern void dwarf2out_end_epilogue (unsigned int, const char *);
extern void dwarf2out_frame_finish (void);
-/* Decide whether we want to emit frame unwind information for the current
- translation unit. */
+extern bool dwarf2out_do_eh_frame (void);
extern bool dwarf2out_do_frame (void);
extern bool dwarf2out_do_cfi_asm (void);
extern void dwarf2out_switch_text_section (void);
diff --git a/gcc/defaults.h b/gcc/defaults.h
index 768c9879df9..978ec98ffba 100644
--- a/gcc/defaults.h
+++ b/gcc/defaults.h
@@ -170,7 +170,7 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
do { \
fputs (user_label_prefix, (FILE)); \
fputs ((NAME), (FILE)); \
- } while (0);
+ } while (0)
#endif
/* Allow target to print debug info labels specially. This is useful for
diff --git a/gcc/diagnostic.c b/gcc/diagnostic.c
index 813bca6f65d..86201333d00 100644
--- a/gcc/diagnostic.c
+++ b/gcc/diagnostic.c
@@ -291,6 +291,25 @@ diagnostic_get_color_for_kind (diagnostic_t kind)
return diagnostic_kind_color[kind];
}
+/* Return a formatted line and column ':%line:%column'. Elided if
+ zero. The result is a statically allocated buffer. */
+
+static const char *
+maybe_line_and_column (int line, int col)
+{
+ static char result[32];
+
+ if (line)
+ {
+ size_t l = snprintf (result, sizeof (result),
+ col ? ":%d:%d" : ":%d", line, col);
+ gcc_checking_assert (l < sizeof (result));
+ }
+ else
+ result[0] = 0;
+ return result;
+}
+
/* Return a malloc'd string describing a location e.g. "foo.c:42:10".
The caller is responsible for freeing the memory. */
@@ -301,19 +320,13 @@ diagnostic_get_location_text (diagnostic_context *context,
pretty_printer *pp = context->printer;
const char *locus_cs = colorize_start (pp_show_color (pp), "locus");
const char *locus_ce = colorize_stop (pp_show_color (pp));
+ const char *file = s.file ? s.file : progname;
+ int line = strcmp (file, N_("<built-in>")) ? s.line : 0;
+ int col = context->show_column ? s.column : 0;
- if (s.file == NULL)
- return build_message_string ("%s%s:%s", locus_cs, progname, locus_ce);
-
- if (!strcmp (s.file, N_("<built-in>")))
- return build_message_string ("%s%s:%s", locus_cs, s.file, locus_ce);
-
- if (context->show_column)
- return build_message_string ("%s%s:%d:%d:%s", locus_cs, s.file, s.line,
- s.column, locus_ce);
- else
- return build_message_string ("%s%s:%d:%s", locus_cs, s.file, s.line,
- locus_ce);
+ const char *line_col = maybe_line_and_column (line, col);
+ return build_message_string ("%s%s%s:%s", locus_cs, file,
+ line_col, locus_ce);
}
/* Return a malloc'd string describing a location and the severity of the
@@ -575,21 +588,20 @@ diagnostic_report_current_module (diagnostic_context *context, location_t where)
if (! MAIN_FILE_P (map))
{
map = INCLUDED_FROM (line_table, map);
- if (context->show_column)
- pp_verbatim (context->printer,
- "In file included from %r%s:%d:%d%R", "locus",
- LINEMAP_FILE (map),
- LAST_SOURCE_LINE (map), LAST_SOURCE_COLUMN (map));
- else
- pp_verbatim (context->printer,
- "In file included from %r%s:%d%R", "locus",
- LINEMAP_FILE (map), LAST_SOURCE_LINE (map));
+ const char *line_col
+ = maybe_line_and_column (LAST_SOURCE_LINE (map),
+ context->show_column
+ ? LAST_SOURCE_COLUMN (map) : 0);
+ pp_verbatim (context->printer,
+ "In file included from %r%s%s%R", "locus",
+ LINEMAP_FILE (map), line_col);
while (! MAIN_FILE_P (map))
{
map = INCLUDED_FROM (line_table, map);
+ line_col = maybe_line_and_column (LAST_SOURCE_LINE (map), 0);
pp_verbatim (context->printer,
- ",\n from %r%s:%d%R", "locus",
- LINEMAP_FILE (map), LAST_SOURCE_LINE (map));
+ ",\n from %r%s%s%R", "locus",
+ LINEMAP_FILE (map), line_col);
}
pp_verbatim (context->printer, ":");
pp_newline (context->printer);
@@ -1663,7 +1675,14 @@ test_diagnostic_get_location_text ()
assert_location_text ("PROGNAME:", NULL, 0, 0, true);
assert_location_text ("<built-in>:", "<built-in>", 42, 10, true);
assert_location_text ("foo.c:42:10:", "foo.c", 42, 10, true);
+ assert_location_text ("foo.c:42:", "foo.c", 42, 0, true);
+ assert_location_text ("foo.c:", "foo.c", 0, 10, true);
assert_location_text ("foo.c:42:", "foo.c", 42, 10, false);
+ assert_location_text ("foo.c:", "foo.c", 0, 10, false);
+
+ maybe_line_and_column (INT_MAX, INT_MAX);
+ maybe_line_and_column (INT_MIN, INT_MIN);
+
progname = old_progname;
}
diff --git a/gcc/doc/cpp.texi b/gcc/doc/cpp.texi
index 8cafb6554f8..94437d5403e 100644
--- a/gcc/doc/cpp.texi
+++ b/gcc/doc/cpp.texi
@@ -1675,20 +1675,27 @@ macro. We could define @code{eprintf} like this, instead:
@end smallexample
@noindent
-This formulation looks more descriptive, but unfortunately it is less
-flexible: you must now supply at least one argument after the format
-string. In standard C, you cannot omit the comma separating the named
-argument from the variable arguments. Furthermore, if you leave the
-variable argument empty, you will get a syntax error, because
-there will be an extra comma after the format string.
+This formulation looks more descriptive, but historically it was less
+flexible: you had to supply at least one argument after the format
+string. In standard C, you could not omit the comma separating the
+named argument from the variable arguments. (Note that this
+restriction has been lifted in C++2a, and never existed in GNU C; see
+below.)
+
+Furthermore, if you left the variable argument empty, you would have
+gotten a syntax error, because there would have been an extra comma
+after the format string.
@smallexample
eprintf("success!\n", );
@expansion{} fprintf(stderr, "success!\n", );
@end smallexample
-GNU CPP has a pair of extensions which deal with this problem. First,
-you are allowed to leave the variable argument out entirely:
+This has been fixed in C++2a, and GNU CPP also has a pair of
+extensions which deal with this problem.
+
+First, in GNU CPP, and in C++ beginning in C++2a, you are allowed to
+leave the variable argument out entirely:
@smallexample
eprintf ("success!\n")
@@ -1696,8 +1703,24 @@ eprintf ("success!\n")
@end smallexample
@noindent
-Second, the @samp{##} token paste operator has a special meaning when
-placed between a comma and a variable argument. If you write
+Second, C++2a introduces the @code{@w{__VA_OPT__}} function macro.
+This macro may only appear in the definition of a variadic macro. If
+the variable argument has any tokens, then a @code{@w{__VA_OPT__}}
+invocation expands to its argument; but if the variable argument does
+not have any tokens, the @code{@w{__VA_OPT__}} expands to nothing:
+
+@smallexample
+#define eprintf(format, @dots{}) \\
+ fprintf (stderr, format __VA_OPT__(,) __VA_ARGS__)
+@end smallexample
+
+@code{@w{__VA_OPT__}} is also available in GNU C and GNU C++.
+
+Historically, GNU CPP has also had another extension to handle the
+trailing comma: the @samp{##} token paste operator has a special
+meaning when placed between a comma and a variable argument. Despite
+the introduction of @code{@w{__VA_OPT__}}, this extension remains
+supported in GNU CPP, for backward compatibility. If you write
@smallexample
#define eprintf(format, @dots{}) fprintf (stderr, format, ##__VA_ARGS__)
@@ -1730,6 +1753,9 @@ of macro. It may also be forbidden in open text; the standard is
ambiguous. We recommend you avoid using it except for its defined
purpose.
+Likewise, C++ forbids @code{@w{__VA_OPT__}} anywhere outside the
+replacement list of a variadic macro.
+
Variadic macros became a standard part of the C language with C99.
GNU CPP previously supported them
with a named variable argument
diff --git a/gcc/doc/extend.texi b/gcc/doc/extend.texi
index 8aa443f87fb..711264c132a 100644
--- a/gcc/doc/extend.texi
+++ b/gcc/doc/extend.texi
@@ -5969,7 +5969,7 @@ The @code{deprecated} attribute can also be used for functions and
types (@pxref{Common Function Attributes},
@pxref{Common Type Attributes}).
-@item nonstring (@var{nonstring})
+@item nonstring
@cindex @code{nonstring} variable attribute
The @code{nonstring} variable attribute specifies that an object or member
declaration with type array of @code{char} or pointer to @code{char} is
@@ -11684,6 +11684,63 @@ future revisions.
@end deftypefn
+@deftypefn {Built-in Function} @var{type} __builtin_tgmath (@var{functions}, @var{arguments})
+
+The built-in function @code{__builtin_tgmath}, available only for C
+and Objective-C, calls a function determined according to the rules of
+@code{<tgmath.h>} macros. It is intended to be used in
+implementations of that header, so that expansions of macros from that
+header only expand each of their arguments once, to avoid problems
+when calls to such macros are nested inside the arguments of other
+calls to such macros; in addition, it results in better diagnostics
+for invalid calls to @code{<tgmath.h>} macros than implementations
+using other GNU C language features. For example, the @code{pow}
+type-generic macro might be defined as:
+
+@smallexample
+#define pow(a, b) __builtin_tgmath (powf, pow, powl, \
+ cpowf, cpow, cpowl, a, b)
+@end smallexample
+
+The arguments to @code{__builtin_tgmath} are at least two pointers to
+functions, followed by the arguments to the type-generic macro (which
+will be passed as arguments to the selected function). All the
+pointers to functions must be pointers to prototyped functions, none
+of which may have variable arguments, and all of which must have the
+same number of parameters; the number of parameters of the first
+function determines how many arguments to @code{__builtin_tgmath} are
+interpreted as function pointers, and how many as the arguments to the
+called function.
+
+The types of the specified functions must all be different, but
+related to each other in the same way as a set of functions that may
+be selected between by a macro in @code{<tgmath.h>}. This means that
+the functions are parameterized by a floating-point type @var{t},
+different for each such function. The function return types may all
+be the same type, or they may be @var{t} for each function, or they
+may be the real type corresponding to @var{t} for each function (if
+some of the types @var{t} are complex). Likewise, for each parameter
+position, the type of the parameter in that position may always be the
+same type, or may be @var{t} for each function (this case must apply
+for at least one parameter position), or may be the real type
+corresponding to @var{t} for each function.
+
+The standard rules for @code{<tgmath.h>} macros are used to find a
+common type @var{u} from the types of the arguments for parameters
+whose types vary between the functions; complex integer types (a GNU
+extension) are treated like @code{_Complex double} for this purpose.
+If the function return types vary, or are all the same integer type,
+the function called is the one for which @var{t} is @var{u}, and it is
+an error if there is no such function. If the function return types
+are all the same floating-point type, the type-generic macro is taken
+to be one of those from TS 18661 that rounds the result to a narrower
+type; if there is a function for which @var{t} is @var{u}, it is
+called, and otherwise the first function, if any, for which @var{t}
+has at least the range and precision of @var{u} is called, and it is
+an error if there is no such function.
+
+@end deftypefn
+
@deftypefn {Built-in Function} @var{type} __builtin_complex (@var{real}, @var{imag})
The built-in function @code{__builtin_complex} is provided for use in
@@ -15547,36 +15604,19 @@ Additional built-in functions are available for the 64-bit PowerPC
family of processors, for efficient use of 128-bit floating point
(@code{__float128}) values.
-The following floating-point built-in functions are available with
-@code{-mfloat128} and Altivec support. All of them implement the
-function that is part of the name.
+Previous versions of GCC supported some 'q' builtins for IEEE 128-bit
+floating point. These functions are now mapped into the equivalent
+'f128' builtin functions.
@smallexample
-__float128 __builtin_fabsq (__float128)
-__float128 __builtin_copysignq (__float128, __float128)
+__builtin_fabsq is mapped into __builtin_fabsf128
+__builtin_copysignq is mapped into __builtin_copysignf128
+__builtin_infq is mapped into __builtin_inff128
+__builtin_huge_valq is mapped into __builtin_huge_valf128
+__builtin_nanq is mapped into __builtin_nanf128
+__builtin_nansq is mapped into __builtin_nansf128
@end smallexample
-The following built-in functions are available with @code{-mfloat128}
-and Altivec support.
-
-@table @code
-@item __float128 __builtin_infq (void)
-Similar to @code{__builtin_inf}, except the return type is @code{__float128}.
-@findex __builtin_infq
-
-@item __float128 __builtin_huge_valq (void)
-Similar to @code{__builtin_huge_val}, except the return type is @code{__float128}.
-@findex __builtin_huge_valq
-
-@item __float128 __builtin_nanq (void)
-Similar to @code{__builtin_nan}, except the return type is @code{__float128}.
-@findex __builtin_nanq
-
-@item __float128 __builtin_nansq (void)
-Similar to @code{__builtin_nans}, except the return type is @code{__float128}.
-@findex __builtin_nansq
-@end table
-
The following built-in functions are available on Linux 64-bit systems
that use the ISA 3.0 instruction set.
@@ -15892,6 +15932,51 @@ signed int vec_cntlz_lsbb (vector unsigned char);
signed int vec_cnttz_lsbb (vector signed char);
signed int vec_cnttz_lsbb (vector unsigned char);
+unsigned int vec_first_match_index (vector signed char, vector signed char);
+unsigned int vec_first_match_index (vector unsigned char,
+ vector unsigned char);
+unsigned int vec_first_match_index (vector signed int, vector signed int);
+unsigned int vec_first_match_index (vector unsigned int, vector unsigned int);
+unsigned int vec_first_match_index (vector signed short, vector signed short);
+unsigned int vec_first_match_index (vector unsigned short,
+ vector unsigned short);
+unsigned int vec_first_match_or_eos_index (vector signed char,
+ vector signed char);
+unsigned int vec_first_match_or_eos_index (vector unsigned char,
+ vector unsigned char);
+unsigned int vec_first_match_or_eos_index (vector signed int,
+ vector signed int);
+unsigned int vec_first_match_or_eos_index (vector unsigned int,
+ vector unsigned int);
+unsigned int vec_first_match_or_eos_index (vector signed short,
+ vector signed short);
+unsigned int vec_first_match_or_eos_index (vector unsigned short,
+ vector unsigned short);
+unsigned int vec_first_mismatch_index (vector signed char,
+ vector signed char);
+unsigned int vec_first_mismatch_index (vector unsigned char,
+ vector unsigned char);
+unsigned int vec_first_mismatch_index (vector signed int,
+ vector signed int);
+unsigned int vec_first_mismatch_index (vector unsigned int,
+ vector unsigned int);
+unsigned int vec_first_mismatch_index (vector signed short,
+ vector signed short);
+unsigned int vec_first_mismatch_index (vector unsigned short,
+ vector unsigned short);
+unsigned int vec_first_mismatch_or_eos_index (vector signed char,
+ vector signed char);
+unsigned int vec_first_mismatch_or_eos_index (vector unsigned char,
+ vector unsigned char);
+unsigned int vec_first_mismatch_or_eos_index (vector signed int,
+ vector signed int);
+unsigned int vec_first_mismatch_or_eos_index (vector unsigned int,
+ vector unsigned int);
+unsigned int vec_first_mismatch_or_eos_index (vector signed short,
+ vector signed short);
+unsigned int vec_first_mismatch_or_eos_index (vector unsigned short,
+ vector unsigned short);
+
vector unsigned short vec_pack_to_short_fp32 (vector float, vector float);
vector signed char vec_xl_be (signed long long, signed char *);
diff --git a/gcc/doc/gcov.texi b/gcc/doc/gcov.texi
index 5c4ba8a51a7..8bf422e58d8 100644
--- a/gcc/doc/gcov.texi
+++ b/gcc/doc/gcov.texi
@@ -193,7 +193,7 @@ Write counts in human readable format (like 24k).
@smallexample
file:@var{source_file_name}
-function:@var{line_number},@var{execution_count},@var{function_name}
+function:@var{start_line_number},@var{end_line_number},@var{execution_count},@var{function_name}
lcount:@var{line number},@var{execution_count},@var{has_unexecuted_block}
branch:@var{line_number},@var{branch_coverage_type}
@@ -201,24 +201,55 @@ Where the @var{branch_coverage_type} is
notexec (Branch not executed)
taken (Branch executed and taken)
nottaken (Branch executed, but not taken)
+@end smallexample
There can be multiple @var{file} entries in an intermediate gcov
file. All entries following a @var{file} pertain to that source file
-until the next @var{file} entry.
-@end smallexample
+until the next @var{file} entry. If there are multiple functions that
+start on a single line, then corresponding lcount is repeated multiple
+times.
Here is a sample when @option{-i} is used in conjunction with @option{-b} option:
@smallexample
-file:array.cc
-function:11,1,_Z3sumRKSt6vectorIPiSaIS0_EE
-function:22,1,main
-lcount:11,1,0
-lcount:12,1,0
-lcount:14,1,0
-branch:14,taken
-lcount:26,1,0
-branch:28,nottaken
+file:tmp.cpp
+function:7,7,0,_ZN3FooIcEC2Ev
+function:7,7,1,_ZN3FooIiEC2Ev
+function:8,8,0,_ZN3FooIcE3incEv
+function:8,8,2,_ZN3FooIiE3incEv
+function:18,37,1,main
+lcount:7,0,1
+lcount:7,1,0
+lcount:8,0,1
+lcount:8,2,0
+lcount:18,1,0
+lcount:21,1,0
+branch:21,taken
+branch:21,nottaken
+lcount:23,1,0
+branch:23,taken
+branch:23,nottaken
+lcount:24,1,0
+branch:24,taken
+branch:24,nottaken
+lcount:25,1,0
+lcount:27,11,0
+branch:27,taken
+branch:27,taken
+lcount:28,10,0
+lcount:30,1,1
+branch:30,nottaken
+branch:30,taken
+lcount:32,1,0
+branch:32,nottaken
+branch:32,taken
+lcount:33,0,1
+branch:33,notexec
+branch:33,notexec
+lcount:35,1,0
+branch:35,taken
+branch:35,nottaken
+lcount:36,1,0
@end smallexample
@item -k
@@ -343,7 +374,7 @@ marked @samp{$$$$$} or @samp{%%%%%}, depending on whether a basic block
is reachable via non-exceptional or exceptional paths.
Executed basic blocks having a statement with zero @var{execution_count}
end with @samp{*} character and are colored with magenta color with @option{-k}
-option.
+option. The functionality is not supported in Ada.
Note that GCC can completely remove the bodies of functions that are
not needed -- for instance if they are inlined everywhere. Such functions
@@ -391,79 +422,158 @@ source file compiled with @option{-fprofile-arcs}, an accompanying
Running @command{gcov} with your program's source file names as arguments
will now produce a listing of the code along with frequency of execution
-for each line. For example, if your program is called @file{tmp.c}, this
+for each line. For example, if your program is called @file{tmp.cpp}, this
is what you see when you use the basic @command{gcov} facility:
@smallexample
-$ gcc -fprofile-arcs -ftest-coverage tmp.c
+$ g++ -fprofile-arcs -ftest-coverage tmp.cpp
$ a.out
-$ gcov tmp.c
-File 'tmp.c'
-Lines executed:90.00% of 10
-Creating 'tmp.c.gcov'
+$ gcov tmp.cpp -m
+File 'tmp.cpp'
+Lines executed:92.86% of 14
+Creating 'tmp.cpp.gcov'
@end smallexample
-The file @file{tmp.c.gcov} contains output from @command{gcov}.
+The file @file{tmp.cpp.gcov} contains output from @command{gcov}.
Here is a sample:
@smallexample
- -: 0:Source:tmp.c
+ -: 0:Source:tmp.cpp
-: 0:Graph:tmp.gcno
-: 0:Data:tmp.gcda
-: 0:Runs:1
-: 0:Programs:1
-: 1:#include <stdio.h>
-: 2:
- -: 3:int main (void)
- 1: 4:@{
- 1: 5: int i, total;
- -: 6:
- 1: 7: total = 0;
- -: 8:
- 11: 9: for (i = 0; i < 10; i++)
- 10: 10: total += i;
- -: 11:
- 1: 12: if (total != 45)
- #####: 13: printf ("Failure\n");
- -: 14: else
- 1: 15: printf ("Success\n");
- 1: 16: return 0;
- -: 17:@}
+ -: 3:template<class T>
+ -: 4:class Foo
+ -: 5:@{
+ -: 6: public:
+ 1*: 7: Foo(): b (1000) @{@}
+------------------
+Foo<char>::Foo():
+ #####: 7: Foo(): b (1000) @{@}
+------------------
+Foo<int>::Foo():
+ 1: 7: Foo(): b (1000) @{@}
+------------------
+ 2*: 8: void inc () @{ b++; @}
+------------------
+Foo<char>::inc():
+ #####: 8: void inc () @{ b++; @}
+------------------
+Foo<int>::inc():
+ 2: 8: void inc () @{ b++; @}
+------------------
+ -: 9:
+ -: 10: private:
+ -: 11: int b;
+ -: 12:@};
+ -: 13:
+ -: 14:template class Foo<int>;
+ -: 15:template class Foo<char>;
+ -: 16:
+ -: 17:int
+ 1: 18:main (void)
+ -: 19:@{
+ -: 20: int i, total;
+ 1: 21: Foo<int> counter;
+ -: 22:
+ 1: 23: counter.inc();
+ 1: 24: counter.inc();
+ 1: 25: total = 0;
+ -: 26:
+ 11: 27: for (i = 0; i < 10; i++)
+ 10: 28: total += i;
+ -: 29:
+ 1*: 30: int v = total > 100 ? 1 : 2;
+ -: 31:
+ 1: 32: if (total != 45)
+ #####: 33: printf ("Failure\n");
+ -: 34: else
+ 1: 35: printf ("Success\n");
+ 1: 36: return 0;
+ -: 37:@}
@end smallexample
+Note that line 7 is shown in the report multiple times. First occurrence
+presents total number of execution of the line and the next two belong
+to instances of class Foo constructors. As you can also see, line 30 contains
+some unexecuted basic blocks and thus execution count has asterisk symbol.
+
When you use the @option{-a} option, you will get individual block
counts, and the output looks like this:
@smallexample
- -: 0:Source:tmp.c
+ -: 0:Source:tmp.cpp
-: 0:Graph:tmp.gcno
-: 0:Data:tmp.gcda
-: 0:Runs:1
-: 0:Programs:1
-: 1:#include <stdio.h>
-: 2:
- -: 3:int main (void)
- 1: 4:@{
- 1: 4-block 0
- 1: 5: int i, total;
- -: 6:
- 1: 7: total = 0;
- -: 8:
- 11: 9: for (i = 0; i < 10; i++)
- 11: 9-block 0
- 10: 10: total += i;
- 10: 10-block 0
- -: 11:
- 1: 12: if (total != 45)
- 1: 12-block 0
- #####: 13: printf ("Failure\n");
- $$$$$: 13-block 0
- -: 14: else
- 1: 15: printf ("Success\n");
- 1: 15-block 0
- 1: 16: return 0;
- 1: 16-block 0
- -: 17:@}
+ -: 3:template<class T>
+ -: 4:class Foo
+ -: 5:@{
+ -: 6: public:
+ 1*: 7: Foo(): b (1000) @{@}
+------------------
+Foo<char>::Foo():
+ #####: 7: Foo(): b (1000) @{@}
+------------------
+Foo<int>::Foo():
+ 1: 7: Foo(): b (1000) @{@}
+------------------
+ 2*: 8: void inc () @{ b++; @}
+------------------
+Foo<char>::inc():
+ #####: 8: void inc () @{ b++; @}
+------------------
+Foo<int>::inc():
+ 2: 8: void inc () @{ b++; @}
+------------------
+ -: 9:
+ -: 10: private:
+ -: 11: int b;
+ -: 12:@};
+ -: 13:
+ -: 14:template class Foo<int>;
+ -: 15:template class Foo<char>;
+ -: 16:
+ -: 17:int
+ 1: 18:main (void)
+ -: 19:@{
+ -: 20: int i, total;
+ 1: 21: Foo<int> counter;
+ 1: 21-block 0
+ -: 22:
+ 1: 23: counter.inc();
+ 1: 23-block 0
+ 1: 24: counter.inc();
+ 1: 24-block 0
+ 1: 25: total = 0;
+ -: 26:
+ 11: 27: for (i = 0; i < 10; i++)
+ 1: 27-block 0
+ 11: 27-block 1
+ 10: 28: total += i;
+ 10: 28-block 0
+ -: 29:
+ 1*: 30: int v = total > 100 ? 1 : 2;
+ 1: 30-block 0
+ %%%%%: 30-block 1
+ 1: 30-block 2
+ -: 31:
+ 1: 32: if (total != 45)
+ 1: 32-block 0
+ #####: 33: printf ("Failure\n");
+ %%%%%: 33-block 0
+ -: 34: else
+ 1: 35: printf ("Success\n");
+ 1: 35-block 0
+ 1: 36: return 0;
+ 1: 36-block 0
+ -: 37:@}
@end smallexample
In this mode, each basic block is only shown on one line -- the last
@@ -477,53 +587,94 @@ block, the branch and call counts of the block will be shown, if the
Because of the way GCC instruments calls, a call count can be shown
after a line with no individual blocks.
-As you can see, line 13 contains a basic block that was not executed.
+As you can see, line 33 contains a basic block that was not executed.
@need 450
When you use the @option{-b} option, your output looks like this:
@smallexample
-$ gcov -b tmp.c
-File 'tmp.c'
-Lines executed:90.00% of 10
-Branches executed:80.00% of 5
-Taken at least once:80.00% of 5
-Calls executed:50.00% of 2
-Creating 'tmp.c.gcov'
-@end smallexample
-
-Here is a sample of a resulting @file{tmp.c.gcov} file:
-
-@smallexample
- -: 0:Source:tmp.c
+ -: 0:Source:tmp.cpp
-: 0:Graph:tmp.gcno
-: 0:Data:tmp.gcda
-: 0:Runs:1
-: 0:Programs:1
-: 1:#include <stdio.h>
-: 2:
- -: 3:int main (void)
-function main called 1 returned 1 blocks executed 75%
- 1: 4:@{
- 1: 5: int i, total;
- -: 6:
- 1: 7: total = 0;
- -: 8:
- 11: 9: for (i = 0; i < 10; i++)
+ -: 3:template<class T>
+ -: 4:class Foo
+ -: 5:@{
+ -: 6: public:
+ 1*: 7: Foo(): b (1000) @{@}
+------------------
+Foo<char>::Foo():
+function Foo<char>::Foo() called 0 returned 0% blocks executed 0%
+ #####: 7: Foo(): b (1000) @{@}
+------------------
+Foo<int>::Foo():
+function Foo<int>::Foo() called 1 returned 100% blocks executed 100%
+ 1: 7: Foo(): b (1000) @{@}
+------------------
+ 2*: 8: void inc () @{ b++; @}
+------------------
+Foo<char>::inc():
+function Foo<char>::inc() called 0 returned 0% blocks executed 0%
+ #####: 8: void inc () @{ b++; @}
+------------------
+Foo<int>::inc():
+function Foo<int>::inc() called 2 returned 100% blocks executed 100%
+ 2: 8: void inc () @{ b++; @}
+------------------
+ -: 9:
+ -: 10: private:
+ -: 11: int b;
+ -: 12:@};
+ -: 13:
+ -: 14:template class Foo<int>;
+ -: 15:template class Foo<char>;
+ -: 16:
+ -: 17:int
+function main called 1 returned 100% blocks executed 81%
+ 1: 18:main (void)
+ -: 19:@{
+ -: 20: int i, total;
+ 1: 21: Foo<int> counter;
+call 0 returned 100%
+branch 1 taken 100% (fallthrough)
+branch 2 taken 0% (throw)
+ -: 22:
+ 1: 23: counter.inc();
+call 0 returned 100%
+branch 1 taken 100% (fallthrough)
+branch 2 taken 0% (throw)
+ 1: 24: counter.inc();
+call 0 returned 100%
+branch 1 taken 100% (fallthrough)
+branch 2 taken 0% (throw)
+ 1: 25: total = 0;
+ -: 26:
+ 11: 27: for (i = 0; i < 10; i++)
branch 0 taken 91% (fallthrough)
branch 1 taken 9%
- 10: 10: total += i;
- -: 11:
- 1: 12: if (total != 45)
+ 10: 28: total += i;
+ -: 29:
+ 1*: 30: int v = total > 100 ? 1 : 2;
+branch 0 taken 0% (fallthrough)
+branch 1 taken 100%
+ -: 31:
+ 1: 32: if (total != 45)
branch 0 taken 0% (fallthrough)
branch 1 taken 100%
- #####: 13: printf ("Failure\n");
+ #####: 33: printf ("Failure\n");
call 0 never executed
- -: 14: else
- 1: 15: printf ("Success\n");
-call 0 called 1 returned 100%
- 1: 16: return 0;
- -: 17:@}
+branch 1 never executed
+branch 2 never executed
+ -: 34: else
+ 1: 35: printf ("Success\n");
+call 0 returned 100%
+branch 1 taken 100% (fallthrough)
+branch 2 taken 0% (throw)
+ 1: 36: return 0;
+ -: 37:@}
@end smallexample
For each function, a line is printed showing how many times the function
diff --git a/gcc/doc/generic.texi b/gcc/doc/generic.texi
index bb42269ef53..ad26e167cc4 100644
--- a/gcc/doc/generic.texi
+++ b/gcc/doc/generic.texi
@@ -1740,6 +1740,13 @@ a value from @code{enum annot_expr_kind}.
@tindex VEC_PACK_FIX_TRUNC_EXPR
@tindex VEC_COND_EXPR
@tindex SAD_EXPR
+@tindex REDUC_MAX_EXPR
+@tindex REDUC_MIN_EXPR
+@tindex REDUC_PLUS_EXPR
+@tindex REDUC_AND_EXPR
+@tindex REDUC_IOR_EXPR
+@tindex REDUC_XOR_EXPR
+@tindex FOLD_LEFT_PLUS_EXPR
@table @code
@item VEC_DUPLICATE_EXPR
@@ -1841,6 +1848,26 @@ operand must be at lease twice of the size of the vector element of the
first and second one. The SAD is calculated between the first and second
operands, added to the third operand, and returned.
+@item REDUC_MAX_EXPR
+@itemx REDUC_MIN_EXPR
+@itemx REDUC_PLUS_EXPR
+@itemx REDUC_AND_EXPR
+@itemx REDUC_IOR_EXPR
+@itemx REDUC_XOR_EXPR
+These nodes represent operations that take a vector input and repeatedly
+apply a binary operator on pairs of elements until only one scalar remains.
+For example, @samp{REDUC_PLUS_EXPR <@var{x}>} returns the sum of
+the elements in @var{x} and @samp{REDUC_MAX_EXPR <@var{x}>} returns
+the maximum element in @var{x}. The associativity of the operation
+is unspecified; for example, @samp{REDUC_PLUS_EXPR <@var{x}>} could
+sum floating-point @var{x} in forward order, in reverse order,
+using a tree, or in some other way.
+
+@item FOLD_LEFT_PLUS_EXPR
+This node takes two arguments: a scalar of type @var{t} and a vector
+of @var{t}s. It successively adds each element of the vector to the
+scalar and returns the result. The operation is strictly in-order:
+there is no reassociation.
@end table
diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
index 4e96a3942c2..6e6793ebe4f 100644
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -314,7 +314,7 @@ Objective-C and Objective-C++ Dialects}.
-Wsizeof-pointer-memaccess -Wsizeof-array-argument @gol
-Wstack-protector -Wstack-usage=@var{len} -Wstrict-aliasing @gol
-Wstrict-aliasing=n -Wstrict-overflow -Wstrict-overflow=@var{n} @gol
--Wstringop-overflow=@var{n} @gol
+-Wstringop-overflow=@var{n} -Wstringop-truncation @gol
-Wsuggest-attribute=@r{[}pure@r{|}const@r{|}noreturn@r{|}format@r{|}malloc@r{]} @gol
-Wsuggest-final-types @gol -Wsuggest-final-methods -Wsuggest-override @gol
-Wmissing-format-attribute -Wsubobject-linkage @gol
@@ -1204,8 +1204,8 @@ See RS/6000 and PowerPC Options.
-mprefetchwt1 -mclflushopt -mxsavec -mxsaves @gol
-msse4a -m3dnow -m3dnowa -mpopcnt -mabm -mbmi -mtbm -mfma4 -mxop @gol
-mlzcnt -mbmi2 -mfxsr -mxsave -mxsaveopt -mrtm -mlwp -mmpx @gol
--mmwaitx -mclzero -mpku -mthreads @gol
--mcet -mibt -mshstk @gol
+-mmwaitx -mclzero -mpku -mthreads -mgfni @gol
+-mcet -mibt -mshstk -mforce-indirect-call -mavx512vbmi2 @gol
-mms-bitfields -mno-align-stringops -minline-all-stringops @gol
-minline-stringops-dynamically -mstringop-strategy=@var{alg} @gol
-mmemcpy-strategy=@var{strategy} -mmemset-strategy=@var{strategy} @gol
@@ -5214,6 +5214,55 @@ whether to issue a warning. Similarly to @option{-Wstringop-overflow=3} this
setting of the option may result in warnings for benign code.
@end table
+@item -Wstringop-truncation
+@opindex Wstringop-truncation
+@opindex Wno-stringop-truncation
+Warn for calls to bounded string manipulation functions such as @code{strncat},
+@code{strncpy}, and @code{stpncpy} that may either truncate the copied string
+or leave the destination unchanged.
+
+In the following example, the call to @code{strncat} specifies a bound that
+is less than the length of the source string. As a result, the copy of
+the source will be truncated and so the call is diagnosed. To avoid the
+warning use @code{bufsize - strlen (buf) - 1)} as the bound.
+
+@smallexample
+void append (char *buf, size_t bufsize)
+@{
+ strncat (buf, ".txt", 3);
+@}
+@end smallexample
+
+As another example, the following call to @code{strncpy} results in copying
+to @code{d} just the characters preceding the terminating NUL, without
+appending the NUL to the end. Assuming the result of @code{strncpy} is
+necessarily a NUL-terminated string is a common mistake, and so the call
+is diagnosed. To avoid the warning when the result is not expected to be
+NUL-terminated, call @code{memcpy} instead.
+
+@smallexample
+void copy (char *d, const char *s)
+@{
+ strncpy (d, s, strlen (s));
+@}
+@end smallexample
+
+In the following example, the call to @code{strncpy} specifies the size
+of the destination buffer as the bound. If the length of the source
+string is equal to or greater than this size the result of the copy will
+not be NUL-terminated. Therefore, the call is also diagnosed. To avoid
+the warning, specify @code{sizeof buf - 1} as the bound and set the last
+element of the buffer to @code{NUL}.
+
+@smallexample
+void copy (const char *s)
+@{
+ char buf[80];
+ strncpy (buf, s, sizeof buf);
+ @dots{}
+@}
+@end smallexample
+
@item -Wsuggest-attribute=@r{[}pure@r{|}const@r{|}noreturn@r{|}format@r{|}cold@r{|}malloc@r{]}
@opindex Wsuggest-attribute=
@opindex Wno-suggest-attribute=
@@ -6230,11 +6279,26 @@ not an array, but a pointer. This warning is enabled by @option{-Wall}.
@opindex Wsizeof-pointer-memaccess
@opindex Wno-sizeof-pointer-memaccess
Warn for suspicious length parameters to certain string and memory built-in
-functions if the argument uses @code{sizeof}. This warning warns e.g.@:
-about @code{memset (ptr, 0, sizeof (ptr));} if @code{ptr} is not an array,
-but a pointer, and suggests a possible fix, or about
-@code{memcpy (&foo, ptr, sizeof (&foo));}. This warning is enabled by
-@option{-Wall}.
+functions if the argument uses @code{sizeof}. This warning triggers for
+example for @code{memset (ptr, 0, sizeof (ptr));} if @code{ptr} is not
+an array, but a pointer, and suggests a possible fix, or about
+@code{memcpy (&foo, ptr, sizeof (&foo));}. @option{-Wsizeof-pointer-memaccess}
+also warns about calls to bounded string copy functions like @code{strncat}
+or @code{strncpy} that specify as the bound a @code{sizeof} expression of
+the source array. For example, in the following function the call to
+@code{strncat} specifies the size of the source string as the bound. That
+is almost certainly a mistake and so the call is diagnosed.
+@smallexample
+void make_file (const char *name)
+@{
+ char path[PATH_MAX];
+ strncpy (path, name, sizeof path - 1);
+ strncat (path, ".text", sizeof ".text");
+ @dots{}
+@}
+@end smallexample
+
+The @option{-Wsizeof-pointer-memaccess} option is enabled by @option{-Wall}.
@item -Wsizeof-array-argument
@opindex Wsizeof-array-argument
@@ -15493,7 +15557,7 @@ Permissible names are:
@samp{armv6}, @samp{armv6j}, @samp{armv6k}, @samp{armv6kz}, @samp{armv6t2},
@samp{armv6z}, @samp{armv6zk},
@samp{armv7}, @samp{armv7-a}, @samp{armv7ve},
-@samp{armv8-a}, @samp{armv8.1-a}, @samp{armv8.2-a},
+@samp{armv8-a}, @samp{armv8.1-a}, @samp{armv8.2-a}, @samp{armv8.3-a},
@samp{armv7-r},
@samp{armv8-r},
@samp{armv6-m}, @samp{armv6s-m},
@@ -25968,12 +26032,18 @@ preferred alignment to @option{-mpreferred-stack-boundary=2}.
@need 200
@itemx -mcet
@opindex mcet
+@need 200
+@itemx -mavx512vbmi2
+@opindex mavx512vbmi2
+@need 200
+@itemx -mgfni
+@opindex mgfni
These switches enable the use of instructions in the MMX, SSE,
SSE2, SSE3, SSSE3, SSE4.1, AVX, AVX2, AVX512F, AVX512PF, AVX512ER, AVX512CD,
SHA, AES, PCLMUL, FSGSBASE, RDRND, F16C, FMA, SSE4A, FMA4, XOP, LWP, ABM,
-AVX512VL, AVX512BW, AVX512DQ, AVX512IFMA AVX512VBMI, BMI, BMI2, FXSR,
-XSAVE, XSAVEOPT, LZCNT, RTM, MPX, MWAITX, PKU, IBT, SHSTK,
-3DNow!@: or enhanced 3DNow!@: extended instruction sets. Each has a
+AVX512VL, AVX512BW, AVX512DQ, AVX512IFMA, AVX512VBMI, BMI, BMI2,
+FXSR, XSAVE, XSAVEOPT, LZCNT, RTM, MPX, MWAITX, PKU, IBT, SHSTK, AVX512VBMI2,
+GFNI, 3DNow!@: or enhanced 3DNow!@: extended instruction sets. Each has a
corresponding @option{-mno-} option to disable use of these instructions.
These extensions are also available as built-in functions: see
@@ -26195,6 +26265,12 @@ You can control this behavior for specific functions by
using the function attributes @code{ms_abi} and @code{sysv_abi}.
@xref{Function Attributes}.
+@item -mforce-indirect-call
+@opindex mforce-indirect-call
+Force all calls to functions to be indirect. This is useful
+when using Intel Processor Trace where it generates more precise timing
+information for function calls.
+
@item -mcall-ms2sysv-xlogues
@opindex mcall-ms2sysv-xlogues
@opindex mno-call-ms2sysv-xlogues
diff --git a/gcc/doc/md.texi b/gcc/doc/md.texi
index e4fed29a95b..02248364359 100644
--- a/gcc/doc/md.texi
+++ b/gcc/doc/md.texi
@@ -4855,6 +4855,23 @@ loads for vectors of mode @var{n}.
This pattern is not allowed to @code{FAIL}.
+@cindex @code{vec_mask_load_lanes@var{m}@var{n}} instruction pattern
+@item @samp{vec_mask_load_lanes@var{m}@var{n}}
+Like @samp{vec_load_lanes@var{m}@var{n}}, but takes an additional
+mask operand (operand 2) that specifies which elements of the destination
+vectors should be loaded. Other elements of the destination
+vectors are set to zero. The operation is equivalent to:
+
+@smallexample
+int c = GET_MODE_SIZE (@var{m}) / GET_MODE_SIZE (@var{n});
+for (j = 0; j < GET_MODE_NUNITS (@var{n}); j++)
+ if (operand2[j])
+ for (i = 0; i < c; i++)
+ operand0[i][j] = operand1[j * c + i];
+@end smallexample
+
+This pattern is not allowed to @code{FAIL}.
+
@cindex @code{vec_store_lanes@var{m}@var{n}} instruction pattern
@item @samp{vec_store_lanes@var{m}@var{n}}
Equivalent to @samp{vec_load_lanes@var{m}@var{n}}, with the memory
@@ -4872,15 +4889,80 @@ for a memory operand 0 and register operand 1.
This pattern is not allowed to @code{FAIL}.
-@cindex @code{vec_gather_load@var{m}} instruction pattern
-@item @samp{vec_gather_load@var{m}}
-Load several memory locations into a vector. Operand 2 contains a vector of offsets to be loaded consecutively from the base address in Operand 0. Operands 0 and 1 are vectors with mode m. The operation is equivalent to:
+@cindex @code{vec_mask_store_lanes@var{m}@var{n}} instruction pattern
+@item @samp{vec_mask_store_lanes@var{m}@var{n}}
+Like @samp{vec_store_lanes@var{m}@var{n}}, but takes an additional
+mask operand (operand 2) that specifies which elements of the source
+vectors should be stored. The operation is equivalent to:
@smallexample
- for (int i=0; i<count; i++)
- Operand0[i] = Operand1[Operand2[i]];
+int c = GET_MODE_SIZE (@var{m}) / GET_MODE_SIZE (@var{n});
+for (j = 0; j < GET_MODE_NUNITS (@var{n}); j++)
+ if (operand2[j])
+ for (i = 0; i < c; i++)
+ operand0[j * c + i] = operand1[i][j];
@end smallexample
+This pattern is not allowed to @code{FAIL}.
+
+@cindex @code{gather_load@var{m}} instruction pattern
+@item @samp{gather_load@var{m}}
+Load several separate memory locations into a vector of mode @var{m}.
+Operand 1 is a scalar base address and operand 2 is a vector of
+offsets from that base. Operand 0 is a destination vector with the
+same number of elements as the offset. For each element index @var{i}:
+
+@itemize @bullet
+@item
+extend the offset element @var{i} to address width, using zero
+extension if operand 3 is 1 and sign extension if operand 3 is zero;
+@item
+multiply the extended offset by operand 4;
+@item
+add the result to the base; and
+@item
+load the value at that address into element @var{i} of operand 0.
+@end itemize
+
+The value of operand 3 does not matter if the offsets are already
+address width.
+
+@cindex @code{mask_gather_load@var{m}} instruction pattern
+@item @samp{mask_gather_load@var{m}}
+Like @samp{gather_load@var{m}}, but takes an extra mask operand as
+operand 5. Bit @var{i} of the mask is set if element @var{i}
+of the result should be loaded from memory and clear if element @var{i}
+of the result should be set to zero.
+
+@cindex @code{scatter_store@var{m}} instruction pattern
+@item @samp{scatter_store@var{m}}
+Store a vector of mode @var{m} into several distinct memory locations.
+Operand 0 is a scalar base address and operand 1 is a vector of offsets
+from that base. Operand 4 is the vector of values that should be stored,
+which has the same number of elements as the offset. For each element
+index @var{i}:
+
+@itemize @bullet
+@item
+extend the offset element @var{i} to address width, using zero
+extension if operand 2 is 1 and sign extension if operand 2 is zero;
+@item
+multiply the extended offset by operand 3;
+@item
+add the result to the base; and
+@item
+store element @var{i} of operand 4 to that address.
+@end itemize
+
+The value of operand 2 does not matter if the offsets are already
+address width.
+
+@cindex @code{mask_scatter_store@var{m}} instruction pattern
+@item @samp{mask_scatter_store@var{m}}
+Like @samp{scatter_store@var{m}}, but takes an extra mask operand as
+operand 5. Bit @var{i} of the mask is set if element @var{i}
+of the result should be stored to memory.
+
@cindex @code{vec_set@var{m}} instruction pattern
@item @samp{vec_set@var{m}}
Set given field in the vector value. Operand 0 is the vector to modify,
@@ -4927,6 +5009,19 @@ rounding behavior for @var{i} > 1.
This pattern is not allowed to @code{FAIL}.
+@cindex @code{while_ult@var{m}@var{n}} instruction pattern
+@item @code{while_ult@var{m}@var{n}}
+Set operand 0 to a mask that is true while incrementing operand 1
+gives a value that is less than operand 2. Operand 0 has mode @var{n}
+and operands 1 and 2 are scalar integers of mode @var{m}.
+The operation is equivalent to:
+
+@smallexample
+operand0[0] = operand1 < operand2;
+for (i = 1; i < GET_MODE_NUNITS (@var{n}); i++)
+ operand0[i] = operand0[i - 1] && (operand1 + i < operand2);
+@end smallexample
+
@cindex @code{vec_cmp@var{m}@var{n}} instruction pattern
@item @samp{vec_cmp@var{m}@var{n}}
Output a vector comparison. Operand 0 of mode @var{n} is the destination for
@@ -5035,24 +5130,21 @@ This pattern is provided mainly for targets with variable-length vectors.
Targets with fixed-length vectors can instead handle any reverse-specific
optimizations in @samp{vec_perm_const@var{m}}.
-@cindex @code{vec_interleave_hi_@var{m}} instruction pattern
-@item @samp{vec_interleave_hi_@var{m}}
-Take the ``high'' halves of vector input operands 1 and 2 and interleave
-the elements, so that element @var{x} of operand 1 is followed by
+@cindex @code{vec_interleave_lo_@var{m}} instruction pattern
+@item @samp{vec_interleave_lo_@var{m}}
+Take the lowest-indexed halves of vector input operands 1 and 2 and
+interleave the elements, so that element @var{x} of operand 1 is followed by
element @var{x} of operand 2. Store the result in vector output operand 0.
All three operands have mode @var{m}.
-Note that GCC nomenclature follows big-endian conventions, so the
-high half is the half that comes first in memory.
-
This pattern is provided mainly for targets with variable-length
vectors. Targets with fixed-length vectors can instead handle any
interleave-specific optimizations in @samp{vec_perm_const@var{m}}.
-@cindex @code{vec_interleave_lo_@var{m}} instruction pattern
-@item @samp{vec_interleave_lo_@var{m}}
-Like @samp{vec_interleave_hi_@var{m}}, but operate on the low halves
-instead of the high halves.
+@cindex @code{vec_interleave_hi_@var{m}} instruction pattern
+@item @samp{vec_interleave_hi_@var{m}}
+Like @samp{vec_interleave_lo_@var{m}}, but operate on the highest-indexed
+halves instead of the lowest-indexed halves.
@cindex @code{vec_extract_even_@var{m}} instruction pattern
@item @samp{vec_extract_even_@var{m}}
@@ -5234,6 +5326,31 @@ of a vector of mode @var{m}. Operand 1 is the vector input and operand 0
is the scalar result. The mode of the scalar result is the same as one
element of @var{m}.
+@cindex @code{extract_last_@var{m}} instruction pattern
+@item @code{extract_last_@var{m}}
+Find the last set bit in mask operand 1 and extract the associated element
+of vector operand 2. Store the result in scalar operand 0. Operand 2
+has vector mode @var{m} while operand 0 has the mode appropriate for one
+element of @var{m}. Operand 1 has the usual mask mode for vectors of mode
+@var{m}; see @code{TARGET_VECTORIZE_GET_MASK_MODE}.
+
+@cindex @code{fold_extract_last_@var{m}} instruction pattern
+@item @code{fold_extract_last_@var{m}}
+If any bits of mask operand 2 are set, find the last set bit, extract
+the associated element from vector operand 3, and store the result
+in operand 0. Store operand 1 in operand 0 otherwise. Operand 3
+has mode @var{m} and operands 0 and 1 have the mode appropriate for
+one element of @var{m}. Operand 2 has the usual mask mode for vectors
+of mode @var{m}; see @code{TARGET_VECTORIZE_GET_MASK_MODE}.
+
+@cindex @code{fold_left_plus_@var{m}} instruction pattern
+@item @code{fold_left_plus_@var{m}}
+Take scalar operand 1 and successively add each element from vector
+operand 2. Store the result in scalar operand 0. The vector has
+mode @var{m} and the scalars have the mode appropriate for one
+element of @var{m}. The operation is strictly in-order: there is
+no reassociation.
+
@cindex @code{sdot_prod@var{m}} instruction pattern
@item @samp{sdot_prod@var{m}}
@cindex @code{udot_prod@var{m}} instruction pattern
@@ -5267,9 +5384,9 @@ of a wider mode.)
@item @samp{vec_shl_insert_@var{m}}
Shift the elements in vector input operand 1 left one element (i.e.
away from element 0) and fill the vacated element 0 with the scalar
-in operand 2. Store the result in vector output operand 0. @var{m}
-is the mode of operands 0 and 1; operand 2 should have the mode
-appropriate for one element of @var{m}.
+in operand 2. Store the result in vector output operand 0. Operands
+0 and 1 have mode @var{m} and operand 2 has the mode appropriate for
+one element of @var{m}.
@cindex @code{vec_shr_@var{m}} instruction pattern
@item @samp{vec_shr_@var{m}}
@@ -6263,55 +6380,41 @@ move operand 2 or (operands 2 + operand 3) into operand 0 according to the
comparison in operand 1. If the comparison is false, operand 2 is moved into
operand 0, otherwise (operand 2 + operand 3) is moved.
-@item @samp{neg@var{mode}cc}
-Similar to @samp{mov@var{mode}cc} but for conditional negation. Conditionally
-move the negation of operand 2 or the unchanged operand 3 into operand 0
-according to the comparison in operand 1. If the comparison is true, the negation
-of operand 2 is moved into operand 0, otherwise operand 3 is moved.
-
-@cindex @code{not@var{mode}cc} instruction pattern
-@item @samp{not@var{mode}cc}
-Similar to @samp{neg@var{mode}cc} but for conditional complement.
-Conditionally move the bitwise complement of operand 2 or the unchanged
-operand 3 into operand 0 according to the comparison in operand 1.
-If the comparison is true, the complement of operand 2 is moved into
-operand 0, otherwise operand 3 is moved.
-
@cindex @code{cond_add@var{mode}} instruction pattern
+@cindex @code{cond_sub@var{mode}} instruction pattern
+@cindex @code{cond_and@var{mode}} instruction pattern
+@cindex @code{cond_ior@var{mode}} instruction pattern
+@cindex @code{cond_xor@var{mode}} instruction pattern
+@cindex @code{cond_smin@var{mode}} instruction pattern
+@cindex @code{cond_smax@var{mode}} instruction pattern
+@cindex @code{cond_umin@var{mode}} instruction pattern
+@cindex @code{cond_umax@var{mode}} instruction pattern
@item @samp{cond_add@var{mode}}
-When operand 1 is true, add operand 2 and operand 3 and store the result
-in operand 0. When operand 1 is false, ignore operand 3 and store
-operand 2 in operand 0. The operands may be scalars or vectors;
-if they are vectors, all operands have the same number of elements and
-the instruction performs an elementwise conditional addition of the form:
+@itemx @samp{cond_sub@var{mode}}
+@itemx @samp{cond_and@var{mode}}
+@itemx @samp{cond_ior@var{mode}}
+@itemx @samp{cond_xor@var{mode}}
+@itemx @samp{cond_smin@var{mode}}
+@itemx @samp{cond_smax@var{mode}}
+@itemx @samp{cond_umin@var{mode}}
+@itemx @samp{cond_umax@var{mode}}
+Perform an elementwise operation on vector operands 2 and 3,
+under the control of the vector mask in operand 1, and store the result
+in operand 0. This is equivalent to:
+
@smallexample
-op0[I] = op1[I] ? op2[I] + op3[I] : op2[I];
+for (i = 0; i < GET_MODE_NUNITS (@var{n}); i++)
+ op0[i] = op1[i] ? op2[i] @var{op} op3[i] : op2[i];
@end smallexample
-In this case, operand 1 must have the same mode as the type
-returned by @code{TARGET_VECTORIZE_GET_MASK_MODE}.
-The difference between this pattern and @code{add@var{mode}3} is that
-operand 1 of @code{add@var{mode}3} is a comparison operation while
-operand 1 of @code{cond_add@var{mode}} is a register, memory reference
-or constant.
+where, for example, @var{op} is @code{+} for @samp{cond_add@var{mode}}.
-@cindex @code{cond_sub@var{mode}} instruction pattern
-@cindex @code{cond_and@var{m}} instruction pattern
-@cindex @code{cond_ior@var{m}} instruction pattern
-@cindex @code{cond_xor@var{m}} instruction pattern
-@cindex @code{cond_smin@var{m}} instruction pattern
-@cindex @code{cond_smax@var{m}} instruction pattern
-@cindex @code{cond_umin@var{m}} instruction pattern
-@cindex @code{cond_umax@var{m}} instruction pattern
-@item @samp{cond_sub@var{mode}}
-@itemx @samp{cond_and@var{m}}
-@itemx @samp{cond_ior@var{m}}
-@itemx @samp{cond_xor@var{m}}
-@itemx @samp{cond_smin@var{m}}
-@itemx @samp{cond_smax@var{m}}
-@itemx @samp{cond_umin@var{m}}
-@itemx @samp{cond_umax@var{m}}
-Similar to @samp{cond_add@var{m}}, for other binary arithmetic operations.
+When defined for floating-point modes, the contents of @samp{op3[i]}
+are not interpreted if @var{op1[i]} is false, just like they would not
+be in a normal C @samp{?:} condition.
+
+Operands 0, 2 and 3 all have mode @var{m}, while operand 1 has the mode
+returned by @code{TARGET_VECTORIZE_GET_MASK_MODE}.
@cindex @code{cond_fma_rev@var{mode}} instruction pattern
@item @samp{cond_fma_rev@var{mode}}
@@ -6339,6 +6442,21 @@ op0[I] = op1[I] ? fma (-op3[I], op4[I], op2[I]) : op2[I];
@end smallexample
for vectors.
+@cindex @code{neg@var{mode}cc} instruction pattern
+@item @samp{neg@var{mode}cc}
+Similar to @samp{mov@var{mode}cc} but for conditional negation. Conditionally
+move the negation of operand 2 or the unchanged operand 3 into operand 0
+according to the comparison in operand 1. If the comparison is true, the negation
+of operand 2 is moved into operand 0, otherwise operand 3 is moved.
+
+@cindex @code{not@var{mode}cc} instruction pattern
+@item @samp{not@var{mode}cc}
+Similar to @samp{neg@var{mode}cc} but for conditional complement.
+Conditionally move the bitwise complement of operand 2 or the unchanged
+operand 3 into operand 0 according to the comparison in operand 1.
+If the comparison is true, the complement of operand 2 is moved into
+operand 0, otherwise operand 3 is moved.
+
@cindex @code{cstore@var{mode}4} instruction pattern
@item @samp{cstore@var{mode}4}
Store zero or nonzero in operand 0 according to whether a comparison
@@ -9779,7 +9897,7 @@ their result is ready in two cycles. The simple integer insns are
issued into the first pipeline unless it is reserved, otherwise they
are issued into the second pipeline. Integer division and
multiplication insns can be executed only in the second integer
-pipeline and their results are ready correspondingly in 8 and 4
+pipeline and their results are ready correspondingly in 9 and 4
cycles. The integer division is not pipelined, i.e.@: the subsequent
integer division insn can not be issued until the current division
insn finished. Floating point insns are fully pipelined and their
@@ -9796,7 +9914,7 @@ incurred. To describe all of this we could specify
(define_insn_reservation "mult" 4 (eq_attr "type" "mult")
"i1_pipeline, nothing*2, (port0 | port1)")
-(define_insn_reservation "div" 8 (eq_attr "type" "div")
+(define_insn_reservation "div" 9 (eq_attr "type" "div")
"i1_pipeline, div*7, div + (port0 | port1)")
(define_insn_reservation "float" 3 (eq_attr "type" "float")
diff --git a/gcc/doc/rtl.texi b/gcc/doc/rtl.texi
index f583940b944..9b73231ac21 100644
--- a/gcc/doc/rtl.texi
+++ b/gcc/doc/rtl.texi
@@ -1741,7 +1741,8 @@ low-level routines) and @code{const_poly_int_value} gives the full
@item (const_vector:@var{m} [@var{x0} @var{x1} @dots{}])
Represents a vector constant. The square brackets stand for the vector
containing the constant elements. @var{x0}, @var{x1} and so on are
-the @code{const_int}, @code{const_double} or @code{const_fixed} elements.
+the @code{const_int}, @code{const_wide_int}, @code{const_double} or
+@code{const_fixed} elements.
The number of units in a @code{const_vector} is obtained with the macro
@code{CONST_VECTOR_NUNITS} as in @code{CONST_VECTOR_NUNITS (@var{v})}.
diff --git a/gcc/doc/sourcebuild.texi b/gcc/doc/sourcebuild.texi
index 390bfcacccd..24e591ce1b7 100644
--- a/gcc/doc/sourcebuild.texi
+++ b/gcc/doc/sourcebuild.texi
@@ -1403,9 +1403,16 @@ Target supports hardware vectors of @code{long}.
@item vect_long_long
Target supports hardware vectors of @code{long long}.
+@item vect_fully_masked
+Target supports fully-masked (also known as fully-predicated) loops,
+so that vector loops can handle partial as well as full vectors.
+
@item vect_masked_store
Target supports vector masked stores.
+@item vect_scatter_store
+Target supports vector scatter stores.
+
@item vect_aligned_arrays
Target aligns arrays to vector alignment boundary.
@@ -1573,6 +1580,12 @@ Target supports 32- and 16-bytes vectors.
@item vect_logical_reduc
Target supports AND, IOR and XOR reduction on vectors.
+
+@item vect_fold_extract_last
+Target supports the @code{fold_extract_last} optab.
+
+@item vect_fold_left_plus
+Target supports the @code{fold_left_plus} optab.
@end table
@subsubsection Thread Local Storage attributes
diff --git a/gcc/doc/tm.texi b/gcc/doc/tm.texi
index acadc73667e..95cfd45617a 100644
--- a/gcc/doc/tm.texi
+++ b/gcc/doc/tm.texi
@@ -2774,6 +2774,17 @@ details.
With LRA, the default is to use @var{mode} unmodified.
@end deftypefn
+@deftypefn {Target Hook} void TARGET_SELECT_EARLY_REMAT_MODES (sbitmap @var{modes})
+On some targets, certain modes cannot be held in registers around a
+standard ABI call and are relatively expensive to spill to the stack.
+The early rematerialization pass can help in such cases by aggressively
+recomputing values after calls, so that they don't need to be spilled.
+
+This hook returns the set of such modes by setting the associated bits
+in @var{modes}. The default implementation selects no modes, which has
+the effect of disabling the early rematerialization pass.
+@end deftypefn
+
@deftypefn {Target Hook} bool TARGET_CLASS_LIKELY_SPILLED_P (reg_class_t @var{rclass})
A target hook which returns @code{true} if pseudos that have been assigned
to registers of class @var{rclass} would likely be spilled because
@@ -4249,14 +4260,15 @@ must have move patterns for this mode.
@deftypefn {Target Hook} opt_machine_mode TARGET_ARRAY_MODE (machine_mode @var{mode}, unsigned HOST_WIDE_INT @var{nelems})
Return the mode that GCC should use for an array that has
@var{nelems} elements, with each element having mode @var{mode}.
-Return @code{BLKmode} if an integer mode of the appropriate size should
-be used; it is the caller's reponsibility to find such a mode. Usually the
-search for the integer mode is limited to @code{MAX_FIXED_MODE_SIZE},
-but the @code{TARGET_ARRAY_MODE_SUPPORTED_P} hook allows a larger
-mode to be used in specific cases.
+Return no mode if the target has no special requirements. In the
+latter case, GCC looks for an integer mode of the appropriate size
+if available and uses BLKmode otherwise. Usually the search for the
+integer mode is limited to @code{MAX_FIXED_MODE_SIZE}, but the
+@code{TARGET_ARRAY_MODE_SUPPORTED_P} hook allows a larger mode to be
+used in specific cases.
The main use of this hook is to specify that an array of vectors should
-also have a vector mode. The default implementation returns @code{BLKmode}.
+also have a vector mode. The default implementation returns no mode.
@end deftypefn
@deftypefn {Target Hook} bool TARGET_ARRAY_MODE_SUPPORTED_P (machine_mode @var{mode}, unsigned HOST_WIDE_INT @var{nelems})
@@ -11861,13 +11873,6 @@ If defined, this function returns an appropriate alignment in bits for an atomic
ISO C11 requires atomic compound assignments that may raise floating-point exceptions to raise exceptions corresponding to the arithmetic operation whose result was successfully stored in a compare-and-exchange sequence. This requires code equivalent to calls to @code{feholdexcept}, @code{feclearexcept} and @code{feupdateenv} to be generated at appropriate points in the compare-and-exchange sequence. This hook should set @code{*@var{hold}} to an expression equivalent to the call to @code{feholdexcept}, @code{*@var{clear}} to an expression equivalent to the call to @code{feclearexcept} and @code{*@var{update}} to an expression equivalent to the call to @code{feupdateenv}. The three expressions are @code{NULL_TREE} on entry to the hook and may be left as @code{NULL_TREE} if no code is required in a particular place. The default implementation leaves all three expressions as @code{NULL_TREE}. The @code{__atomic_feraiseexcept} function from @code{libatomic} may be of use as part of the code generated in @code{*@var{update}}.
@end deftypefn
-@deftypefn {Target Hook} bool TARGET_GATHER_SCATTER_SUPPORTS_SCALE_P (bool @var{gather_p}, unsigned int @var{offset_bitsize}, unsigned int @var{scale})
-Return true if it is possible to plant a gather load or scatter store, with
-@var{gather_p} choosing between them. @var{offset_bitsize} if the size in bits
-of the offset type and @var{scale} is the amount by which the offset is
-multiplied.
-@end deftypefn
-
@deftypefn {Target Hook} void TARGET_RECORD_OFFLOAD_SYMBOL (tree)
Used when offloaded functions are seen in the compilation unit and no named
sections are available. It is called once for each symbol that must be
diff --git a/gcc/doc/tm.texi.in b/gcc/doc/tm.texi.in
index 7cbce20b877..590baa1b5f1 100644
--- a/gcc/doc/tm.texi.in
+++ b/gcc/doc/tm.texi.in
@@ -2307,6 +2307,8 @@ Do not define this macro if you do not define
@hook TARGET_SECONDARY_MEMORY_NEEDED_MODE
+@hook TARGET_SELECT_EARLY_REMAT_MODES
+
@hook TARGET_CLASS_LIKELY_SPILLED_P
@hook TARGET_CLASS_MAX_NREGS
@@ -8025,8 +8027,6 @@ and the associated definitions of those functions.
@hook TARGET_ATOMIC_ASSIGN_EXPAND_FENV
-@hook TARGET_GATHER_SCATTER_SUPPORTS_SCALE_P
-
@hook TARGET_RECORD_OFFLOAD_SYMBOL
@hook TARGET_OFFLOAD_OPTIONS
diff --git a/gcc/dumpfile.h b/gcc/dumpfile.h
index 5df3bdec80e..910452b8370 100644
--- a/gcc/dumpfile.h
+++ b/gcc/dumpfile.h
@@ -93,6 +93,7 @@ enum dump_kind
#define MSG_NOTE (1 << 24) /* general optimization info */
#define MSG_ALL (MSG_OPTIMIZED_LOCATIONS | MSG_MISSED_OPTIMIZATION \
| MSG_NOTE)
+#define TDF_COMPARE_DEBUG (1 << 25) /* Dumping for -fcompare-debug. */
/* Value of TDF_NONE is used just for bits filtered by TDF_KIND_MASK. */
diff --git a/gcc/dwarf2cfi.c b/gcc/dwarf2cfi.c
index ce1f1a21124..8d3fb6e59dd 100644
--- a/gcc/dwarf2cfi.c
+++ b/gcc/dwarf2cfi.c
@@ -3426,6 +3426,17 @@ debug_cfi_row (dw_cfi_row *row)
This variable is tri-state, with 0 unset, >0 true, <0 false. */
static GTY(()) signed char saved_do_cfi_asm = 0;
+/* Decide whether to emit EH frame unwind information for the current
+ translation unit. */
+
+bool
+dwarf2out_do_eh_frame (void)
+{
+ return
+ (flag_unwind_tables || flag_exceptions)
+ && targetm_common.except_unwind_info (&global_options) == UI_DWARF2;
+}
+
/* Decide whether we want to emit frame unwind information for the current
translation unit. */
@@ -3444,8 +3455,7 @@ dwarf2out_do_frame (void)
if (targetm.debug_unwind_info () == UI_DWARF2)
return true;
- if ((flag_unwind_tables || flag_exceptions)
- && targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
+ if (dwarf2out_do_eh_frame ())
return true;
return false;
@@ -3480,9 +3490,7 @@ dwarf2out_do_cfi_asm (void)
/* If we can't get the assembler to emit only .debug_frame, and we don't need
dwarf2 unwind info for exceptions, then emit .debug_frame by hand. */
- if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE
- && !flag_unwind_tables && !flag_exceptions
- && targetm_common.except_unwind_info (&global_options) != UI_DWARF2)
+ if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE && !dwarf2out_do_eh_frame ())
return false;
/* Success! */
diff --git a/gcc/dwarf2out.c b/gcc/dwarf2out.c
index c0f93d763f5..845786b96d2 100644
--- a/gcc/dwarf2out.c
+++ b/gcc/dwarf2out.c
@@ -263,7 +263,6 @@ static GTY(()) int dw2_string_counter;
static GTY(()) bool have_multiple_function_sections = false;
/* Whether the default text and cold text sections have been used at all. */
-
static GTY(()) bool text_section_used = false;
static GTY(()) bool cold_text_section_used = false;
@@ -285,6 +284,9 @@ static void dwarf2out_note_section_used (void);
personality CFI. */
static GTY(()) rtx current_unit_personality;
+/* Whether an eh_frame section is required. */
+static GTY(()) bool do_eh_frame = false;
+
/* .debug_rnglists next index. */
static unsigned int rnglist_idx;
@@ -1063,10 +1065,14 @@ dwarf2out_begin_prologue (unsigned int line ATTRIBUTE_UNUSED,
dup_label = xstrdup (label);
current_function_func_begin_label = dup_label;
- /* We can elide the fde allocation if we're not emitting debug info. */
+ /* We can elide FDE allocation if we're not emitting frame unwind info. */
if (!do_frame)
return;
+ /* Unlike the debug version, the EH version of frame unwind info is a per-
+ function setting so we need to record whether we need it for the unit. */
+ do_eh_frame |= dwarf2out_do_eh_frame ();
+
/* Cater to the various TARGET_ASM_OUTPUT_MI_THUNK implementations that
emit insns as rtx but bypass the bulk of rest_of_compilation, which
would include pass_dwarf2_frame. If we've not created the FDE yet,
@@ -1183,8 +1189,7 @@ dwarf2out_frame_finish (void)
output_call_frame_info (0);
/* Output another copy for the unwinder. */
- if ((flag_unwind_tables || flag_exceptions)
- && targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
+ if (do_eh_frame)
output_call_frame_info (1);
}
@@ -12496,7 +12501,8 @@ modified_type_die (tree type, int cv_quals, bool reverse,
dw_die_ref mod_scope;
/* Only these cv-qualifiers are currently handled. */
const int cv_qual_mask = (TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE
- | TYPE_QUAL_RESTRICT | TYPE_QUAL_ATOMIC);
+ | TYPE_QUAL_RESTRICT | TYPE_QUAL_ATOMIC |
+ ENCODE_QUAL_ADDR_SPACE(~0U));
const bool reverse_base_type
= need_endianity_attribute_p (reverse) && is_base_type (type);
@@ -13859,10 +13865,14 @@ const_ok_for_output_1 (rtx rtl)
We should really identify / validate expressions
enclosed in CONST that can be handled by assemblers on various
targets and only handle legitimate cases here. */
- if (GET_CODE (rtl) != SYMBOL_REF)
+ switch (GET_CODE (rtl))
{
- if (GET_CODE (rtl) == NOT)
- return false;
+ case SYMBOL_REF:
+ break;
+ case NOT:
+ case NEG:
+ return false;
+ default:
return true;
}
@@ -15037,8 +15047,32 @@ mem_loc_descriptor (rtx rtl, machine_mode mode,
if (!const_ok_for_output (rtl))
{
if (GET_CODE (rtl) == CONST)
- mem_loc_result = mem_loc_descriptor (XEXP (rtl, 0), int_mode,
- mem_mode, initialized);
+ switch (GET_CODE (XEXP (rtl, 0)))
+ {
+ case NOT:
+ op = DW_OP_not;
+ goto try_const_unop;
+ case NEG:
+ op = DW_OP_neg;
+ goto try_const_unop;
+ try_const_unop:
+ rtx arg;
+ arg = XEXP (XEXP (rtl, 0), 0);
+ if (!CONSTANT_P (arg))
+ arg = gen_rtx_CONST (int_mode, arg);
+ op0 = mem_loc_descriptor (arg, int_mode, mem_mode,
+ initialized);
+ if (op0)
+ {
+ mem_loc_result = op0;
+ add_loc_descr (&mem_loc_result, new_loc_descr (op, 0, 0));
+ }
+ break;
+ default:
+ mem_loc_result = mem_loc_descriptor (XEXP (rtl, 0), int_mode,
+ mem_mode, initialized);
+ break;
+ }
break;
}
@@ -20815,7 +20849,7 @@ add_type_attribute (dw_die_ref object_die, tree type, int cv_quals,
return;
type_die = modified_type_die (type,
- cv_quals | TYPE_QUALS_NO_ADDR_SPACE (type),
+ cv_quals | TYPE_QUALS (type),
reverse,
context_die);
@@ -23481,6 +23515,7 @@ gen_producer_string (void)
case OPT_fltrans_output_list_:
case OPT_fresolution_:
case OPT_fdebug_prefix_map_:
+ case OPT_fcompare_debug:
/* Ignore these. */
continue;
default:
@@ -27600,8 +27635,7 @@ dwarf2out_assembly_start (void)
if (HAVE_GAS_CFI_SECTIONS_DIRECTIVE
&& dwarf2out_do_cfi_asm ()
- && (!(flag_unwind_tables || flag_exceptions)
- || targetm_common.except_unwind_info (&global_options) != UI_DWARF2))
+ && !dwarf2out_do_eh_frame ())
fprintf (asm_out_file, "\t.cfi_sections\t.debug_frame\n");
}
diff --git a/gcc/early-remat.c b/gcc/early-remat.c
index 94e87d96ffe..196a7d58384 100644
--- a/gcc/early-remat.c
+++ b/gcc/early-remat.c
@@ -399,7 +399,7 @@ struct remat_candidate_hasher : nofree_ptr_hash <remat_candidate>
/* Main class for this pass. */
class early_remat {
public:
- early_remat (function *);
+ early_remat (function *, sbitmap);
~early_remat ();
void run (void);
@@ -461,6 +461,9 @@ private:
/* The function that we're optimizing. */
function *m_fn;
+ /* The modes that we want to rematerialize. */
+ sbitmap m_selected_modes;
+
/* All rematerialization candidates, identified by their index into the
vector. */
auto_vec<remat_candidate> m_candidates;
@@ -760,6 +763,10 @@ early_remat::interesting_regno_p (unsigned int regno)
if (!reg || DF_REG_DEF_COUNT (regno) == 0)
return false;
+ /* Make sure the register has a mode that we want to rematerialize. */
+ if (!bitmap_bit_p (m_selected_modes, GET_MODE (reg)))
+ return false;
+
/* Ignore values that might sometimes be used uninitialized. We could
instead add dummy candidates for the entry block definition, and so
handle uses that are definitely not uninitialized, but the combination
@@ -767,10 +774,6 @@ early_remat::interesting_regno_p (unsigned int regno)
if (bitmap_bit_p (DF_LR_OUT (ENTRY_BLOCK_PTR_FOR_FN (m_fn)), regno))
return false;
- /* At present we only rematerialize variable-sized registers. */
- if (GET_MODE_SIZE (GET_MODE (reg)).is_constant ())
- return false;
-
return true;
}
@@ -2542,8 +2545,9 @@ early_remat::run (void)
global_phase ();
}
-early_remat::early_remat (function *fn)
+early_remat::early_remat (function *fn, sbitmap selected_modes)
: m_fn (fn),
+ m_selected_modes (selected_modes),
m_available (0),
m_required (0),
m_value_table (63)
@@ -2588,7 +2592,11 @@ public:
virtual unsigned int execute (function *f)
{
- early_remat (f).run ();
+ auto_sbitmap selected_modes (NUM_MACHINE_MODES);
+ bitmap_clear (selected_modes);
+ targetm.select_early_remat_modes (selected_modes);
+ if (!bitmap_empty_p (selected_modes))
+ early_remat (f, selected_modes).run ();
return 0;
}
}; // class pass_early_remat
diff --git a/gcc/emit-rtl.c b/gcc/emit-rtl.c
index af4a038d75a..316aaf3a8a4 100644
--- a/gcc/emit-rtl.c
+++ b/gcc/emit-rtl.c
@@ -5919,6 +5919,17 @@ gen_rtx_CONST (machine_mode mode, rtx val)
return gen_rtx_raw_CONST (mode, val);
}
+/* Return true if X is a valid element for a duplicated vector constant
+ of the given mode. */
+
+bool
+valid_for_const_vec_duplicate_p (machine_mode, rtx x)
+{
+ return (CONST_SCALAR_INT_P (x)
+ || CONST_DOUBLE_AS_FLOAT_P (x)
+ || CONST_FIXED_P (x));
+}
+
/* Temporary rtx used by gen_const_vec_duplicate_1. */
static GTY((deletable)) rtx spare_vec_duplicate;
@@ -5988,7 +5999,7 @@ gen_const_vec_duplicate (machine_mode mode, rtx elt)
rtx
gen_vec_duplicate (machine_mode mode, rtx x)
{
- if (CONSTANT_P (x))
+ if (valid_for_const_vec_duplicate_p (mode, x))
return gen_const_vec_duplicate (mode, x);
return gen_rtx_VEC_DUPLICATE (mode, x);
}
@@ -6171,6 +6182,8 @@ init_emit_regs (void)
}
mode_mem_attrs[i] = attrs;
}
+
+ split_branch_probability = profile_probability::uninitialized ();
}
/* Initialize global machine_mode variables. */
@@ -6625,7 +6638,16 @@ need_atomic_barrier_p (enum memmodel model, bool pre)
rtx
gen_int_shift_amount (machine_mode mode, poly_int64 value)
{
- return gen_int_mode (value, get_shift_amount_mode (mode));
+ /* ??? Using the inner mode should be wide enough for all useful
+ cases (e.g. QImode usually has 8 shiftable bits, while a QImode
+ shift amount has a range of [-128, 127]). But in principle
+ a target could require target-dependent behaviour for a
+ shift whose shift amount is wider than the shifted value.
+ Perhaps this should be automatically derived from the .md
+ files instead, or perhaps have a target hook. */
+ scalar_int_mode shift_mode
+ = int_mode_for_mode (GET_MODE_INNER (mode)).require ();
+ return gen_int_mode (value, shift_mode);
}
/* Initialize fields of rtl_data related to stack alignment. */
diff --git a/gcc/emit-rtl.h b/gcc/emit-rtl.h
index ac1f5ce1ede..b219762d279 100644
--- a/gcc/emit-rtl.h
+++ b/gcc/emit-rtl.h
@@ -439,6 +439,7 @@ get_max_uid (void)
return crtl->emit.x_cur_insn_uid;
}
+extern bool valid_for_const_vec_duplicate_p (machine_mode, rtx);
extern rtx gen_const_vec_duplicate (machine_mode, rtx);
extern rtx gen_vec_duplicate (machine_mode, rtx);
diff --git a/gcc/explow.c b/gcc/explow.c
index 4c99d4e2871..b31e3751c6f 100644
--- a/gcc/explow.c
+++ b/gcc/explow.c
@@ -2011,6 +2011,13 @@ anti_adjust_stack_and_probe_stack_clash (rtx size)
if (size != CONST0_RTX (Pmode)
&& targetm.stack_clash_protection_final_dynamic_probe (residual))
{
+ /* SIZE could be zero at runtime and in that case *sp could hold
+ live data. Furthermore, we don't want to probe into the red
+ zone.
+
+ Go ahead and just guard a probe at *sp on SIZE != 0 at runtime
+ if SIZE is not a compile time constant. */
+
/* Ideally we would just probe at *sp. However, if SIZE is not
a compile-time constant, but is zero at runtime, then *sp
might hold live data. So probe at *sp if we know that
@@ -2023,9 +2030,12 @@ anti_adjust_stack_and_probe_stack_clash (rtx size)
}
else
{
- emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
- -GET_MODE_SIZE (word_mode)));
+ rtx label = gen_label_rtx ();
+ emit_cmp_and_jump_insns (size, CONST0_RTX (GET_MODE (size)),
+ EQ, NULL_RTX, Pmode, 1, label);
+ emit_stack_probe (stack_pointer_rtx);
emit_insn (gen_blockage ());
+ emit_label (label);
}
}
}
diff --git a/gcc/expmed.c b/gcc/expmed.c
index 4565bcce99e..fa8972dd177 100644
--- a/gcc/expmed.c
+++ b/gcc/expmed.c
@@ -3397,7 +3397,7 @@ expand_mult_const (machine_mode mode, rtx op0, HOST_WIDE_INT val,
rtx
expand_mult (machine_mode mode, rtx op0, rtx op1, rtx target,
- int unsignedp)
+ int unsignedp, bool no_libcall)
{
enum mult_variant variant;
struct algorithm algorithm;
@@ -3533,14 +3533,16 @@ expand_mult (machine_mode mode, rtx op0, rtx op1, rtx target,
{
op0 = force_reg (GET_MODE (op0), op0);
return expand_binop (mode, add_optab, op0, op0,
- target, unsignedp, OPTAB_LIB_WIDEN);
+ target, unsignedp,
+ no_libcall ? OPTAB_WIDEN : OPTAB_LIB_WIDEN);
}
/* This used to use umul_optab if unsigned, but for non-widening multiply
there is no difference between signed and unsigned. */
op0 = expand_binop (mode, do_trapv ? smulv_optab : smul_optab,
- op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
- gcc_assert (op0);
+ op0, op1, target, unsignedp,
+ no_libcall ? OPTAB_WIDEN : OPTAB_LIB_WIDEN);
+ gcc_assert (op0 || no_libcall);
return op0;
}
diff --git a/gcc/expmed.h b/gcc/expmed.h
index 708d23a3617..c806cd60e95 100644
--- a/gcc/expmed.h
+++ b/gcc/expmed.h
@@ -724,7 +724,7 @@ extern void store_bit_field (rtx, poly_uint64, poly_uint64,
extern rtx extract_bit_field (rtx, poly_uint64, poly_uint64, int, rtx,
machine_mode, machine_mode, bool, rtx *);
extern rtx extract_low_bits (machine_mode, machine_mode, rtx);
-extern rtx expand_mult (machine_mode, rtx, rtx, rtx, int);
+extern rtx expand_mult (machine_mode, rtx, rtx, rtx, int, bool = false);
extern rtx expand_mult_highpart_adjust (scalar_int_mode, rtx, rtx, rtx,
rtx, int);
diff --git a/gcc/expr.c b/gcc/expr.c
index 31ff5e188e1..799c9e393cc 100644
--- a/gcc/expr.c
+++ b/gcc/expr.c
@@ -9377,7 +9377,7 @@ expand_expr_real_2 (sepops ops, rtx target, machine_mode tmode,
return target;
}
- case STRICT_REDUC_PLUS_EXPR:
+ case FOLD_LEFT_PLUS_EXPR:
{
op0 = expand_normal (treeop0);
op1 = expand_normal (treeop1);
@@ -9392,12 +9392,7 @@ expand_expr_real_2 (sepops ops, rtx target, machine_mode tmode,
create_input_operand (&ops[1], op0, mode);
create_input_operand (&ops[2], op1, vec_mode);
if (maybe_expand_insn (icode, 3, ops))
- {
- target = ops[0].value;
- if (GET_MODE (target) != mode)
- return gen_lowpart (tmode, target);
- return target;
- }
+ return ops[0].value;
}
/* Nothing to fall back to. */
diff --git a/gcc/final.c b/gcc/final.c
index 3a127d9e7e7..e3708c2801e 100644
--- a/gcc/final.c
+++ b/gcc/final.c
@@ -4631,7 +4631,7 @@ rest_of_clean_state (void)
{
flag_dump_noaddr = flag_dump_unnumbered = 1;
if (flag_compare_debug_opt || flag_compare_debug)
- dump_flags |= TDF_NOUID;
+ dump_flags |= TDF_NOUID | TDF_COMPARE_DEBUG;
dump_function_header (final_output, current_function_decl,
dump_flags);
final_insns_dump_p = true;
diff --git a/gcc/fold-const.c b/gcc/fold-const.c
index e92b5efed4a..f232b5a6d2a 100644
--- a/gcc/fold-const.c
+++ b/gcc/fold-const.c
@@ -435,7 +435,7 @@ negate_expr_p (tree t)
case PLUS_EXPR:
if (HONOR_SIGN_DEPENDENT_ROUNDING (element_mode (type))
|| HONOR_SIGNED_ZEROS (element_mode (type))
- || (INTEGRAL_TYPE_P (type)
+ || (ANY_INTEGRAL_TYPE_P (type)
&& ! TYPE_OVERFLOW_WRAPS (type)))
return false;
/* -(A + B) -> (-B) - A. */
@@ -448,7 +448,7 @@ negate_expr_p (tree t)
/* We can't turn -(A-B) into B-A when we honor signed zeros. */
return !HONOR_SIGN_DEPENDENT_ROUNDING (element_mode (type))
&& !HONOR_SIGNED_ZEROS (element_mode (type))
- && (! INTEGRAL_TYPE_P (type)
+ && (! ANY_INTEGRAL_TYPE_P (type)
|| TYPE_OVERFLOW_WRAPS (type));
case MULT_EXPR:
@@ -1604,14 +1604,14 @@ const_binop (enum tree_code code, tree arg1, tree arg2)
return build_vector_from_val (TREE_TYPE (arg1), sub);
}
- if ((TREE_CODE (arg1) == INTEGER_CST || TREE_CODE (arg1) == REAL_CST)
+ if (CONSTANT_CLASS_P (arg1)
&& TREE_CODE (arg2) == VECTOR_CST)
{
tree_code subcode;
switch (code)
{
- case STRICT_REDUC_PLUS_EXPR:
+ case FOLD_LEFT_PLUS_EXPR:
subcode = PLUS_EXPR;
break;
default:
@@ -9425,7 +9425,7 @@ fold_binary_loc (location_t loc,
if ((code == BIT_AND_EXPR || code == BIT_IOR_EXPR
|| code == EQ_EXPR || code == NE_EXPR)
- && TREE_CODE (TREE_TYPE (arg0)) != VECTOR_TYPE
+ && !VECTOR_TYPE_P (TREE_TYPE (arg0))
&& ((truth_value_p (TREE_CODE (arg0))
&& (truth_value_p (TREE_CODE (arg1))
|| (TREE_CODE (arg1) == BIT_AND_EXPR
diff --git a/gcc/fortran/ChangeLog b/gcc/fortran/ChangeLog
index aa43ff4ebff..d3170c7370a 100644
--- a/gcc/fortran/ChangeLog
+++ b/gcc/fortran/ChangeLog
@@ -1,3 +1,276 @@
+2017-11-15 Martin Liska <mliska@suse.cz>
+
+ * options.c (gfc_post_options):
+ Do not set default value of warn_return_type.
+ * trans-decl.c (gfc_trans_deferred_vars):
+ Compare warn_return_type for greater than zero.
+ (generate_local_decl): Likewise
+ (gfc_generate_function_code): Likewise.
+
+2017-11-13 Fritz Reese <fritzoreese@gmail.com>
+
+ PR fortran/78240
+ * decl.c (match_clist_expr): Replace gcc_assert with proper
+ handling of bad result from spec_size().
+ * resolve.c (check_data_variable): Avoid NULL dereference when passing
+ locus to gfc_error.
+
+2017-11-11 Janus Weil <janus@gcc.gnu.org>
+
+ PR fortran/82932
+ * resolve.c (update_compcall_arglist): Improve error recovery,
+ remove a gcc_assert.
+
+2017-11-10 Fritz Reese <fritzoreese@gmail.com>
+
+ PR fortran/82886
+ * gfortran.h (gfc_build_init_expr): New prototype.
+ * invoke.texi (finit-derived): Update documentation.
+ * expr.c (gfc_build_init_expr): New, from gfc_build_default_init_expr.
+ (gfc_build_default_init_expr): Redirect to gfc_build_init_expr(,,false)
+ (component_initializer): Force building initializers using
+ gfc_build_init_expr(,,true).
+
+2017-11-10 Martin Sebor <msebor@redhat.com>
+
+ PR c/81117
+ * gcc/fortran/decl.c (build_sym): Use strcpy instead of strncpy.
+
+2017-11-10 Paul Thomas <pault@gcc.gnu.org>
+
+ PR fortran/82934
+ * trans-stmt.c (gfc_trans_allocate): Remove the gcc_assert on
+ null string length for assumed length typespec and set
+ expr3_esize to NULL_TREE;
+
+2017-11-09 Paul Thomas <pault@gcc.gnu.org>
+
+ PR fortran/78619
+ * check.c (same_type_check): Introduce a new argument 'assoc'
+ with default value false. If this is true, use the symbol type
+ spec of BT_PROCEDURE expressions.
+ (gfc_check_associated): Set 'assoc' true in the call to
+ 'same_type_check'.
+
+2017-11-09 Steven G. Kargl <kargl@gcc.gnu.org>
+
+ PR fortran/78814
+ * interface.c (symbol_rank): Check for NULL pointer.
+
+2017-11-08 Steven G. Kargl <kargl@kgcc.gnu.org>
+
+ PR Fortran/82841
+ * simplify.c(gfc_simplify_transfer): Do not dereference a NULL pointer.
+ Unwrap a short line.
+
+2017-11-08 Steven G. Kargl <kargl@gcc.gnu.org>
+
+ PR fortran/82884
+ * arith.c (gfc_hollerith2character): Clear pad.
+
+2017-11-08 Janne Blomqvist <jb@gcc.gnu.org>
+
+ PR 82869
+ * convert.c (truthvalue_conversion): Use logical_type_node.
+ * trans-array.c (gfc_trans_allocate_array_storage): Likewise.
+ (gfc_trans_create_temp_array): Likewise.
+ (gfc_trans_array_ctor_element): Likewise.
+ (gfc_trans_array_constructor_value): Likewise.
+ (trans_array_constructor): Likewise.
+ (trans_array_bound_check): Likewise.
+ (gfc_conv_array_ref): Likewise.
+ (gfc_trans_scalarized_loop_end): Likewise.
+ (gfc_conv_array_extent_dim): Likewise.
+ (gfc_array_init_size): Likewise.
+ (gfc_array_allocate): Likewise.
+ (gfc_trans_array_bounds): Likewise.
+ (gfc_trans_dummy_array_bias): Likewise.
+ (gfc_conv_array_parameter): Likewise.
+ (duplicate_allocatable): Likewise.
+ (duplicate_allocatable_coarray): Likewise.
+ (structure_alloc_comps): Likewise
+ (get_std_lbound): Likewise
+ (gfc_alloc_allocatable_for_assignment): Likewise
+ * trans-decl.c (add_argument_checking): Likewise
+ (gfc_generate_function_code): Likewise
+ * trans-expr.c (gfc_copy_class_to_class): Likewise
+ (gfc_trans_class_array_init_assign): Likewise
+ (gfc_trans_class_init_assign): Likewise
+ (gfc_conv_expr_present): Likewise
+ (gfc_conv_substring): Likewise
+ (gfc_conv_cst_int_power): Likewise
+ (gfc_conv_expr_op): Likewise
+ (gfc_conv_procedure_call): Likewise
+ (fill_with_spaces): Likewise
+ (gfc_trans_string_copy): Likewise
+ (gfc_trans_alloc_subarray_assign): Likewise
+ (gfc_trans_pointer_assignment): Likewise
+ (gfc_trans_scalar_assign): Likewise
+ (fcncall_realloc_result): Likewise
+ (alloc_scalar_allocatable_for_assignment): Likewise
+ (trans_class_assignment): Likewise
+ (gfc_trans_assignment_1): Likewise
+ * trans-intrinsic.c (build_fixbound_expr): Likewise
+ (gfc_conv_intrinsic_aint): Likewise
+ (gfc_trans_same_strlen_check): Likewise
+ (conv_caf_send): Likewise
+ (trans_this_image): Likewise
+ (conv_intrinsic_image_status): Likewise
+ (trans_image_index): Likewise
+ (gfc_conv_intrinsic_bound): Likewise
+ (conv_intrinsic_cobound): Likewise
+ (gfc_conv_intrinsic_mod): Likewise
+ (gfc_conv_intrinsic_dshift): Likewise
+ (gfc_conv_intrinsic_dim): Likewise
+ (gfc_conv_intrinsic_sign): Likewise
+ (gfc_conv_intrinsic_ctime): Likewise
+ (gfc_conv_intrinsic_fdate): Likewise
+ (gfc_conv_intrinsic_ttynam): Likewise
+ (gfc_conv_intrinsic_minmax): Likewise
+ (gfc_conv_intrinsic_minmax_char): Likewise
+ (gfc_conv_intrinsic_anyall): Likewise
+ (gfc_conv_intrinsic_arith): Likewise
+ (gfc_conv_intrinsic_minmaxloc): Likewise
+ (gfc_conv_intrinsic_minmaxval): Likewise
+ (gfc_conv_intrinsic_btest): Likewise
+ (gfc_conv_intrinsic_bitcomp): Likewise
+ (gfc_conv_intrinsic_shift): Likewise
+ (gfc_conv_intrinsic_ishft): Likewise
+ (gfc_conv_intrinsic_ishftc): Likewise
+ (gfc_conv_intrinsic_leadz): Likewise
+ (gfc_conv_intrinsic_trailz): Likewise
+ (gfc_conv_intrinsic_mask): Likewise
+ (gfc_conv_intrinsic_spacing): Likewise
+ (gfc_conv_intrinsic_rrspacing): Likewise
+ (gfc_conv_intrinsic_size): Likewise
+ (gfc_conv_intrinsic_sizeof): Likewise
+ (gfc_conv_intrinsic_transfer): Likewise
+ (gfc_conv_allocated): Likewise
+ (gfc_conv_associated): Likewise
+ (gfc_conv_same_type_as): Likewise
+ (gfc_conv_intrinsic_trim): Likewise
+ (gfc_conv_intrinsic_repeat): Likewise
+ (conv_isocbinding_function): Likewise
+ (conv_intrinsic_ieee_is_normal): Likewise
+ (conv_intrinsic_ieee_is_negative): Likewise
+ (conv_intrinsic_ieee_copy_sign): Likewise
+ (conv_intrinsic_move_alloc): Likewise
+ * trans-io.c (set_parameter_value_chk): Likewise
+ (set_parameter_value_inquire): Likewise
+ (set_string): Likewise
+ * trans-openmp.c (gfc_walk_alloc_comps): Likewise
+ (gfc_omp_clause_default_ctor): Likewise
+ (gfc_omp_clause_copy_ctor): Likewise
+ (gfc_omp_clause_assign_op): Likewise
+ (gfc_omp_clause_dtor): Likewise
+ (gfc_omp_finish_clause): Likewise
+ (gfc_trans_omp_clauses): Likewise
+ (gfc_trans_omp_do): Likewise
+ * trans-stmt.c (gfc_trans_goto): Likewise
+ (gfc_trans_sync): Likewise
+ (gfc_trans_arithmetic_if): Likewise
+ (gfc_trans_simple_do): Likewise
+ (gfc_trans_do): Likewise
+ (gfc_trans_forall_loop): Likewise
+ (gfc_trans_where_2): Likewise
+ (gfc_trans_allocate): Likewise
+ (gfc_trans_deallocate): Likewise
+ * trans-types.c (gfc_init_types): Initialize logical_type_node and
+ their true/false trees.
+ (gfc_get_array_descr_info): Use logical_type_node.
+ * trans-types.h (logical_type_node): New tree.
+ (logical_true_node): Likewise.
+ (logical_false_node): Likewise.
+ * trans.c (gfc_trans_runtime_check): Use logical_type_node.
+ (gfc_call_malloc): Likewise
+ (gfc_allocate_using_malloc): Likewise
+ (gfc_allocate_allocatable): Likewise
+ (gfc_add_comp_finalizer_call): Likewise
+ (gfc_add_finalizer_call): Likewise
+ (gfc_deallocate_with_status): Likewise
+ (gfc_deallocate_scalar_with_status): Likewise
+ (gfc_call_realloc): Likewise
+
+2017-11-06 Paul Thomas <pault@gcc.gnu.org>
+
+ PR fortran/69739
+ * trans-expr.c (gfc_map_intrinsic_function): Return false for
+ bounds without the DIM argument instead of ICEing.
+
+2017-11-06 Martin Liska <mliska@suse.cz>
+
+ PR middle-end/82404
+ * options.c (gfc_post_options): Set default value of
+ -Wreturn-type to false.
+
+2017-11-05 Thomas Koenig <tkoenig@gcc.gnu.org>
+
+ PR fortran/82471
+ * lang.opt (ffrontend-loop-interchange): New option.
+ (Wfrontend-loop-interchange): New option.
+ * options.c (gfc_post_options): Handle ffrontend-loop-interchange.
+ * frontend-passes.c (gfc_run_passes): Run
+ optimize_namespace if flag_frontend_optimize or
+ flag_frontend_loop_interchange are set.
+ (optimize_namespace): Run functions according to flags set;
+ also call index_interchange.
+ (ind_type): New function.
+ (has_var): New function.
+ (index_cost): New function.
+ (loop_comp): New function.
+
+2017-11-05 Paul Thomas <pault@gcc.gnu.org>
+
+ PR fortran/78641
+ * resolve.c (resolve_ordinary_assign): Do not add the _data
+ component for class valued array constructors being assigned
+ to derived type arrays.
+ * trans-array.c (gfc_trans_array_ctor_element): Take the _data
+ of class valued elements for assignment to derived type arrays.
+
+2017-11-05 Paul Thomas <pault@gcc.gnu.org>
+
+ PR fortran/81447
+ PR fortran/82783
+ * resolve.c (resolve_component): There is no need to resolve
+ the components of a use associated vtype.
+ (resolve_fl_derived): Unconditionally generate a vtable for any
+ module derived type, as long as the standard is F2003 or later
+ and it is not a vtype or a PDT template.
+
+2017-11-05 Tom de Vries <tom@codesourcery.com>
+
+ PR other/82784
+ * parse.c (match, matcha, matchs, matcho, matchds, matchdo): Remove
+ semicolon after "do {} while (0)".
+
+2017-11-04 Andre Vehreschild <vehre@gcc.gnu.org>
+
+ * trans-expr.c (gfc_trans_assignment_1): Character kind conversion may
+ create a loop variant temporary, too.
+ * trans-intrinsic.c (conv_caf_send): Treat char arrays as arrays and
+ not as scalars.
+ * trans.c (get_array_span): Take the character kind into account when
+ doing pointer arithmetic.
+
+2017-11-04 Thomas Koenig <tkoenig@gcc.gnu.org>
+
+ PR fortran/29600
+ * gfortran.h (gfc_check_f): Replace fm3l with fm4l.
+ * intrinsic.h (gfc_resolve_maxloc): Add gfc_expr * to argument
+ list in protoytpe.
+ (gfc_resolve_minloc): Likewise.
+ * check.c (gfc_check_minloc_maxloc): Handle kind argument.
+ * intrinsic.c (add_sym_3_ml): Rename to
+ (add_sym_4_ml): and handle kind argument.
+ (add_function): Replace add_sym_3ml with add_sym_4ml and add
+ extra arguments for maxloc and minloc.
+ (check_specific): Change use of check.f3ml with check.f4ml.
+ * iresolve.c (gfc_resolve_maxloc): Handle kind argument. If
+ the kind is smaller than the smallest library version available,
+ use gfc_default_integer_kind and convert afterwards.
+ (gfc_resolve_minloc): Likewise.
+
2017-11-04 Paul Thomas <pault@gcc.gnu.org>
PR fortran/81735
diff --git a/gcc/fortran/arith.c b/gcc/fortran/arith.c
index c3be14df522..3c75895e2ef 100644
--- a/gcc/fortran/arith.c
+++ b/gcc/fortran/arith.c
@@ -2604,6 +2604,7 @@ gfc_hollerith2character (gfc_expr *src, int kind)
result = gfc_copy_expr (src);
result->ts.type = BT_CHARACTER;
result->ts.kind = kind;
+ result->ts.u.pad = 0;
result->value.character.length = result->representation.length;
result->value.character.string
diff --git a/gcc/fortran/check.c b/gcc/fortran/check.c
index 759c15adaec..a147449bf70 100644
--- a/gcc/fortran/check.c
+++ b/gcc/fortran/check.c
@@ -427,15 +427,22 @@ less_than_bitsize2 (const char *arg1, gfc_expr *expr1, const char *arg2,
/* Make sure two expressions have the same type. */
static bool
-same_type_check (gfc_expr *e, int n, gfc_expr *f, int m)
+same_type_check (gfc_expr *e, int n, gfc_expr *f, int m, bool assoc = false)
{
gfc_typespec *ets = &e->ts;
gfc_typespec *fts = &f->ts;
- if (e->ts.type == BT_PROCEDURE && e->symtree->n.sym)
- ets = &e->symtree->n.sym->ts;
- if (f->ts.type == BT_PROCEDURE && f->symtree->n.sym)
- fts = &f->symtree->n.sym->ts;
+ if (assoc)
+ {
+ /* Procedure pointer component expressions have the type of the interface
+ procedure. If they are being tested for association with a procedure
+ pointer (ie. not a component), the type of the procedure must be
+ determined. */
+ if (e->ts.type == BT_PROCEDURE && e->symtree->n.sym)
+ ets = &e->symtree->n.sym->ts;
+ if (f->ts.type == BT_PROCEDURE && f->symtree->n.sym)
+ fts = &f->symtree->n.sym->ts;
+ }
if (gfc_compare_types (ets, fts))
return true;
@@ -1002,7 +1009,7 @@ gfc_check_associated (gfc_expr *pointer, gfc_expr *target)
}
t = true;
- if (!same_type_check (pointer, 0, target, 1))
+ if (!same_type_check (pointer, 0, target, 1, true))
t = false;
if (!rank_check (target, 0, pointer->rank))
t = false;
@@ -3179,7 +3186,7 @@ gfc_check_matmul (gfc_expr *matrix_a, gfc_expr *matrix_b)
bool
gfc_check_minloc_maxloc (gfc_actual_arglist *ap)
{
- gfc_expr *a, *m, *d;
+ gfc_expr *a, *m, *d, *k;
a = ap->expr;
if (!int_or_real_check (a, 0) || !array_check (a, 0))
@@ -3187,6 +3194,7 @@ gfc_check_minloc_maxloc (gfc_actual_arglist *ap)
d = ap->next->expr;
m = ap->next->next->expr;
+ k = ap->next->next->next->expr;
if (m == NULL && d != NULL && d->ts.type == BT_LOGICAL
&& ap->next->name == NULL)
@@ -3214,6 +3222,9 @@ gfc_check_minloc_maxloc (gfc_actual_arglist *ap)
gfc_current_intrinsic))
return false;
+ if (!kind_check (k, 1, BT_INTEGER))
+ return false;
+
return true;
}
diff --git a/gcc/fortran/convert.c b/gcc/fortran/convert.c
index 35203235e8f..13bff7345aa 100644
--- a/gcc/fortran/convert.c
+++ b/gcc/fortran/convert.c
@@ -29,10 +29,14 @@ along with GCC; see the file COPYING3. If not see
#include "fold-const.h"
#include "convert.h"
+#include "gfortran.h"
+#include "trans.h"
+#include "trans-types.h"
+
/* Prepare expr to be an argument of a TRUTH_NOT_EXPR,
or validate its data type for a GIMPLE `if' or `while' statement.
- The resulting type should always be `boolean_type_node'. */
+ The resulting type should always be `logical_type_node'. */
static tree
truthvalue_conversion (tree expr)
@@ -40,25 +44,29 @@ truthvalue_conversion (tree expr)
switch (TREE_CODE (TREE_TYPE (expr)))
{
case BOOLEAN_TYPE:
- if (TREE_TYPE (expr) == boolean_type_node)
+ if (TREE_TYPE (expr) == logical_type_node)
return expr;
else if (COMPARISON_CLASS_P (expr))
{
- TREE_TYPE (expr) = boolean_type_node;
+ TREE_TYPE (expr) = logical_type_node;
return expr;
}
else if (TREE_CODE (expr) == NOP_EXPR)
return fold_build1_loc (input_location, NOP_EXPR,
- boolean_type_node, TREE_OPERAND (expr, 0));
+ logical_type_node,
+ TREE_OPERAND (expr, 0));
else
- return fold_build1_loc (input_location, NOP_EXPR, boolean_type_node,
+ return fold_build1_loc (input_location, NOP_EXPR,
+ logical_type_node,
expr);
case INTEGER_TYPE:
if (TREE_CODE (expr) == INTEGER_CST)
- return integer_zerop (expr) ? boolean_false_node : boolean_true_node;
+ return integer_zerop (expr) ? logical_false_node
+ : logical_true_node;
else
- return fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ return fold_build2_loc (input_location, NE_EXPR,
+ logical_type_node,
expr, build_int_cst (TREE_TYPE (expr), 0));
default:
diff --git a/gcc/fortran/decl.c b/gcc/fortran/decl.c
index 1a2d8f004ca..e57cfded540 100644
--- a/gcc/fortran/decl.c
+++ b/gcc/fortran/decl.c
@@ -632,14 +632,13 @@ match_clist_expr (gfc_expr **result, gfc_typespec *ts, gfc_array_spec *as)
gfc_expr *expr = NULL;
match m;
locus where;
- mpz_t repeat, size;
+ mpz_t repeat, cons_size, as_size;
bool scalar;
int cmp;
gcc_assert (ts);
mpz_init_set_ui (repeat, 0);
- mpz_init (size);
scalar = !as || !as->rank;
/* We have already matched '/' - now look for a constant list, as with
@@ -733,16 +732,30 @@ match_clist_expr (gfc_expr **result, gfc_typespec *ts, gfc_array_spec *as)
expr->rank = as->rank;
expr->shape = gfc_get_shape (expr->rank);
- /* Validate sizes. */
- gcc_assert (gfc_array_size (expr, &size));
- gcc_assert (spec_size (as, &repeat));
- cmp = mpz_cmp (size, repeat);
- if (cmp < 0)
- gfc_error ("Not enough elements in array initializer at %C");
- else if (cmp > 0)
- gfc_error ("Too many elements in array initializer at %C");
+ /* Validate sizes. We built expr ourselves, so cons_size will be
+ constant (we fail above for non-constant expressions).
+ We still need to verify that the array-spec has constant size. */
+ cmp = 0;
+ gcc_assert (gfc_array_size (expr, &cons_size));
+ if (!spec_size (as, &as_size))
+ {
+ gfc_error ("Expected constant array-spec in initializer list at %L",
+ as->type == AS_EXPLICIT ? &as->upper[0]->where : &where);
+ cmp = -1;
+ }
+ else
+ {
+ /* Make sure the specs are of the same size. */
+ cmp = mpz_cmp (cons_size, as_size);
+ if (cmp < 0)
+ gfc_error ("Not enough elements in array initializer at %C");
+ else if (cmp > 0)
+ gfc_error ("Too many elements in array initializer at %C");
+ mpz_clear (as_size);
+ }
+ mpz_clear (cons_size);
if (cmp)
- goto cleanup;
+ goto cleanup;
}
/* Make sure scalar types match. */
@@ -754,7 +767,6 @@ match_clist_expr (gfc_expr **result, gfc_typespec *ts, gfc_array_spec *as)
expr->ts.u.cl->length_from_typespec = 1;
*result = expr;
- mpz_clear (size);
mpz_clear (repeat);
return MATCH_YES;
@@ -766,7 +778,6 @@ cleanup:
expr->value.constructor = NULL;
gfc_free_expr (expr);
gfc_constructor_free (array_head);
- mpz_clear (size);
mpz_clear (repeat);
return MATCH_ERROR;
}
@@ -1427,11 +1438,9 @@ build_sym (const char *name, gfc_charlen *cl, bool cl_deferred,
{
char u_name[GFC_MAX_SYMBOL_LEN + 1];
gfc_symtree *st;
- int nlen;
- nlen = strlen(name);
- gcc_assert (nlen <= GFC_MAX_SYMBOL_LEN);
- strncpy (u_name, name, nlen + 1);
+ gcc_assert (strlen(name) <= GFC_MAX_SYMBOL_LEN);
+ strcpy (u_name, name);
u_name[0] = upper;
st = gfc_find_symtree (gfc_current_ns->sym_root, u_name);
diff --git a/gcc/fortran/expr.c b/gcc/fortran/expr.c
index bc05db2fbae..09abacf83ec 100644
--- a/gcc/fortran/expr.c
+++ b/gcc/fortran/expr.c
@@ -4013,13 +4013,22 @@ gfc_check_assign_symbol (gfc_symbol *sym, gfc_component *comp, gfc_expr *rvalue)
return true;
}
+/* Invoke gfc_build_init_expr to create an initializer expression, but do not
+ * require that an expression be built. */
+
+gfc_expr *
+gfc_build_default_init_expr (gfc_typespec *ts, locus *where)
+{
+ return gfc_build_init_expr (ts, where, false);
+}
/* Build an initializer for a local integer, real, complex, logical, or
character variable, based on the command line flags finit-local-zero,
- finit-integer=, finit-real=, finit-logical=, and finit-character=. */
+ finit-integer=, finit-real=, finit-logical=, and finit-character=.
+ With force, an initializer is ALWAYS generated. */
gfc_expr *
-gfc_build_default_init_expr (gfc_typespec *ts, locus *where)
+gfc_build_init_expr (gfc_typespec *ts, locus *where, bool force)
{
int char_len;
gfc_expr *init_expr;
@@ -4028,13 +4037,24 @@ gfc_build_default_init_expr (gfc_typespec *ts, locus *where)
/* Try to build an initializer expression. */
init_expr = gfc_get_constant_expr (ts->type, ts->kind, where);
+ /* If we want to force generation, make sure we default to zero. */
+ gfc_init_local_real init_real = flag_init_real;
+ int init_logical = gfc_option.flag_init_logical;
+ if (force)
+ {
+ if (init_real == GFC_INIT_REAL_OFF)
+ init_real = GFC_INIT_REAL_ZERO;
+ if (init_logical == GFC_INIT_LOGICAL_OFF)
+ init_logical = GFC_INIT_LOGICAL_FALSE;
+ }
+
/* We will only initialize integers, reals, complex, logicals, and
characters, and only if the corresponding command-line flags
were set. Otherwise, we free init_expr and return null. */
switch (ts->type)
{
case BT_INTEGER:
- if (gfc_option.flag_init_integer != GFC_INIT_INTEGER_OFF)
+ if (force || gfc_option.flag_init_integer != GFC_INIT_INTEGER_OFF)
mpz_set_si (init_expr->value.integer,
gfc_option.flag_init_integer_value);
else
@@ -4045,7 +4065,7 @@ gfc_build_default_init_expr (gfc_typespec *ts, locus *where)
break;
case BT_REAL:
- switch (flag_init_real)
+ switch (init_real)
{
case GFC_INIT_REAL_SNAN:
init_expr->is_snan = 1;
@@ -4074,7 +4094,7 @@ gfc_build_default_init_expr (gfc_typespec *ts, locus *where)
break;
case BT_COMPLEX:
- switch (flag_init_real)
+ switch (init_real)
{
case GFC_INIT_REAL_SNAN:
init_expr->is_snan = 1;
@@ -4106,9 +4126,9 @@ gfc_build_default_init_expr (gfc_typespec *ts, locus *where)
break;
case BT_LOGICAL:
- if (gfc_option.flag_init_logical == GFC_INIT_LOGICAL_FALSE)
+ if (init_logical == GFC_INIT_LOGICAL_FALSE)
init_expr->value.logical = 0;
- else if (gfc_option.flag_init_logical == GFC_INIT_LOGICAL_TRUE)
+ else if (init_logical == GFC_INIT_LOGICAL_TRUE)
init_expr->value.logical = 1;
else
{
@@ -4120,7 +4140,7 @@ gfc_build_default_init_expr (gfc_typespec *ts, locus *where)
case BT_CHARACTER:
/* For characters, the length must be constant in order to
create a default initializer. */
- if (gfc_option.flag_init_character == GFC_INIT_CHARACTER_ON
+ if ((force || gfc_option.flag_init_character == GFC_INIT_CHARACTER_ON)
&& ts->u.cl->length
&& ts->u.cl->length->expr_type == EXPR_CONSTANT)
{
@@ -4136,7 +4156,8 @@ gfc_build_default_init_expr (gfc_typespec *ts, locus *where)
gfc_free_expr (init_expr);
init_expr = NULL;
}
- if (!init_expr && gfc_option.flag_init_character == GFC_INIT_CHARACTER_ON
+ if (!init_expr
+ && (force || gfc_option.flag_init_character == GFC_INIT_CHARACTER_ON)
&& ts->u.cl->length && flag_max_stack_var_size != 0)
{
gfc_actual_arglist *arg;
@@ -4391,7 +4412,8 @@ component_initializer (gfc_typespec *ts, gfc_component *c, bool generate)
/* Treat simple components like locals. */
else
{
- init = gfc_build_default_init_expr (&c->ts, &c->loc);
+ /* We MUST give an initializer, so force generation. */
+ init = gfc_build_init_expr (&c->ts, &c->loc, true);
gfc_apply_init (&c->ts, &c->attr, init);
}
diff --git a/gcc/fortran/frontend-passes.c b/gcc/fortran/frontend-passes.c
index fcfaf9508c2..b3db18ac5f1 100644
--- a/gcc/fortran/frontend-passes.c
+++ b/gcc/fortran/frontend-passes.c
@@ -55,6 +55,7 @@ static gfc_expr* check_conjg_transpose_variable (gfc_expr *, bool *,
bool *);
static bool has_dimen_vector_ref (gfc_expr *);
static int matmul_temp_args (gfc_code **, int *,void *data);
+static int index_interchange (gfc_code **, int*, void *);
#ifdef CHECKING_P
static void check_locus (gfc_namespace *);
@@ -155,9 +156,11 @@ gfc_run_passes (gfc_namespace *ns)
check_locus (ns);
#endif
+ if (flag_frontend_optimize || flag_frontend_loop_interchange)
+ optimize_namespace (ns);
+
if (flag_frontend_optimize)
{
- optimize_namespace (ns);
optimize_reduction (ns);
if (flag_dump_fortran_optimized)
gfc_dump_parse_tree (ns, stdout);
@@ -1350,7 +1353,9 @@ simplify_io_impl_do (gfc_code **code, int *walk_subtrees,
return 0;
}
-/* Optimize a namespace, including all contained namespaces. */
+/* Optimize a namespace, including all contained namespaces.
+ flag_frontend_optimize and flag_fronend_loop_interchange are
+ handled separately. */
static void
optimize_namespace (gfc_namespace *ns)
@@ -1363,28 +1368,35 @@ optimize_namespace (gfc_namespace *ns)
in_assoc_list = false;
in_omp_workshare = false;
- gfc_code_walker (&ns->code, simplify_io_impl_do, dummy_expr_callback, NULL);
- gfc_code_walker (&ns->code, convert_do_while, dummy_expr_callback, NULL);
- gfc_code_walker (&ns->code, convert_elseif, dummy_expr_callback, NULL);
- gfc_code_walker (&ns->code, cfe_code, cfe_expr_0, NULL);
- gfc_code_walker (&ns->code, optimize_code, optimize_expr, NULL);
- if (flag_inline_matmul_limit != 0)
+ if (flag_frontend_optimize)
{
- bool found;
- do
+ gfc_code_walker (&ns->code, simplify_io_impl_do, dummy_expr_callback, NULL);
+ gfc_code_walker (&ns->code, convert_do_while, dummy_expr_callback, NULL);
+ gfc_code_walker (&ns->code, convert_elseif, dummy_expr_callback, NULL);
+ gfc_code_walker (&ns->code, cfe_code, cfe_expr_0, NULL);
+ gfc_code_walker (&ns->code, optimize_code, optimize_expr, NULL);
+ if (flag_inline_matmul_limit != 0)
{
- found = false;
- gfc_code_walker (&ns->code, matmul_to_var_code, matmul_to_var_expr,
- (void *) &found);
- }
- while (found);
+ bool found;
+ do
+ {
+ found = false;
+ gfc_code_walker (&ns->code, matmul_to_var_code, matmul_to_var_expr,
+ (void *) &found);
+ }
+ while (found);
- gfc_code_walker (&ns->code, matmul_temp_args, dummy_expr_callback,
- NULL);
- gfc_code_walker (&ns->code, inline_matmul_assign, dummy_expr_callback,
- NULL);
+ gfc_code_walker (&ns->code, matmul_temp_args, dummy_expr_callback,
+ NULL);
+ gfc_code_walker (&ns->code, inline_matmul_assign, dummy_expr_callback,
+ NULL);
+ }
}
+ if (flag_frontend_loop_interchange)
+ gfc_code_walker (&ns->code, index_interchange, dummy_expr_callback,
+ NULL);
+
/* BLOCKs are handled in the expression walker below. */
for (ns = ns->contained; ns; ns = ns->sibling)
{
@@ -4225,6 +4237,170 @@ inline_matmul_assign (gfc_code **c, int *walk_subtrees,
return 0;
}
+
+/* Code for index interchange for loops which are grouped together in DO
+ CONCURRENT or FORALL statements. This is currently only applied if the
+ iterations are grouped together in a single statement.
+
+ For this transformation, it is assumed that memory access in strides is
+ expensive, and that loops which access later indices (which access memory
+ in bigger strides) should be moved to the first loops.
+
+ For this, a loop over all the statements is executed, counting the times
+ that the loop iteration values are accessed in each index. The loop
+ indices are then sorted to minimize access to later indices from inner
+ loops. */
+
+/* Type for holding index information. */
+
+typedef struct {
+ gfc_symbol *sym;
+ gfc_forall_iterator *fa;
+ int num;
+ int n[GFC_MAX_DIMENSIONS];
+} ind_type;
+
+/* Callback function to determine if an expression is the
+ corresponding variable. */
+
+static int
+has_var (gfc_expr **e, int *walk_subtrees ATTRIBUTE_UNUSED, void *data)
+{
+ gfc_expr *expr = *e;
+ gfc_symbol *sym;
+
+ if (expr->expr_type != EXPR_VARIABLE)
+ return 0;
+
+ sym = (gfc_symbol *) data;
+ return sym == expr->symtree->n.sym;
+}
+
+/* Callback function to calculate the cost of a certain index. */
+
+static int
+index_cost (gfc_expr **e, int *walk_subtrees ATTRIBUTE_UNUSED,
+ void *data)
+{
+ ind_type *ind;
+ gfc_expr *expr;
+ gfc_array_ref *ar;
+ gfc_ref *ref;
+ int i,j;
+
+ expr = *e;
+ if (expr->expr_type != EXPR_VARIABLE)
+ return 0;
+
+ ar = NULL;
+ for (ref = expr->ref; ref; ref = ref->next)
+ {
+ if (ref->type == REF_ARRAY)
+ {
+ ar = &ref->u.ar;
+ break;
+ }
+ }
+ if (ar == NULL || ar->type != AR_ELEMENT)
+ return 0;
+
+ ind = (ind_type *) data;
+ for (i = 0; i < ar->dimen; i++)
+ {
+ for (j=0; ind[j].sym != NULL; j++)
+ {
+ if (gfc_expr_walker (&ar->start[i], has_var, (void *) (ind[j].sym)))
+ ind[j].n[i]++;
+ }
+ }
+ return 0;
+}
+
+/* Callback function for qsort, to sort the loop indices. */
+
+static int
+loop_comp (const void *e1, const void *e2)
+{
+ const ind_type *i1 = (const ind_type *) e1;
+ const ind_type *i2 = (const ind_type *) e2;
+ int i;
+
+ for (i=GFC_MAX_DIMENSIONS-1; i >= 0; i--)
+ {
+ if (i1->n[i] != i2->n[i])
+ return i1->n[i] - i2->n[i];
+ }
+ /* All other things being equal, let's not change the ordering. */
+ return i2->num - i1->num;
+}
+
+/* Main function to do the index interchange. */
+
+static int
+index_interchange (gfc_code **c, int *walk_subtrees ATTRIBUTE_UNUSED,
+ void *data ATTRIBUTE_UNUSED)
+{
+ gfc_code *co;
+ co = *c;
+ int n_iter;
+ gfc_forall_iterator *fa;
+ ind_type *ind;
+ int i, j;
+
+ if (co->op != EXEC_FORALL && co->op != EXEC_DO_CONCURRENT)
+ return 0;
+
+ n_iter = 0;
+ for (fa = co->ext.forall_iterator; fa; fa = fa->next)
+ n_iter ++;
+
+ /* Nothing to reorder. */
+ if (n_iter < 2)
+ return 0;
+
+ ind = XALLOCAVEC (ind_type, n_iter + 1);
+
+ i = 0;
+ for (fa = co->ext.forall_iterator; fa; fa = fa->next)
+ {
+ ind[i].sym = fa->var->symtree->n.sym;
+ ind[i].fa = fa;
+ for (j=0; j<GFC_MAX_DIMENSIONS; j++)
+ ind[i].n[j] = 0;
+ ind[i].num = i;
+ i++;
+ }
+ ind[n_iter].sym = NULL;
+ ind[n_iter].fa = NULL;
+
+ gfc_code_walker (c, gfc_dummy_code_callback, index_cost, (void *) ind);
+ qsort ((void *) ind, n_iter, sizeof (ind_type), loop_comp);
+
+ /* Do the actual index interchange. */
+ co->ext.forall_iterator = fa = ind[0].fa;
+ for (i=1; i<n_iter; i++)
+ {
+ fa->next = ind[i].fa;
+ fa = fa->next;
+ }
+ fa->next = NULL;
+
+ if (flag_warn_frontend_loop_interchange)
+ {
+ for (i=1; i<n_iter; i++)
+ {
+ if (ind[i-1].num > ind[i].num)
+ {
+ gfc_warning (OPT_Wfrontend_loop_interchange,
+ "Interchanging loops at %L", &co->loc);
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
#define WALK_SUBEXPR(NODE) \
do \
{ \
diff --git a/gcc/fortran/gfortran.h b/gcc/fortran/gfortran.h
index 2c2fc636708..a57676a2be1 100644
--- a/gcc/fortran/gfortran.h
+++ b/gcc/fortran/gfortran.h
@@ -1989,7 +1989,7 @@ gfc_intrinsic_arg;
argument lists of intrinsic functions. fX with X an integer refer
to check functions of intrinsics with X arguments. f1m is used for
the MAX and MIN intrinsics which can have an arbitrary number of
- arguments, f3ml is used for the MINLOC and MAXLOC intrinsics as
+ arguments, f4ml is used for the MINLOC and MAXLOC intrinsics as
these have special semantics. */
typedef union
@@ -1999,7 +1999,7 @@ typedef union
bool (*f1m)(gfc_actual_arglist *);
bool (*f2)(struct gfc_expr *, struct gfc_expr *);
bool (*f3)(struct gfc_expr *, struct gfc_expr *, struct gfc_expr *);
- bool (*f3ml)(gfc_actual_arglist *);
+ bool (*f4ml)(gfc_actual_arglist *);
bool (*f3red)(gfc_actual_arglist *);
bool (*f4)(struct gfc_expr *, struct gfc_expr *, struct gfc_expr *,
struct gfc_expr *);
@@ -3174,6 +3174,7 @@ bool gfc_check_pointer_assign (gfc_expr *, gfc_expr *);
bool gfc_check_assign_symbol (gfc_symbol *, gfc_component *, gfc_expr *);
gfc_expr *gfc_build_default_init_expr (gfc_typespec *, locus *);
+gfc_expr *gfc_build_init_expr (gfc_typespec *, locus *, bool);
void gfc_apply_init (gfc_typespec *, symbol_attribute *, gfc_expr *);
bool gfc_has_default_initializer (gfc_symbol *);
gfc_expr *gfc_default_initializer (gfc_typespec *);
diff --git a/gcc/fortran/interface.c b/gcc/fortran/interface.c
index 9f0fcc82f24..1b7ebf56b92 100644
--- a/gcc/fortran/interface.c
+++ b/gcc/fortran/interface.c
@@ -1262,8 +1262,13 @@ generic_correspondence (gfc_formal_arglist *f1, gfc_formal_arglist *f2,
static int
symbol_rank (gfc_symbol *sym)
{
- gfc_array_spec *as;
- as = (sym->ts.type == BT_CLASS) ? CLASS_DATA (sym)->as : sym->as;
+ gfc_array_spec *as = NULL;
+
+ if (sym->ts.type == BT_CLASS && CLASS_DATA (sym) && CLASS_DATA (sym)->as)
+ as = CLASS_DATA (sym)->as;
+ else
+ as = sym->as;
+
return as ? as->rank : 0;
}
diff --git a/gcc/fortran/intrinsic.c b/gcc/fortran/intrinsic.c
index da96e8ff30c..cb18b21a90d 100644
--- a/gcc/fortran/intrinsic.c
+++ b/gcc/fortran/intrinsic.c
@@ -687,27 +687,29 @@ add_sym_3 (const char *name, gfc_isym_id id, enum klass cl, int actual_ok, bt ty
might have to be reordered. */
static void
-add_sym_3ml (const char *name, gfc_isym_id id, enum klass cl, int actual_ok, bt type,
+add_sym_4ml (const char *name, gfc_isym_id id, enum klass cl, int actual_ok, bt type,
int kind, int standard,
bool (*check) (gfc_actual_arglist *),
- gfc_expr *(*simplify) (gfc_expr *, gfc_expr *, gfc_expr *),
- void (*resolve) (gfc_expr *, gfc_expr *, gfc_expr *, gfc_expr *),
+ gfc_expr *(*simplify) (gfc_expr *, gfc_expr *, gfc_expr *, gfc_expr *),
+ void (*resolve) (gfc_expr *, gfc_expr *, gfc_expr *, gfc_expr *, gfc_expr *),
const char *a1, bt type1, int kind1, int optional1,
const char *a2, bt type2, int kind2, int optional2,
- const char *a3, bt type3, int kind3, int optional3)
+ const char *a3, bt type3, int kind3, int optional3,
+ const char *a4, bt type4, int kind4, int optional4)
{
gfc_check_f cf;
gfc_simplify_f sf;
gfc_resolve_f rf;
- cf.f3ml = check;
- sf.f3 = simplify;
- rf.f3 = resolve;
+ cf.f4ml = check;
+ sf.f4 = simplify;
+ rf.f4 = resolve;
add_sym (name, id, cl, actual_ok, type, kind, standard, cf, sf, rf,
a1, type1, kind1, optional1, INTENT_IN,
a2, type2, kind2, optional2, INTENT_IN,
a3, type3, kind3, optional3, INTENT_IN,
+ a4, type4, kind4, optional4, INTENT_IN,
(void *) 0);
}
@@ -2455,10 +2457,10 @@ add_functions (void)
make_generic ("maxexponent", GFC_ISYM_MAXEXPONENT, GFC_STD_F95);
- add_sym_3ml ("maxloc", GFC_ISYM_MAXLOC, CLASS_TRANSFORMATIONAL, ACTUAL_NO, BT_INTEGER, di, GFC_STD_F95,
+ add_sym_4ml ("maxloc", GFC_ISYM_MAXLOC, CLASS_TRANSFORMATIONAL, ACTUAL_NO, BT_INTEGER, di, GFC_STD_F95,
gfc_check_minloc_maxloc, NULL, gfc_resolve_maxloc,
ar, BT_REAL, dr, REQUIRED, dm, BT_INTEGER, ii, OPTIONAL,
- msk, BT_LOGICAL, dl, OPTIONAL);
+ msk, BT_LOGICAL, dl, OPTIONAL, kind, BT_INTEGER, di, OPTIONAL);
make_generic ("maxloc", GFC_ISYM_MAXLOC, GFC_STD_F95);
@@ -2531,10 +2533,10 @@ add_functions (void)
make_generic ("minexponent", GFC_ISYM_MINEXPONENT, GFC_STD_F95);
- add_sym_3ml ("minloc", GFC_ISYM_MINLOC, CLASS_TRANSFORMATIONAL, ACTUAL_NO, BT_INTEGER, di, GFC_STD_F95,
+ add_sym_4ml ("minloc", GFC_ISYM_MINLOC, CLASS_TRANSFORMATIONAL, ACTUAL_NO, BT_INTEGER, di, GFC_STD_F95,
gfc_check_minloc_maxloc, NULL, gfc_resolve_minloc,
ar, BT_REAL, dr, REQUIRED, dm, BT_INTEGER, ii, OPTIONAL,
- msk, BT_LOGICAL, dl, OPTIONAL);
+ msk, BT_LOGICAL, dl, OPTIONAL, kind, BT_INTEGER, di, OPTIONAL);
make_generic ("minloc", GFC_ISYM_MINLOC, GFC_STD_F95);
@@ -4498,7 +4500,7 @@ check_specific (gfc_intrinsic_sym *specific, gfc_expr *expr, int error_flag)
if (!do_ts29113_check (specific, *ap))
return false;
- if (specific->check.f3ml == gfc_check_minloc_maxloc)
+ if (specific->check.f4ml == gfc_check_minloc_maxloc)
/* This is special because we might have to reorder the argument list. */
t = gfc_check_minloc_maxloc (*ap);
else if (specific->check.f3red == gfc_check_minval_maxval)
diff --git a/gcc/fortran/intrinsic.h b/gcc/fortran/intrinsic.h
index e8280f6f2ac..62827887b3c 100644
--- a/gcc/fortran/intrinsic.h
+++ b/gcc/fortran/intrinsic.h
@@ -537,7 +537,7 @@ void gfc_resolve_logical (gfc_expr *, gfc_expr *, gfc_expr *);
void gfc_resolve_lstat (gfc_expr *, gfc_expr *, gfc_expr *);
void gfc_resolve_matmul (gfc_expr *, gfc_expr *, gfc_expr *);
void gfc_resolve_max (gfc_expr *, gfc_actual_arglist *);
-void gfc_resolve_maxloc (gfc_expr *, gfc_expr *, gfc_expr *, gfc_expr *);
+void gfc_resolve_maxloc (gfc_expr *, gfc_expr *, gfc_expr *, gfc_expr *, gfc_expr *);
void gfc_resolve_maxval (gfc_expr *, gfc_expr *, gfc_expr *, gfc_expr *);
void gfc_resolve_mclock (gfc_expr *);
void gfc_resolve_mclock8 (gfc_expr *);
@@ -545,7 +545,7 @@ void gfc_resolve_mask (gfc_expr *, gfc_expr *, gfc_expr *);
void gfc_resolve_merge (gfc_expr *, gfc_expr *, gfc_expr *, gfc_expr *);
void gfc_resolve_merge_bits (gfc_expr *, gfc_expr *, gfc_expr *, gfc_expr *);
void gfc_resolve_min (gfc_expr *, gfc_actual_arglist *);
-void gfc_resolve_minloc (gfc_expr *, gfc_expr *, gfc_expr *, gfc_expr *);
+void gfc_resolve_minloc (gfc_expr *, gfc_expr *, gfc_expr *, gfc_expr *, gfc_expr *);
void gfc_resolve_minval (gfc_expr *, gfc_expr *, gfc_expr *, gfc_expr *);
void gfc_resolve_mod (gfc_expr *, gfc_expr *, gfc_expr *);
void gfc_resolve_modulo (gfc_expr *, gfc_expr *, gfc_expr *);
diff --git a/gcc/fortran/invoke.texi b/gcc/fortran/invoke.texi
index 261f2535bb5..f3a8b34a26b 100644
--- a/gcc/fortran/invoke.texi
+++ b/gcc/fortran/invoke.texi
@@ -149,8 +149,9 @@ and warnings}.
-Wdo-subscript -Wfunction-elimination -Wimplicit-interface @gol
-Wimplicit-procedure -Wintrinsic-shadow -Wuse-without-only -Wintrinsics-std @gol
-Wline-truncation -Wno-align-commons -Wno-tabs -Wreal-q-constant @gol
--Wsurprising -Wunderflow -Wunused-parameter -Wrealloc-lhs -Wrealloc-lhs-all @gol
--Wtarget-lifetime -fmax-errors=@var{n} -fsyntax-only -pedantic -pedantic-errors
+-Wsurprising -Wunderflow -Wunused-parameter -Wrealloc-lhs @gol
+-Wrealloc-lhs-all -Wfrontend-loop-interchange -Wtarget-lifetime @gol
+-fmax-errors=@var{n} -fsyntax-only -pedantic -pedantic-errors @gol
}
@item Debugging Options
@@ -183,6 +184,7 @@ and warnings}.
-fbounds-check -fcheck-array-temporaries @gol
-fcheck=@var{<all|array-temps|bounds|do|mem|pointer|recursion>} @gol
-fcoarray=@var{<none|single|lib>} -fexternal-blas -ff2c
+-ffrontend-loop-interchange @gol
-ffrontend-optimize @gol
-finit-character=@var{n} -finit-integer=@var{n} -finit-local-zero @gol
-finit-derived @gol
@@ -910,6 +912,13 @@ Enables some warning options for usages of language features which
may be problematic. This currently includes @option{-Wcompare-reals},
@option{-Wunused-parameter} and @option{-Wdo-subscript}.
+@item -Wfrontend-loop-interchange
+@opindex @code{Wfrontend-loop-interchange}
+@cindex warnings, loop interchange
+@cindex loop interchange, warning
+Enable warning for loop interchanges performed by the
+@option{-ffrontend-loop-interchange} option.
+
@item -Wimplicit-interface
@opindex @code{Wimplicit-interface}
@cindex warnings, implicit interface
@@ -1705,9 +1714,14 @@ initialization options are provided by the
the real and imaginary parts of local @code{COMPLEX} variables),
@option{-finit-logical=@var{<true|false>}}, and
@option{-finit-character=@var{n}} (where @var{n} is an ASCII character
-value) options. Components of derived type variables will be initialized
-according to these flags only with @option{-finit-derived}. These options do
-not initialize
+value) options.
+
+With @option{-finit-derived}, components of derived type variables will be
+initialized according to these flags. Components whose type is not covered by
+an explicit @option{-finit-*} flag will be treated as described above with
+@option{-finit-local-zero}.
+
+These options do not initialize
@itemize @bullet
@item
objects with the POINTER attribute
@@ -1782,6 +1796,14 @@ expressions, removing unnecessary calls to @code{TRIM} in comparisons
and assignments and replacing @code{TRIM(a)} with
@code{a(1:LEN_TRIM(a))}. It can be deselected by specifying
@option{-fno-frontend-optimize}.
+
+@item -ffrontend-loop-interchange
+@opindex @code{frontend-loop-interchange}
+@cindex loop interchange, Fortran
+Attempt to interchange loops in the Fortran front end where
+profitable. Enabled by default by any @option{-O} option.
+At the moment, this option only affects @code{FORALL} and
+@code{DO CONCURRENT} statements with several forall triplets.
@end table
@xref{Code Gen Options,,Options for Code Generation Conventions,
diff --git a/gcc/fortran/iresolve.c b/gcc/fortran/iresolve.c
index b784ac339e9..a54ed2295b5 100644
--- a/gcc/fortran/iresolve.c
+++ b/gcc/fortran/iresolve.c
@@ -1691,16 +1691,31 @@ gfc_resolve_max (gfc_expr *f, gfc_actual_arglist *args)
gfc_resolve_minmax ("__max_%c%d", f, args);
}
+/* The smallest kind for which a minloc and maxloc implementation exists. */
+
+#define MINMAXLOC_MIN_KIND 4
void
gfc_resolve_maxloc (gfc_expr *f, gfc_expr *array, gfc_expr *dim,
- gfc_expr *mask)
+ gfc_expr *mask, gfc_expr *kind)
{
const char *name;
int i, j, idim;
+ int fkind;
f->ts.type = BT_INTEGER;
- f->ts.kind = gfc_default_integer_kind;
+
+ /* The library versions only exist for kinds 4, 8 and 16. For smaller kinds,
+ we do a type conversion further down. */
+ if (kind)
+ fkind = mpz_get_si (kind->value.integer);
+ else
+ fkind = gfc_default_integer_kind;
+
+ if (fkind < MINMAXLOC_MIN_KIND)
+ f->ts.kind = MINMAXLOC_MIN_KIND;
+ else
+ f->ts.kind = fkind;
if (dim == NULL)
{
@@ -1740,6 +1755,21 @@ gfc_resolve_maxloc (gfc_expr *f, gfc_expr *array, gfc_expr *dim,
f->value.function.name
= gfc_get_string (PREFIX ("%s%d_%d_%c%d"), name, dim != NULL, f->ts.kind,
gfc_type_letter (array->ts.type), array->ts.kind);
+
+ if (kind)
+ fkind = mpz_get_si (kind->value.integer);
+ else
+ fkind = gfc_default_integer_kind;
+
+ if (fkind != f->ts.kind)
+ {
+ gfc_typespec ts;
+ gfc_clear_ts (&ts);
+
+ ts.type = BT_INTEGER;
+ ts.kind = fkind;
+ gfc_convert_type_warn (f, &ts, 2, 0);
+ }
}
@@ -1861,13 +1891,25 @@ gfc_resolve_min (gfc_expr *f, gfc_actual_arglist *args)
void
gfc_resolve_minloc (gfc_expr *f, gfc_expr *array, gfc_expr *dim,
- gfc_expr *mask)
+ gfc_expr *mask, gfc_expr *kind)
{
const char *name;
int i, j, idim;
+ int fkind;
f->ts.type = BT_INTEGER;
- f->ts.kind = gfc_default_integer_kind;
+
+ /* The library versions only exist for kinds 4, 8 and 16. For smaller kinds,
+ we do a type conversion further down. */
+ if (kind)
+ fkind = mpz_get_si (kind->value.integer);
+ else
+ fkind = gfc_default_integer_kind;
+
+ if (fkind < MINMAXLOC_MIN_KIND)
+ f->ts.kind = MINMAXLOC_MIN_KIND;
+ else
+ f->ts.kind = fkind;
if (dim == NULL)
{
@@ -1907,6 +1949,16 @@ gfc_resolve_minloc (gfc_expr *f, gfc_expr *array, gfc_expr *dim,
f->value.function.name
= gfc_get_string (PREFIX ("%s%d_%d_%c%d"), name, dim != NULL, f->ts.kind,
gfc_type_letter (array->ts.type), array->ts.kind);
+
+ if (fkind != f->ts.kind)
+ {
+ gfc_typespec ts;
+ gfc_clear_ts (&ts);
+
+ ts.type = BT_INTEGER;
+ ts.kind = fkind;
+ gfc_convert_type_warn (f, &ts, 2, 0);
+ }
}
diff --git a/gcc/fortran/lang.opt b/gcc/fortran/lang.opt
index 88f6af57ee8..780335f3de7 100644
--- a/gcc/fortran/lang.opt
+++ b/gcc/fortran/lang.opt
@@ -245,6 +245,10 @@ Wextra
Fortran Warning
; Documented in common
+Wfrontend-loop-interchange
+Fortran Var(flag_warn_frontend_loop_interchange)
+Warn if loops have been interchanged.
+
Wfunction-elimination
Fortran Warning Var(warn_function_elimination)
Warn about function call elimination.
@@ -548,6 +552,10 @@ ffree-line-length-
Fortran RejectNegative Joined UInteger Var(flag_free_line_length) Init(132)
-ffree-line-length-<n> Use n as character line width in free mode.
+ffrontend-loop-interchange
+Fortran Var(flag_frontend_loop_interchange) Init(-1)
+Try to interchange loops if profitable.
+
ffrontend-optimize
Fortran Var(flag_frontend_optimize) Init(-1)
Enable front end optimization.
diff --git a/gcc/fortran/options.c b/gcc/fortran/options.c
index f7bbd7f2cde..0ee6b7808d9 100644
--- a/gcc/fortran/options.c
+++ b/gcc/fortran/options.c
@@ -417,6 +417,11 @@ gfc_post_options (const char **pfilename)
if (flag_frontend_optimize == -1)
flag_frontend_optimize = optimize;
+ /* Same for front end loop interchange. */
+
+ if (flag_frontend_loop_interchange == -1)
+ flag_frontend_loop_interchange = optimize;
+
if (flag_max_array_constructor < 65535)
flag_max_array_constructor = 65535;
diff --git a/gcc/fortran/parse.c b/gcc/fortran/parse.c
index e4deff9c79e..d025c912921 100644
--- a/gcc/fortran/parse.c
+++ b/gcc/fortran/parse.c
@@ -132,7 +132,7 @@ use_modules (void)
return st; \
else \
undo_new_statement (); \
- } while (0);
+ } while (0)
/* This is a specialist version of decode_statement that is used
@@ -606,7 +606,7 @@ decode_statement (void)
return st; \
else \
undo_new_statement (); \
- } while (0);
+ } while (0)
static gfc_statement
decode_oacc_directive (void)
@@ -728,7 +728,7 @@ decode_oacc_directive (void)
} \
else \
undo_new_statement (); \
- } while (0);
+ } while (0)
/* Like match, but don't match anything if not -fopenmp
and if spec_only, goto do_spec_only without actually matching. */
@@ -746,7 +746,7 @@ decode_oacc_directive (void)
} \
else \
undo_new_statement (); \
- } while (0);
+ } while (0)
/* Like match, but set a flag simd_matched if keyword matched. */
#define matchds(keyword, subr, st) \
@@ -759,7 +759,7 @@ decode_oacc_directive (void)
} \
else \
undo_new_statement (); \
- } while (0);
+ } while (0)
/* Like match, but don't match anything if not -fopenmp. */
#define matchdo(keyword, subr, st) \
@@ -774,7 +774,7 @@ decode_oacc_directive (void)
} \
else \
undo_new_statement (); \
- } while (0);
+ } while (0)
static gfc_statement
decode_omp_directive (void)
diff --git a/gcc/fortran/resolve.c b/gcc/fortran/resolve.c
index 40c1cd3c96f..bdb4015b34d 100644
--- a/gcc/fortran/resolve.c
+++ b/gcc/fortran/resolve.c
@@ -5834,7 +5834,9 @@ update_compcall_arglist (gfc_expr* e)
return true;
}
- gcc_assert (tbp->pass_arg_num > 0);
+ if (tbp->pass_arg_num <= 0)
+ return false;
+
e->value.compcall.actual = update_arglist_pass (e->value.compcall.actual, po,
tbp->pass_arg_num,
tbp->pass_arg);
@@ -10324,7 +10326,8 @@ resolve_ordinary_assign (gfc_code *code, gfc_namespace *ns)
/* Assign the 'data' of a class object to a derived type. */
if (lhs->ts.type == BT_DERIVED
- && rhs->ts.type == BT_CLASS)
+ && rhs->ts.type == BT_CLASS
+ && rhs->expr_type != EXPR_ARRAY)
gfc_add_data_component (rhs);
bool caf_convert_to_send = flag_coarray == GFC_FCOARRAY_LIB
@@ -13496,6 +13499,9 @@ resolve_component (gfc_component *c, gfc_symbol *sym)
if (c->attr.artificial)
return true;
+ if (sym->attr.vtype && sym->attr.use_assoc)
+ return true;
+
/* F2008, C442. */
if ((!sym->attr.is_class || c != sym->components)
&& c->attr.codimension
@@ -14075,6 +14081,20 @@ resolve_fl_derived (gfc_symbol *sym)
if (!resolve_typebound_procedures (sym))
return false;
+ /* Generate module vtables subject to their accessibility and their not
+ being vtables or pdt templates. If this is not done class declarations
+ in external procedures wind up with their own version and so SELECT TYPE
+ fails because the vptrs do not have the same address. */
+ if (gfc_option.allow_std & GFC_STD_F2003
+ && sym->ns->proc_name
+ && sym->ns->proc_name->attr.flavor == FL_MODULE
+ && sym->attr.access != ACCESS_PRIVATE
+ && !(sym->attr.use_assoc || sym->attr.vtype || sym->attr.pdt_template))
+ {
+ gfc_symbol *vtab = gfc_find_derived_vtab (sym);
+ gfc_set_sym_referenced (vtab);
+ }
+
return true;
}
@@ -15266,7 +15286,7 @@ check_data_variable (gfc_data_variable *var, locus *where)
if (!gfc_array_size (e, &size))
{
gfc_error ("Nonconstant array section at %L in DATA statement",
- &e->where);
+ where);
mpz_clear (offset);
return false;
}
@@ -15943,7 +15963,7 @@ resolve_equivalence (gfc_equiv *eq)
{
gfc_use_rename *r;
for (r = sym->ns->use_stmts->rename; r; r = r->next)
- if (strcmp(r->use_name, sym->name) == 0) saw_sym = true;
+ if (strcmp(r->use_name, sym->name) == 0) saw_sym = true;
}
else
saw_sym = true;
diff --git a/gcc/fortran/simplify.c b/gcc/fortran/simplify.c
index ba010a0aebf..c7b7e1a8297 100644
--- a/gcc/fortran/simplify.c
+++ b/gcc/fortran/simplify.c
@@ -6576,8 +6576,7 @@ gfc_simplify_transfer (gfc_expr *source, gfc_expr *mold, gfc_expr *size)
return NULL;
/* Calculate the size of the source. */
- if (source->expr_type == EXPR_ARRAY
- && !gfc_array_size (source, &tmp))
+ if (source->expr_type == EXPR_ARRAY && !gfc_array_size (source, &tmp))
gfc_internal_error ("Failure getting length of a constant array.");
/* Create an empty new expression with the appropriate characteristics. */
@@ -6585,7 +6584,7 @@ gfc_simplify_transfer (gfc_expr *source, gfc_expr *mold, gfc_expr *size)
&source->where);
result->ts = mold->ts;
- mold_element = mold->expr_type == EXPR_ARRAY
+ mold_element = (mold->expr_type == EXPR_ARRAY && mold->value.constructor)
? gfc_constructor_first (mold->value.constructor)->expr
: mold;
diff --git a/gcc/fortran/trans-array.c b/gcc/fortran/trans-array.c
index a357389ae64..93ce68e2a52 100644
--- a/gcc/fortran/trans-array.c
+++ b/gcc/fortran/trans-array.c
@@ -1034,7 +1034,7 @@ gfc_trans_allocate_array_storage (stmtblock_t * pre, stmtblock_t * post,
gfc_add_expr_to_block (&do_copying, tmp);
was_packed = fold_build2_loc (input_location, EQ_EXPR,
- boolean_type_node, packed,
+ logical_type_node, packed,
source_data);
tmp = gfc_finish_block (&do_copying);
tmp = build3_v (COND_EXPR, was_packed, tmp,
@@ -1302,7 +1302,7 @@ gfc_trans_create_temp_array (stmtblock_t * pre, stmtblock_t * post, gfc_ss * ss,
to[n], gfc_index_one_node);
/* Check whether the size for this dimension is negative. */
- cond = fold_build2_loc (input_location, LE_EXPR, boolean_type_node,
+ cond = fold_build2_loc (input_location, LE_EXPR, logical_type_node,
tmp, gfc_index_zero_node);
cond = gfc_evaluate_now (cond, pre);
@@ -1310,7 +1310,7 @@ gfc_trans_create_temp_array (stmtblock_t * pre, stmtblock_t * post, gfc_ss * ss,
or_expr = cond;
else
or_expr = fold_build2_loc (input_location, TRUTH_OR_EXPR,
- boolean_type_node, or_expr, cond);
+ logical_type_node, or_expr, cond);
size = fold_build2_loc (input_location, MULT_EXPR,
gfc_array_index_type, size, tmp);
@@ -1570,7 +1570,7 @@ gfc_trans_array_ctor_element (stmtblock_t * pblock, tree desc,
/* Verify that all constructor elements are of the same
length. */
tree cond = fold_build2_loc (input_location, NE_EXPR,
- boolean_type_node, first_len_val,
+ logical_type_node, first_len_val,
se->string_length);
gfc_trans_runtime_check
(true, false, cond, &se->pre, &expr->where,
@@ -1580,6 +1580,17 @@ gfc_trans_array_ctor_element (stmtblock_t * pblock, tree desc,
}
}
}
+ else if (GFC_CLASS_TYPE_P (TREE_TYPE (se->expr))
+ && !GFC_CLASS_TYPE_P (gfc_get_element_type (TREE_TYPE (desc))))
+ {
+ /* Assignment of a CLASS array constructor to a derived type array. */
+ if (expr->expr_type == EXPR_FUNCTION)
+ se->expr = gfc_evaluate_now (se->expr, pblock);
+ se->expr = gfc_class_data_get (se->expr);
+ se->expr = build_fold_indirect_ref_loc (input_location, se->expr);
+ se->expr = fold_convert (TREE_TYPE (tmp), se->expr);
+ gfc_add_modify (&se->pre, tmp, se->expr);
+ }
else
{
/* TODO: Should the frontend already have done this conversion? */
@@ -1901,14 +1912,14 @@ gfc_trans_array_constructor_value (stmtblock_t * pblock, tree type,
/* Generate the exit condition. Depending on the sign of
the step variable we have to generate the correct
comparison. */
- tmp = fold_build2_loc (input_location, GT_EXPR, boolean_type_node,
+ tmp = fold_build2_loc (input_location, GT_EXPR, logical_type_node,
step, build_int_cst (TREE_TYPE (step), 0));
cond = fold_build3_loc (input_location, COND_EXPR,
- boolean_type_node, tmp,
+ logical_type_node, tmp,
fold_build2_loc (input_location, GT_EXPR,
- boolean_type_node, shadow_loopvar, end),
+ logical_type_node, shadow_loopvar, end),
fold_build2_loc (input_location, LT_EXPR,
- boolean_type_node, shadow_loopvar, end));
+ logical_type_node, shadow_loopvar, end));
tmp = build1_v (GOTO_EXPR, exit_label);
TREE_USED (exit_label) = 1;
tmp = build3_v (COND_EXPR, cond, tmp,
@@ -2416,7 +2427,7 @@ trans_array_constructor (gfc_ss * ss, locus * where)
/* Check if the character length is negative. If it is, then
set LEN = 0. */
neg_len = fold_build2_loc (input_location, LT_EXPR,
- boolean_type_node, ss_info->string_length,
+ logical_type_node, ss_info->string_length,
build_int_cst (gfc_charlen_type_node, 0));
/* Print a warning if bounds checking is enabled. */
if (gfc_option.rtcheck & GFC_RTCHECK_BOUNDS)
@@ -3054,13 +3065,13 @@ trans_array_bound_check (gfc_se * se, gfc_ss *ss, tree index, int n,
msg = xasprintf ("Index '%%ld' of dimension %d "
"outside of expected range (%%ld:%%ld)", n+1);
- fault = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
+ fault = fold_build2_loc (input_location, LT_EXPR, logical_type_node,
index, tmp_lo);
gfc_trans_runtime_check (true, false, fault, &se->pre, where, msg,
fold_convert (long_integer_type_node, index),
fold_convert (long_integer_type_node, tmp_lo),
fold_convert (long_integer_type_node, tmp_up));
- fault = fold_build2_loc (input_location, GT_EXPR, boolean_type_node,
+ fault = fold_build2_loc (input_location, GT_EXPR, logical_type_node,
index, tmp_up);
gfc_trans_runtime_check (true, false, fault, &se->pre, where, msg,
fold_convert (long_integer_type_node, index),
@@ -3079,7 +3090,7 @@ trans_array_bound_check (gfc_se * se, gfc_ss *ss, tree index, int n,
msg = xasprintf ("Index '%%ld' of dimension %d "
"below lower bound of %%ld", n+1);
- fault = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
+ fault = fold_build2_loc (input_location, LT_EXPR, logical_type_node,
index, tmp_lo);
gfc_trans_runtime_check (true, false, fault, &se->pre, where, msg,
fold_convert (long_integer_type_node, index),
@@ -3586,7 +3597,7 @@ gfc_conv_array_ref (gfc_se * se, gfc_array_ref * ar, gfc_expr *expr,
tmp = tmpse.expr;
}
- cond = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
+ cond = fold_build2_loc (input_location, LT_EXPR, logical_type_node,
indexse.expr, tmp);
msg = xasprintf ("Index '%%ld' of dimension %d of array '%s' "
"below lower bound of %%ld", n+1, var_name);
@@ -3611,7 +3622,7 @@ gfc_conv_array_ref (gfc_se * se, gfc_array_ref * ar, gfc_expr *expr,
}
cond = fold_build2_loc (input_location, GT_EXPR,
- boolean_type_node, indexse.expr, tmp);
+ logical_type_node, indexse.expr, tmp);
msg = xasprintf ("Index '%%ld' of dimension %d of array '%s' "
"above upper bound of %%ld", n+1, var_name);
gfc_trans_runtime_check (true, false, cond, &se->pre, where, msg,
@@ -3879,7 +3890,7 @@ gfc_trans_scalarized_loop_end (gfc_loopinfo * loop, int n,
OMP_FOR_INIT (stmt) = init;
/* The exit condition. */
TREE_VEC_ELT (cond, 0) = build2_loc (input_location, LE_EXPR,
- boolean_type_node,
+ logical_type_node,
loop->loopvar[n], loop->to[n]);
SET_EXPR_LOCATION (TREE_VEC_ELT (cond, 0), input_location);
OMP_FOR_COND (stmt) = cond;
@@ -3914,7 +3925,7 @@ gfc_trans_scalarized_loop_end (gfc_loopinfo * loop, int n,
/* The exit condition. */
cond = fold_build2_loc (input_location, reverse_loop ? LT_EXPR : GT_EXPR,
- boolean_type_node, loop->loopvar[n], loop->to[n]);
+ logical_type_node, loop->loopvar[n], loop->to[n]);
tmp = build1_v (GOTO_EXPR, exit_label);
TREE_USED (exit_label) = 1;
tmp = build3_v (COND_EXPR, cond, tmp, build_empty_stmt (input_location));
@@ -4346,7 +4357,7 @@ done:
check_upper = true;
/* Zero stride is not allowed. */
- tmp = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
+ tmp = fold_build2_loc (input_location, EQ_EXPR, logical_type_node,
info->stride[dim], gfc_index_zero_node);
msg = xasprintf ("Zero stride is not allowed, for dimension %d "
"of array '%s'", dim + 1, expr_name);
@@ -4369,23 +4380,23 @@ done:
/* non_zerosized is true when the selected range is not
empty. */
stride_pos = fold_build2_loc (input_location, GT_EXPR,
- boolean_type_node, info->stride[dim],
+ logical_type_node, info->stride[dim],
gfc_index_zero_node);
- tmp = fold_build2_loc (input_location, LE_EXPR, boolean_type_node,
+ tmp = fold_build2_loc (input_location, LE_EXPR, logical_type_node,
info->start[dim], end);
stride_pos = fold_build2_loc (input_location, TRUTH_AND_EXPR,
- boolean_type_node, stride_pos, tmp);
+ logical_type_node, stride_pos, tmp);
stride_neg = fold_build2_loc (input_location, LT_EXPR,
- boolean_type_node,
+ logical_type_node,
info->stride[dim], gfc_index_zero_node);
- tmp = fold_build2_loc (input_location, GE_EXPR, boolean_type_node,
+ tmp = fold_build2_loc (input_location, GE_EXPR, logical_type_node,
info->start[dim], end);
stride_neg = fold_build2_loc (input_location, TRUTH_AND_EXPR,
- boolean_type_node,
+ logical_type_node,
stride_neg, tmp);
non_zerosized = fold_build2_loc (input_location, TRUTH_OR_EXPR,
- boolean_type_node,
+ logical_type_node,
stride_pos, stride_neg);
/* Check the start of the range against the lower and upper
@@ -4395,16 +4406,16 @@ done:
if (check_upper)
{
tmp = fold_build2_loc (input_location, LT_EXPR,
- boolean_type_node,
+ logical_type_node,
info->start[dim], lbound);
tmp = fold_build2_loc (input_location, TRUTH_AND_EXPR,
- boolean_type_node,
+ logical_type_node,
non_zerosized, tmp);
tmp2 = fold_build2_loc (input_location, GT_EXPR,
- boolean_type_node,
+ logical_type_node,
info->start[dim], ubound);
tmp2 = fold_build2_loc (input_location, TRUTH_AND_EXPR,
- boolean_type_node,
+ logical_type_node,
non_zerosized, tmp2);
msg = xasprintf ("Index '%%ld' of dimension %d of array '%s' "
"outside of expected range (%%ld:%%ld)",
@@ -4424,10 +4435,10 @@ done:
else
{
tmp = fold_build2_loc (input_location, LT_EXPR,
- boolean_type_node,
+ logical_type_node,
info->start[dim], lbound);
tmp = fold_build2_loc (input_location, TRUTH_AND_EXPR,
- boolean_type_node, non_zerosized, tmp);
+ logical_type_node, non_zerosized, tmp);
msg = xasprintf ("Index '%%ld' of dimension %d of array '%s' "
"below lower bound of %%ld",
dim + 1, expr_name);
@@ -4451,15 +4462,15 @@ done:
tmp = fold_build2_loc (input_location, MINUS_EXPR,
gfc_array_index_type, end, tmp);
tmp2 = fold_build2_loc (input_location, LT_EXPR,
- boolean_type_node, tmp, lbound);
+ logical_type_node, tmp, lbound);
tmp2 = fold_build2_loc (input_location, TRUTH_AND_EXPR,
- boolean_type_node, non_zerosized, tmp2);
+ logical_type_node, non_zerosized, tmp2);
if (check_upper)
{
tmp3 = fold_build2_loc (input_location, GT_EXPR,
- boolean_type_node, tmp, ubound);
+ logical_type_node, tmp, ubound);
tmp3 = fold_build2_loc (input_location, TRUTH_AND_EXPR,
- boolean_type_node, non_zerosized, tmp3);
+ logical_type_node, non_zerosized, tmp3);
msg = xasprintf ("Index '%%ld' of dimension %d of array '%s' "
"outside of expected range (%%ld:%%ld)",
dim + 1, expr_name);
@@ -4505,7 +4516,7 @@ done:
if (size[n])
{
tmp3 = fold_build2_loc (input_location, NE_EXPR,
- boolean_type_node, tmp, size[n]);
+ logical_type_node, tmp, size[n]);
msg = xasprintf ("Array bound mismatch for dimension %d "
"of array '%s' (%%ld/%%ld)",
dim + 1, expr_name);
@@ -5192,7 +5203,7 @@ gfc_conv_array_extent_dim (tree lbound, tree ubound, tree* or_expr)
gfc_index_one_node);
/* Check whether the size for this dimension is negative. */
- cond = fold_build2_loc (input_location, LE_EXPR, boolean_type_node, res,
+ cond = fold_build2_loc (input_location, LE_EXPR, logical_type_node, res,
gfc_index_zero_node);
res = fold_build3_loc (input_location, COND_EXPR, gfc_array_index_type, cond,
gfc_index_zero_node, res);
@@ -5200,7 +5211,7 @@ gfc_conv_array_extent_dim (tree lbound, tree ubound, tree* or_expr)
/* Build OR expression. */
if (or_expr)
*or_expr = fold_build2_loc (input_location, TRUTH_OR_EXPR,
- boolean_type_node, *or_expr, cond);
+ logical_type_node, *or_expr, cond);
return res;
}
@@ -5329,7 +5340,7 @@ gfc_array_init_size (tree descriptor, int rank, int corank, tree * poffset,
gfc_add_modify (pblock, tmp, gfc_get_dtype (type));
}
- or_expr = boolean_false_node;
+ or_expr = logical_false_node;
for (n = 0; n < rank; n++)
{
@@ -5437,12 +5448,12 @@ gfc_array_init_size (tree descriptor, int rank, int corank, tree * poffset,
TYPE_MAX_VALUE (gfc_array_index_type)),
size);
cond = gfc_unlikely (fold_build2_loc (input_location, LT_EXPR,
- boolean_type_node, tmp, stride),
+ logical_type_node, tmp, stride),
PRED_FORTRAN_OVERFLOW);
tmp = fold_build3_loc (input_location, COND_EXPR, integer_type_node, cond,
integer_one_node, integer_zero_node);
cond = gfc_unlikely (fold_build2_loc (input_location, EQ_EXPR,
- boolean_type_node, size,
+ logical_type_node, size,
gfc_index_zero_node),
PRED_FORTRAN_SIZE_ZERO);
tmp = fold_build3_loc (input_location, COND_EXPR, integer_type_node, cond,
@@ -5538,12 +5549,12 @@ gfc_array_init_size (tree descriptor, int rank, int corank, tree * poffset,
size_type_node,
TYPE_MAX_VALUE (size_type_node), element_size);
cond = gfc_unlikely (fold_build2_loc (input_location, LT_EXPR,
- boolean_type_node, tmp, stride),
+ logical_type_node, tmp, stride),
PRED_FORTRAN_OVERFLOW);
tmp = fold_build3_loc (input_location, COND_EXPR, integer_type_node, cond,
integer_one_node, integer_zero_node);
cond = gfc_unlikely (fold_build2_loc (input_location, EQ_EXPR,
- boolean_type_node, element_size,
+ logical_type_node, element_size,
build_int_cst (size_type_node, 0)),
PRED_FORTRAN_SIZE_ZERO);
tmp = fold_build3_loc (input_location, COND_EXPR, integer_type_node, cond,
@@ -5801,7 +5812,7 @@ gfc_array_allocate (gfc_se * se, gfc_expr * expr, tree status, tree errmsg,
if (dimension)
{
cond = gfc_unlikely (fold_build2_loc (input_location, NE_EXPR,
- boolean_type_node, var_overflow, integer_zero_node),
+ logical_type_node, var_overflow, integer_zero_node),
PRED_FORTRAN_OVERFLOW);
tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node, cond,
error, gfc_finish_block (&elseblock));
@@ -5832,7 +5843,7 @@ gfc_array_allocate (gfc_se * se, gfc_expr * expr, tree status, tree errmsg,
if (status != NULL_TREE)
{
cond = fold_build2_loc (input_location, EQ_EXPR,
- boolean_type_node, status,
+ logical_type_node, status,
build_int_cst (TREE_TYPE (status), 0));
gfc_add_expr_to_block (&se->pre,
fold_build3_loc (input_location, COND_EXPR, void_type_node,
@@ -6082,7 +6093,7 @@ gfc_trans_array_bounds (tree type, gfc_symbol * sym, tree * poffset,
/* Make sure that negative size arrays are translated
to being zero size. */
- tmp = fold_build2_loc (input_location, GE_EXPR, boolean_type_node,
+ tmp = fold_build2_loc (input_location, GE_EXPR, logical_type_node,
stride, gfc_index_zero_node);
tmp = fold_build3_loc (input_location, COND_EXPR,
gfc_array_index_type, tmp,
@@ -6369,10 +6380,10 @@ gfc_trans_dummy_array_bias (gfc_symbol * sym, tree tmpdesc,
/* For non-constant shape arrays we only check if the first dimension
is contiguous. Repacking higher dimensions wouldn't gain us
anything as we still don't know the array stride. */
- partial = gfc_create_var (boolean_type_node, "partial");
+ partial = gfc_create_var (logical_type_node, "partial");
TREE_USED (partial) = 1;
tmp = gfc_conv_descriptor_stride_get (dumdesc, gfc_rank_cst[0]);
- tmp = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node, tmp,
+ tmp = fold_build2_loc (input_location, EQ_EXPR, logical_type_node, tmp,
gfc_index_one_node);
gfc_add_modify (&init, partial, tmp);
}
@@ -6387,7 +6398,7 @@ gfc_trans_dummy_array_bias (gfc_symbol * sym, tree tmpdesc,
stride = gfc_conv_descriptor_stride_get (dumdesc, gfc_rank_cst[0]);
stride = gfc_evaluate_now (stride, &init);
- tmp = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
+ tmp = fold_build2_loc (input_location, EQ_EXPR, logical_type_node,
stride, gfc_index_zero_node);
tmp = fold_build3_loc (input_location, COND_EXPR, gfc_array_index_type,
tmp, gfc_index_one_node, stride);
@@ -6628,7 +6639,7 @@ gfc_trans_dummy_array_bias (gfc_symbol * sym, tree tmpdesc,
else
tmp = build_fold_indirect_ref_loc (input_location, dumdesc);
tmp = gfc_conv_descriptor_data_get (tmp);
- tmp = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ tmp = fold_build2_loc (input_location, NE_EXPR, logical_type_node,
tmp, tmpdesc);
stmtCleanup = build3_v (COND_EXPR, tmp, stmtCleanup,
build_empty_stmt (input_location));
@@ -7911,12 +7922,12 @@ gfc_conv_array_parameter (gfc_se * se, gfc_expr * expr, bool g77,
tmp = build_fold_indirect_ref_loc (input_location,
desc);
tmp = gfc_conv_array_data (tmp);
- tmp = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ tmp = fold_build2_loc (input_location, NE_EXPR, logical_type_node,
fold_convert (TREE_TYPE (tmp), ptr), tmp);
if (fsym && fsym->attr.optional && sym && sym->attr.optional)
tmp = fold_build2_loc (input_location, TRUTH_AND_EXPR,
- boolean_type_node,
+ logical_type_node,
gfc_conv_expr_present (sym), tmp);
gfc_trans_runtime_check (false, true, tmp, &se->pre,
@@ -7946,12 +7957,12 @@ gfc_conv_array_parameter (gfc_se * se, gfc_expr * expr, bool g77,
tmp = build_fold_indirect_ref_loc (input_location,
desc);
tmp = gfc_conv_array_data (tmp);
- tmp = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ tmp = fold_build2_loc (input_location, NE_EXPR, logical_type_node,
fold_convert (TREE_TYPE (tmp), ptr), tmp);
if (fsym && fsym->attr.optional && sym && sym->attr.optional)
tmp = fold_build2_loc (input_location, TRUTH_AND_EXPR,
- boolean_type_node,
+ logical_type_node,
gfc_conv_expr_present (sym), tmp);
tmp = build3_v (COND_EXPR, tmp, stmt, build_empty_stmt (input_location));
@@ -8090,7 +8101,7 @@ duplicate_allocatable (tree dest, tree src, tree type, int rank,
null_cond = gfc_conv_descriptor_data_get (src);
null_cond = convert (pvoid_type_node, null_cond);
- null_cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ null_cond = fold_build2_loc (input_location, NE_EXPR, logical_type_node,
null_cond, null_pointer_node);
return build3_v (COND_EXPR, null_cond, tmp, null_data);
}
@@ -8224,7 +8235,7 @@ duplicate_allocatable_coarray (tree dest, tree dest_tok, tree src,
null_cond = gfc_conv_descriptor_data_get (src);
null_cond = convert (pvoid_type_node, null_cond);
- null_cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ null_cond = fold_build2_loc (input_location, NE_EXPR, logical_type_node,
null_cond, null_pointer_node);
gfc_add_expr_to_block (&globalblock, build3_v (COND_EXPR, null_cond, tmp,
null_data));
@@ -8339,7 +8350,7 @@ structure_alloc_comps (gfc_symbol * der_type, tree decl,
null_cond = gfc_conv_descriptor_data_get (decl);
null_cond = fold_build2_loc (input_location, NE_EXPR,
- boolean_type_node, null_cond,
+ logical_type_node, null_cond,
build_int_cst (TREE_TYPE (null_cond), 0));
}
else
@@ -8590,7 +8601,7 @@ structure_alloc_comps (gfc_symbol * der_type, tree decl,
dealloc_fndecl);
tmp = build_int_cst (TREE_TYPE (comp), 0);
is_allocated = fold_build2_loc (input_location, NE_EXPR,
- boolean_type_node, tmp,
+ logical_type_node, tmp,
comp);
cdesc = gfc_build_addr_expr (NULL_TREE, cdesc);
@@ -8870,7 +8881,7 @@ structure_alloc_comps (gfc_symbol * der_type, tree decl,
null_data = gfc_finish_block (&tmpblock);
null_cond = fold_build2_loc (input_location, NE_EXPR,
- boolean_type_node, src_data,
+ logical_type_node, src_data,
null_pointer_node);
gfc_add_expr_to_block (&fnblock, build3_v (COND_EXPR, null_cond,
@@ -9132,7 +9143,7 @@ structure_alloc_comps (gfc_symbol * der_type, tree decl,
{
tmp = gfc_conv_descriptor_data_get (comp);
null_cond = fold_build2_loc (input_location, NE_EXPR,
- boolean_type_node, tmp,
+ logical_type_node, tmp,
build_int_cst (TREE_TYPE (tmp), 0));
tmp = gfc_call_free (tmp);
tmp = build3_v (COND_EXPR, null_cond, tmp,
@@ -9143,7 +9154,7 @@ structure_alloc_comps (gfc_symbol * der_type, tree decl,
else if (c->attr.pdt_string)
{
null_cond = fold_build2_loc (input_location, NE_EXPR,
- boolean_type_node, comp,
+ logical_type_node, comp,
build_int_cst (TREE_TYPE (comp), 0));
tmp = gfc_call_free (comp);
tmp = build3_v (COND_EXPR, null_cond, tmp,
@@ -9190,7 +9201,7 @@ structure_alloc_comps (gfc_symbol * der_type, tree decl,
tree error, cond, cname;
gfc_conv_expr_type (&tse, c_expr, TREE_TYPE (comp));
cond = fold_build2_loc (input_location, NE_EXPR,
- boolean_type_node,
+ logical_type_node,
comp, tse.expr);
cname = gfc_build_cstring_const (c->name);
cname = gfc_build_addr_expr (pchar_type_node, cname);
@@ -9350,25 +9361,25 @@ get_std_lbound (gfc_expr *expr, tree desc, int dim, bool assumed_size)
lbound = gfc_conv_descriptor_lbound_get (desc, tmp);
ubound = gfc_conv_descriptor_ubound_get (desc, tmp);
stride = gfc_conv_descriptor_stride_get (desc, tmp);
- cond1 = fold_build2_loc (input_location, GE_EXPR, boolean_type_node,
+ cond1 = fold_build2_loc (input_location, GE_EXPR, logical_type_node,
ubound, lbound);
- cond3 = fold_build2_loc (input_location, GE_EXPR, boolean_type_node,
+ cond3 = fold_build2_loc (input_location, GE_EXPR, logical_type_node,
stride, gfc_index_zero_node);
cond3 = fold_build2_loc (input_location, TRUTH_AND_EXPR,
- boolean_type_node, cond3, cond1);
- cond4 = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
+ logical_type_node, cond3, cond1);
+ cond4 = fold_build2_loc (input_location, LT_EXPR, logical_type_node,
stride, gfc_index_zero_node);
if (assumed_size)
- cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
+ cond = fold_build2_loc (input_location, EQ_EXPR, logical_type_node,
tmp, build_int_cst (gfc_array_index_type,
expr->rank - 1));
else
- cond = boolean_false_node;
+ cond = logical_false_node;
cond1 = fold_build2_loc (input_location, TRUTH_OR_EXPR,
- boolean_type_node, cond3, cond4);
+ logical_type_node, cond3, cond4);
cond = fold_build2_loc (input_location, TRUTH_OR_EXPR,
- boolean_type_node, cond, cond1);
+ logical_type_node, cond, cond1);
return fold_build3_loc (input_location, COND_EXPR,
gfc_array_index_type, cond,
@@ -9621,11 +9632,11 @@ gfc_alloc_allocatable_for_assignment (gfc_loopinfo *loop,
jump_label2 = gfc_build_label_decl (NULL_TREE);
/* Allocate if data is NULL. */
- cond_null = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
+ cond_null = fold_build2_loc (input_location, EQ_EXPR, logical_type_node,
array1, build_int_cst (TREE_TYPE (array1), 0));
if (expr1->ts.deferred)
- cond_null = gfc_evaluate_now (boolean_true_node, &fblock);
+ cond_null = gfc_evaluate_now (logical_true_node, &fblock);
else
cond_null= gfc_evaluate_now (cond_null, &fblock);
@@ -9665,7 +9676,7 @@ gfc_alloc_allocatable_for_assignment (gfc_loopinfo *loop,
gfc_array_index_type,
tmp, ubound);
cond = fold_build2_loc (input_location, NE_EXPR,
- boolean_type_node,
+ logical_type_node,
tmp, gfc_index_zero_node);
tmp = build3_v (COND_EXPR, cond,
build1_v (GOTO_EXPR, jump_label1),
@@ -9715,13 +9726,13 @@ gfc_alloc_allocatable_for_assignment (gfc_loopinfo *loop,
}
size2 = gfc_evaluate_now (size2, &fblock);
- cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ cond = fold_build2_loc (input_location, NE_EXPR, logical_type_node,
size1, size2);
/* If the lhs is deferred length, assume that the element size
changes and force a reallocation. */
if (expr1->ts.deferred)
- neq_size = gfc_evaluate_now (boolean_true_node, &fblock);
+ neq_size = gfc_evaluate_now (logical_true_node, &fblock);
else
neq_size = gfc_evaluate_now (cond, &fblock);
@@ -10001,7 +10012,7 @@ gfc_alloc_allocatable_for_assignment (gfc_loopinfo *loop,
/* Malloc if not allocated; realloc otherwise. */
tmp = build_int_cst (TREE_TYPE (array1), 0);
cond = fold_build2_loc (input_location, EQ_EXPR,
- boolean_type_node,
+ logical_type_node,
array1, tmp);
tmp = build3_v (COND_EXPR, cond, alloc_expr, realloc_expr);
gfc_add_expr_to_block (&fblock, tmp);
diff --git a/gcc/fortran/trans-decl.c b/gcc/fortran/trans-decl.c
index 45d5119236a..60e7d8f79ee 100644
--- a/gcc/fortran/trans-decl.c
+++ b/gcc/fortran/trans-decl.c
@@ -4198,7 +4198,7 @@ gfc_trans_deferred_vars (gfc_symbol * proc_sym, gfc_wrapped_block * block)
break;
}
/* TODO: move to the appropriate place in resolve.c. */
- if (warn_return_type && el == NULL)
+ if (warn_return_type > 0 && el == NULL)
gfc_warning (OPT_Wreturn_type,
"Return value of function %qs at %L not set",
proc_sym->name, &proc_sym->declared_at);
@@ -5619,7 +5619,7 @@ generate_local_decl (gfc_symbol * sym)
else if (sym->attr.flavor == FL_PROCEDURE)
{
/* TODO: move to the appropriate place in resolve.c. */
- if (warn_return_type
+ if (warn_return_type > 0
&& sym->attr.function
&& sym->result
&& sym != sym->result
@@ -5784,7 +5784,7 @@ add_argument_checking (stmtblock_t *block, gfc_symbol *sym)
/* Build the condition. For optional arguments, an actual length
of 0 is also acceptable if the associated string is NULL, which
means the argument was not passed. */
- cond = fold_build2_loc (input_location, comparison, boolean_type_node,
+ cond = fold_build2_loc (input_location, comparison, logical_type_node,
cl->passed_length, cl->backend_decl);
if (fsym->attr.optional)
{
@@ -5793,7 +5793,7 @@ add_argument_checking (stmtblock_t *block, gfc_symbol *sym)
tree absent_failed;
not_0length = fold_build2_loc (input_location, NE_EXPR,
- boolean_type_node,
+ logical_type_node,
cl->passed_length,
build_zero_cst (gfc_charlen_type_node));
/* The symbol needs to be referenced for gfc_get_symbol_decl. */
@@ -5801,11 +5801,11 @@ add_argument_checking (stmtblock_t *block, gfc_symbol *sym)
not_absent = gfc_conv_expr_present (fsym);
absent_failed = fold_build2_loc (input_location, TRUTH_OR_EXPR,
- boolean_type_node, not_0length,
+ logical_type_node, not_0length,
not_absent);
cond = fold_build2_loc (input_location, TRUTH_AND_EXPR,
- boolean_type_node, cond, absent_failed);
+ logical_type_node, cond, absent_failed);
}
/* Build the runtime check. */
@@ -6376,13 +6376,13 @@ gfc_generate_function_code (gfc_namespace * ns)
msg = xasprintf ("Recursive call to nonrecursive procedure '%s'",
sym->name);
- recurcheckvar = gfc_create_var (boolean_type_node, "is_recursive");
+ recurcheckvar = gfc_create_var (logical_type_node, "is_recursive");
TREE_STATIC (recurcheckvar) = 1;
- DECL_INITIAL (recurcheckvar) = boolean_false_node;
+ DECL_INITIAL (recurcheckvar) = logical_false_node;
gfc_add_expr_to_block (&init, recurcheckvar);
gfc_trans_runtime_check (true, false, recurcheckvar, &init,
&sym->declared_at, msg);
- gfc_add_modify (&init, recurcheckvar, boolean_true_node);
+ gfc_add_modify (&init, recurcheckvar, logical_true_node);
free (msg);
}
@@ -6494,11 +6494,11 @@ gfc_generate_function_code (gfc_namespace * ns)
if (result == NULL_TREE || artificial_result_decl)
{
/* TODO: move to the appropriate place in resolve.c. */
- if (warn_return_type && sym == sym->result)
+ if (warn_return_type > 0 && sym == sym->result)
gfc_warning (OPT_Wreturn_type,
"Return value of function %qs at %L not set",
sym->name, &sym->declared_at);
- if (warn_return_type)
+ if (warn_return_type > 0)
TREE_NO_WARNING(sym->backend_decl) = 1;
}
if (result != NULL_TREE)
@@ -6511,7 +6511,7 @@ gfc_generate_function_code (gfc_namespace * ns)
if ((gfc_option.rtcheck & GFC_RTCHECK_RECURSION)
&& !is_recursive && !flag_openmp && recurcheckvar != NULL_TREE)
{
- gfc_add_modify (&cleanup, recurcheckvar, boolean_false_node);
+ gfc_add_modify (&cleanup, recurcheckvar, logical_false_node);
recurcheckvar = NULL;
}
diff --git a/gcc/fortran/trans-expr.c b/gcc/fortran/trans-expr.c
index 1a3e3d45e4c..c5e1d72bd04 100644
--- a/gcc/fortran/trans-expr.c
+++ b/gcc/fortran/trans-expr.c
@@ -1287,7 +1287,7 @@ gfc_copy_class_to_class (tree from, tree to, tree nelems, bool unlimited)
from_len = gfc_conv_descriptor_size (from_data, 1);
tmp = fold_build2_loc (input_location, NE_EXPR,
- boolean_type_node, from_len, orig_nelems);
+ logical_type_node, from_len, orig_nelems);
msg = xasprintf ("Array bound mismatch for dimension %d "
"of array '%s' (%%ld/%%ld)",
1, name);
@@ -1338,7 +1338,7 @@ gfc_copy_class_to_class (tree from, tree to, tree nelems, bool unlimited)
extcopy = gfc_finish_block (&ifbody);
tmp = fold_build2_loc (input_location, GT_EXPR,
- boolean_type_node, from_len,
+ logical_type_node, from_len,
integer_zero_node);
tmp = fold_build3_loc (input_location, COND_EXPR,
void_type_node, tmp, extcopy, stdcopy);
@@ -1366,7 +1366,7 @@ gfc_copy_class_to_class (tree from, tree to, tree nelems, bool unlimited)
vec_safe_push (args, to_len);
extcopy = build_call_vec (fcn_type, fcn, args);
tmp = fold_build2_loc (input_location, GT_EXPR,
- boolean_type_node, from_len,
+ logical_type_node, from_len,
integer_zero_node);
tmp = fold_build3_loc (input_location, COND_EXPR,
void_type_node, tmp, extcopy, stdcopy);
@@ -1380,7 +1380,7 @@ gfc_copy_class_to_class (tree from, tree to, tree nelems, bool unlimited)
{
tree cond;
cond = fold_build2_loc (input_location, NE_EXPR,
- boolean_type_node,
+ logical_type_node,
from_data, null_pointer_node);
tmp = fold_build3_loc (input_location, COND_EXPR,
void_type_node, cond,
@@ -1425,7 +1425,7 @@ gfc_trans_class_array_init_assign (gfc_expr *rhs, gfc_expr *lhs, gfc_expr *obj)
gfc_init_se (&src, NULL);
gfc_conv_expr (&src, rhs);
src.expr = gfc_build_addr_expr (NULL_TREE, src.expr);
- tree cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ tree cond = fold_build2_loc (input_location, NE_EXPR, logical_type_node,
src.expr, fold_convert (TREE_TYPE (src.expr),
null_pointer_node));
res = build3_loc (input_location, COND_EXPR, TREE_TYPE (res), cond, res,
@@ -1492,7 +1492,7 @@ gfc_trans_class_init_assign (gfc_code *code)
{
/* Check if _def_init is non-NULL. */
tree cond = fold_build2_loc (input_location, NE_EXPR,
- boolean_type_node, src.expr,
+ logical_type_node, src.expr,
fold_convert (TREE_TYPE (src.expr),
null_pointer_node));
tmp = build3_loc (input_location, COND_EXPR, TREE_TYPE (tmp), cond,
@@ -1662,7 +1662,7 @@ gfc_conv_expr_present (gfc_symbol * sym)
decl = GFC_DECL_SAVED_DESCRIPTOR (decl);
}
- cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node, decl,
+ cond = fold_build2_loc (input_location, NE_EXPR, logical_type_node, decl,
fold_convert (TREE_TYPE (decl), null_pointer_node));
/* Fortran 2008 allows to pass null pointers and non-associated pointers
@@ -1699,10 +1699,10 @@ gfc_conv_expr_present (gfc_symbol * sym)
if (tmp != NULL_TREE)
{
- tmp = fold_build2_loc (input_location, NE_EXPR, boolean_type_node, tmp,
+ tmp = fold_build2_loc (input_location, NE_EXPR, logical_type_node, tmp,
fold_convert (TREE_TYPE (tmp), null_pointer_node));
cond = fold_build2_loc (input_location, TRUTH_ANDIF_EXPR,
- boolean_type_node, cond, tmp);
+ logical_type_node, cond, tmp);
}
}
@@ -2264,15 +2264,15 @@ gfc_conv_substring (gfc_se * se, gfc_ref * ref, int kind,
if (gfc_option.rtcheck & GFC_RTCHECK_BOUNDS)
{
tree nonempty = fold_build2_loc (input_location, LE_EXPR,
- boolean_type_node, start.expr,
+ logical_type_node, start.expr,
end.expr);
/* Check lower bound. */
- fault = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
+ fault = fold_build2_loc (input_location, LT_EXPR, logical_type_node,
start.expr,
build_int_cst (gfc_charlen_type_node, 1));
fault = fold_build2_loc (input_location, TRUTH_ANDIF_EXPR,
- boolean_type_node, nonempty, fault);
+ logical_type_node, nonempty, fault);
if (name)
msg = xasprintf ("Substring out of bounds: lower bound (%%ld) of '%s' "
"is less than one", name);
@@ -2285,10 +2285,10 @@ gfc_conv_substring (gfc_se * se, gfc_ref * ref, int kind,
free (msg);
/* Check upper bound. */
- fault = fold_build2_loc (input_location, GT_EXPR, boolean_type_node,
+ fault = fold_build2_loc (input_location, GT_EXPR, logical_type_node,
end.expr, se->string_length);
fault = fold_build2_loc (input_location, TRUTH_ANDIF_EXPR,
- boolean_type_node, nonempty, fault);
+ logical_type_node, nonempty, fault);
if (name)
msg = xasprintf ("Substring out of bounds: upper bound (%%ld) of '%s' "
"exceeds string length (%%ld)", name);
@@ -2890,9 +2890,9 @@ gfc_conv_cst_int_power (gfc_se * se, tree lhs, tree rhs)
/* If rhs < 0 and lhs is an integer, the result is -1, 0 or 1. */
if ((sgn == -1) && (TREE_CODE (type) == INTEGER_TYPE))
{
- tmp = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
+ tmp = fold_build2_loc (input_location, EQ_EXPR, logical_type_node,
lhs, build_int_cst (TREE_TYPE (lhs), -1));
- cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
+ cond = fold_build2_loc (input_location, EQ_EXPR, logical_type_node,
lhs, build_int_cst (TREE_TYPE (lhs), 1));
/* If rhs is even,
@@ -2900,7 +2900,7 @@ gfc_conv_cst_int_power (gfc_se * se, tree lhs, tree rhs)
if ((n & 1) == 0)
{
tmp = fold_build2_loc (input_location, TRUTH_OR_EXPR,
- boolean_type_node, tmp, cond);
+ logical_type_node, tmp, cond);
se->expr = fold_build3_loc (input_location, COND_EXPR, type,
tmp, build_int_cst (type, 1),
build_int_cst (type, 0));
@@ -3386,8 +3386,8 @@ gfc_conv_expr_op (gfc_se * se, gfc_expr * expr)
if (lop)
{
- /* The result of logical ops is always boolean_type_node. */
- tmp = fold_build2_loc (input_location, code, boolean_type_node,
+ /* The result of logical ops is always logical_type_node. */
+ tmp = fold_build2_loc (input_location, code, logical_type_node,
lse.expr, rse.expr);
se->expr = convert (type, tmp);
}
@@ -4178,9 +4178,7 @@ gfc_map_intrinsic_function (gfc_expr *expr, gfc_interface_mapping *mapping)
if (arg2 && arg2->expr_type == EXPR_CONSTANT)
d = mpz_get_si (arg2->value.integer) - 1;
else
- /* TODO: If the need arises, this could produce an array of
- ubound/lbounds. */
- gcc_unreachable ();
+ return false;
if (expr->value.function.isym->id == GFC_ISYM_LBOUND)
{
@@ -4987,7 +4985,7 @@ gfc_conv_procedure_call (gfc_se * se, gfc_symbol * sym,
tree descriptor_data;
descriptor_data = ss->info->data.array.data;
- tmp = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
+ tmp = fold_build2_loc (input_location, EQ_EXPR, logical_type_node,
descriptor_data,
fold_convert (TREE_TYPE (descriptor_data),
null_pointer_node));
@@ -5151,7 +5149,7 @@ gfc_conv_procedure_call (gfc_se * se, gfc_symbol * sym,
tree cond;
tmp = gfc_build_addr_expr (NULL_TREE, parmse.expr);
cond = fold_build2_loc (input_location, NE_EXPR,
- boolean_type_node, tmp,
+ logical_type_node, tmp,
fold_convert (TREE_TYPE (tmp),
null_pointer_node));
gfc_start_block (&block);
@@ -5683,16 +5681,16 @@ gfc_conv_procedure_call (gfc_se * se, gfc_symbol * sym,
present = gfc_conv_expr_present (e->symtree->n.sym);
type = TREE_TYPE (present);
present = fold_build2_loc (input_location, EQ_EXPR,
- boolean_type_node, present,
+ logical_type_node, present,
fold_convert (type,
null_pointer_node));
type = TREE_TYPE (parmse.expr);
null_ptr = fold_build2_loc (input_location, EQ_EXPR,
- boolean_type_node, parmse.expr,
+ logical_type_node, parmse.expr,
fold_convert (type,
null_pointer_node));
cond = fold_build2_loc (input_location, TRUTH_ORIF_EXPR,
- boolean_type_node, present, null_ptr);
+ logical_type_node, present, null_ptr);
}
else
{
@@ -5719,7 +5717,7 @@ gfc_conv_procedure_call (gfc_se * se, gfc_symbol * sym,
tmp = gfc_build_addr_expr (NULL_TREE, tmp);
cond = fold_build2_loc (input_location, EQ_EXPR,
- boolean_type_node, tmp,
+ logical_type_node, tmp,
fold_convert (TREE_TYPE (tmp),
null_pointer_node));
}
@@ -6215,7 +6213,7 @@ gfc_conv_procedure_call (gfc_se * se, gfc_symbol * sym,
happen in a function returning a pointer. */
tmp = gfc_conv_descriptor_data_get (info->descriptor);
tmp = fold_build2_loc (input_location, NE_EXPR,
- boolean_type_node,
+ logical_type_node,
tmp, info->data);
gfc_trans_runtime_check (true, false, tmp, &se->pre, NULL,
gfc_msg_fault);
@@ -6341,7 +6339,7 @@ gfc_conv_procedure_call (gfc_se * se, gfc_symbol * sym,
final_fndecl = gfc_class_vtab_final_get (se->expr);
is_final = fold_build2_loc (input_location, NE_EXPR,
- boolean_type_node,
+ logical_type_node,
final_fndecl,
fold_convert (TREE_TYPE (final_fndecl),
null_pointer_node));
@@ -6415,7 +6413,7 @@ fill_with_spaces (tree start, tree type, tree size)
gfc_init_block (&loop);
/* Exit condition. */
- cond = fold_build2_loc (input_location, LE_EXPR, boolean_type_node, i,
+ cond = fold_build2_loc (input_location, LE_EXPR, logical_type_node, i,
build_zero_cst (sizetype));
tmp = build1_v (GOTO_EXPR, exit_label);
tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node, cond, tmp,
@@ -6508,7 +6506,7 @@ gfc_trans_string_copy (stmtblock_t * block, tree dlength, tree dest,
*/
/* Do nothing if the destination length is zero. */
- cond = fold_build2_loc (input_location, GT_EXPR, boolean_type_node, dlen,
+ cond = fold_build2_loc (input_location, GT_EXPR, logical_type_node, dlen,
build_int_cst (size_type_node, 0));
/* For non-default character kinds, we have to multiply the string
@@ -6544,7 +6542,7 @@ gfc_trans_string_copy (stmtblock_t * block, tree dlength, tree dest,
gfc_add_expr_to_block (&tmpblock2, tmp2);
/* If the destination is longer, fill the end with spaces. */
- cond2 = fold_build2_loc (input_location, LT_EXPR, boolean_type_node, slen,
+ cond2 = fold_build2_loc (input_location, LT_EXPR, logical_type_node, slen,
dlen);
/* Wstringop-overflow appears at -O3 even though this warning is not
@@ -7129,7 +7127,7 @@ gfc_trans_alloc_subarray_assign (tree dest, gfc_component * cm,
null_pointer_node);
null_expr = gfc_finish_block (&block);
tmp = gfc_conv_descriptor_data_get (arg->symtree->n.sym->backend_decl);
- tmp = build2_loc (input_location, EQ_EXPR, boolean_type_node, tmp,
+ tmp = build2_loc (input_location, EQ_EXPR, logical_type_node, tmp,
fold_convert (TREE_TYPE (tmp), null_pointer_node));
return build3_v (COND_EXPR, tmp,
null_expr, non_null_expr);
@@ -8686,7 +8684,7 @@ gfc_trans_pointer_assignment (gfc_expr * expr1, gfc_expr * expr2)
lsize = gfc_evaluate_now (lsize, &block);
rsize = gfc_evaluate_now (rsize, &block);
- fault = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
+ fault = fold_build2_loc (input_location, LT_EXPR, logical_type_node,
rsize, lsize);
msg = _("Target of rank remapping is too small (%ld < %ld)");
@@ -8805,7 +8803,7 @@ gfc_trans_scalar_assign (gfc_se * lse, gfc_se * rse, gfc_typespec ts,
/* Are the rhs and the lhs the same? */
if (deep_copy)
{
- cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
+ cond = fold_build2_loc (input_location, EQ_EXPR, logical_type_node,
gfc_build_addr_expr (NULL_TREE, lse->expr),
gfc_build_addr_expr (NULL_TREE, rse->expr));
cond = gfc_evaluate_now (cond, &lse->pre);
@@ -9080,7 +9078,7 @@ fcncall_realloc_result (gfc_se *se, int rank)
the lhs descriptor. */
tmp = gfc_conv_descriptor_data_get (desc);
zero_cond = fold_build2_loc (input_location, EQ_EXPR,
- boolean_type_node, tmp,
+ logical_type_node, tmp,
build_int_cst (TREE_TYPE (tmp), 0));
zero_cond = gfc_evaluate_now (zero_cond, &se->post);
tmp = gfc_call_free (tmp);
@@ -9104,11 +9102,11 @@ fcncall_realloc_result (gfc_se *se, int rank)
tmp = fold_build2_loc (input_location, PLUS_EXPR,
gfc_array_index_type, tmp, tmp1);
tmp = fold_build2_loc (input_location, NE_EXPR,
- boolean_type_node, tmp,
+ logical_type_node, tmp,
gfc_index_zero_node);
tmp = gfc_evaluate_now (tmp, &se->post);
zero_cond = fold_build2_loc (input_location, TRUTH_OR_EXPR,
- boolean_type_node, tmp,
+ logical_type_node, tmp,
zero_cond);
}
@@ -9547,7 +9545,7 @@ alloc_scalar_allocatable_for_assignment (stmtblock_t *block,
/* Do the allocation if the lhs is NULL. Otherwise go to label 1. */
tmp = build_int_cst (TREE_TYPE (lse.expr), 0);
- cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ cond = fold_build2_loc (input_location, NE_EXPR, logical_type_node,
lse.expr, tmp);
tmp = build3_v (COND_EXPR, cond,
build1_v (GOTO_EXPR, jump_label1),
@@ -9625,7 +9623,7 @@ alloc_scalar_allocatable_for_assignment (stmtblock_t *block,
rhs are different. */
if (expr1->ts.type == BT_CHARACTER && expr1->ts.deferred)
{
- cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
+ cond = fold_build2_loc (input_location, EQ_EXPR, logical_type_node,
lse.string_length, size);
/* Jump past the realloc if the lengths are the same. */
tmp = build3_v (COND_EXPR, cond,
@@ -9771,7 +9769,7 @@ trans_class_assignment (stmtblock_t *block, gfc_expr *lhs, gfc_expr *rhs,
gfc_init_block (&alloc);
gfc_allocate_using_malloc (&alloc, class_han, tmp, NULL_TREE);
tmp = fold_build2_loc (input_location, EQ_EXPR,
- boolean_type_node, class_han,
+ logical_type_node, class_han,
build_int_cst (prvoid_type_node, 0));
tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node,
gfc_unlikely (tmp,
@@ -9824,7 +9822,7 @@ trans_class_assignment (stmtblock_t *block, gfc_expr *lhs, gfc_expr *rhs,
extcopy = build_call_vec (TREE_TYPE (TREE_TYPE (fcn)), fcn, args);
tmp = fold_build2_loc (input_location, GT_EXPR,
- boolean_type_node, from_len,
+ logical_type_node, from_len,
integer_zero_node);
return fold_build3_loc (input_location, COND_EXPR,
void_type_node, tmp,
@@ -10053,7 +10051,7 @@ gfc_trans_assignment_1 (gfc_expr * expr1, gfc_expr * expr2, bool init_flag,
if (TREE_CODE (lse.expr) == ARRAY_REF)
tmp = gfc_build_addr_expr (NULL_TREE, tmp);
- cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
+ cond = fold_build2_loc (input_location, EQ_EXPR, logical_type_node,
tmp, build_int_cst (TREE_TYPE (tmp), 0));
msg = _("Assignment of scalar to unallocated array");
gfc_trans_runtime_check (true, false, cond, &loop.pre,
@@ -10084,12 +10082,16 @@ gfc_trans_assignment_1 (gfc_expr * expr1, gfc_expr * expr2, bool init_flag,
NOTE: This relies on having the exact dependence of the length type
parameter available to the caller; gfortran saves it in the .mod files.
NOTE ALSO: The concatenation operation generates a temporary pointer,
- whose allocation must go to the innermost loop. */
+ whose allocation must go to the innermost loop.
+ NOTE ALSO (2): A character conversion may generate a temporary, too. */
if (flag_realloc_lhs
&& expr2->ts.type == BT_CHARACTER && expr1->ts.deferred
&& !(lss != gfc_ss_terminator
- && expr2->expr_type == EXPR_OP
- && expr2->value.op.op == INTRINSIC_CONCAT))
+ && ((expr2->expr_type == EXPR_OP
+ && expr2->value.op.op == INTRINSIC_CONCAT)
+ || (expr2->expr_type == EXPR_FUNCTION
+ && expr2->value.function.isym != NULL
+ && expr2->value.function.isym->id == GFC_ISYM_CONVERSION))))
gfc_add_block_to_block (&block, &rse.pre);
/* Nullify the allocatable components corresponding to those of the lhs
diff --git a/gcc/fortran/trans-intrinsic.c b/gcc/fortran/trans-intrinsic.c
index 532d3ab237d..ed4496c845d 100644
--- a/gcc/fortran/trans-intrinsic.c
+++ b/gcc/fortran/trans-intrinsic.c
@@ -358,7 +358,7 @@ build_fixbound_expr (stmtblock_t * pblock, tree arg, tree type, int up)
tmp = convert (argtype, intval);
cond = fold_build2_loc (input_location, up ? GE_EXPR : LE_EXPR,
- boolean_type_node, tmp, arg);
+ logical_type_node, tmp, arg);
tmp = fold_build2_loc (input_location, up ? PLUS_EXPR : MINUS_EXPR, type,
intval, build_int_cst (type, 1));
@@ -490,14 +490,14 @@ gfc_conv_intrinsic_aint (gfc_se * se, gfc_expr * expr, enum rounding_mode op)
n = gfc_validate_kind (BT_INTEGER, kind, false);
mpfr_set_z (huge, gfc_integer_kinds[n].huge, GFC_RND_MODE);
tmp = gfc_conv_mpfr_to_tree (huge, kind, 0);
- cond = fold_build2_loc (input_location, LT_EXPR, boolean_type_node, arg[0],
+ cond = fold_build2_loc (input_location, LT_EXPR, logical_type_node, arg[0],
tmp);
mpfr_neg (huge, huge, GFC_RND_MODE);
tmp = gfc_conv_mpfr_to_tree (huge, kind, 0);
- tmp = fold_build2_loc (input_location, GT_EXPR, boolean_type_node, arg[0],
+ tmp = fold_build2_loc (input_location, GT_EXPR, logical_type_node, arg[0],
tmp);
- cond = fold_build2_loc (input_location, TRUTH_AND_EXPR, boolean_type_node,
+ cond = fold_build2_loc (input_location, TRUTH_AND_EXPR, logical_type_node,
cond, tmp);
itype = gfc_get_int_type (kind);
@@ -885,7 +885,7 @@ gfc_trans_same_strlen_check (const char* intr_name, locus* where,
return;
/* Compare the two string lengths. */
- cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node, a, b);
+ cond = fold_build2_loc (input_location, NE_EXPR, logical_type_node, a, b);
/* Output the runtime-check. */
name = gfc_build_cstring_const (intr_name);
@@ -1871,12 +1871,21 @@ conv_caf_send (gfc_code *code) {
gfc_init_se (&lhs_se, NULL);
if (lhs_expr->rank == 0)
{
- symbol_attribute attr;
- gfc_clear_attr (&attr);
- gfc_conv_expr (&lhs_se, lhs_expr);
- lhs_type = TREE_TYPE (lhs_se.expr);
- lhs_se.expr = gfc_conv_scalar_to_descriptor (&lhs_se, lhs_se.expr, attr);
- lhs_se.expr = gfc_build_addr_expr (NULL_TREE, lhs_se.expr);
+ if (lhs_expr->ts.type == BT_CHARACTER && lhs_expr->ts.deferred)
+ {
+ lhs_se.expr = gfc_get_tree_for_caf_expr (lhs_expr);
+ lhs_se.expr = gfc_build_addr_expr (NULL_TREE, lhs_se.expr);
+ }
+ else
+ {
+ symbol_attribute attr;
+ gfc_clear_attr (&attr);
+ gfc_conv_expr (&lhs_se, lhs_expr);
+ lhs_type = TREE_TYPE (lhs_se.expr);
+ lhs_se.expr = gfc_conv_scalar_to_descriptor (&lhs_se, lhs_se.expr,
+ attr);
+ lhs_se.expr = gfc_build_addr_expr (NULL_TREE, lhs_se.expr);
+ }
}
else if ((lhs_caf_attr.alloc_comp || lhs_caf_attr.pointer_comp)
&& lhs_caf_attr.codimension)
@@ -1952,7 +1961,7 @@ conv_caf_send (gfc_code *code) {
TYPE_SIZE_UNIT (
gfc_typenode_for_spec (&lhs_expr->ts)),
NULL_TREE);
- tmp = fold_build2 (EQ_EXPR, boolean_type_node, scal_se.expr,
+ tmp = fold_build2 (EQ_EXPR, logical_type_node, scal_se.expr,
null_pointer_node);
tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node,
tmp, gfc_finish_block (&scal_se.pre),
@@ -2245,14 +2254,14 @@ trans_this_image (gfc_se * se, gfc_expr *expr)
else if (gfc_option.rtcheck & GFC_RTCHECK_BOUNDS)
{
dim_arg = gfc_evaluate_now (dim_arg, &se->pre);
- cond = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
+ cond = fold_build2_loc (input_location, LT_EXPR, logical_type_node,
dim_arg,
build_int_cst (TREE_TYPE (dim_arg), 1));
tmp = gfc_rank_cst[GFC_TYPE_ARRAY_CORANK (TREE_TYPE (desc))];
- tmp = fold_build2_loc (input_location, GT_EXPR, boolean_type_node,
+ tmp = fold_build2_loc (input_location, GT_EXPR, logical_type_node,
dim_arg, tmp);
cond = fold_build2_loc (input_location, TRUTH_ORIF_EXPR,
- boolean_type_node, cond, tmp);
+ logical_type_node, cond, tmp);
gfc_trans_runtime_check (true, false, cond, &se->pre, &expr->where,
gfc_msg_fault);
}
@@ -2343,7 +2352,7 @@ trans_this_image (gfc_se * se, gfc_expr *expr)
m, extent));
/* Exit condition: if (i >= min_var) goto exit_label. */
- cond = fold_build2_loc (input_location, GE_EXPR, boolean_type_node, loop_var,
+ cond = fold_build2_loc (input_location, GE_EXPR, logical_type_node, loop_var,
min_var);
tmp = build1_v (GOTO_EXPR, exit_label);
tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node, cond, tmp,
@@ -2368,7 +2377,7 @@ trans_this_image (gfc_se * se, gfc_expr *expr)
/* sub(co_dim) = (co_dim < corank) ? ml - m*extent + lcobound(dim_arg)
: m + lcobound(corank) */
- cond = fold_build2_loc (input_location, LT_EXPR, boolean_type_node, dim_arg,
+ cond = fold_build2_loc (input_location, LT_EXPR, logical_type_node, dim_arg,
build_int_cst (TREE_TYPE (dim_arg), corank));
lbound = gfc_conv_descriptor_lbound_get (desc,
@@ -2406,7 +2415,7 @@ conv_intrinsic_image_status (gfc_se *se, gfc_expr *expr)
{
tree arg;
arg = gfc_evaluate_now (args[0], &se->pre);
- tmp = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
+ tmp = fold_build2_loc (input_location, EQ_EXPR, logical_type_node,
fold_convert (integer_type_node, arg),
integer_one_node);
tmp = fold_build3_loc (input_location, COND_EXPR, integer_type_node,
@@ -2457,7 +2466,7 @@ trans_image_index (gfc_se * se, gfc_expr *expr)
lbound = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[rank+corank-1]);
tmp = gfc_build_array_ref (subdesc, gfc_rank_cst[corank-1], NULL);
- invalid_bound = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
+ invalid_bound = fold_build2_loc (input_location, LT_EXPR, logical_type_node,
fold_convert (gfc_array_index_type, tmp),
lbound);
@@ -2466,16 +2475,16 @@ trans_image_index (gfc_se * se, gfc_expr *expr)
lbound = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[codim]);
ubound = gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[codim]);
tmp = gfc_build_array_ref (subdesc, gfc_rank_cst[codim-rank], NULL);
- cond = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
+ cond = fold_build2_loc (input_location, LT_EXPR, logical_type_node,
fold_convert (gfc_array_index_type, tmp),
lbound);
invalid_bound = fold_build2_loc (input_location, TRUTH_OR_EXPR,
- boolean_type_node, invalid_bound, cond);
- cond = fold_build2_loc (input_location, GT_EXPR, boolean_type_node,
+ logical_type_node, invalid_bound, cond);
+ cond = fold_build2_loc (input_location, GT_EXPR, logical_type_node,
fold_convert (gfc_array_index_type, tmp),
ubound);
invalid_bound = fold_build2_loc (input_location, TRUTH_OR_EXPR,
- boolean_type_node, invalid_bound, cond);
+ logical_type_node, invalid_bound, cond);
}
invalid_bound = gfc_unlikely (invalid_bound, PRED_FORTRAN_INVALID_BOUND);
@@ -2535,11 +2544,11 @@ trans_image_index (gfc_se * se, gfc_expr *expr)
tmp = gfc_create_var (type, NULL);
gfc_add_modify (&se->pre, tmp, coindex);
- cond = fold_build2_loc (input_location, GT_EXPR, boolean_type_node, tmp,
+ cond = fold_build2_loc (input_location, GT_EXPR, logical_type_node, tmp,
num_images);
- cond = fold_build2_loc (input_location, TRUTH_OR_EXPR, boolean_type_node,
+ cond = fold_build2_loc (input_location, TRUTH_OR_EXPR, logical_type_node,
cond,
- fold_convert (boolean_type_node, invalid_bound));
+ fold_convert (logical_type_node, invalid_bound));
se->expr = fold_build3_loc (input_location, COND_EXPR, type, cond,
build_int_cst (type, 0), tmp);
}
@@ -2671,16 +2680,16 @@ gfc_conv_intrinsic_bound (gfc_se * se, gfc_expr * expr, int upper)
if (gfc_option.rtcheck & GFC_RTCHECK_BOUNDS)
{
bound = gfc_evaluate_now (bound, &se->pre);
- cond = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
+ cond = fold_build2_loc (input_location, LT_EXPR, logical_type_node,
bound, build_int_cst (TREE_TYPE (bound), 0));
if (as && as->type == AS_ASSUMED_RANK)
tmp = gfc_conv_descriptor_rank (desc);
else
tmp = gfc_rank_cst[GFC_TYPE_ARRAY_RANK (TREE_TYPE (desc))];
- tmp = fold_build2_loc (input_location, GE_EXPR, boolean_type_node,
+ tmp = fold_build2_loc (input_location, GE_EXPR, logical_type_node,
bound, fold_convert(TREE_TYPE (bound), tmp));
cond = fold_build2_loc (input_location, TRUTH_ORIF_EXPR,
- boolean_type_node, cond, tmp);
+ logical_type_node, cond, tmp);
gfc_trans_runtime_check (true, false, cond, &se->pre, &expr->where,
gfc_msg_fault);
}
@@ -2726,27 +2735,27 @@ gfc_conv_intrinsic_bound (gfc_se * se, gfc_expr * expr, int upper)
{
tree stride = gfc_conv_descriptor_stride_get (desc, bound);
- cond1 = fold_build2_loc (input_location, GE_EXPR, boolean_type_node,
+ cond1 = fold_build2_loc (input_location, GE_EXPR, logical_type_node,
ubound, lbound);
- cond3 = fold_build2_loc (input_location, GE_EXPR, boolean_type_node,
+ cond3 = fold_build2_loc (input_location, GE_EXPR, logical_type_node,
stride, gfc_index_zero_node);
cond3 = fold_build2_loc (input_location, TRUTH_AND_EXPR,
- boolean_type_node, cond3, cond1);
- cond4 = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
+ logical_type_node, cond3, cond1);
+ cond4 = fold_build2_loc (input_location, LT_EXPR, logical_type_node,
stride, gfc_index_zero_node);
if (upper)
{
tree cond5;
cond = fold_build2_loc (input_location, TRUTH_OR_EXPR,
- boolean_type_node, cond3, cond4);
- cond5 = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
+ logical_type_node, cond3, cond4);
+ cond5 = fold_build2_loc (input_location, EQ_EXPR, logical_type_node,
gfc_index_one_node, lbound);
cond5 = fold_build2_loc (input_location, TRUTH_AND_EXPR,
- boolean_type_node, cond4, cond5);
+ logical_type_node, cond4, cond5);
cond = fold_build2_loc (input_location, TRUTH_OR_EXPR,
- boolean_type_node, cond, cond5);
+ logical_type_node, cond, cond5);
if (assumed_rank_lb_one)
{
@@ -2765,16 +2774,16 @@ gfc_conv_intrinsic_bound (gfc_se * se, gfc_expr * expr, int upper)
else
{
if (as->type == AS_ASSUMED_SIZE)
- cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
+ cond = fold_build2_loc (input_location, EQ_EXPR, logical_type_node,
bound, build_int_cst (TREE_TYPE (bound),
arg->expr->rank - 1));
else
- cond = boolean_false_node;
+ cond = logical_false_node;
cond1 = fold_build2_loc (input_location, TRUTH_OR_EXPR,
- boolean_type_node, cond3, cond4);
+ logical_type_node, cond3, cond4);
cond = fold_build2_loc (input_location, TRUTH_OR_EXPR,
- boolean_type_node, cond, cond1);
+ logical_type_node, cond, cond1);
se->expr = fold_build3_loc (input_location, COND_EXPR,
gfc_array_index_type, cond,
@@ -2865,13 +2874,13 @@ conv_intrinsic_cobound (gfc_se * se, gfc_expr * expr)
else if (gfc_option.rtcheck & GFC_RTCHECK_BOUNDS)
{
bound = gfc_evaluate_now (bound, &se->pre);
- cond = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
+ cond = fold_build2_loc (input_location, LT_EXPR, logical_type_node,
bound, build_int_cst (TREE_TYPE (bound), 1));
tmp = gfc_rank_cst[GFC_TYPE_ARRAY_CORANK (TREE_TYPE (desc))];
- tmp = fold_build2_loc (input_location, GT_EXPR, boolean_type_node,
+ tmp = fold_build2_loc (input_location, GT_EXPR, logical_type_node,
bound, tmp);
cond = fold_build2_loc (input_location, TRUTH_ORIF_EXPR,
- boolean_type_node, cond, tmp);
+ logical_type_node, cond, tmp);
gfc_trans_runtime_check (true, false, cond, &se->pre, &expr->where,
gfc_msg_fault);
}
@@ -2940,7 +2949,7 @@ conv_intrinsic_cobound (gfc_se * se, gfc_expr * expr)
if (corank > 1)
{
- cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
+ cond = fold_build2_loc (input_location, EQ_EXPR, logical_type_node,
bound,
build_int_cst (TREE_TYPE (bound),
arg->expr->rank + corank - 1));
@@ -3129,16 +3138,16 @@ gfc_conv_intrinsic_mod (gfc_se * se, gfc_expr * expr, int modulo)
tmp = gfc_evaluate_now (se->expr, &se->pre);
if (!flag_signed_zeros)
{
- test = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
+ test = fold_build2_loc (input_location, LT_EXPR, logical_type_node,
args[0], zero);
- test2 = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
+ test2 = fold_build2_loc (input_location, LT_EXPR, logical_type_node,
args[1], zero);
test2 = fold_build2_loc (input_location, TRUTH_XOR_EXPR,
- boolean_type_node, test, test2);
- test = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ logical_type_node, test, test2);
+ test = fold_build2_loc (input_location, NE_EXPR, logical_type_node,
tmp, zero);
test = fold_build2_loc (input_location, TRUTH_AND_EXPR,
- boolean_type_node, test, test2);
+ logical_type_node, test, test2);
test = gfc_evaluate_now (test, &se->pre);
se->expr = fold_build3_loc (input_location, COND_EXPR, type, test,
fold_build2_loc (input_location,
@@ -3151,18 +3160,18 @@ gfc_conv_intrinsic_mod (gfc_se * se, gfc_expr * expr, int modulo)
tree expr1, copysign, cscall;
copysign = gfc_builtin_decl_for_float_kind (BUILT_IN_COPYSIGN,
expr->ts.kind);
- test = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
+ test = fold_build2_loc (input_location, LT_EXPR, logical_type_node,
args[0], zero);
- test2 = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
+ test2 = fold_build2_loc (input_location, LT_EXPR, logical_type_node,
args[1], zero);
test2 = fold_build2_loc (input_location, TRUTH_XOR_EXPR,
- boolean_type_node, test, test2);
+ logical_type_node, test, test2);
expr1 = fold_build3_loc (input_location, COND_EXPR, type, test2,
fold_build2_loc (input_location,
PLUS_EXPR,
type, tmp, args[1]),
tmp);
- test = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ test = fold_build2_loc (input_location, NE_EXPR, logical_type_node,
tmp, zero);
cscall = build_call_expr_loc (input_location, copysign, 2, zero,
args[1]);
@@ -3218,12 +3227,12 @@ gfc_conv_intrinsic_dshift (gfc_se * se, gfc_expr * expr, bool dshiftl)
res = fold_build2_loc (input_location, BIT_IOR_EXPR, type, left, right);
/* Special cases. */
- cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node, shift,
+ cond = fold_build2_loc (input_location, EQ_EXPR, logical_type_node, shift,
build_int_cst (stype, 0));
res = fold_build3_loc (input_location, COND_EXPR, type, cond,
dshiftl ? arg1 : arg2, res);
- cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node, shift,
+ cond = fold_build2_loc (input_location, EQ_EXPR, logical_type_node, shift,
build_int_cst (stype, bitsize));
res = fold_build3_loc (input_location, COND_EXPR, type, cond,
dshiftl ? arg2 : arg1, res);
@@ -3250,7 +3259,7 @@ gfc_conv_intrinsic_dim (gfc_se * se, gfc_expr * expr)
val = gfc_evaluate_now (val, &se->pre);
zero = gfc_build_const (type, integer_zero_node);
- tmp = fold_build2_loc (input_location, LE_EXPR, boolean_type_node, val, zero);
+ tmp = fold_build2_loc (input_location, LE_EXPR, logical_type_node, val, zero);
se->expr = fold_build3_loc (input_location, COND_EXPR, type, tmp, zero, val);
}
@@ -3283,7 +3292,7 @@ gfc_conv_intrinsic_sign (gfc_se * se, gfc_expr * expr)
{
tree cond, zero;
zero = build_real_from_int_cst (TREE_TYPE (args[1]), integer_zero_node);
- cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
+ cond = fold_build2_loc (input_location, EQ_EXPR, logical_type_node,
args[1], zero);
se->expr = fold_build3_loc (input_location, COND_EXPR,
TREE_TYPE (args[0]), cond,
@@ -3404,7 +3413,7 @@ gfc_conv_intrinsic_ctime (gfc_se * se, gfc_expr * expr)
gfc_add_expr_to_block (&se->pre, tmp);
/* Free the temporary afterwards, if necessary. */
- cond = fold_build2_loc (input_location, GT_EXPR, boolean_type_node,
+ cond = fold_build2_loc (input_location, GT_EXPR, logical_type_node,
len, build_int_cst (TREE_TYPE (len), 0));
tmp = gfc_call_free (var);
tmp = build3_v (COND_EXPR, cond, tmp, build_empty_stmt (input_location));
@@ -3443,7 +3452,7 @@ gfc_conv_intrinsic_fdate (gfc_se * se, gfc_expr * expr)
gfc_add_expr_to_block (&se->pre, tmp);
/* Free the temporary afterwards, if necessary. */
- cond = fold_build2_loc (input_location, GT_EXPR, boolean_type_node,
+ cond = fold_build2_loc (input_location, GT_EXPR, logical_type_node,
len, build_int_cst (TREE_TYPE (len), 0));
tmp = gfc_call_free (var);
tmp = build3_v (COND_EXPR, cond, tmp, build_empty_stmt (input_location));
@@ -3653,7 +3662,7 @@ gfc_conv_intrinsic_ttynam (gfc_se * se, gfc_expr * expr)
gfc_add_expr_to_block (&se->pre, tmp);
/* Free the temporary afterwards, if necessary. */
- cond = fold_build2_loc (input_location, GT_EXPR, boolean_type_node,
+ cond = fold_build2_loc (input_location, GT_EXPR, logical_type_node,
len, build_int_cst (TREE_TYPE (len), 0));
tmp = gfc_call_free (var);
tmp = build3_v (COND_EXPR, cond, tmp, build_empty_stmt (input_location));
@@ -3717,7 +3726,7 @@ gfc_conv_intrinsic_minmax (gfc_se * se, gfc_expr * expr, enum tree_code op)
&& argexpr->expr->symtree->n.sym->attr.optional
&& TREE_CODE (val) == INDIRECT_REF)
cond = fold_build2_loc (input_location,
- NE_EXPR, boolean_type_node,
+ NE_EXPR, logical_type_node,
TREE_OPERAND (val, 0),
build_int_cst (TREE_TYPE (TREE_OPERAND (val, 0)), 0));
else
@@ -3731,7 +3740,7 @@ gfc_conv_intrinsic_minmax (gfc_se * se, gfc_expr * expr, enum tree_code op)
thencase = build2_v (MODIFY_EXPR, mvar, convert (type, val));
- tmp = fold_build2_loc (input_location, op, boolean_type_node,
+ tmp = fold_build2_loc (input_location, op, logical_type_node,
convert (type, val), mvar);
/* FIXME: When the IEEE_ARITHMETIC module is implemented, the call to
@@ -3743,8 +3752,8 @@ gfc_conv_intrinsic_minmax (gfc_se * se, gfc_expr * expr, enum tree_code op)
builtin_decl_explicit (BUILT_IN_ISNAN),
1, mvar);
tmp = fold_build2_loc (input_location, TRUTH_OR_EXPR,
- boolean_type_node, tmp,
- fold_convert (boolean_type_node, isnan));
+ logical_type_node, tmp,
+ fold_convert (logical_type_node, isnan));
}
tmp = build3_v (COND_EXPR, tmp, thencase,
build_empty_stmt (input_location));
@@ -3796,7 +3805,7 @@ gfc_conv_intrinsic_minmax_char (gfc_se * se, gfc_expr * expr, int op)
gfc_add_expr_to_block (&se->pre, tmp);
/* Free the temporary afterwards, if necessary. */
- cond = fold_build2_loc (input_location, GT_EXPR, boolean_type_node,
+ cond = fold_build2_loc (input_location, GT_EXPR, logical_type_node,
len, build_int_cst (TREE_TYPE (len), 0));
tmp = gfc_call_free (var);
tmp = build3_v (COND_EXPR, cond, tmp, build_empty_stmt (input_location));
@@ -3996,7 +4005,7 @@ gfc_conv_intrinsic_anyall (gfc_se * se, gfc_expr * expr, enum tree_code op)
gfc_conv_expr_val (&arrayse, actual->expr);
gfc_add_block_to_block (&body, &arrayse.pre);
- tmp = fold_build2_loc (input_location, op, boolean_type_node, arrayse.expr,
+ tmp = fold_build2_loc (input_location, op, logical_type_node, arrayse.expr,
build_int_cst (TREE_TYPE (arrayse.expr), 0));
tmp = build3_v (COND_EXPR, tmp, found, build_empty_stmt (input_location));
gfc_add_expr_to_block (&body, tmp);
@@ -4275,13 +4284,13 @@ gfc_conv_intrinsic_arith (gfc_se * se, gfc_expr * expr, enum tree_code op,
gfc_add_modify (&ifblock3, resvar, res2);
res2 = gfc_finish_block (&ifblock3);
- cond = fold_build2_loc (input_location, GT_EXPR, boolean_type_node,
+ cond = fold_build2_loc (input_location, GT_EXPR, logical_type_node,
absX, scale);
tmp = build3_v (COND_EXPR, cond, res1, res2);
gfc_add_expr_to_block (&ifblock1, tmp);
tmp = gfc_finish_block (&ifblock1);
- cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ cond = fold_build2_loc (input_location, NE_EXPR, logical_type_node,
arrayse.expr,
gfc_build_const (type, integer_zero_node));
@@ -4587,7 +4596,7 @@ gfc_conv_intrinsic_minmaxloc (gfc_se * se, gfc_expr * expr, enum tree_code op)
nonempty = gfc_conv_mpz_to_tree (asize, gfc_index_integer_kind);
mpz_clear (asize);
nonempty = fold_build2_loc (input_location, GT_EXPR,
- boolean_type_node, nonempty,
+ logical_type_node, nonempty,
gfc_index_zero_node);
}
maskss = NULL;
@@ -4651,7 +4660,7 @@ gfc_conv_intrinsic_minmaxloc (gfc_se * se, gfc_expr * expr, enum tree_code op)
gcc_assert (loop.dimen == 1);
if (nonempty == NULL && maskss == NULL && loop.from[0] && loop.to[0])
- nonempty = fold_build2_loc (input_location, LE_EXPR, boolean_type_node,
+ nonempty = fold_build2_loc (input_location, LE_EXPR, logical_type_node,
loop.from[0], loop.to[0]);
lab1 = NULL;
@@ -4727,7 +4736,7 @@ gfc_conv_intrinsic_minmaxloc (gfc_se * se, gfc_expr * expr, enum tree_code op)
loop.loopvar[0], offset);
gfc_add_modify (&ifblock2, pos, tmp);
ifbody2 = gfc_finish_block (&ifblock2);
- cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node, pos,
+ cond = fold_build2_loc (input_location, EQ_EXPR, logical_type_node, pos,
gfc_index_zero_node);
tmp = build3_v (COND_EXPR, cond, ifbody2,
build_empty_stmt (input_location));
@@ -4748,9 +4757,9 @@ gfc_conv_intrinsic_minmaxloc (gfc_se * se, gfc_expr * expr, enum tree_code op)
if (lab1)
cond = fold_build2_loc (input_location,
op == GT_EXPR ? GE_EXPR : LE_EXPR,
- boolean_type_node, arrayse.expr, limit);
+ logical_type_node, arrayse.expr, limit);
else
- cond = fold_build2_loc (input_location, op, boolean_type_node,
+ cond = fold_build2_loc (input_location, op, logical_type_node,
arrayse.expr, limit);
ifbody = build3_v (COND_EXPR, cond, ifbody,
@@ -4821,7 +4830,7 @@ gfc_conv_intrinsic_minmaxloc (gfc_se * se, gfc_expr * expr, enum tree_code op)
ifbody = gfc_finish_block (&ifblock);
- cond = fold_build2_loc (input_location, op, boolean_type_node,
+ cond = fold_build2_loc (input_location, op, logical_type_node,
arrayse.expr, limit);
tmp = build3_v (COND_EXPR, cond, ifbody,
@@ -5073,7 +5082,7 @@ gfc_conv_intrinsic_minmaxval (gfc_se * se, gfc_expr * expr, enum tree_code op)
nonempty = gfc_conv_mpz_to_tree (asize, gfc_index_integer_kind);
mpz_clear (asize);
nonempty = fold_build2_loc (input_location, GT_EXPR,
- boolean_type_node, nonempty,
+ logical_type_node, nonempty,
gfc_index_zero_node);
}
maskss = NULL;
@@ -5107,15 +5116,15 @@ gfc_conv_intrinsic_minmaxval (gfc_se * se, gfc_expr * expr, enum tree_code op)
if (nonempty == NULL && maskss == NULL
&& loop.dimen == 1 && loop.from[0] && loop.to[0])
- nonempty = fold_build2_loc (input_location, LE_EXPR, boolean_type_node,
+ nonempty = fold_build2_loc (input_location, LE_EXPR, logical_type_node,
loop.from[0], loop.to[0]);
nonempty_var = NULL;
if (nonempty == NULL
&& (HONOR_INFINITIES (DECL_MODE (limit))
|| HONOR_NANS (DECL_MODE (limit))))
{
- nonempty_var = gfc_create_var (boolean_type_node, "nonempty");
- gfc_add_modify (&se->pre, nonempty_var, boolean_false_node);
+ nonempty_var = gfc_create_var (logical_type_node, "nonempty");
+ gfc_add_modify (&se->pre, nonempty_var, logical_false_node);
nonempty = nonempty_var;
}
lab = NULL;
@@ -5129,8 +5138,8 @@ gfc_conv_intrinsic_minmaxval (gfc_se * se, gfc_expr * expr, enum tree_code op)
}
else
{
- fast = gfc_create_var (boolean_type_node, "fast");
- gfc_add_modify (&se->pre, fast, boolean_false_node);
+ fast = gfc_create_var (logical_type_node, "fast");
+ gfc_add_modify (&se->pre, fast, logical_false_node);
}
}
@@ -5164,12 +5173,12 @@ gfc_conv_intrinsic_minmaxval (gfc_se * se, gfc_expr * expr, enum tree_code op)
gfc_init_block (&block2);
if (nonempty_var)
- gfc_add_modify (&block2, nonempty_var, boolean_true_node);
+ gfc_add_modify (&block2, nonempty_var, logical_true_node);
if (HONOR_NANS (DECL_MODE (limit)))
{
tmp = fold_build2_loc (input_location, op == GT_EXPR ? GE_EXPR : LE_EXPR,
- boolean_type_node, arrayse.expr, limit);
+ logical_type_node, arrayse.expr, limit);
if (lab)
ifbody = build1_v (GOTO_EXPR, lab);
else
@@ -5178,7 +5187,7 @@ gfc_conv_intrinsic_minmaxval (gfc_se * se, gfc_expr * expr, enum tree_code op)
gfc_init_block (&ifblock);
gfc_add_modify (&ifblock, limit, arrayse.expr);
- gfc_add_modify (&ifblock, fast, boolean_true_node);
+ gfc_add_modify (&ifblock, fast, logical_true_node);
ifbody = gfc_finish_block (&ifblock);
}
tmp = build3_v (COND_EXPR, tmp, ifbody,
@@ -5191,7 +5200,7 @@ gfc_conv_intrinsic_minmaxval (gfc_se * se, gfc_expr * expr, enum tree_code op)
signed zeros. */
if (HONOR_SIGNED_ZEROS (DECL_MODE (limit)))
{
- tmp = fold_build2_loc (input_location, op, boolean_type_node,
+ tmp = fold_build2_loc (input_location, op, logical_type_node,
arrayse.expr, limit);
ifbody = build2_v (MODIFY_EXPR, limit, arrayse.expr);
tmp = build3_v (COND_EXPR, tmp, ifbody,
@@ -5216,7 +5225,7 @@ gfc_conv_intrinsic_minmaxval (gfc_se * se, gfc_expr * expr, enum tree_code op)
if (HONOR_NANS (DECL_MODE (limit))
|| HONOR_SIGNED_ZEROS (DECL_MODE (limit)))
{
- tmp = fold_build2_loc (input_location, op, boolean_type_node,
+ tmp = fold_build2_loc (input_location, op, logical_type_node,
arrayse.expr, limit);
ifbody = build2_v (MODIFY_EXPR, limit, arrayse.expr);
ifbody = build3_v (COND_EXPR, tmp, ifbody,
@@ -5279,7 +5288,7 @@ gfc_conv_intrinsic_minmaxval (gfc_se * se, gfc_expr * expr, enum tree_code op)
if (HONOR_NANS (DECL_MODE (limit))
|| HONOR_SIGNED_ZEROS (DECL_MODE (limit)))
{
- tmp = fold_build2_loc (input_location, op, boolean_type_node,
+ tmp = fold_build2_loc (input_location, op, logical_type_node,
arrayse.expr, limit);
ifbody = build2_v (MODIFY_EXPR, limit, arrayse.expr);
tmp = build3_v (COND_EXPR, tmp, ifbody,
@@ -5369,7 +5378,7 @@ gfc_conv_intrinsic_btest (gfc_se * se, gfc_expr * expr)
tmp = fold_build2_loc (input_location, LSHIFT_EXPR, type,
build_int_cst (type, 1), args[1]);
tmp = fold_build2_loc (input_location, BIT_AND_EXPR, type, args[0], tmp);
- tmp = fold_build2_loc (input_location, NE_EXPR, boolean_type_node, tmp,
+ tmp = fold_build2_loc (input_location, NE_EXPR, logical_type_node, tmp,
build_int_cst (type, 0));
type = gfc_typenode_for_spec (&expr->ts);
se->expr = convert (type, tmp);
@@ -5397,7 +5406,7 @@ gfc_conv_intrinsic_bitcomp (gfc_se * se, gfc_expr * expr, enum tree_code op)
args[0] = fold_convert (TREE_TYPE (args[1]), args[0]);
/* Now, we compare them. */
- se->expr = fold_build2_loc (input_location, op, boolean_type_node,
+ se->expr = fold_build2_loc (input_location, op, logical_type_node,
args[0], args[1]);
}
@@ -5498,7 +5507,7 @@ gfc_conv_intrinsic_shift (gfc_se * se, gfc_expr * expr, bool right_shift,
gcc requires a shift width < BIT_SIZE(I), so we have to catch this
special case. */
num_bits = build_int_cst (TREE_TYPE (args[1]), TYPE_PRECISION (type));
- cond = fold_build2_loc (input_location, GE_EXPR, boolean_type_node,
+ cond = fold_build2_loc (input_location, GE_EXPR, logical_type_node,
args[1], num_bits);
se->expr = fold_build3_loc (input_location, COND_EXPR, type, cond,
@@ -5544,7 +5553,7 @@ gfc_conv_intrinsic_ishft (gfc_se * se, gfc_expr * expr)
rshift = fold_convert (type, fold_build2_loc (input_location, RSHIFT_EXPR,
utype, convert (utype, args[0]), width));
- tmp = fold_build2_loc (input_location, GE_EXPR, boolean_type_node, args[1],
+ tmp = fold_build2_loc (input_location, GE_EXPR, logical_type_node, args[1],
build_int_cst (TREE_TYPE (args[1]), 0));
tmp = fold_build3_loc (input_location, COND_EXPR, type, tmp, lshift, rshift);
@@ -5552,7 +5561,7 @@ gfc_conv_intrinsic_ishft (gfc_se * se, gfc_expr * expr)
gcc requires a shift width < BIT_SIZE(I), so we have to catch this
special case. */
num_bits = build_int_cst (TREE_TYPE (args[1]), TYPE_PRECISION (type));
- cond = fold_build2_loc (input_location, GE_EXPR, boolean_type_node, width,
+ cond = fold_build2_loc (input_location, GE_EXPR, logical_type_node, width,
num_bits);
se->expr = fold_build3_loc (input_location, COND_EXPR, type, cond,
build_int_cst (type, 0), tmp);
@@ -5636,12 +5645,12 @@ gfc_conv_intrinsic_ishftc (gfc_se * se, gfc_expr * expr)
rrot = fold_build2_loc (input_location,RROTATE_EXPR, type, args[0], tmp);
zero = build_int_cst (TREE_TYPE (args[1]), 0);
- tmp = fold_build2_loc (input_location, GT_EXPR, boolean_type_node, args[1],
+ tmp = fold_build2_loc (input_location, GT_EXPR, logical_type_node, args[1],
zero);
rrot = fold_build3_loc (input_location, COND_EXPR, type, tmp, lrot, rrot);
/* Do nothing if shift == 0. */
- tmp = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node, args[1],
+ tmp = fold_build2_loc (input_location, EQ_EXPR, logical_type_node, args[1],
zero);
se->expr = fold_build3_loc (input_location, COND_EXPR, type, tmp, args[0],
rrot);
@@ -5739,7 +5748,7 @@ gfc_conv_intrinsic_leadz (gfc_se * se, gfc_expr * expr)
fold_convert (arg_type, ullmax), ullsize);
cond = fold_build2_loc (input_location, BIT_AND_EXPR, arg_type,
arg, cond);
- cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ cond = fold_build2_loc (input_location, NE_EXPR, logical_type_node,
cond, build_int_cst (arg_type, 0));
tmp1 = fold_build2_loc (input_location, RSHIFT_EXPR, arg_type,
@@ -5763,7 +5772,7 @@ gfc_conv_intrinsic_leadz (gfc_se * se, gfc_expr * expr)
/* Build BIT_SIZE. */
bit_size = build_int_cst (result_type, argsize);
- cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
+ cond = fold_build2_loc (input_location, EQ_EXPR, logical_type_node,
arg, build_int_cst (arg_type, 0));
se->expr = fold_build3_loc (input_location, COND_EXPR, result_type, cond,
bit_size, leadz);
@@ -5848,7 +5857,7 @@ gfc_conv_intrinsic_trailz (gfc_se * se, gfc_expr *expr)
cond = fold_build2_loc (input_location, BIT_AND_EXPR, arg_type, arg,
fold_convert (arg_type, ullmax));
- cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node, cond,
+ cond = fold_build2_loc (input_location, EQ_EXPR, logical_type_node, cond,
build_int_cst (arg_type, 0));
tmp1 = fold_build2_loc (input_location, RSHIFT_EXPR, arg_type,
@@ -5872,7 +5881,7 @@ gfc_conv_intrinsic_trailz (gfc_se * se, gfc_expr *expr)
/* Build BIT_SIZE. */
bit_size = build_int_cst (result_type, argsize);
- cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
+ cond = fold_build2_loc (input_location, EQ_EXPR, logical_type_node,
arg, build_int_cst (arg_type, 0));
se->expr = fold_build3_loc (input_location, COND_EXPR, result_type, cond,
bit_size, trailz);
@@ -6305,7 +6314,7 @@ gfc_conv_intrinsic_mask (gfc_se * se, gfc_expr * expr, int left)
/* Special case arg == 0, because SHIFT_EXPR wants a shift strictly
smaller than type width. */
- cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node, arg,
+ cond = fold_build2_loc (input_location, EQ_EXPR, logical_type_node, arg,
build_int_cst (TREE_TYPE (arg), 0));
res = fold_build3_loc (input_location, COND_EXPR, utype, cond,
build_int_cst (utype, 0), res);
@@ -6319,7 +6328,7 @@ gfc_conv_intrinsic_mask (gfc_se * se, gfc_expr * expr, int left)
/* Special case agr == bit_size, because SHIFT_EXPR wants a shift
strictly smaller than type width. */
- cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
+ cond = fold_build2_loc (input_location, EQ_EXPR, logical_type_node,
arg, bitsize);
res = fold_build3_loc (input_location, COND_EXPR, utype,
cond, allones, res);
@@ -6440,7 +6449,7 @@ gfc_conv_intrinsic_spacing (gfc_se * se, gfc_expr * expr)
gfc_add_modify (&block, res, tmp);
/* Finish by building the IF statement for value zero. */
- cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node, arg,
+ cond = fold_build2_loc (input_location, EQ_EXPR, logical_type_node, arg,
build_real_from_int_cst (type, integer_zero_node));
tmp = build3_v (COND_EXPR, cond, build2_v (MODIFY_EXPR, res, tiny),
gfc_finish_block (&block));
@@ -6511,7 +6520,7 @@ gfc_conv_intrinsic_rrspacing (gfc_se * se, gfc_expr * expr)
stmt = gfc_finish_block (&block);
/* if (x != 0) */
- cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node, x,
+ cond = fold_build2_loc (input_location, NE_EXPR, logical_type_node, x,
build_real_from_int_cst (type, integer_zero_node));
tmp = build3_v (COND_EXPR, cond, stmt, build_empty_stmt (input_location));
@@ -6641,7 +6650,7 @@ gfc_conv_intrinsic_size (gfc_se * se, gfc_expr * expr)
argse.data_not_needed = 1;
gfc_conv_expr (&argse, actual->expr);
gfc_add_block_to_block (&se->pre, &argse.pre);
- tmp = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ tmp = fold_build2_loc (input_location, NE_EXPR, logical_type_node,
argse.expr, null_pointer_node);
tmp = gfc_evaluate_now (tmp, &se->pre);
se->expr = fold_build3_loc (input_location, COND_EXPR,
@@ -6810,7 +6819,7 @@ gfc_conv_intrinsic_sizeof (gfc_se *se, gfc_expr *expr)
}
exit: */
gfc_start_block (&body);
- cond = fold_build2_loc (input_location, GE_EXPR, boolean_type_node,
+ cond = fold_build2_loc (input_location, GE_EXPR, logical_type_node,
loop_var, tmp);
tmp = build1_v (GOTO_EXPR, exit_label);
tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node,
@@ -7081,7 +7090,7 @@ gfc_conv_intrinsic_transfer (gfc_se * se, gfc_expr * expr)
/* Clean up if it was repacked. */
gfc_init_block (&block);
tmp = gfc_conv_array_data (argse.expr);
- tmp = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ tmp = fold_build2_loc (input_location, NE_EXPR, logical_type_node,
source, tmp);
tmp = build3_v (COND_EXPR, tmp, stmt,
build_empty_stmt (input_location));
@@ -7306,14 +7315,14 @@ scalar_transfer:
indirect = gfc_finish_block (&block);
/* Wrap it up with the condition. */
- tmp = fold_build2_loc (input_location, LE_EXPR, boolean_type_node,
+ tmp = fold_build2_loc (input_location, LE_EXPR, logical_type_node,
dest_word_len, source_bytes);
tmp = build3_v (COND_EXPR, tmp, direct, indirect);
gfc_add_expr_to_block (&se->pre, tmp);
/* Free the temporary string, if necessary. */
free = gfc_call_free (tmpdecl);
- tmp = fold_build2_loc (input_location, GT_EXPR, boolean_type_node,
+ tmp = fold_build2_loc (input_location, GT_EXPR, logical_type_node,
dest_word_len, source_bytes);
tmp = build3_v (COND_EXPR, tmp, free, build_empty_stmt (input_location));
gfc_add_expr_to_block (&se->post, tmp);
@@ -7455,7 +7464,7 @@ gfc_conv_allocated (gfc_se *se, gfc_expr *expr)
tmp = gfc_conv_descriptor_data_get (arg1se.expr);
}
- tmp = fold_build2_loc (input_location, NE_EXPR, boolean_type_node, tmp,
+ tmp = fold_build2_loc (input_location, NE_EXPR, logical_type_node, tmp,
fold_convert (TREE_TYPE (tmp), null_pointer_node));
}
se->expr = convert (gfc_typenode_for_spec (&expr->ts), tmp);
@@ -7523,7 +7532,7 @@ gfc_conv_associated (gfc_se *se, gfc_expr *expr)
}
gfc_add_block_to_block (&se->pre, &arg1se.pre);
gfc_add_block_to_block (&se->post, &arg1se.post);
- tmp = fold_build2_loc (input_location, NE_EXPR, boolean_type_node, tmp2,
+ tmp = fold_build2_loc (input_location, NE_EXPR, logical_type_node, tmp2,
fold_convert (TREE_TYPE (tmp2), null_pointer_node));
se->expr = tmp;
}
@@ -7536,7 +7545,7 @@ gfc_conv_associated (gfc_se *se, gfc_expr *expr)
nonzero_charlen = NULL_TREE;
if (arg1->expr->ts.type == BT_CHARACTER)
nonzero_charlen = fold_build2_loc (input_location, NE_EXPR,
- boolean_type_node,
+ logical_type_node,
arg1->expr->ts.u.cl->backend_decl,
integer_zero_node);
if (scalar)
@@ -7561,12 +7570,12 @@ gfc_conv_associated (gfc_se *se, gfc_expr *expr)
gfc_add_block_to_block (&se->post, &arg1se.post);
gfc_add_block_to_block (&se->pre, &arg2se.pre);
gfc_add_block_to_block (&se->post, &arg2se.post);
- tmp = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
+ tmp = fold_build2_loc (input_location, EQ_EXPR, logical_type_node,
arg1se.expr, arg2se.expr);
- tmp2 = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ tmp2 = fold_build2_loc (input_location, NE_EXPR, logical_type_node,
arg1se.expr, null_pointer_node);
se->expr = fold_build2_loc (input_location, TRUTH_AND_EXPR,
- boolean_type_node, tmp, tmp2);
+ logical_type_node, tmp, tmp2);
}
else
{
@@ -7584,7 +7593,7 @@ gfc_conv_associated (gfc_se *se, gfc_expr *expr)
tmp = gfc_rank_cst[arg1->expr->rank - 1];
tmp = gfc_conv_descriptor_stride_get (arg1se.expr, tmp);
nonzero_arraylen = fold_build2_loc (input_location, NE_EXPR,
- boolean_type_node, tmp,
+ logical_type_node, tmp,
build_int_cst (TREE_TYPE (tmp), 0));
/* A pointer to an array, call library function _gfor_associated. */
@@ -7598,9 +7607,9 @@ gfc_conv_associated (gfc_se *se, gfc_expr *expr)
se->expr = build_call_expr_loc (input_location,
gfor_fndecl_associated, 2,
arg1se.expr, arg2se.expr);
- se->expr = convert (boolean_type_node, se->expr);
+ se->expr = convert (logical_type_node, se->expr);
se->expr = fold_build2_loc (input_location, TRUTH_AND_EXPR,
- boolean_type_node, se->expr,
+ logical_type_node, se->expr,
nonzero_arraylen);
}
@@ -7608,7 +7617,7 @@ gfc_conv_associated (gfc_se *se, gfc_expr *expr)
be associated. */
if (nonzero_charlen != NULL_TREE)
se->expr = fold_build2_loc (input_location, TRUTH_AND_EXPR,
- boolean_type_node,
+ logical_type_node,
se->expr, nonzero_charlen);
}
@@ -7636,14 +7645,14 @@ gfc_conv_same_type_as (gfc_se *se, gfc_expr *expr)
if (UNLIMITED_POLY (a))
{
tmp = gfc_class_vptr_get (a->symtree->n.sym->backend_decl);
- conda = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ conda = fold_build2_loc (input_location, NE_EXPR, logical_type_node,
tmp, build_int_cst (TREE_TYPE (tmp), 0));
}
if (UNLIMITED_POLY (b))
{
tmp = gfc_class_vptr_get (b->symtree->n.sym->backend_decl);
- condb = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ condb = fold_build2_loc (input_location, NE_EXPR, logical_type_node,
tmp, build_int_cst (TREE_TYPE (tmp), 0));
}
@@ -7669,16 +7678,16 @@ gfc_conv_same_type_as (gfc_se *se, gfc_expr *expr)
gfc_conv_expr (&se2, b);
tmp = fold_build2_loc (input_location, EQ_EXPR,
- boolean_type_node, se1.expr,
+ logical_type_node, se1.expr,
fold_convert (TREE_TYPE (se1.expr), se2.expr));
if (conda)
tmp = fold_build2_loc (input_location, TRUTH_ANDIF_EXPR,
- boolean_type_node, conda, tmp);
+ logical_type_node, conda, tmp);
if (condb)
tmp = fold_build2_loc (input_location, TRUTH_ANDIF_EXPR,
- boolean_type_node, condb, tmp);
+ logical_type_node, condb, tmp);
se->expr = convert (gfc_typenode_for_spec (&expr->ts), tmp);
}
@@ -7804,7 +7813,7 @@ gfc_conv_intrinsic_trim (gfc_se * se, gfc_expr * expr)
gfc_add_expr_to_block (&se->pre, tmp);
/* Free the temporary afterwards, if necessary. */
- cond = fold_build2_loc (input_location, GT_EXPR, boolean_type_node,
+ cond = fold_build2_loc (input_location, GT_EXPR, logical_type_node,
len, build_int_cst (TREE_TYPE (len), 0));
tmp = gfc_call_free (var);
tmp = build3_v (COND_EXPR, cond, tmp, build_empty_stmt (input_location));
@@ -7838,7 +7847,7 @@ gfc_conv_intrinsic_repeat (gfc_se * se, gfc_expr * expr)
ncopies_type = TREE_TYPE (ncopies);
/* Check that NCOPIES is not negative. */
- cond = fold_build2_loc (input_location, LT_EXPR, boolean_type_node, ncopies,
+ cond = fold_build2_loc (input_location, LT_EXPR, logical_type_node, ncopies,
build_int_cst (ncopies_type, 0));
gfc_trans_runtime_check (true, false, cond, &se->pre, &expr->where,
"Argument NCOPIES of REPEAT intrinsic is negative "
@@ -7848,7 +7857,7 @@ gfc_conv_intrinsic_repeat (gfc_se * se, gfc_expr * expr)
/* If the source length is zero, any non negative value of NCOPIES
is valid, and nothing happens. */
n = gfc_create_var (ncopies_type, "ncopies");
- cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node, slen,
+ cond = fold_build2_loc (input_location, EQ_EXPR, logical_type_node, slen,
build_int_cst (size_type_node, 0));
tmp = fold_build3_loc (input_location, COND_EXPR, ncopies_type, cond,
build_int_cst (ncopies_type, 0), ncopies);
@@ -7865,13 +7874,13 @@ gfc_conv_intrinsic_repeat (gfc_se * se, gfc_expr * expr)
fold_convert (size_type_node, max), slen);
largest = TYPE_PRECISION (size_type_node) > TYPE_PRECISION (ncopies_type)
? size_type_node : ncopies_type;
- cond = fold_build2_loc (input_location, GT_EXPR, boolean_type_node,
+ cond = fold_build2_loc (input_location, GT_EXPR, logical_type_node,
fold_convert (largest, ncopies),
fold_convert (largest, max));
- tmp = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node, slen,
+ tmp = fold_build2_loc (input_location, EQ_EXPR, logical_type_node, slen,
build_int_cst (size_type_node, 0));
- cond = fold_build3_loc (input_location, COND_EXPR, boolean_type_node, tmp,
- boolean_false_node, cond);
+ cond = fold_build3_loc (input_location, COND_EXPR, logical_type_node, tmp,
+ logical_false_node, cond);
gfc_trans_runtime_check (true, false, cond, &se->pre, &expr->where,
"Argument NCOPIES of REPEAT intrinsic is too large");
@@ -7894,7 +7903,7 @@ gfc_conv_intrinsic_repeat (gfc_se * se, gfc_expr * expr)
gfc_start_block (&body);
/* Exit the loop if count >= ncopies. */
- cond = fold_build2_loc (input_location, GE_EXPR, boolean_type_node, count,
+ cond = fold_build2_loc (input_location, GE_EXPR, logical_type_node, count,
ncopies);
tmp = build1_v (GOTO_EXPR, exit_label);
TREE_USED (exit_label) = 1;
@@ -8043,7 +8052,7 @@ conv_isocbinding_function (gfc_se *se, gfc_expr *expr)
if (arg->next->expr == NULL)
/* Only given one arg so generate a null and do a
not-equal comparison against the first arg. */
- se->expr = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ se->expr = fold_build2_loc (input_location, NE_EXPR, logical_type_node,
arg1se.expr,
fold_convert (TREE_TYPE (arg1se.expr),
null_pointer_node));
@@ -8059,17 +8068,17 @@ conv_isocbinding_function (gfc_se *se, gfc_expr *expr)
gfc_add_block_to_block (&se->post, &arg2se.post);
/* Generate test to compare that the two args are equal. */
- eq_expr = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
+ eq_expr = fold_build2_loc (input_location, EQ_EXPR, logical_type_node,
arg1se.expr, arg2se.expr);
/* Generate test to ensure that the first arg is not null. */
not_null_expr = fold_build2_loc (input_location, NE_EXPR,
- boolean_type_node,
+ logical_type_node,
arg1se.expr, null_pointer_node);
/* Finally, the generated test must check that both arg1 is not
NULL and that it is equal to the second arg. */
se->expr = fold_build2_loc (input_location, TRUTH_AND_EXPR,
- boolean_type_node,
+ logical_type_node,
not_null_expr, eq_expr);
}
}
@@ -8299,11 +8308,11 @@ conv_intrinsic_ieee_is_normal (gfc_se * se, gfc_expr * expr)
isnormal = build_call_expr_loc (input_location,
builtin_decl_explicit (BUILT_IN_ISNORMAL),
1, arg);
- iszero = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node, arg,
+ iszero = fold_build2_loc (input_location, EQ_EXPR, logical_type_node, arg,
build_real_from_int_cst (TREE_TYPE (arg),
integer_zero_node));
se->expr = fold_build2_loc (input_location, TRUTH_OR_EXPR,
- boolean_type_node, isnormal, iszero);
+ logical_type_node, isnormal, iszero);
se->expr = fold_convert (gfc_typenode_for_spec (&expr->ts), se->expr);
}
@@ -8328,11 +8337,11 @@ conv_intrinsic_ieee_is_negative (gfc_se * se, gfc_expr * expr)
signbit = build_call_expr_loc (input_location,
builtin_decl_explicit (BUILT_IN_SIGNBIT),
1, arg);
- signbit = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ signbit = fold_build2_loc (input_location, NE_EXPR, logical_type_node,
signbit, integer_zero_node);
se->expr = fold_build2_loc (input_location, TRUTH_AND_EXPR,
- boolean_type_node, signbit,
+ logical_type_node, signbit,
fold_build1_loc (input_location, TRUTH_NOT_EXPR,
TREE_TYPE(isnan), isnan));
@@ -8478,7 +8487,7 @@ conv_intrinsic_ieee_copy_sign (gfc_se * se, gfc_expr * expr)
sign = build_call_expr_loc (input_location,
builtin_decl_explicit (BUILT_IN_SIGNBIT),
1, args[1]);
- sign = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ sign = fold_build2_loc (input_location, NE_EXPR, logical_type_node,
sign, integer_zero_node);
/* Create a value of one, with the right sign. */
@@ -10544,7 +10553,7 @@ conv_intrinsic_move_alloc (gfc_code *code)
tmp = gfc_conv_descriptor_data_get (to_se.expr);
cond = fold_build2_loc (input_location, EQ_EXPR,
- boolean_type_node, tmp,
+ logical_type_node, tmp,
fold_convert (TREE_TYPE (tmp),
null_pointer_node));
tmp = build_call_expr_loc (input_location, gfor_fndecl_caf_sync_all,
diff --git a/gcc/fortran/trans-io.c b/gcc/fortran/trans-io.c
index f3e1f3e4d09..9cd33b331e1 100644
--- a/gcc/fortran/trans-io.c
+++ b/gcc/fortran/trans-io.c
@@ -581,7 +581,7 @@ set_parameter_value_chk (stmtblock_t *block, bool has_iostat, tree var,
/* UNIT numbers should be greater than the min. */
i = gfc_validate_kind (BT_INTEGER, 4, false);
val = gfc_conv_mpz_to_tree (gfc_integer_kinds[i].pedantic_min_int, 4);
- cond = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
+ cond = fold_build2_loc (input_location, LT_EXPR, logical_type_node,
se.expr,
fold_convert (TREE_TYPE (se.expr), val));
gfc_trans_io_runtime_check (has_iostat, cond, var, LIBERROR_BAD_UNIT,
@@ -590,7 +590,7 @@ set_parameter_value_chk (stmtblock_t *block, bool has_iostat, tree var,
/* UNIT numbers should be less than the max. */
val = gfc_conv_mpz_to_tree (gfc_integer_kinds[i].huge, 4);
- cond = fold_build2_loc (input_location, GT_EXPR, boolean_type_node,
+ cond = fold_build2_loc (input_location, GT_EXPR, logical_type_node,
se.expr,
fold_convert (TREE_TYPE (se.expr), val));
gfc_trans_io_runtime_check (has_iostat, cond, var, LIBERROR_BAD_UNIT,
@@ -641,17 +641,17 @@ set_parameter_value_inquire (stmtblock_t *block, tree var,
/* UNIT numbers should be greater than zero. */
i = gfc_validate_kind (BT_INTEGER, 4, false);
- cond1 = build2_loc (input_location, LT_EXPR, boolean_type_node,
+ cond1 = build2_loc (input_location, LT_EXPR, logical_type_node,
se.expr,
fold_convert (TREE_TYPE (se.expr),
integer_zero_node));
/* UNIT numbers should be less than the max. */
val = gfc_conv_mpz_to_tree (gfc_integer_kinds[i].huge, 4);
- cond2 = build2_loc (input_location, GT_EXPR, boolean_type_node,
+ cond2 = build2_loc (input_location, GT_EXPR, logical_type_node,
se.expr,
fold_convert (TREE_TYPE (se.expr), val));
cond3 = build2_loc (input_location, TRUTH_OR_EXPR,
- boolean_type_node, cond1, cond2);
+ logical_type_node, cond1, cond2);
gfc_start_block (&newblock);
@@ -826,7 +826,7 @@ set_string (stmtblock_t * block, stmtblock_t * postblock, tree var,
gfc_conv_label_variable (&se, e);
tmp = GFC_DECL_STRING_LEN (se.expr);
- cond = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
+ cond = fold_build2_loc (input_location, LT_EXPR, logical_type_node,
tmp, build_int_cst (TREE_TYPE (tmp), 0));
msg = xasprintf ("Label assigned to variable '%s' (%%ld) is not a format "
diff --git a/gcc/fortran/trans-openmp.c b/gcc/fortran/trans-openmp.c
index 00c02a75d18..75eafe42f93 100644
--- a/gcc/fortran/trans-openmp.c
+++ b/gcc/fortran/trans-openmp.c
@@ -413,7 +413,7 @@ gfc_walk_alloc_comps (tree decl, tree dest, tree var,
{
tem = fold_convert (pvoid_type_node, tem);
tem = fold_build2_loc (input_location, NE_EXPR,
- boolean_type_node, tem,
+ logical_type_node, tem,
null_pointer_node);
then_b = build3_loc (input_location, COND_EXPR, void_type_node,
tem, then_b,
@@ -540,7 +540,7 @@ gfc_omp_clause_default_ctor (tree clause, tree decl, tree outer)
GFC_DESCRIPTOR_TYPE_P (type)
? gfc_conv_descriptor_data_get (outer) : outer);
tem = unshare_expr (tem);
- cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ cond = fold_build2_loc (input_location, NE_EXPR, logical_type_node,
tem, null_pointer_node);
gfc_add_expr_to_block (&block,
build3_loc (input_location, COND_EXPR,
@@ -646,7 +646,7 @@ gfc_omp_clause_copy_ctor (tree clause, tree dest, tree src)
build_zero_cst (TREE_TYPE (dest)));
else_b = gfc_finish_block (&cond_block);
- cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ cond = fold_build2_loc (input_location, NE_EXPR, logical_type_node,
unshare_expr (srcptr), null_pointer_node);
gfc_add_expr_to_block (&block,
build3_loc (input_location, COND_EXPR,
@@ -699,7 +699,7 @@ gfc_omp_clause_assign_op (tree clause, tree dest, tree src)
GFC_DESCRIPTOR_TYPE_P (type)
? gfc_conv_descriptor_data_get (dest) : dest);
tem = unshare_expr (tem);
- cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ cond = fold_build2_loc (input_location, NE_EXPR, logical_type_node,
tem, null_pointer_node);
tem = build3_loc (input_location, COND_EXPR, void_type_node, cond,
then_b, build_empty_stmt (input_location));
@@ -739,7 +739,7 @@ gfc_omp_clause_assign_op (tree clause, tree dest, tree src)
destptr = fold_convert (pvoid_type_node, destptr);
gfc_add_modify (&cond_block, ptr, destptr);
- nonalloc = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
+ nonalloc = fold_build2_loc (input_location, EQ_EXPR, logical_type_node,
destptr, null_pointer_node);
cond = nonalloc;
if (GFC_DESCRIPTOR_TYPE_P (type))
@@ -755,11 +755,11 @@ gfc_omp_clause_assign_op (tree clause, tree dest, tree src)
tem = fold_build2_loc (input_location, PLUS_EXPR,
gfc_array_index_type, tem,
gfc_conv_descriptor_lbound_get (dest, rank));
- tem = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ tem = fold_build2_loc (input_location, NE_EXPR, logical_type_node,
tem, gfc_conv_descriptor_ubound_get (dest,
rank));
cond = fold_build2_loc (input_location, TRUTH_ORIF_EXPR,
- boolean_type_node, cond, tem);
+ logical_type_node, cond, tem);
}
}
@@ -835,7 +835,7 @@ gfc_omp_clause_assign_op (tree clause, tree dest, tree src)
}
else_b = gfc_finish_block (&cond_block);
- cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ cond = fold_build2_loc (input_location, NE_EXPR, logical_type_node,
unshare_expr (srcptr), null_pointer_node);
gfc_add_expr_to_block (&block,
build3_loc (input_location, COND_EXPR,
@@ -1028,7 +1028,7 @@ gfc_omp_clause_dtor (tree clause, tree decl)
GFC_DESCRIPTOR_TYPE_P (type)
? gfc_conv_descriptor_data_get (decl) : decl);
tem = unshare_expr (tem);
- tree cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ tree cond = fold_build2_loc (input_location, NE_EXPR, logical_type_node,
tem, null_pointer_node);
tem = build3_loc (input_location, COND_EXPR, void_type_node, cond,
then_b, build_empty_stmt (input_location));
@@ -1129,7 +1129,7 @@ gfc_omp_finish_clause (tree c, gimple_seq *pre_p)
tem = gfc_conv_descriptor_data_get (decl);
tem = fold_convert (pvoid_type_node, tem);
cond = fold_build2_loc (input_location, NE_EXPR,
- boolean_type_node, tem, null_pointer_node);
+ logical_type_node, tem, null_pointer_node);
gfc_add_expr_to_block (&block, build3_loc (input_location, COND_EXPR,
void_type_node, cond,
then_b, else_b));
@@ -2155,7 +2155,7 @@ gfc_trans_omp_clauses (stmtblock_t *block, gfc_omp_clauses *clauses,
tem = gfc_conv_descriptor_data_get (decl);
tem = fold_convert (pvoid_type_node, tem);
cond = fold_build2_loc (input_location, NE_EXPR,
- boolean_type_node,
+ logical_type_node,
tem, null_pointer_node);
gfc_add_expr_to_block (block,
build3_loc (input_location,
@@ -3599,7 +3599,7 @@ gfc_trans_omp_do (gfc_code *code, gfc_exec_op op, stmtblock_t *pblock,
/* The condition should not be folded. */
TREE_VEC_ELT (cond, i) = build2_loc (input_location, simple > 0
? LE_EXPR : GE_EXPR,
- boolean_type_node, dovar, to);
+ logical_type_node, dovar, to);
TREE_VEC_ELT (incr, i) = fold_build2_loc (input_location, PLUS_EXPR,
type, dovar, step);
TREE_VEC_ELT (incr, i) = fold_build2_loc (input_location,
@@ -3626,7 +3626,7 @@ gfc_trans_omp_do (gfc_code *code, gfc_exec_op op, stmtblock_t *pblock,
build_int_cst (type, 0));
/* The condition should not be folded. */
TREE_VEC_ELT (cond, i) = build2_loc (input_location, LT_EXPR,
- boolean_type_node,
+ logical_type_node,
count, tmp);
TREE_VEC_ELT (incr, i) = fold_build2_loc (input_location, PLUS_EXPR,
type, count,
diff --git a/gcc/fortran/trans-stmt.c b/gcc/fortran/trans-stmt.c
index 7a76b8ead31..ea0f9529f1c 100644
--- a/gcc/fortran/trans-stmt.c
+++ b/gcc/fortran/trans-stmt.c
@@ -150,7 +150,7 @@ gfc_trans_goto (gfc_code * code)
gfc_start_block (&se.pre);
gfc_conv_label_variable (&se, code->expr1);
tmp = GFC_DECL_STRING_LEN (se.expr);
- tmp = fold_build2_loc (input_location, NE_EXPR, boolean_type_node, tmp,
+ tmp = fold_build2_loc (input_location, NE_EXPR, logical_type_node, tmp,
build_int_cst (TREE_TYPE (tmp), -1));
gfc_trans_runtime_check (true, false, tmp, &se.pre, &loc,
"Assigned label is not a target label");
@@ -1107,7 +1107,7 @@ gfc_trans_sync (gfc_code *code, gfc_exec_op type)
{
tree cond;
if (flag_coarray != GFC_FCOARRAY_LIB)
- cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ cond = fold_build2_loc (input_location, NE_EXPR, logical_type_node,
images, build_int_cst (TREE_TYPE (images), 1));
else
{
@@ -1115,13 +1115,13 @@ gfc_trans_sync (gfc_code *code, gfc_exec_op type)
tmp = build_call_expr_loc (input_location, gfor_fndecl_caf_num_images,
2, integer_zero_node,
build_int_cst (integer_type_node, -1));
- cond = fold_build2_loc (input_location, GT_EXPR, boolean_type_node,
+ cond = fold_build2_loc (input_location, GT_EXPR, logical_type_node,
images, tmp);
- cond2 = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
+ cond2 = fold_build2_loc (input_location, LT_EXPR, logical_type_node,
images,
build_int_cst (TREE_TYPE (images), 1));
cond = fold_build2_loc (input_location, TRUTH_OR_EXPR,
- boolean_type_node, cond, cond2);
+ logical_type_node, cond, cond2);
}
gfc_trans_runtime_check (true, false, cond, &se.pre,
&code->expr1->where, "Invalid image number "
@@ -1413,10 +1413,10 @@ gfc_trans_arithmetic_if (gfc_code * code)
branch2 = build1_v (GOTO_EXPR, gfc_get_label_decl (code->label2));
if (code->label1->value != code->label3->value)
- tmp = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
+ tmp = fold_build2_loc (input_location, LT_EXPR, logical_type_node,
se.expr, zero);
else
- tmp = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ tmp = fold_build2_loc (input_location, NE_EXPR, logical_type_node,
se.expr, zero);
branch1 = fold_build3_loc (input_location, COND_EXPR, void_type_node,
@@ -1430,7 +1430,7 @@ gfc_trans_arithmetic_if (gfc_code * code)
{
/* if (cond <= 0) take branch1 else take branch2. */
branch2 = build1_v (GOTO_EXPR, gfc_get_label_decl (code->label3));
- tmp = fold_build2_loc (input_location, LE_EXPR, boolean_type_node,
+ tmp = fold_build2_loc (input_location, LE_EXPR, logical_type_node,
se.expr, zero);
branch1 = fold_build3_loc (input_location, COND_EXPR, void_type_node,
tmp, branch1, branch2);
@@ -1966,10 +1966,10 @@ gfc_trans_simple_do (gfc_code * code, stmtblock_t *pblock, tree dovar,
/* Evaluate the loop condition. */
if (is_step_positive)
- cond = fold_build2_loc (loc, GT_EXPR, boolean_type_node, dovar,
+ cond = fold_build2_loc (loc, GT_EXPR, logical_type_node, dovar,
fold_convert (type, to));
else
- cond = fold_build2_loc (loc, LT_EXPR, boolean_type_node, dovar,
+ cond = fold_build2_loc (loc, LT_EXPR, logical_type_node, dovar,
fold_convert (type, to));
cond = gfc_evaluate_now_loc (loc, cond, &body);
@@ -1988,7 +1988,7 @@ gfc_trans_simple_do (gfc_code * code, stmtblock_t *pblock, tree dovar,
tree boundary = is_step_positive ? TYPE_MAX_VALUE (type)
: TYPE_MIN_VALUE (type);
- tmp = fold_build2_loc (loc, EQ_EXPR, boolean_type_node,
+ tmp = fold_build2_loc (loc, EQ_EXPR, logical_type_node,
dovar, boundary);
gfc_trans_runtime_check (true, false, tmp, &body, &code->loc,
"Loop iterates infinitely");
@@ -2008,7 +2008,7 @@ gfc_trans_simple_do (gfc_code * code, stmtblock_t *pblock, tree dovar,
/* Check whether someone has modified the loop variable. */
if (gfc_option.rtcheck & GFC_RTCHECK_DO)
{
- tmp = fold_build2_loc (loc, NE_EXPR, boolean_type_node,
+ tmp = fold_build2_loc (loc, NE_EXPR, logical_type_node,
dovar, saved_dovar);
gfc_trans_runtime_check (true, false, tmp, &body, &code->loc,
"Loop variable has been modified");
@@ -2117,7 +2117,7 @@ gfc_trans_do (gfc_code * code, tree exit_cond)
if (gfc_option.rtcheck & GFC_RTCHECK_DO)
{
- tmp = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node, step,
+ tmp = fold_build2_loc (input_location, EQ_EXPR, logical_type_node, step,
build_zero_cst (type));
gfc_trans_runtime_check (true, false, tmp, &block, &code->loc,
"DO step value is zero");
@@ -2184,7 +2184,7 @@ gfc_trans_do (gfc_code * code, tree exit_cond)
/* For a positive step, when to < from, exit, otherwise compute
countm1 = ((unsigned)to - (unsigned)from) / (unsigned)step */
- tmp = fold_build2_loc (loc, LT_EXPR, boolean_type_node, to, from);
+ tmp = fold_build2_loc (loc, LT_EXPR, logical_type_node, to, from);
tmp2 = fold_build2_loc (loc, TRUNC_DIV_EXPR, utype,
fold_build2_loc (loc, MINUS_EXPR, utype,
tou, fromu),
@@ -2199,7 +2199,7 @@ gfc_trans_do (gfc_code * code, tree exit_cond)
/* For a negative step, when to > from, exit, otherwise compute
countm1 = ((unsigned)from - (unsigned)to) / -(unsigned)step */
- tmp = fold_build2_loc (loc, GT_EXPR, boolean_type_node, to, from);
+ tmp = fold_build2_loc (loc, GT_EXPR, logical_type_node, to, from);
tmp2 = fold_build2_loc (loc, TRUNC_DIV_EXPR, utype,
fold_build2_loc (loc, MINUS_EXPR, utype,
fromu, tou),
@@ -2212,7 +2212,7 @@ gfc_trans_do (gfc_code * code, tree exit_cond)
build1_loc (loc, GOTO_EXPR, void_type_node,
exit_label), NULL_TREE));
- tmp = fold_build2_loc (loc, LT_EXPR, boolean_type_node, step,
+ tmp = fold_build2_loc (loc, LT_EXPR, logical_type_node, step,
build_int_cst (TREE_TYPE (step), 0));
tmp = fold_build3_loc (loc, COND_EXPR, void_type_node, tmp, neg, pos);
@@ -2233,13 +2233,13 @@ gfc_trans_do (gfc_code * code, tree exit_cond)
/* We need a special check for empty loops:
empty = (step > 0 ? to < from : to > from); */
- pos_step = fold_build2_loc (loc, GT_EXPR, boolean_type_node, step,
+ pos_step = fold_build2_loc (loc, GT_EXPR, logical_type_node, step,
build_zero_cst (type));
- tmp = fold_build3_loc (loc, COND_EXPR, boolean_type_node, pos_step,
+ tmp = fold_build3_loc (loc, COND_EXPR, logical_type_node, pos_step,
fold_build2_loc (loc, LT_EXPR,
- boolean_type_node, to, from),
+ logical_type_node, to, from),
fold_build2_loc (loc, GT_EXPR,
- boolean_type_node, to, from));
+ logical_type_node, to, from));
/* If the loop is empty, go directly to the exit label. */
tmp = fold_build3_loc (loc, COND_EXPR, void_type_node, tmp,
build1_v (GOTO_EXPR, exit_label),
@@ -2264,7 +2264,7 @@ gfc_trans_do (gfc_code * code, tree exit_cond)
/* Check whether someone has modified the loop variable. */
if (gfc_option.rtcheck & GFC_RTCHECK_DO)
{
- tmp = fold_build2_loc (loc, NE_EXPR, boolean_type_node, dovar,
+ tmp = fold_build2_loc (loc, NE_EXPR, logical_type_node, dovar,
saved_dovar);
gfc_trans_runtime_check (true, false, tmp, &body, &code->loc,
"Loop variable has been modified");
@@ -2297,7 +2297,7 @@ gfc_trans_do (gfc_code * code, tree exit_cond)
gfc_add_modify_loc (loc, &body, countm1, tmp);
/* End with the loop condition. Loop until countm1t == 0. */
- cond = fold_build2_loc (loc, EQ_EXPR, boolean_type_node, countm1t,
+ cond = fold_build2_loc (loc, EQ_EXPR, logical_type_node, countm1t,
build_int_cst (utype, 0));
tmp = fold_build1_loc (loc, GOTO_EXPR, void_type_node, exit_label);
tmp = fold_build3_loc (loc, COND_EXPR, void_type_node,
@@ -3450,7 +3450,7 @@ gfc_trans_forall_loop (forall_info *forall_tmp, tree body,
gfc_init_block (&block);
/* The exit condition. */
- cond = fold_build2_loc (input_location, LE_EXPR, boolean_type_node,
+ cond = fold_build2_loc (input_location, LE_EXPR, logical_type_node,
count, build_int_cst (TREE_TYPE (count), 0));
if (forall_tmp->do_concurrent)
cond = build2 (ANNOTATE_EXPR, TREE_TYPE (cond), cond,
@@ -5128,7 +5128,7 @@ gfc_trans_where_2 (gfc_code * code, tree mask, bool invert,
&inner_size_body, block);
/* Check whether the size is negative. */
- cond = fold_build2_loc (input_location, LE_EXPR, boolean_type_node, size,
+ cond = fold_build2_loc (input_location, LE_EXPR, logical_type_node, size,
gfc_index_zero_node);
size = fold_build3_loc (input_location, COND_EXPR, gfc_array_index_type,
cond, gfc_index_zero_node, size);
@@ -5913,10 +5913,9 @@ gfc_trans_allocate (gfc_code * code)
if (code->ext.alloc.ts.type != BT_CHARACTER)
expr3_esize = TYPE_SIZE_UNIT (
gfc_typenode_for_spec (&code->ext.alloc.ts));
- else
+ else if (code->ext.alloc.ts.u.cl->length != NULL)
{
gfc_expr *sz;
- gcc_assert (code->ext.alloc.ts.u.cl->length != NULL);
sz = gfc_copy_expr (code->ext.alloc.ts.u.cl->length);
gfc_init_se (&se_sz, NULL);
gfc_conv_expr (&se_sz, sz);
@@ -5930,6 +5929,8 @@ gfc_trans_allocate (gfc_code * code)
tmp, se_sz.expr);
expr3_esize = gfc_evaluate_now (expr3_esize, &block);
}
+ else
+ expr3_esize = NULL_TREE;
}
/* The routine gfc_trans_assignment () already implements all
@@ -6134,7 +6135,7 @@ gfc_trans_allocate (gfc_code * code)
polymorphic and stores a _len dependent object,
e.g., a string. */
memsz = fold_build2_loc (input_location, GT_EXPR,
- boolean_type_node, expr3_len,
+ logical_type_node, expr3_len,
integer_zero_node);
memsz = fold_build3_loc (input_location, COND_EXPR,
TREE_TYPE (expr3_esize),
@@ -6267,7 +6268,7 @@ gfc_trans_allocate (gfc_code * code)
{
tmp = build1_v (GOTO_EXPR, label_errmsg);
parm = fold_build2_loc (input_location, NE_EXPR,
- boolean_type_node, stat,
+ logical_type_node, stat,
build_int_cst (TREE_TYPE (stat), 0));
tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node,
gfc_unlikely (parm, PRED_FORTRAN_FAIL_ALLOC),
@@ -6515,7 +6516,7 @@ gfc_trans_allocate (gfc_code * code)
gfc_default_character_kind);
dlen = gfc_finish_block (&errmsg_block);
- tmp = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ tmp = fold_build2_loc (input_location, NE_EXPR, logical_type_node,
stat, build_int_cst (TREE_TYPE (stat), 0));
tmp = build3_v (COND_EXPR, tmp,
@@ -6768,7 +6769,7 @@ gfc_trans_deallocate (gfc_code *code)
{
tree cond;
- cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node, stat,
+ cond = fold_build2_loc (input_location, NE_EXPR, logical_type_node, stat,
build_int_cst (TREE_TYPE (stat), 0));
tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node,
gfc_unlikely (cond, PRED_FORTRAN_FAIL_ALLOC),
@@ -6808,7 +6809,7 @@ gfc_trans_deallocate (gfc_code *code)
slen, errmsg_str, gfc_default_character_kind);
tmp = gfc_finish_block (&errmsg_block);
- cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node, stat,
+ cond = fold_build2_loc (input_location, NE_EXPR, logical_type_node, stat,
build_int_cst (TREE_TYPE (stat), 0));
tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node,
gfc_unlikely (cond, PRED_FORTRAN_FAIL_ALLOC), tmp,
diff --git a/gcc/fortran/trans-types.c b/gcc/fortran/trans-types.c
index c8ca144b896..10a454cf40f 100644
--- a/gcc/fortran/trans-types.c
+++ b/gcc/fortran/trans-types.c
@@ -62,6 +62,9 @@ tree ppvoid_type_node;
tree pchar_type_node;
tree pfunc_type_node;
+tree logical_type_node;
+tree logical_true_node;
+tree logical_false_node;
tree gfc_charlen_type_node;
tree gfc_float128_type_node = NULL_TREE;
@@ -1003,6 +1006,11 @@ gfc_init_types (void)
wi::mask (n, UNSIGNED,
TYPE_PRECISION (size_type_node)));
+
+ logical_type_node = gfc_get_logical_type (gfc_default_logical_kind);
+ logical_true_node = build_int_cst (logical_type_node, 1);
+ logical_false_node = build_int_cst (logical_type_node, 0);
+
/* ??? Shouldn't this be based on gfc_index_integer_kind or so? */
gfc_charlen_int_kind = 4;
gfc_charlen_type_node = gfc_get_int_type (gfc_charlen_int_kind);
@@ -3266,11 +3274,11 @@ gfc_get_array_descr_info (const_tree type, struct array_descr_info *info)
t = build1 (NOP_EXPR, build_pointer_type (ptr_type_node), t);
info->data_location = build1 (INDIRECT_REF, ptr_type_node, t);
if (GFC_TYPE_ARRAY_AKIND (type) == GFC_ARRAY_ALLOCATABLE)
- info->allocated = build2 (NE_EXPR, boolean_type_node,
+ info->allocated = build2 (NE_EXPR, logical_type_node,
info->data_location, null_pointer_node);
else if (GFC_TYPE_ARRAY_AKIND (type) == GFC_ARRAY_POINTER
|| GFC_TYPE_ARRAY_AKIND (type) == GFC_ARRAY_POINTER_CONT)
- info->associated = build2 (NE_EXPR, boolean_type_node,
+ info->associated = build2 (NE_EXPR, logical_type_node,
info->data_location, null_pointer_node);
if ((GFC_TYPE_ARRAY_AKIND (type) == GFC_ARRAY_ASSUMED_RANK
|| GFC_TYPE_ARRAY_AKIND (type) == GFC_ARRAY_ASSUMED_RANK_CONT)
diff --git a/gcc/fortran/trans-types.h b/gcc/fortran/trans-types.h
index 2974e451304..6dba78e3671 100644
--- a/gcc/fortran/trans-types.h
+++ b/gcc/fortran/trans-types.h
@@ -33,6 +33,20 @@ extern GTY(()) tree pchar_type_node;
extern GTY(()) tree gfc_float128_type_node;
extern GTY(()) tree gfc_complex_float128_type_node;
+/* logical_type_node is the Fortran LOGICAL type of default kind. In
+ addition to uses mandated by the Fortran standard, also prefer it
+ for compiler generated temporary variables, is it avoids some minor
+ issues with boolean_type_node (the C/C++ _Bool/bool). Namely:
+ - On x86, partial register stalls with 8/16 bit register access,
+ and length prefix changes.
+ - On s390 there is a compare with immediate and jump instruction,
+ but it works only with 32-bit quantities and not 8-bit such as
+ boolean_type_node.
+*/
+extern GTY(()) tree logical_type_node;
+extern GTY(()) tree logical_true_node;
+extern GTY(()) tree logical_false_node;
+
/* This is the type used to hold the lengths of character variables.
It must be the same as the corresponding definition in gfortran.h. */
/* TODO: This is still hardcoded as kind=4 in some bits of the compiler
diff --git a/gcc/fortran/trans.c b/gcc/fortran/trans.c
index 53bc4285c78..8c1733448f4 100644
--- a/gcc/fortran/trans.c
+++ b/gcc/fortran/trans.c
@@ -320,8 +320,12 @@ get_array_span (tree type, tree decl)
|| DECL_CONTEXT (TYPE_MAX_VALUE (TYPE_DOMAIN (type)))
== DECL_CONTEXT (decl)))
{
- span = TYPE_MAX_VALUE (TYPE_DOMAIN (type));
- span = fold_convert (gfc_array_index_type, span);
+ span = fold_convert (gfc_array_index_type,
+ TYPE_MAX_VALUE (TYPE_DOMAIN (type)));
+ span = fold_build2 (MULT_EXPR, gfc_array_index_type,
+ fold_convert (gfc_array_index_type,
+ TYPE_SIZE_UNIT (TREE_TYPE (type))),
+ span);
}
/* Likewise for class array or pointer array references. */
else if (TREE_CODE (decl) == FIELD_DECL
@@ -533,9 +537,9 @@ gfc_trans_runtime_check (bool error, bool once, tree cond, stmtblock_t * pblock,
if (once)
{
- tmpvar = gfc_create_var (boolean_type_node, "print_warning");
+ tmpvar = gfc_create_var (logical_type_node, "print_warning");
TREE_STATIC (tmpvar) = 1;
- DECL_INITIAL (tmpvar) = boolean_true_node;
+ DECL_INITIAL (tmpvar) = logical_true_node;
gfc_add_expr_to_block (pblock, tmpvar);
}
@@ -554,7 +558,7 @@ gfc_trans_runtime_check (bool error, bool once, tree cond, stmtblock_t * pblock,
va_end (ap);
if (once)
- gfc_add_modify (&block, tmpvar, boolean_false_node);
+ gfc_add_modify (&block, tmpvar, logical_false_node);
body = gfc_finish_block (&block);
@@ -607,7 +611,7 @@ gfc_call_malloc (stmtblock_t * block, tree type, tree size)
if (gfc_option.rtcheck & GFC_RTCHECK_MEM)
{
null_result = fold_build2_loc (input_location, EQ_EXPR,
- boolean_type_node, res,
+ logical_type_node, res,
build_int_cst (pvoid_type_node, 0));
msg = gfc_build_addr_expr (pchar_type_node,
gfc_build_localized_cstring_const ("Memory allocation failed"));
@@ -693,7 +697,7 @@ gfc_allocate_using_malloc (stmtblock_t * block, tree pointer,
}
error_cond = fold_build2_loc (input_location, EQ_EXPR,
- boolean_type_node, pointer,
+ logical_type_node, pointer,
build_int_cst (prvoid_type_node, 0));
tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node,
gfc_unlikely (error_cond, PRED_FORTRAN_FAIL_ALLOC),
@@ -795,7 +799,7 @@ gfc_allocate_allocatable (stmtblock_t * block, tree mem, tree size,
size = fold_convert (size_type_node, size);
null_mem = gfc_unlikely (fold_build2_loc (input_location, NE_EXPR,
- boolean_type_node, mem,
+ logical_type_node, mem,
build_int_cst (type, 0)),
PRED_FORTRAN_REALLOC);
@@ -873,7 +877,7 @@ gfc_allocate_allocatable (stmtblock_t * block, tree mem, tree size,
{
TREE_USED (label_finish) = 1;
tmp = build1_v (GOTO_EXPR, label_finish);
- cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ cond = fold_build2_loc (input_location, NE_EXPR, logical_type_node,
status, build_zero_cst (TREE_TYPE (status)));
tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node,
gfc_unlikely (cond, PRED_FORTRAN_FAIL_ALLOC),
@@ -1090,12 +1094,12 @@ gfc_add_comp_finalizer_call (stmtblock_t *block, tree decl, gfc_component *comp,
{
tmp = GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (array))
? gfc_conv_descriptor_data_get (array) : array;
- cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ cond = fold_build2_loc (input_location, NE_EXPR, logical_type_node,
tmp, fold_convert (TREE_TYPE (tmp),
null_pointer_node));
}
else
- cond = boolean_true_node;
+ cond = logical_true_node;
if (!GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (array)))
{
@@ -1111,12 +1115,12 @@ gfc_add_comp_finalizer_call (stmtblock_t *block, tree decl, gfc_component *comp,
if (!final_expr)
{
- tmp = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ tmp = fold_build2_loc (input_location, NE_EXPR, logical_type_node,
final_fndecl,
fold_convert (TREE_TYPE (final_fndecl),
null_pointer_node));
cond = fold_build2_loc (input_location, TRUTH_ANDIF_EXPR,
- boolean_type_node, cond, tmp);
+ logical_type_node, cond, tmp);
}
if (POINTER_TYPE_P (TREE_TYPE (final_fndecl)))
@@ -1212,7 +1216,7 @@ gfc_add_finalizer_call (stmtblock_t *block, gfc_expr *expr2)
gfc_init_se (&se, NULL);
se.want_pointer = 1;
gfc_conv_expr (&se, final_expr);
- cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ cond = fold_build2_loc (input_location, NE_EXPR, logical_type_node,
se.expr, build_int_cst (TREE_TYPE (se.expr), 0));
/* For CLASS(*) not only sym->_vtab->_final can be NULL
@@ -1230,11 +1234,11 @@ gfc_add_finalizer_call (stmtblock_t *block, gfc_expr *expr2)
gfc_conv_expr (&se, vptr_expr);
gfc_free_expr (vptr_expr);
- cond2 = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ cond2 = fold_build2_loc (input_location, NE_EXPR, logical_type_node,
se.expr,
build_int_cst (TREE_TYPE (se.expr), 0));
cond = fold_build2_loc (input_location, TRUTH_ANDIF_EXPR,
- boolean_type_node, cond2, cond);
+ logical_type_node, cond2, cond);
}
tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node,
@@ -1340,7 +1344,7 @@ gfc_deallocate_with_status (tree pointer, tree status, tree errmsg,
else if (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (pointer)))
pointer = gfc_conv_descriptor_data_get (pointer);
- cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node, pointer,
+ cond = fold_build2_loc (input_location, EQ_EXPR, logical_type_node, pointer,
build_int_cst (TREE_TYPE (pointer), 0));
/* When POINTER is NULL, we set STATUS to 1 if it's present, otherwise
@@ -1367,7 +1371,7 @@ gfc_deallocate_with_status (tree pointer, tree status, tree errmsg,
tree cond2;
status_type = TREE_TYPE (TREE_TYPE (status));
- cond2 = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ cond2 = fold_build2_loc (input_location, NE_EXPR, logical_type_node,
status, build_int_cst (TREE_TYPE (status), 0));
tmp = fold_build2_loc (input_location, MODIFY_EXPR, status_type,
fold_build1_loc (input_location, INDIRECT_REF,
@@ -1400,7 +1404,7 @@ gfc_deallocate_with_status (tree pointer, tree status, tree errmsg,
tree status_type = TREE_TYPE (TREE_TYPE (status));
tree cond2;
- cond2 = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ cond2 = fold_build2_loc (input_location, NE_EXPR, logical_type_node,
status,
build_int_cst (TREE_TYPE (status), 0));
tmp = fold_build2_loc (input_location, MODIFY_EXPR, status_type,
@@ -1463,7 +1467,7 @@ gfc_deallocate_with_status (tree pointer, tree status, tree errmsg,
TREE_USED (label_finish) = 1;
tmp = build1_v (GOTO_EXPR, label_finish);
- cond2 = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ cond2 = fold_build2_loc (input_location, NE_EXPR, logical_type_node,
stat, build_zero_cst (TREE_TYPE (stat)));
tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node,
gfc_unlikely (cond2, PRED_FORTRAN_REALLOC),
@@ -1499,7 +1503,7 @@ gfc_deallocate_scalar_with_status (tree pointer, tree status, tree label_finish,
&& comp_ref)
caf_dereg_type = GFC_CAF_COARRAY_DEALLOCATE_ONLY;
- cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node, pointer,
+ cond = fold_build2_loc (input_location, EQ_EXPR, logical_type_node, pointer,
build_int_cst (TREE_TYPE (pointer), 0));
/* When POINTER is NULL, we set STATUS to 1 if it's present, otherwise
@@ -1526,7 +1530,7 @@ gfc_deallocate_scalar_with_status (tree pointer, tree status, tree label_finish,
tree status_type = TREE_TYPE (TREE_TYPE (status));
tree cond2;
- cond2 = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ cond2 = fold_build2_loc (input_location, NE_EXPR, logical_type_node,
status, build_int_cst (TREE_TYPE (status), 0));
tmp = fold_build2_loc (input_location, MODIFY_EXPR, status_type,
fold_build1_loc (input_location, INDIRECT_REF,
@@ -1571,7 +1575,7 @@ gfc_deallocate_scalar_with_status (tree pointer, tree status, tree label_finish,
tree status_type = TREE_TYPE (TREE_TYPE (status));
tree cond2;
- cond2 = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ cond2 = fold_build2_loc (input_location, NE_EXPR, logical_type_node,
status,
build_int_cst (TREE_TYPE (status), 0));
tmp = fold_build2_loc (input_location, MODIFY_EXPR, status_type,
@@ -1621,7 +1625,7 @@ gfc_deallocate_scalar_with_status (tree pointer, tree status, tree label_finish,
TREE_USED (label_finish) = 1;
tmp = build1_v (GOTO_EXPR, label_finish);
- cond2 = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ cond2 = fold_build2_loc (input_location, NE_EXPR, logical_type_node,
stat, build_zero_cst (TREE_TYPE (stat)));
tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node,
gfc_unlikely (cond2, PRED_FORTRAN_REALLOC),
@@ -1664,11 +1668,11 @@ gfc_call_realloc (stmtblock_t * block, tree mem, tree size)
builtin_decl_explicit (BUILT_IN_REALLOC), 2,
fold_convert (pvoid_type_node, mem), size);
gfc_add_modify (block, res, fold_convert (type, tmp));
- null_result = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
+ null_result = fold_build2_loc (input_location, EQ_EXPR, logical_type_node,
res, build_int_cst (pvoid_type_node, 0));
- nonzero = fold_build2_loc (input_location, NE_EXPR, boolean_type_node, size,
+ nonzero = fold_build2_loc (input_location, NE_EXPR, logical_type_node, size,
build_int_cst (size_type_node, 0));
- null_result = fold_build2_loc (input_location, TRUTH_AND_EXPR, boolean_type_node,
+ null_result = fold_build2_loc (input_location, TRUTH_AND_EXPR, logical_type_node,
null_result, nonzero);
msg = gfc_build_addr_expr (pchar_type_node, gfc_build_localized_cstring_const
("Allocation would exceed memory limit"));
diff --git a/gcc/gcov-dump.c b/gcc/gcov-dump.c
index d24e72ac4a1..e5e649cb18f 100644
--- a/gcc/gcov-dump.c
+++ b/gcc/gcov-dump.c
@@ -217,6 +217,11 @@ dump_gcov_file (const char *filename)
printf ("%s:stamp %lu\n", filename, (unsigned long)stamp);
}
+ /* Support for unexecuted basic blocks. */
+ unsigned support_unexecuted_blocks = gcov_read_unsigned ();
+ if (!support_unexecuted_blocks)
+ printf ("%s: has_unexecuted_block is not supported\n", filename);
+
while (1)
{
gcov_position_t base, position = gcov_position ();
@@ -308,9 +313,15 @@ tag_function (const char *filename ATTRIBUTE_UNUSED,
name = gcov_read_string ();
printf (", `%s'", name ? name : "NULL");
+ unsigned artificial = gcov_read_unsigned ();
name = gcov_read_string ();
printf (" %s", name ? name : "NULL");
- printf (":%u", gcov_read_unsigned ());
+ unsigned line_start = gcov_read_unsigned ();
+ unsigned column_start = gcov_read_unsigned ();
+ unsigned line_end = gcov_read_unsigned ();
+ printf (":%u:%u:%u", line_start, column_start, line_end);
+ if (artificial)
+ printf (", artificial");
}
}
}
diff --git a/gcc/gcov.c b/gcc/gcov.c
index 48bcdc0d4c3..24e6da09fcf 100644
--- a/gcc/gcov.c
+++ b/gcc/gcov.c
@@ -34,6 +34,8 @@ along with Gcov; see the file COPYING3. If not see
#define INCLUDE_ALGORITHM
#define INCLUDE_VECTOR
#define INCLUDE_STRING
+#define INCLUDE_MAP
+#define INCLUDE_SET
#include "system.h"
#include "coretypes.h"
#include "tm.h"
@@ -77,7 +79,7 @@ struct source_info;
/* Describes an arc between two basic blocks. */
-typedef struct arc_info
+struct arc_info
{
/* source and destination blocks. */
struct block_info *src;
@@ -111,7 +113,7 @@ typedef struct arc_info
/* Links to next arc on src and dst lists. */
struct arc_info *succ_next;
struct arc_info *pred_next;
-} arc_t;
+};
/* Describes which locations (lines and files) are associated with
a basic block. */
@@ -129,14 +131,14 @@ struct block_location_info
/* Describes a basic block. Contains lists of arcs to successor and
predecessor blocks. */
-typedef struct block_info
+struct block_info
{
/* Constructor. */
block_info ();
/* Chain of exit and entry arcs. */
- arc_t *succ;
- arc_t *pred;
+ arc_info *succ;
+ arc_info *pred;
/* Number of unprocessed exit and entry arcs. */
gcov_type num_succ;
@@ -164,7 +166,7 @@ typedef struct block_info
{
/* Single line graph cycle workspace. Used for all-blocks
mode. */
- arc_t *arc;
+ arc_info *arc;
unsigned ident;
} cycle; /* Used in all-blocks mode, after blocks are linked onto
lines. */
@@ -173,7 +175,7 @@ typedef struct block_info
line. */
struct block_info *chain;
-} block_t;
+};
block_info::block_info (): succ (NULL), pred (NULL), num_succ (0), num_pred (0),
id (0), count (0), count_valid (0), valid_chain (0), invalid_chain (0),
@@ -183,13 +185,61 @@ block_info::block_info (): succ (NULL), pred (NULL), num_succ (0), num_pred (0),
cycle.arc = NULL;
}
+/* Describes a single line of source. Contains a chain of basic blocks
+ with code on it. */
+
+struct line_info
+{
+ /* Default constructor. */
+ line_info ();
+
+ /* Return true when NEEDLE is one of basic blocks the line belongs to. */
+ bool has_block (block_info *needle);
+
+ /* Execution count. */
+ gcov_type count;
+
+ /* Branches from blocks that end on this line. */
+ vector<arc_info *> branches;
+
+ /* blocks which start on this line. Used in all-blocks mode. */
+ vector<block_info *> blocks;
+
+ unsigned exists : 1;
+ unsigned unexceptional : 1;
+ unsigned has_unexecuted_block : 1;
+};
+
+line_info::line_info (): count (0), branches (), blocks (), exists (false),
+ unexceptional (0), has_unexecuted_block (0)
+{
+}
+
+bool
+line_info::has_block (block_info *needle)
+{
+ return std::find (blocks.begin (), blocks.end (), needle) != blocks.end ();
+}
+
/* Describes a single function. Contains an array of basic blocks. */
-typedef struct function_info
+struct function_info
{
function_info ();
~function_info ();
+ /* Return true when line N belongs to the function in source file SRC_IDX.
+ The line must be defined in body of the function, can't be inlined. */
+ bool group_line_p (unsigned n, unsigned src_idx);
+
+ /* Function filter based on function_info::artificial variable. */
+
+ static inline bool
+ is_artificial (function_info *fn)
+ {
+ return fn->artificial;
+ }
+
/* Name of function. */
char *name;
char *demangled_name;
@@ -200,31 +250,59 @@ typedef struct function_info
/* The graph contains at least one fake incoming edge. */
unsigned has_catch : 1;
+ /* True when the function is artificial and does not exist
+ in a source file. */
+ unsigned artificial : 1;
+
+ /* True when multiple functions start at a line in a source file. */
+ unsigned is_group : 1;
+
/* Array of basic blocks. Like in GCC, the entry block is
at blocks[0] and the exit block is at blocks[1]. */
#define ENTRY_BLOCK (0)
#define EXIT_BLOCK (1)
- vector<block_t> blocks;
+ vector<block_info> blocks;
unsigned blocks_executed;
/* Raw arc coverage counts. */
- gcov_type *counts;
- unsigned num_counts;
+ vector<gcov_type> counts;
- /* First line number & file. */
- unsigned line;
+ /* First line number. */
+ unsigned start_line;
+
+ /* First line column. */
+ unsigned start_column;
+
+ /* Last line number. */
+ unsigned end_line;
+
+ /* Index of source file where the function is defined. */
unsigned src;
- /* Next function in same source file. */
- struct function_info *next_file_fn;
+ /* Vector of line information. */
+ vector<line_info> lines;
/* Next function. */
struct function_info *next;
-} function_t;
+};
+
+/* Function info comparer that will sort functions according to starting
+ line. */
+
+struct function_line_start_cmp
+{
+ inline bool operator() (const function_info *lhs,
+ const function_info *rhs)
+ {
+ return (lhs->start_line == rhs->start_line
+ ? lhs->start_column < rhs->start_column
+ : lhs->start_line < rhs->start_line);
+ }
+};
/* Describes coverage of a file or function. */
-typedef struct coverage_info
+struct coverage_info
{
int lines;
int lines_executed;
@@ -237,44 +315,8 @@ typedef struct coverage_info
int calls_executed;
char *name;
-} coverage_t;
-
-/* Describes a single line of source. Contains a chain of basic blocks
- with code on it. */
-
-struct line_info
-{
- /* Default constructor. */
- line_info ();
-
- /* Return true when NEEDLE is one of basic blocks the line belongs to. */
- bool has_block (block_t *needle);
-
- /* Execution count. */
- gcov_type count;
-
- /* Branches from blocks that end on this line. */
- vector<arc_t *> branches;
-
- /* blocks which start on this line. Used in all-blocks mode. */
- vector<block_t *> blocks;
-
- unsigned exists : 1;
- unsigned unexceptional : 1;
- unsigned has_unexecuted_block : 1;
};
-line_info::line_info (): count (0), branches (), blocks (), exists (false),
- unexceptional (0), has_unexecuted_block (0)
-{
-}
-
-bool
-line_info::has_block (block_t *needle)
-{
- return std::find (blocks.begin (), blocks.end (), needle) != blocks.end ();
-}
-
/* Describes a file mentioned in the block graph. Contains an array
of line info. */
@@ -283,6 +325,11 @@ struct source_info
/* Default constructor. */
source_info ();
+ vector<function_info *> get_functions_at_location (unsigned line_num) const;
+
+ /* Index of the source_info in sources vector. */
+ unsigned index;
+
/* Canonical name of source file. */
char *name;
time_t file_time;
@@ -290,18 +337,35 @@ struct source_info
/* Vector of line information. */
vector<line_info> lines;
- coverage_t coverage;
+ coverage_info coverage;
/* Functions in this source file. These are in ascending line
number order. */
- function_t *functions;
+ vector <function_info *> functions;
};
-source_info::source_info (): name (NULL), file_time (), lines (),
- coverage (), functions (NULL)
+source_info::source_info (): index (0), name (NULL), file_time (),
+ lines (), coverage (), functions ()
{
}
+vector<function_info *>
+source_info::get_functions_at_location (unsigned line_num) const
+{
+ vector<function_info *> r;
+
+ for (vector<function_info *>::const_iterator it = functions.begin ();
+ it != functions.end (); it++)
+ {
+ if ((*it)->start_line == line_num && (*it)->src == index)
+ r.push_back (*it);
+ }
+
+ std::sort (r.begin (), r.end (), function_line_start_cmp ());
+
+ return r;
+}
+
class name_map
{
public:
@@ -335,10 +399,8 @@ public:
unsigned src; /* Source file */
};
-/* Holds a list of function basic block graphs. */
-
-static function_t *functions;
-static function_t **fn_end = &functions;
+/* Vector of all functions. */
+static vector<function_info *> functions;
/* Vector of source files. */
static vector<source_info> sources;
@@ -367,6 +429,9 @@ static char *bbg_file_name;
/* Stamp of the bbg file */
static unsigned bbg_stamp;
+/* Supports has_unexecuted_blocks functionality. */
+static unsigned bbg_supports_has_unexecuted_blocks;
+
/* Name and file pointer of the input file for the count data (gcda). */
static char *da_file_name;
@@ -475,18 +540,18 @@ static void generate_results (const char *);
static void create_file_names (const char *);
static char *canonicalize_name (const char *);
static unsigned find_source (const char *);
-static function_t *read_graph_file (void);
-static int read_count_file (function_t *);
-static void solve_flow_graph (function_t *);
-static void find_exception_blocks (function_t *);
-static void add_branch_counts (coverage_t *, const arc_t *);
-static void add_line_counts (coverage_t *, function_t *);
+static void read_graph_file (void);
+static int read_count_file (void);
+static void solve_flow_graph (function_info *);
+static void find_exception_blocks (function_info *);
+static void add_branch_counts (coverage_info *, const arc_info *);
+static void add_line_counts (coverage_info *, function_info *);
static void executed_summary (unsigned, unsigned);
-static void function_summary (const coverage_t *, const char *);
+static void function_summary (const coverage_info *, const char *);
static const char *format_gcov (gcov_type, gcov_type, int);
static void accumulate_line_counts (source_info *);
static void output_gcov_file (const char *, source_info *);
-static int output_branch_count (FILE *, int, const arc_t *);
+static int output_branch_count (FILE *, int, const arc_info *);
static void output_lines (FILE *, const source_info *);
static char *make_gcov_file_name (const char *, const char *);
static char *mangle_name (const char *, char *);
@@ -495,8 +560,9 @@ extern int main (int, char **);
function_info::function_info (): name (NULL), demangled_name (NULL),
ident (0), lineno_checksum (0), cfg_checksum (0), has_catch (0),
- blocks (), blocks_executed (0), counts (NULL), num_counts (0),
- line (0), src (0), next_file_fn (NULL), next (NULL)
+ artificial (0), is_group (0),
+ blocks (), blocks_executed (0), counts (),
+ start_line (0), start_column (), end_line (0), src (0), lines (), next (NULL)
{
}
@@ -504,7 +570,7 @@ function_info::~function_info ()
{
for (int i = blocks.size () - 1; i >= 0; i--)
{
- arc_t *arc, *arc_n;
+ arc_info *arc, *arc_n;
for (arc = blocks[i].succ; arc; arc = arc_n)
{
@@ -512,12 +578,16 @@ function_info::~function_info ()
free (arc);
}
}
- free (counts);
if (flag_demangled_names && demangled_name != name)
free (demangled_name);
free (name);
}
+bool function_info::group_line_p (unsigned n, unsigned src_idx)
+{
+ return is_group && src == src_idx && start_line <= n && n <= end_line;
+}
+
/* Cycle detection!
There are a bajillion algorithms that do this. Boost's function is named
hawick_cycles, so I used the algorithm by K. A. Hawick and H. A. James in
@@ -530,8 +600,8 @@ function_info::~function_info ()
simple paths)--the node is unblocked only when it participates in a cycle.
*/
-typedef vector<arc_t *> arc_vector_t;
-typedef vector<const block_t *> block_vector_t;
+typedef vector<arc_info *> arc_vector_t;
+typedef vector<const block_info *> block_vector_t;
/* Enum with types of loop in CFG. */
@@ -576,7 +646,7 @@ handle_cycle (const arc_vector_t &edges, int64_t &count)
blocked by U in BLOCK_LISTS. */
static void
-unblock (const block_t *u, block_vector_t &blocked,
+unblock (const block_info *u, block_vector_t &blocked,
vector<block_vector_t > &block_lists)
{
block_vector_t::iterator it = find (blocked.begin (), blocked.end (), u);
@@ -601,7 +671,7 @@ unblock (const block_t *u, block_vector_t &blocked,
Returns what type of loop it contains. */
static loop_type
-circuit (block_t *v, arc_vector_t &path, block_t *start,
+circuit (block_info *v, arc_vector_t &path, block_info *start,
block_vector_t &blocked, vector<block_vector_t> &block_lists,
line_info &linfo, int64_t &count)
{
@@ -612,9 +682,9 @@ circuit (block_t *v, arc_vector_t &path, block_t *start,
blocked.push_back (v);
block_lists.push_back (block_vector_t ());
- for (arc_t *arc = v->succ; arc; arc = arc->succ_next)
+ for (arc_info *arc = v->succ; arc; arc = arc->succ_next)
{
- block_t *w = arc->dst;
+ block_info *w = arc->dst;
if (w < start || !linfo.has_block (w))
continue;
@@ -631,9 +701,9 @@ circuit (block_t *v, arc_vector_t &path, block_t *start,
if (result != NO_LOOP)
unblock (v, blocked, block_lists);
else
- for (arc_t *arc = v->succ; arc; arc = arc->succ_next)
+ for (arc_info *arc = v->succ; arc; arc = arc->succ_next)
{
- block_t *w = arc->dst;
+ block_info *w = arc->dst;
if (w < start || !linfo.has_block (w))
continue;
@@ -662,7 +732,7 @@ get_cycles_count (line_info &linfo, bool handle_negative_cycles = true)
loop_type result = NO_LOOP;
gcov_type count = 0;
- for (vector<block_t *>::iterator it = linfo.blocks.begin ();
+ for (vector<block_info *>::iterator it = linfo.blocks.begin ();
it != linfo.blocks.end (); it++)
{
arc_vector_t path;
@@ -718,11 +788,13 @@ main (int argc, char **argv)
printf ("Processing file %d out of %d\n", argno - first_arg + 1,
argc - first_arg);
process_file (argv[argno]);
- }
-
- generate_results (multiple_files ? NULL : argv[argc - 1]);
- release_structures ();
+ if (flag_intermediate_format || argno == argc - 1)
+ {
+ generate_results (argv[argno]);
+ release_structures ();
+ }
+ }
return 0;
}
@@ -889,6 +961,67 @@ process_args (int argc, char **argv)
return optind;
}
+/* Output intermediate LINE sitting on LINE_NUM to output file F. */
+
+static void
+output_intermediate_line (FILE *f, line_info *line, unsigned line_num)
+{
+ if (!line->exists)
+ return;
+
+ fprintf (f, "lcount:%u,%s,%d\n", line_num,
+ format_gcov (line->count, 0, -1),
+ line->has_unexecuted_block);
+
+ vector<arc_info *>::const_iterator it;
+ if (flag_branches)
+ for (it = line->branches.begin (); it != line->branches.end ();
+ it++)
+ {
+ if (!(*it)->is_unconditional && !(*it)->is_call_non_return)
+ {
+ const char *branch_type;
+ /* branch:<line_num>,<branch_coverage_infoype>
+ branch_coverage_infoype
+ : notexec (Branch not executed)
+ : taken (Branch executed and taken)
+ : nottaken (Branch executed, but not taken)
+ */
+ if ((*it)->src->count)
+ branch_type
+ = ((*it)->count > 0) ? "taken" : "nottaken";
+ else
+ branch_type = "notexec";
+ fprintf (f, "branch:%d,%s\n", line_num, branch_type);
+ }
+ }
+}
+
+/* Get the name of the gcov file. The return value must be free'd.
+
+ It appends the '.gcov' extension to the *basename* of the file.
+ The resulting file name will be in PWD.
+
+ e.g.,
+ input: foo.da, output: foo.da.gcov
+ input: a/b/foo.cc, output: foo.cc.gcov */
+
+static char *
+get_gcov_intermediate_filename (const char *file_name)
+{
+ const char *gcov = ".gcov";
+ char *result;
+ const char *cptr;
+
+ /* Find the 'basename'. */
+ cptr = lbasename (file_name);
+
+ result = XNEWVEC (char, strlen (cptr) + strlen (gcov) + 1);
+ sprintf (result, "%s%s", cptr, gcov);
+
+ return result;
+}
+
/* Output the result in intermediate format used by 'lcov'.
The intermediate format contains a single file named 'foo.cc.gcov',
@@ -902,119 +1035,194 @@ file 'foo.cc.gcov' similar to the above example. */
static void
output_intermediate_file (FILE *gcov_file, source_info *src)
{
- unsigned line_num; /* current line number. */
- const line_info *line; /* current line info ptr. */
- function_t *fn; /* current function info ptr. */
-
fprintf (gcov_file, "file:%s\n", src->name); /* source file name */
- for (fn = src->functions; fn; fn = fn->next_file_fn)
+ std::sort (src->functions.begin (), src->functions.end (),
+ function_line_start_cmp ());
+ for (vector<function_info *>::iterator it = src->functions.begin ();
+ it != src->functions.end (); it++)
{
/* function:<name>,<line_number>,<execution_count> */
- fprintf (gcov_file, "function:%d,%s,%s\n", fn->line,
- format_gcov (fn->blocks[0].count, 0, -1),
- flag_demangled_names ? fn->demangled_name : fn->name);
+ fprintf (gcov_file, "function:%d,%d,%s,%s\n", (*it)->start_line,
+ (*it)->end_line, format_gcov ((*it)->blocks[0].count, 0, -1),
+ flag_demangled_names ? (*it)->demangled_name : (*it)->name);
}
- for (line_num = 1, line = &src->lines[line_num];
- line_num < src->lines.size ();
- line_num++, line++)
+ for (unsigned line_num = 1; line_num <= src->lines.size (); line_num++)
{
- if (line->exists)
- fprintf (gcov_file, "lcount:%u,%s,%d\n", line_num,
- format_gcov (line->count, 0, -1), line->has_unexecuted_block);
- if (flag_branches)
- for (vector<arc_t *>::const_iterator it = line->branches.begin ();
- it != line->branches.end (); it++)
- {
- if (!(*it)->is_unconditional && !(*it)->is_call_non_return)
- {
- const char *branch_type;
- /* branch:<line_num>,<branch_coverage_type>
- branch_coverage_type
- : notexec (Branch not executed)
- : taken (Branch executed and taken)
- : nottaken (Branch executed, but not taken)
- */
- if ((*it)->src->count)
- branch_type = ((*it)->count > 0) ? "taken" : "nottaken";
- else
- branch_type = "notexec";
- fprintf (gcov_file, "branch:%d,%s\n", line_num, branch_type);
- }
- }
+ vector<function_info *> fns = src->get_functions_at_location (line_num);
+
+ /* Print first group functions that begin on the line. */
+ for (vector<function_info *>::iterator it2 = fns.begin ();
+ it2 != fns.end (); it2++)
+ {
+ vector<line_info> &lines = (*it2)->lines;
+ for (unsigned i = 0; i < lines.size (); i++)
+ {
+ line_info *line = &lines[i];
+ output_intermediate_line (gcov_file, line, line_num + i);
+ }
+ }
+
+ /* Follow with lines associated with the source file. */
+ output_intermediate_line (gcov_file, &src->lines[line_num], line_num);
}
}
+/* Function start pair. */
+struct function_start
+{
+ unsigned source_file_idx;
+ unsigned start_line;
+};
+
+/* Traits class for function start hash maps below. */
+
+struct function_start_pair_hash : typed_noop_remove <function_start>
+{
+ typedef function_start value_type;
+ typedef function_start compare_type;
+
+ static hashval_t
+ hash (const function_start &ref)
+ {
+ inchash::hash hstate (0);
+ hstate.add_int (ref.source_file_idx);
+ hstate.add_int (ref.start_line);
+ return hstate.end ();
+ }
+
+ static bool
+ equal (const function_start &ref1, const function_start &ref2)
+ {
+ return (ref1.source_file_idx == ref2.source_file_idx
+ && ref1.start_line == ref2.start_line);
+ }
+
+ static void
+ mark_deleted (function_start &ref)
+ {
+ ref.start_line = ~1U;
+ }
+
+ static void
+ mark_empty (function_start &ref)
+ {
+ ref.start_line = ~2U;
+ }
+
+ static bool
+ is_deleted (const function_start &ref)
+ {
+ return ref.start_line == ~1U;
+ }
+
+ static bool
+ is_empty (const function_start &ref)
+ {
+ return ref.start_line == ~2U;
+ }
+};
+
/* Process a single input file. */
static void
process_file (const char *file_name)
{
- function_t *fns;
-
create_file_names (file_name);
- fns = read_graph_file ();
- if (!fns)
+ read_graph_file ();
+ if (functions.empty ())
return;
- read_count_file (fns);
- while (fns)
+ read_count_file ();
+
+ hash_map<function_start_pair_hash, function_info *> fn_map;
+
+ /* Identify group functions. */
+ for (vector<function_info *>::iterator it = functions.begin ();
+ it != functions.end (); it++)
+ if (!(*it)->artificial)
+ {
+ function_start needle;
+ needle.source_file_idx = (*it)->src;
+ needle.start_line = (*it)->start_line;
+
+ function_info **slot = fn_map.get (needle);
+ if (slot)
+ {
+ gcc_assert ((*slot)->end_line == (*it)->end_line);
+ (*slot)->is_group = 1;
+ (*it)->is_group = 1;
+ }
+ else
+ fn_map.put (needle, *it);
+ }
+
+ /* Remove all artificial function. */
+ functions.erase (remove_if (functions.begin (), functions.end (),
+ function_info::is_artificial), functions.end ());
+
+ for (vector<function_info *>::iterator it = functions.begin ();
+ it != functions.end (); it++)
{
- function_t *fn = fns;
+ function_info *fn = *it;
+ unsigned src = fn->src;
- fns = fn->next;
- fn->next = NULL;
- if (fn->counts || no_data_file)
+ if (!fn->counts.empty () || no_data_file)
{
- unsigned src = fn->src;
- unsigned line = fn->line;
- unsigned block_no;
- function_t *probe, **prev;
-
- /* Now insert it into the source file's list of
- functions. Normally functions will be encountered in
- ascending order, so a simple scan is quick. Note we're
- building this list in reverse order. */
- for (prev = &sources[src].functions;
- (probe = *prev); prev = &probe->next_file_fn)
- if (probe->line <= line)
- break;
- fn->next_file_fn = probe;
- *prev = fn;
+ source_info *s = &sources[src];
+ s->functions.push_back (fn);
/* Mark last line in files touched by function. */
- for (block_no = 0; block_no != fn->blocks.size (); block_no++)
+ for (unsigned block_no = 0; block_no != fn->blocks.size ();
+ block_no++)
{
- block_t *block = &fn->blocks[block_no];
+ block_info *block = &fn->blocks[block_no];
for (unsigned i = 0; i < block->locations.size (); i++)
{
- unsigned s = block->locations[i].source_file_idx;
-
/* Sort lines of locations. */
sort (block->locations[i].lines.begin (),
block->locations[i].lines.end ());
if (!block->locations[i].lines.empty ())
{
+ s = &sources[block->locations[i].source_file_idx];
unsigned last_line
- = block->locations[i].lines.back () + 1;
- if (last_line > sources[s].lines.size ())
- sources[s].lines.resize (last_line);
+ = block->locations[i].lines.back ();
+
+ /* Record new lines for the function. */
+ if (last_line >= s->lines.size ())
+ {
+ s = &sources[block->locations[i].source_file_idx];
+ unsigned last_line
+ = block->locations[i].lines.back ();
+
+ /* Record new lines for the function. */
+ if (last_line >= s->lines.size ())
+ {
+ /* Record new lines for a source file. */
+ s->lines.resize (last_line + 1);
+ }
+ }
}
}
}
+ /* Allocate lines for group function, following start_line
+ and end_line information of the function. */
+ if (fn->is_group)
+ fn->lines.resize (fn->end_line - fn->start_line + 1);
+
+
solve_flow_graph (fn);
if (fn->has_catch)
find_exception_blocks (fn);
- *fn_end = fn;
- fn_end = &fn->next;
}
else
- /* The function was not in the executable -- some other
- instance must have been selected. */
- delete fn;
+ {
+ /* The function was not in the executable -- some other
+ instance must have been selected. */
+ }
}
}
@@ -1027,19 +1235,16 @@ output_gcov_file (const char *file_name, source_info *src)
{
FILE *gcov_file = fopen (gcov_file_name, "w");
if (gcov_file)
- {
- fnotice (stdout, "Creating '%s'\n", gcov_file_name);
-
- if (flag_intermediate_format)
- output_intermediate_file (gcov_file, src);
- else
- output_lines (gcov_file, src);
- if (ferror (gcov_file))
- fnotice (stderr, "Error writing output file '%s'\n", gcov_file_name);
- fclose (gcov_file);
- }
+ {
+ fnotice (stdout, "Creating '%s'\n", gcov_file_name);
+ output_lines (gcov_file, src);
+ if (ferror (gcov_file))
+ fnotice (stderr, "Error writing output file '%s'\n",
+ gcov_file_name);
+ fclose (gcov_file);
+ }
else
- fnotice (stderr, "Could not open output file '%s'\n", gcov_file_name);
+ fnotice (stderr, "Could not open output file '%s'\n", gcov_file_name);
}
else
{
@@ -1052,11 +1257,14 @@ output_gcov_file (const char *file_name, source_info *src)
static void
generate_results (const char *file_name)
{
- function_t *fn;
+ FILE *gcov_intermediate_file = NULL;
+ char *gcov_intermediate_filename = NULL;
- for (fn = functions; fn; fn = fn->next)
+ for (vector<function_info *>::iterator it = functions.begin ();
+ it != functions.end (); it++)
{
- coverage_t coverage;
+ function_info *fn = *it;
+ coverage_info coverage;
memset (&coverage, 0, sizeof (coverage));
coverage.name = flag_demangled_names ? fn->demangled_name : fn->name;
@@ -1081,6 +1289,19 @@ generate_results (const char *file_name)
file_name = canonicalize_name (file_name);
}
+ if (flag_gcov_file && flag_intermediate_format)
+ {
+ /* Open the intermediate file. */
+ gcov_intermediate_filename = get_gcov_intermediate_filename (file_name);
+ gcov_intermediate_file = fopen (gcov_intermediate_filename, "w");
+ if (!gcov_intermediate_file)
+ {
+ fnotice (stderr, "Cannot open intermediate output file %s\n",
+ gcov_intermediate_filename);
+ return;
+ }
+ }
+
for (vector<source_info>::iterator it = sources.begin ();
it != sources.end (); it++)
{
@@ -1105,9 +1326,21 @@ generate_results (const char *file_name)
total_executed += src->coverage.lines_executed;
if (flag_gcov_file)
{
- output_gcov_file (file_name, src);
- fnotice (stdout, "\n");
- }
+ if (flag_intermediate_format)
+ /* Output the intermediate format without requiring source
+ files. This outputs a section to a *single* file. */
+ output_intermediate_file (gcov_intermediate_file, src);
+ else
+ output_gcov_file (file_name, src);
+ fnotice (stdout, "\n");
+ }
+ }
+
+ if (flag_gcov_file && flag_intermediate_format)
+ {
+ /* Now we've finished writing the intermediate file. */
+ fclose (gcov_intermediate_file);
+ XDELETEVEC (gcov_intermediate_filename);
}
if (!file_name)
@@ -1119,13 +1352,13 @@ generate_results (const char *file_name)
static void
release_structures (void)
{
- function_t *fn;
+ for (vector<function_info *>::iterator it = functions.begin ();
+ it != functions.end (); it++)
+ delete (*it);
- while ((fn = functions))
- {
- functions = fn->next;
- delete fn;
- }
+ sources.resize (0);
+ names.resize (0);
+ functions.resize (0);
}
/* Generate the names of the graph and data files. If OBJECT_DIRECTORY
@@ -1237,6 +1470,7 @@ find_source (const char *file_name)
src = &sources.back ();
src->name = canon;
src->coverage.name = src->name;
+ src->index = idx;
if (source_length
#if HAVE_DOS_BASED_FILE_SYSTEM
/* You lose if separators don't match exactly in the
@@ -1282,29 +1516,26 @@ find_source (const char *file_name)
return idx;
}
-/* Read the notes file. Return list of functions read -- in reverse order. */
+/* Read the notes file. Save functions to FUNCTIONS global vector. */
-static function_t *
+static void
read_graph_file (void)
{
unsigned version;
unsigned current_tag = 0;
- function_t *fn = NULL;
- function_t *fns = NULL;
- function_t **fns_end = &fns;
unsigned tag;
if (!gcov_open (bbg_file_name, 1))
{
fnotice (stderr, "%s:cannot open notes file\n", bbg_file_name);
- return fns;
+ return;
}
bbg_file_time = gcov_time ();
if (!gcov_magic (gcov_read_unsigned (), GCOV_NOTE_MAGIC))
{
fnotice (stderr, "%s:not a gcov notes file\n", bbg_file_name);
gcov_close ();
- return fns;
+ return;
}
version = gcov_read_unsigned ();
@@ -1319,7 +1550,9 @@ read_graph_file (void)
bbg_file_name, v, e);
}
bbg_stamp = gcov_read_unsigned ();
+ bbg_supports_has_unexecuted_blocks = gcov_read_unsigned ();
+ function_info *fn = NULL;
while ((tag = gcov_read_unsigned ()))
{
unsigned length = gcov_read_unsigned ();
@@ -1328,17 +1561,21 @@ read_graph_file (void)
if (tag == GCOV_TAG_FUNCTION)
{
char *function_name;
- unsigned ident, lineno;
+ unsigned ident;
unsigned lineno_checksum, cfg_checksum;
ident = gcov_read_unsigned ();
lineno_checksum = gcov_read_unsigned ();
cfg_checksum = gcov_read_unsigned ();
function_name = xstrdup (gcov_read_string ());
+ unsigned artificial = gcov_read_unsigned ();
unsigned src_idx = find_source (gcov_read_string ());
- lineno = gcov_read_unsigned ();
+ unsigned start_line = gcov_read_unsigned ();
+ unsigned start_column = gcov_read_unsigned ();
+ unsigned end_line = gcov_read_unsigned ();
- fn = new function_t;
+ fn = new function_info ();
+ functions.push_back (fn);
fn->name = function_name;
if (flag_demangled_names)
{
@@ -1350,12 +1587,11 @@ read_graph_file (void)
fn->lineno_checksum = lineno_checksum;
fn->cfg_checksum = cfg_checksum;
fn->src = src_idx;
- fn->line = lineno;
+ fn->start_line = start_line;
+ fn->start_column = start_column;
+ fn->end_line = end_line;
+ fn->artificial = artificial;
- fn->next_file_fn = NULL;
- fn->next = NULL;
- *fns_end = fn;
- fns_end = &fn->next;
current_tag = tag;
}
else if (fn && tag == GCOV_TAG_BLOCKS)
@@ -1371,7 +1607,7 @@ read_graph_file (void)
unsigned src = gcov_read_unsigned ();
fn->blocks[src].id = src;
unsigned num_dests = GCOV_TAG_ARCS_NUM (length);
- block_t *src_blk = &fn->blocks[src];
+ block_info *src_blk = &fn->blocks[src];
unsigned mark_catches = 0;
struct arc_info *arc;
@@ -1385,7 +1621,7 @@ read_graph_file (void)
if (dest >= fn->blocks.size ())
goto corrupt;
- arc = XCNEW (arc_t);
+ arc = XCNEW (arc_info);
arc->dst = &fn->blocks[dest];
arc->src = src_blk;
@@ -1424,7 +1660,7 @@ read_graph_file (void)
}
if (!arc->on_tree)
- fn->num_counts++;
+ fn->counts.push_back (0);
}
if (mark_catches)
@@ -1444,7 +1680,7 @@ read_graph_file (void)
else if (fn && tag == GCOV_TAG_LINES)
{
unsigned blockno = gcov_read_unsigned ();
- block_t *block = &fn->blocks[blockno];
+ block_info *block = &fn->blocks[blockno];
if (blockno >= fn->blocks.size ())
goto corrupt;
@@ -1481,22 +1717,20 @@ read_graph_file (void)
}
gcov_close ();
- if (!fns)
+ if (functions.empty ())
fnotice (stderr, "%s:no functions found\n", bbg_file_name);
-
- return fns;
}
/* Reads profiles from the count file and attach to each
function. Return nonzero if fatal error. */
static int
-read_count_file (function_t *fns)
+read_count_file (void)
{
unsigned ix;
unsigned version;
unsigned tag;
- function_t *fn = NULL;
+ function_info *fn = NULL;
int error = 0;
if (!gcov_open (da_file_name, 1))
@@ -1548,26 +1782,20 @@ read_count_file (function_t *fns)
else if (tag == GCOV_TAG_FUNCTION && length == GCOV_TAG_FUNCTION_LENGTH)
{
unsigned ident;
- struct function_info *fn_n;
/* Try to find the function in the list. To speed up the
search, first start from the last function found. */
ident = gcov_read_unsigned ();
- fn_n = fns;
- for (fn = fn ? fn->next : NULL; ; fn = fn->next)
+
+ fn = NULL;
+ for (vector<function_info *>::reverse_iterator it
+ = functions.rbegin (); it != functions.rend (); it++)
{
- if (fn)
- ;
- else if ((fn = fn_n))
- fn_n = NULL;
- else
+ if ((*it)->ident == ident)
{
- fnotice (stderr, "%s:unknown function '%u'\n",
- da_file_name, ident);
+ fn = *it;
break;
}
- if (fn->ident == ident)
- break;
}
if (!fn)
@@ -1583,13 +1811,10 @@ read_count_file (function_t *fns)
}
else if (tag == GCOV_TAG_FOR_COUNTER (GCOV_COUNTER_ARCS) && fn)
{
- if (length != GCOV_TAG_COUNTER_LENGTH (fn->num_counts))
+ if (length != GCOV_TAG_COUNTER_LENGTH (fn->counts.size ()))
goto mismatch;
- if (!fn->counts)
- fn->counts = XCNEWVEC (gcov_type, fn->num_counts);
-
- for (ix = 0; ix != fn->num_counts; ix++)
+ for (ix = 0; ix != fn->counts.size (); ix++)
fn->counts[ix] += gcov_read_counter ();
}
gcov_sync (base, length);
@@ -1612,19 +1837,19 @@ read_count_file (function_t *fns)
to the blocks and the uninstrumented arcs. */
static void
-solve_flow_graph (function_t *fn)
+solve_flow_graph (function_info *fn)
{
unsigned ix;
- arc_t *arc;
- gcov_type *count_ptr = fn->counts;
- block_t *blk;
- block_t *valid_blocks = NULL; /* valid, but unpropagated blocks. */
- block_t *invalid_blocks = NULL; /* invalid, but inferable blocks. */
+ arc_info *arc;
+ gcov_type *count_ptr = &fn->counts.front ();
+ block_info *blk;
+ block_info *valid_blocks = NULL; /* valid, but unpropagated blocks. */
+ block_info *invalid_blocks = NULL; /* invalid, but inferable blocks. */
/* The arcs were built in reverse order. Fix that now. */
for (ix = fn->blocks.size (); ix--;)
{
- arc_t *arc_p, *arc_n;
+ arc_info *arc_p, *arc_n;
for (arc_p = NULL, arc = fn->blocks[ix].succ; arc;
arc_p = arc, arc = arc_n)
@@ -1670,7 +1895,7 @@ solve_flow_graph (function_t *fn)
for (unsigned i = 0; i < fn->blocks.size (); i++)
{
blk = &fn->blocks[i];
- block_t const *prev_dst = NULL;
+ block_info const *prev_dst = NULL;
int out_of_order = 0;
int non_fake_succ = 0;
@@ -1717,12 +1942,12 @@ solve_flow_graph (function_t *fn)
smart sort. */
if (out_of_order)
{
- arc_t *start = blk->succ;
+ arc_info *start = blk->succ;
unsigned changes = 1;
while (changes)
{
- arc_t *arc, *arc_p, *arc_n;
+ arc_info *arc, *arc_p, *arc_n;
changes = 0;
for (arc_p = NULL, arc = start; (arc_n = arc->succ_next);)
@@ -1760,7 +1985,7 @@ solve_flow_graph (function_t *fn)
while ((blk = invalid_blocks))
{
gcov_type total = 0;
- const arc_t *arc;
+ const arc_info *arc;
invalid_blocks = blk->chain;
blk->invalid_chain = 0;
@@ -1782,13 +2007,13 @@ solve_flow_graph (function_t *fn)
while ((blk = valid_blocks))
{
gcov_type total;
- arc_t *arc, *inv_arc;
+ arc_info *arc, *inv_arc;
valid_blocks = blk->chain;
blk->valid_chain = 0;
if (blk->num_succ == 1)
{
- block_t *dst;
+ block_info *dst;
total = blk->count;
inv_arc = NULL;
@@ -1824,7 +2049,7 @@ solve_flow_graph (function_t *fn)
}
if (blk->num_pred == 1)
{
- block_t *src;
+ block_info *src;
total = blk->count;
inv_arc = NULL;
@@ -1875,10 +2100,10 @@ solve_flow_graph (function_t *fn)
/* Mark all the blocks only reachable via an incoming catch. */
static void
-find_exception_blocks (function_t *fn)
+find_exception_blocks (function_info *fn)
{
unsigned ix;
- block_t **queue = XALLOCAVEC (block_t *, fn->blocks.size ());
+ block_info **queue = XALLOCAVEC (block_info *, fn->blocks.size ());
/* First mark all blocks as exceptional. */
for (ix = fn->blocks.size (); ix--;)
@@ -1889,8 +2114,8 @@ find_exception_blocks (function_t *fn)
queue[0]->exceptional = 0;
for (ix = 1; ix;)
{
- block_t *block = queue[--ix];
- const arc_t *arc;
+ block_info *block = queue[--ix];
+ const arc_info *arc;
for (arc = block->succ; arc; arc = arc->succ_next)
if (!arc->fake && !arc->is_throw && arc->dst->exceptional)
@@ -1905,7 +2130,7 @@ find_exception_blocks (function_t *fn)
/* Increment totals in COVERAGE according to arc ARC. */
static void
-add_branch_counts (coverage_t *coverage, const arc_t *arc)
+add_branch_counts (coverage_info *coverage, const arc_info *arc)
{
if (arc->is_call_non_return)
{
@@ -2017,7 +2242,7 @@ executed_summary (unsigned lines, unsigned executed)
/* Output summary info for a function or file. */
static void
-function_summary (const coverage_t *coverage, const char *title)
+function_summary (const coverage_info *coverage, const char *title)
{
fnotice (stdout, "%s '%s'\n", title, coverage->name);
executed_summary (coverage->lines, coverage->lines_executed);
@@ -2254,60 +2479,78 @@ mangle_name (char const *base, char *ptr)
the appropriate basic block. */
static void
-add_line_counts (coverage_t *coverage, function_t *fn)
+add_line_counts (coverage_info *coverage, function_info *fn)
{
bool has_any_line = false;
/* Scan each basic block. */
for (unsigned ix = 0; ix != fn->blocks.size (); ix++)
{
line_info *line = NULL;
- block_t *block = &fn->blocks[ix];
+ block_info *block = &fn->blocks[ix];
if (block->count && ix && ix + 1 != fn->blocks.size ())
fn->blocks_executed++;
for (unsigned i = 0; i < block->locations.size (); i++)
{
- source_info *src = &sources[block->locations[i].source_file_idx];
-
+ unsigned src_idx = block->locations[i].source_file_idx;
vector<unsigned> &lines = block->locations[i].lines;
+
+ block->cycle.arc = NULL;
+ block->cycle.ident = ~0U;
+
for (unsigned j = 0; j < lines.size (); j++)
{
- line = &src->lines[lines[j]];
- if (coverage)
+ unsigned ln = lines[j];
+
+ /* Line belongs to a function that is in a group. */
+ if (fn->group_line_p (ln, src_idx))
{
- if (!line->exists)
- coverage->lines++;
- if (!line->count && block->count)
- coverage->lines_executed++;
+ gcc_assert (lines[j] - fn->start_line < fn->lines.size ());
+ line = &(fn->lines[lines[j] - fn->start_line]);
+ line->exists = 1;
+ if (!block->exceptional)
+ {
+ line->unexceptional = 1;
+ if (block->count == 0)
+ line->has_unexecuted_block = 1;
+ }
+ line->count += block->count;
}
- line->exists = 1;
- if (!block->exceptional)
+ else
{
- line->unexceptional = 1;
- if (block->count == 0)
- line->has_unexecuted_block = 1;
+ gcc_assert (ln < sources[src_idx].lines.size ());
+ line = &(sources[src_idx].lines[ln]);
+ if (coverage)
+ {
+ if (!line->exists)
+ coverage->lines++;
+ if (!line->count && block->count)
+ coverage->lines_executed++;
+ }
+ line->exists = 1;
+ if (!block->exceptional)
+ {
+ line->unexceptional = 1;
+ if (block->count == 0)
+ line->has_unexecuted_block = 1;
+ }
+ line->count += block->count;
}
- line->count += block->count;
}
- }
- block->cycle.arc = NULL;
- block->cycle.ident = ~0U;
- has_any_line = true;
- if (!ix || ix + 1 == fn->blocks.size ())
- /* Entry or exit block */;
- else if (line != NULL)
- {
- line->blocks.push_back (block);
+ has_any_line = true;
- if (flag_branches)
+ if (!ix || ix + 1 == fn->blocks.size ())
+ /* Entry or exit block. */;
+ else if (line != NULL)
{
- arc_t *arc;
+ line->blocks.push_back (block);
- for (arc = block->succ; arc; arc = arc->succ_next)
+ if (flag_branches)
{
- line->branches.push_back (arc);
- if (coverage && !arc->is_unconditional)
- add_branch_counts (coverage, arc);
+ arc_info *arc;
+
+ for (arc = block->succ; arc; arc = arc->succ_next)
+ line->branches.push_back (arc);
}
}
}
@@ -2317,79 +2560,120 @@ add_line_counts (coverage_t *coverage, function_t *fn)
fnotice (stderr, "%s:no lines for '%s'\n", bbg_file_name, fn->name);
}
+/* Accumulate info for LINE that belongs to SRC source file. If ADD_COVERAGE
+ is set to true, update source file summary. */
+
+static void accumulate_line_info (line_info *line, source_info *src,
+ bool add_coverage)
+{
+ if (add_coverage)
+ for (vector<arc_info *>::iterator it = line->branches.begin ();
+ it != line->branches.end (); it++)
+ add_branch_counts (&src->coverage, *it);
+
+ if (!line->blocks.empty ())
+ {
+ /* The user expects the line count to be the number of times
+ a line has been executed. Simply summing the block count
+ will give an artificially high number. The Right Thing
+ is to sum the entry counts to the graph of blocks on this
+ line, then find the elementary cycles of the local graph
+ and add the transition counts of those cycles. */
+ gcov_type count = 0;
+
+ /* Cycle detection. */
+ for (vector<block_info *>::iterator it = line->blocks.begin ();
+ it != line->blocks.end (); it++)
+ {
+ for (arc_info *arc = (*it)->pred; arc; arc = arc->pred_next)
+ if (!line->has_block (arc->src))
+ count += arc->count;
+ for (arc_info *arc = (*it)->succ; arc; arc = arc->succ_next)
+ arc->cs_count = arc->count;
+ }
+
+ /* Now, add the count of loops entirely on this line. */
+ count += get_cycles_count (*line);
+ line->count = count;
+ }
+
+ if (line->exists && add_coverage)
+ {
+ src->coverage.lines++;
+ if (line->count)
+ src->coverage.lines_executed++;
+ }
+}
+
/* Accumulate the line counts of a file. */
static void
accumulate_line_counts (source_info *src)
{
- function_t *fn, *fn_p, *fn_n;
- unsigned ix = 0;
-
- /* Reverse the function order. */
- for (fn = src->functions, fn_p = NULL; fn; fn_p = fn, fn = fn_n)
+ /* First work on group functions. */
+ for (vector<function_info *>::iterator it = src->functions.begin ();
+ it != src->functions.end (); it++)
{
- fn_n = fn->next_file_fn;
- fn->next_file_fn = fn_p;
+ function_info *fn = *it;
+
+ if (fn->src != src->index || !fn->is_group)
+ continue;
+
+ for (vector<line_info>::iterator it2 = fn->lines.begin ();
+ it2 != fn->lines.end (); it2++)
+ {
+ line_info *line = &(*it2);
+ accumulate_line_info (line, src, false);
+ }
}
- src->functions = fn_p;
- for (vector<line_info>::reverse_iterator it = src->lines.rbegin ();
- it != src->lines.rend (); it++)
- {
- line_info *line = &(*it);
- if (!line->blocks.empty ())
- {
- /* The user expects the line count to be the number of times
- a line has been executed. Simply summing the block count
- will give an artificially high number. The Right Thing
- is to sum the entry counts to the graph of blocks on this
- line, then find the elementary cycles of the local graph
- and add the transition counts of those cycles. */
- gcov_type count = 0;
-
- /* Sum the entry arcs. */
- for (vector<block_t *>::iterator it = line->blocks.begin ();
- it != line->blocks.end (); it++)
- {
- arc_t *arc;
+ /* Work on global lines that line in source file SRC. */
+ for (vector<line_info>::iterator it = src->lines.begin ();
+ it != src->lines.end (); it++)
+ accumulate_line_info (&(*it), src, true);
- for (arc = (*it)->pred; arc; arc = arc->pred_next)
- if (flag_branches)
- add_branch_counts (&src->coverage, arc);
- }
+ /* If not using intermediate mode, sum lines of group functions and
+ add them to lines that live in a source file. */
+ if (!flag_intermediate_format)
+ for (vector<function_info *>::iterator it = src->functions.begin ();
+ it != src->functions.end (); it++)
+ {
+ function_info *fn = *it;
- /* Cycle detection. */
- for (vector<block_t *>::iterator it = line->blocks.begin ();
- it != line->blocks.end (); it++)
- {
- for (arc_t *arc = (*it)->pred; arc; arc = arc->pred_next)
- if (!line->has_block (arc->src))
- count += arc->count;
- for (arc_t *arc = (*it)->succ; arc; arc = arc->succ_next)
- arc->cs_count = arc->count;
- }
+ if (fn->src != src->index || !fn->is_group)
+ continue;
- /* Now, add the count of loops entirely on this line. */
- count += get_cycles_count (*line);
- line->count = count;
- }
+ for (unsigned i = 0; i < fn->lines.size (); i++)
+ {
+ line_info *fn_line = &fn->lines[i];
+ if (fn_line->exists)
+ {
+ unsigned ln = fn->start_line + i;
+ line_info *src_line = &src->lines[ln];
- if (line->exists)
- {
- src->coverage.lines++;
- if (line->count)
- src->coverage.lines_executed++;
- }
+ if (!src_line->exists)
+ src->coverage.lines++;
+ if (!src_line->count && fn_line->count)
+ src->coverage.lines_executed++;
- ix++;
- }
+ src_line->count += fn_line->count;
+ src_line->exists = 1;
+
+ if (fn_line->has_unexecuted_block)
+ src_line->has_unexecuted_block = 1;
+
+ if (fn_line->unexceptional)
+ src_line->unexceptional = 1;
+ }
+ }
+ }
}
/* Output information about ARC number IX. Returns nonzero if
anything is output. */
static int
-output_branch_count (FILE *gcov_file, int ix, const arc_t *arc)
+output_branch_count (FILE *gcov_file, int ix, const arc_info *arc)
{
if (arc->is_call_non_return)
{
@@ -2495,12 +2779,14 @@ output_line_beginning (FILE *f, bool exists, bool unexceptional,
if (count > 0)
{
s = format_gcov (count, 0, -1);
- if (has_unexecuted_block)
+ if (has_unexecuted_block
+ && bbg_supports_has_unexecuted_blocks)
{
if (flag_use_colors)
{
pad_count_string (s);
- s = SGR_SEQ (COLOR_BG_MAGENTA COLOR_SEPARATOR COLOR_FG_WHITE);
+ s.insert (0, SGR_SEQ (COLOR_BG_MAGENTA
+ COLOR_SEPARATOR COLOR_FG_WHITE));
s += SGR_RESET;
}
else
@@ -2538,6 +2824,86 @@ output_line_beginning (FILE *f, bool exists, bool unexceptional,
fprintf (f, "%s:%5u", s.c_str (), line_num);
}
+static void
+print_source_line (FILE *f, const vector<const char *> &source_lines,
+ unsigned line)
+{
+ gcc_assert (line >= 1);
+ gcc_assert (line <= source_lines.size ());
+
+ fprintf (f, ":%s\n", source_lines[line - 1]);
+}
+
+/* Output line details for LINE and print it to F file. LINE lives on
+ LINE_NUM. */
+
+static void
+output_line_details (FILE *f, const line_info *line, unsigned line_num)
+{
+ if (flag_all_blocks)
+ {
+ arc_info *arc;
+ int ix, jx;
+
+ ix = jx = 0;
+ for (vector<block_info *>::const_iterator it = line->blocks.begin ();
+ it != line->blocks.end (); it++)
+ {
+ if (!(*it)->is_call_return)
+ {
+ output_line_beginning (f, line->exists,
+ (*it)->exceptional, false,
+ (*it)->count, line_num,
+ "%%%%%", "$$$$$");
+ fprintf (f, "-block %2d", ix++);
+ if (flag_verbose)
+ fprintf (f, " (BB %u)", (*it)->id);
+ fprintf (f, "\n");
+ }
+ if (flag_branches)
+ for (arc = (*it)->succ; arc; arc = arc->succ_next)
+ jx += output_branch_count (f, jx, arc);
+ }
+ }
+ else if (flag_branches)
+ {
+ int ix;
+
+ ix = 0;
+ for (vector<arc_info *>::const_iterator it = line->branches.begin ();
+ it != line->branches.end (); it++)
+ ix += output_branch_count (f, ix, (*it));
+ }
+}
+
+/* Output detail statistics about function FN to file F. */
+
+static void
+output_function_details (FILE *f, const function_info *fn)
+{
+ if (!flag_branches)
+ return;
+
+ arc_info *arc = fn->blocks[EXIT_BLOCK].pred;
+ gcov_type return_count = fn->blocks[EXIT_BLOCK].count;
+ gcov_type called_count = fn->blocks[ENTRY_BLOCK].count;
+
+ for (; arc; arc = arc->pred_next)
+ if (arc->fake)
+ return_count -= arc->count;
+
+ fprintf (f, "function %s",
+ flag_demangled_names ? fn->demangled_name : fn->name);
+ fprintf (f, " called %s",
+ format_gcov (called_count, 0, -1));
+ fprintf (f, " returned %s",
+ format_gcov (return_count, called_count, 0));
+ fprintf (f, " blocks executed %s",
+ format_gcov (fn->blocks_executed, fn->blocks.size () - 2,
+ 0));
+ fprintf (f, "\n");
+}
+
/* Read in the source file one line at a time, and output that line to
the gcov file preceded by its execution count and other
information. */
@@ -2546,12 +2912,10 @@ static void
output_lines (FILE *gcov_file, const source_info *src)
{
#define DEFAULT_LINE_START " -: 0:"
+#define FN_SEPARATOR "------------------\n"
FILE *source_file;
- unsigned line_num; /* current line number. */
- const line_info *line; /* current line info ptr. */
- const char *retval = ""; /* status of source file reading. */
- function_t *fn = NULL;
+ const char *retval;
fprintf (gcov_file, DEFAULT_LINE_START "Source:%s\n", src->coverage.name);
if (!multiple_files)
@@ -2565,43 +2929,40 @@ output_lines (FILE *gcov_file, const source_info *src)
source_file = fopen (src->name, "r");
if (!source_file)
- {
- fnotice (stderr, "Cannot open source file %s\n", src->name);
- retval = NULL;
- }
+ fnotice (stderr, "Cannot open source file %s\n", src->name);
else if (src->file_time == 0)
fprintf (gcov_file, DEFAULT_LINE_START "Source is newer than graph\n");
- if (flag_branches)
- fn = src->functions;
+ vector<const char *> source_lines;
+ if (source_file)
+ while ((retval = read_line (source_file)) != NULL)
+ source_lines.push_back (xstrdup (retval));
+
+ unsigned line_start_group = 0;
+ vector<function_info *> fns;
- for (line_num = 1, line = &src->lines[line_num];
- line_num < src->lines.size (); line_num++, line++)
+ for (unsigned line_num = 1; line_num <= source_lines.size (); line_num++)
{
- for (; fn && fn->line == line_num; fn = fn->next_file_fn)
+ if (line_num >= src->lines.size ())
{
- arc_t *arc = fn->blocks[EXIT_BLOCK].pred;
- gcov_type return_count = fn->blocks[EXIT_BLOCK].count;
- gcov_type called_count = fn->blocks[ENTRY_BLOCK].count;
-
- for (; arc; arc = arc->pred_next)
- if (arc->fake)
- return_count -= arc->count;
-
- fprintf (gcov_file, "function %s", flag_demangled_names ?
- fn->demangled_name : fn->name);
- fprintf (gcov_file, " called %s",
- format_gcov (called_count, 0, -1));
- fprintf (gcov_file, " returned %s",
- format_gcov (return_count, called_count, 0));
- fprintf (gcov_file, " blocks executed %s",
- format_gcov (fn->blocks_executed, fn->blocks.size () - 2,
- 0));
- fprintf (gcov_file, "\n");
+ fprintf (gcov_file, "%9s:%5u", "-", line_num);
+ print_source_line (gcov_file, source_lines, line_num);
+ continue;
}
- if (retval)
- retval = read_line (source_file);
+ const line_info *line = &src->lines[line_num];
+
+ if (line_start_group == 0)
+ {
+ fns = src->get_functions_at_location (line_num);
+ if (fns.size () > 1)
+ line_start_group = fns[0]->end_line;
+ else if (fns.size () == 1)
+ {
+ function_info *fn = fns[0];
+ output_function_details (gcov_file, fn);
+ }
+ }
/* For lines which don't exist in the .bb file, print '-' before
the source line. For lines which exist but were never
@@ -2610,54 +2971,64 @@ output_lines (FILE *gcov_file, const source_info *src)
There are 16 spaces of indentation added before the source
line so that tabs won't be messed up. */
output_line_beginning (gcov_file, line->exists, line->unexceptional,
- line->has_unexecuted_block, line->count, line_num,
- "=====", "#####");
- fprintf (gcov_file, ":%s\n", retval ? retval : "/*EOF*/");
+ line->has_unexecuted_block, line->count,
+ line_num, "=====", "#####");
- if (flag_all_blocks)
- {
- arc_t *arc;
- int ix, jx;
+ print_source_line (gcov_file, source_lines, line_num);
+ output_line_details (gcov_file, line, line_num);
- ix = jx = 0;
- for (vector<block_t *>::const_iterator it = line->blocks.begin ();
- it != line->blocks.end (); it++)
+ if (line_start_group == line_num)
+ {
+ for (vector<function_info *>::iterator it = fns.begin ();
+ it != fns.end (); it++)
{
- if (!(*it)->is_call_return)
+ function_info *fn = *it;
+ vector<line_info> &lines = fn->lines;
+
+ fprintf (gcov_file, FN_SEPARATOR);
+
+ string fn_name
+ = flag_demangled_names ? fn->demangled_name : fn->name;
+
+ if (flag_use_colors)
{
+ fn_name.insert (0, SGR_SEQ (COLOR_FG_CYAN));
+ fn_name += SGR_RESET;
+ }
+
+ fprintf (gcov_file, "%s:\n", fn_name.c_str ());
+
+ output_function_details (gcov_file, fn);
+
+ /* Print all lines covered by the function. */
+ for (unsigned i = 0; i < lines.size (); i++)
+ {
+ line_info *line = &lines[i];
+ unsigned l = fn->start_line + i;
+
+ /* For lines which don't exist in the .bb file, print '-'
+ before the source line. For lines which exist but
+ were never executed, print '#####' or '=====' before
+ the source line. Otherwise, print the execution count
+ before the source line.
+ There are 16 spaces of indentation added before the source
+ line so that tabs won't be messed up. */
output_line_beginning (gcov_file, line->exists,
- (*it)->exceptional, false,
- (*it)->count, line_num,
- "%%%%%", "$$$$$");
- fprintf (gcov_file, "-block %2d", ix++);
- if (flag_verbose)
- fprintf (gcov_file, " (BB %u)", (*it)->id);
- fprintf (gcov_file, "\n");
+ line->unexceptional,
+ line->has_unexecuted_block,
+ line->count,
+ l, "=====", "#####");
+
+ print_source_line (gcov_file, source_lines, l);
+ output_line_details (gcov_file, line, l);
}
- if (flag_branches)
- for (arc = (*it)->succ; arc; arc = arc->succ_next)
- jx += output_branch_count (gcov_file, jx, arc);
}
- }
- else if (flag_branches)
- {
- int ix;
- ix = 0;
- for (vector<arc_t *>::const_iterator it = line->branches.begin ();
- it != line->branches.end (); it++)
- ix += output_branch_count (gcov_file, ix, (*it));
+ fprintf (gcov_file, FN_SEPARATOR);
+ line_start_group = 0;
}
}
- /* Handle all remaining source lines. There may be lines after the
- last line of code. */
- if (retval)
- {
- for (; (retval = read_line (source_file)); line_num++)
- fprintf (gcov_file, "%9s:%5u:%s\n", "-", line_num, retval);
- }
-
if (source_file)
fclose (source_file);
}
diff --git a/gcc/genmodes.c b/gcc/genmodes.c
index 64074629ea1..9f3799a6739 100644
--- a/gcc/genmodes.c
+++ b/gcc/genmodes.c
@@ -486,9 +486,11 @@ make_complex_modes (enum mode_class cl,
/* For all modes in class CL, construct vector modes of width
WIDTH, having as many components as necessary. */
-#define VECTOR_MODES(C, W) make_vector_modes (MODE_##C, W, __FILE__, __LINE__)
+#define VECTOR_MODES_WITH_PREFIX(PREFIX, C, W) \
+ make_vector_modes (MODE_##C, #PREFIX, W, __FILE__, __LINE__)
+#define VECTOR_MODES(C, W) VECTOR_MODES_WITH_PREFIX (V, C, W)
static void ATTRIBUTE_UNUSED
-make_vector_modes (enum mode_class cl, unsigned int width,
+make_vector_modes (enum mode_class cl, const char *prefix, unsigned int width,
const char *file, unsigned int line)
{
struct mode_data *m;
@@ -519,8 +521,8 @@ make_vector_modes (enum mode_class cl, unsigned int width,
if (cl == MODE_INT && m->precision == 1)
continue;
- if ((size_t)snprintf (buf, sizeof buf, "V%u%s", ncomponents, m->name)
- >= sizeof buf)
+ if ((size_t) snprintf (buf, sizeof buf, "%s%u%s", prefix,
+ ncomponents, m->name) >= sizeof buf)
{
error ("%s:%d: mode name \"%s\" is too long",
m->file, m->line, m->name);
@@ -533,13 +535,14 @@ make_vector_modes (enum mode_class cl, unsigned int width,
}
}
-/* Create a vector of booleans with COUNT elements and BYTESIZE bytes
- in total. */
-#define VECTOR_BOOL_MODE(COUNT, BYTESIZE) \
- make_vector_bool_mode (COUNT, BYTESIZE, __FILE__, __LINE__)
+/* Create a vector of booleans called NAME with COUNT elements and
+ BYTESIZE bytes in total. */
+#define VECTOR_BOOL_MODE(NAME, COUNT, BYTESIZE) \
+ make_vector_bool_mode (#NAME, COUNT, BYTESIZE, __FILE__, __LINE__)
static void ATTRIBUTE_UNUSED
-make_vector_bool_mode (unsigned int count, unsigned int bytesize,
- const char *file, unsigned int line)
+make_vector_bool_mode (const char *name, unsigned int count,
+ unsigned int bytesize, const char *file,
+ unsigned int line)
{
struct mode_data *m = find_mode ("BI");
if (!m)
@@ -548,16 +551,7 @@ make_vector_bool_mode (unsigned int count, unsigned int bytesize,
return;
}
- char buf[8];
- if ((size_t) snprintf (buf, sizeof buf, "V%uBI", count) >= sizeof buf)
- {
- error ("%s:%d: number of vector elements is too high",
- file, line);
- return;
- }
-
- struct mode_data *v = new_mode (MODE_VECTOR_BOOL,
- xstrdup (buf), file, line);
+ struct mode_data *v = new_mode (MODE_VECTOR_BOOL, name, file, line);
v->component = m;
v->ncomponents = count;
v->bytesize = bytesize;
diff --git a/gcc/gimple-fold.c b/gcc/gimple-fold.c
index ed474edb2fc..46fdf84a54c 100644
--- a/gcc/gimple-fold.c
+++ b/gcc/gimple-fold.c
@@ -40,6 +40,7 @@ along with GCC; see the file COPYING3. If not see
#include "gimple-iterator.h"
#include "tree-into-ssa.h"
#include "tree-dfa.h"
+#include "tree-object-size.h"
#include "tree-ssa.h"
#include "tree-ssa-propagate.h"
#include "ipa-utils.h"
@@ -59,6 +60,8 @@ along with GCC; see the file COPYING3. If not see
#include "stringpool.h"
#include "attribs.h"
#include "asan.h"
+#include "diagnostic-core.h"
+#include "intl.h"
/* Return true when DECL can be referenced from current unit.
FROM_DECL (if non-null) specify constructor of variable DECL was taken from.
@@ -1546,12 +1549,28 @@ static bool
gimple_fold_builtin_strncpy (gimple_stmt_iterator *gsi,
tree dest, tree src, tree len)
{
- location_t loc = gimple_location (gsi_stmt (*gsi));
- tree fn;
+ gimple *stmt = gsi_stmt (*gsi);
+ location_t loc = gimple_location (stmt);
/* If the LEN parameter is zero, return DEST. */
if (integer_zerop (len))
{
+ tree fndecl = gimple_call_fndecl (stmt);
+ gcall *call = as_a <gcall *> (stmt);
+
+ /* Warn about the lack of nul termination: the result is not
+ a (nul-terminated) string. */
+ tree slen = get_maxval_strlen (src, 0);
+ if (slen && !integer_zerop (slen))
+ warning_at (loc, OPT_Wstringop_truncation,
+ "%G%qD destination unchanged after copying no bytes "
+ "from a string of length %E",
+ call, fndecl, slen);
+ else
+ warning_at (loc, OPT_Wstringop_truncation,
+ "%G%qD destination unchanged after copying no bytes",
+ call, fndecl);
+
replace_call_with_value (gsi, dest);
return true;
}
@@ -1566,16 +1585,66 @@ gimple_fold_builtin_strncpy (gimple_stmt_iterator *gsi,
if (!slen || TREE_CODE (slen) != INTEGER_CST)
return false;
- slen = size_binop_loc (loc, PLUS_EXPR, slen, ssize_int (1));
+ /* The size of the source string including the terminating nul. */
+ tree ssize = size_binop_loc (loc, PLUS_EXPR, slen, ssize_int (1));
/* We do not support simplification of this case, though we do
support it when expanding trees into RTL. */
/* FIXME: generate a call to __builtin_memset. */
- if (tree_int_cst_lt (slen, len))
+ if (tree_int_cst_lt (ssize, len))
return false;
+ if (tree_int_cst_lt (len, slen))
+ {
+ tree fndecl = gimple_call_fndecl (stmt);
+ gcall *call = as_a <gcall *> (stmt);
+
+ warning_at (loc, OPT_Wstringop_truncation,
+ (tree_int_cst_equal (size_one_node, len)
+ ? G_("%G%qD output truncated copying %E byte "
+ "from a string of length %E")
+ : G_("%G%qD output truncated copying %E bytes "
+ "from a string of length %E")),
+ call, fndecl, len, slen);
+ }
+ else if (tree_int_cst_equal (len, slen))
+ {
+ tree decl = dest;
+ if (TREE_CODE (decl) == SSA_NAME)
+ {
+ gimple *def_stmt = SSA_NAME_DEF_STMT (decl);
+ if (is_gimple_assign (def_stmt))
+ {
+ tree_code code = gimple_assign_rhs_code (def_stmt);
+ if (code == ADDR_EXPR || code == VAR_DECL)
+ decl = gimple_assign_rhs1 (def_stmt);
+ }
+ }
+
+ if (TREE_CODE (decl) == ADDR_EXPR)
+ decl = TREE_OPERAND (decl, 0);
+
+ if (TREE_CODE (decl) == COMPONENT_REF)
+ decl = TREE_OPERAND (decl, 1);
+
+ tree fndecl = gimple_call_fndecl (stmt);
+ gcall *call = as_a <gcall *> (stmt);
+
+ if (!DECL_P (decl)
+ || !lookup_attribute ("nonstring", DECL_ATTRIBUTES (decl)))
+ warning_at (loc, OPT_Wstringop_truncation,
+ (tree_int_cst_equal (size_one_node, len)
+ ? G_("%G%qD output truncated before terminating nul "
+ "copying %E byte from a string of the same "
+ "length")
+ : G_("%G%qD output truncated before terminating nul "
+ "copying %E bytes from a string of the same "
+ "length")),
+ call, fndecl, len);
+ }
+
/* OK transform into builtin memcpy. */
- fn = builtin_decl_implicit (BUILT_IN_MEMCPY);
+ tree fn = builtin_decl_implicit (BUILT_IN_MEMCPY);
if (!fn)
return false;
@@ -1584,6 +1653,7 @@ gimple_fold_builtin_strncpy (gimple_stmt_iterator *gsi,
NULL_TREE, true, GSI_SAME_STMT);
gimple *repl = gimple_build_call (fn, 3, dest, src, len);
replace_call_with_call_and_fold (gsi, repl);
+
return true;
}
@@ -1880,24 +1950,71 @@ gimple_fold_builtin_strncat (gimple_stmt_iterator *gsi)
return true;
}
- /* If the requested len is greater than or equal to the string
- length, call strcat. */
- if (TREE_CODE (len) == INTEGER_CST && p
- && compare_tree_int (len, strlen (p)) >= 0)
+ if (TREE_CODE (len) != INTEGER_CST || !p)
+ return false;
+
+ unsigned srclen = strlen (p);
+
+ int cmpsrc = compare_tree_int (len, srclen);
+
+ /* Return early if the requested len is less than the string length.
+ Warnings will be issued elsewhere later. */
+ if (cmpsrc < 0)
+ return false;
+
+ unsigned HOST_WIDE_INT dstsize;
+
+ bool nowarn = gimple_no_warning_p (stmt);
+
+ if (!nowarn && compute_builtin_object_size (dst, 1, &dstsize))
{
- tree fn = builtin_decl_implicit (BUILT_IN_STRCAT);
+ int cmpdst = compare_tree_int (len, dstsize);
- /* If the replacement _DECL isn't initialized, don't do the
- transformation. */
- if (!fn)
- return false;
+ if (cmpdst >= 0)
+ {
+ tree fndecl = gimple_call_fndecl (stmt);
+
+ /* Strncat copies (at most) LEN bytes and always appends
+ the terminating NUL so the specified bound should never
+ be equal to (or greater than) the size of the destination.
+ If it is, the copy could overflow. */
+ location_t loc = gimple_location (stmt);
+ nowarn = warning_at (loc, OPT_Wstringop_overflow_,
+ cmpdst == 0
+ ? G_("%G%qD specified bound %E equals "
+ "destination size")
+ : G_("%G%qD specified bound %E exceeds "
+ "destination size %wu"),
+ stmt, fndecl, len, dstsize);
+ if (nowarn)
+ gimple_set_no_warning (stmt, true);
+ }
+ }
- gcall *repl = gimple_build_call (fn, 2, dst, src);
- replace_call_with_call_and_fold (gsi, repl);
- return true;
+ if (!nowarn && cmpsrc == 0)
+ {
+ tree fndecl = gimple_call_fndecl (stmt);
+
+ /* To avoid certain truncation the specified bound should also
+ not be equal to (or less than) the length of the source. */
+ location_t loc = gimple_location (stmt);
+ if (warning_at (loc, OPT_Wstringop_overflow_,
+ "%G%qD specified bound %E equals source length",
+ stmt, fndecl, len))
+ gimple_set_no_warning (stmt, true);
}
- return false;
+ tree fn = builtin_decl_implicit (BUILT_IN_STRCAT);
+
+ /* If the replacement _DECL isn't initialized, don't do the
+ transformation. */
+ if (!fn)
+ return false;
+
+ /* Otherwise, emit a call to strcat. */
+ gcall *repl = gimple_build_call (fn, 2, dst, src);
+ replace_call_with_call_and_fold (gsi, repl);
+ return true;
}
/* Fold a call to the __strncat_chk builtin with arguments DEST, SRC,
diff --git a/gcc/gimple-iterator.c b/gcc/gimple-iterator.c
index 3b74cc540f6..d9d02d305ef 100644
--- a/gcc/gimple-iterator.c
+++ b/gcc/gimple-iterator.c
@@ -83,7 +83,6 @@ static void
update_call_edge_frequencies (gimple_seq_node first, basic_block bb)
{
struct cgraph_node *cfun_node = NULL;
- int bb_freq = 0;
gimple_seq_node n;
for (n = first; n ; n = n->next)
@@ -94,15 +93,11 @@ update_call_edge_frequencies (gimple_seq_node first, basic_block bb)
/* These function calls are expensive enough that we want
to avoid calling them if we never see any calls. */
if (cfun_node == NULL)
- {
- cfun_node = cgraph_node::get (current_function_decl);
- bb_freq = (compute_call_stmt_bb_frequency
- (current_function_decl, bb));
- }
+ cfun_node = cgraph_node::get (current_function_decl);
e = cfun_node->get_edge (n);
if (e != NULL)
- e->frequency = bb_freq;
+ e->count = bb->count;
}
}
diff --git a/gcc/gimple-pretty-print.c b/gcc/gimple-pretty-print.c
index 8ee84a7cc0d..52c4be5f741 100644
--- a/gcc/gimple-pretty-print.c
+++ b/gcc/gimple-pretty-print.c
@@ -86,7 +86,7 @@ dump_profile (profile_count &count)
{
char *buf;
if (!count.initialized_p ())
- return NULL;
+ return "";
if (count.ipa_p ())
buf = xasprintf ("[count: %" PRId64 "]",
count.to_gcov_type ());
diff --git a/gcc/gimple-ssa-evrp.c b/gcc/gimple-ssa-evrp.c
new file mode 100644
index 00000000000..13ba31d7cd7
--- /dev/null
+++ b/gcc/gimple-ssa-evrp.c
@@ -0,0 +1,624 @@
+/* Support routines for Value Range Propagation (VRP).
+ Copyright (C) 2005-2017 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "backend.h"
+#include "tree.h"
+#include "gimple.h"
+#include "tree-pass.h"
+#include "ssa.h"
+#include "gimple-pretty-print.h"
+#include "cfganal.h"
+#include "gimple-fold.h"
+#include "tree-eh.h"
+#include "gimple-iterator.h"
+#include "tree-cfg.h"
+#include "tree-ssa-loop-manip.h"
+#include "tree-ssa-loop.h"
+#include "cfgloop.h"
+#include "tree-scalar-evolution.h"
+#include "tree-ssa-propagate.h"
+#include "alloc-pool.h"
+#include "domwalk.h"
+#include "tree-cfgcleanup.h"
+#include "vr-values.h"
+
+class evrp_folder : public substitute_and_fold_engine
+{
+ public:
+ tree get_value (tree) FINAL OVERRIDE;
+
+ class vr_values *vr_values;
+};
+
+tree
+evrp_folder::get_value (tree op)
+{
+ return vr_values->op_with_constant_singleton_value_range (op);
+}
+
+/* evrp_dom_walker visits the basic blocks in the dominance order and set
+ the Value Ranges (VR) for SSA_NAMEs in the scope. Use this VR to
+ discover more VRs. */
+
+class evrp_dom_walker : public dom_walker
+{
+public:
+ evrp_dom_walker ()
+ : dom_walker (CDI_DOMINATORS), stack (10)
+ {
+ need_eh_cleanup = BITMAP_ALLOC (NULL);
+ }
+ ~evrp_dom_walker ()
+ {
+ BITMAP_FREE (need_eh_cleanup);
+ }
+ virtual edge before_dom_children (basic_block);
+ virtual void after_dom_children (basic_block);
+ void push_value_range (tree var, value_range *vr);
+ value_range *pop_value_range (tree var);
+ value_range *try_find_new_range (tree, tree op, tree_code code, tree limit);
+
+ /* Cond_stack holds the old VR. */
+ auto_vec<std::pair <tree, value_range*> > stack;
+ bitmap need_eh_cleanup;
+ auto_vec<gimple *> stmts_to_fixup;
+ auto_vec<gimple *> stmts_to_remove;
+
+ class vr_values vr_values;
+
+ /* Temporary delegators. */
+ value_range *get_value_range (const_tree op)
+ { return vr_values.get_value_range (op); }
+ bool update_value_range (const_tree op, value_range *vr)
+ { return vr_values.update_value_range (op, vr); }
+ void extract_range_from_phi_node (gphi *phi, value_range *vr)
+ { vr_values.extract_range_from_phi_node (phi, vr); }
+ void extract_range_for_var_from_comparison_expr (tree var,
+ enum tree_code cond_code,
+ tree op, tree limit,
+ value_range *vr_p)
+ { vr_values.extract_range_for_var_from_comparison_expr (var, cond_code,
+ op, limit, vr_p); }
+ void adjust_range_with_scev (value_range *vr, struct loop *loop,
+ gimple *stmt, tree var)
+ { vr_values.adjust_range_with_scev (vr, loop, stmt, var); }
+ tree op_with_constant_singleton_value_range (tree op)
+ { return vr_values.op_with_constant_singleton_value_range (op); }
+ void extract_range_from_stmt (gimple *stmt, edge *taken_edge_p,
+ tree *output_p, value_range *vr)
+ { vr_values.extract_range_from_stmt (stmt, taken_edge_p, output_p, vr); }
+ void set_defs_to_varying (gimple *stmt)
+ { return vr_values.set_defs_to_varying (stmt); }
+ void set_vr_value (tree name, value_range *vr)
+ { vr_values.set_vr_value (name, vr); }
+ void simplify_cond_using_ranges_2 (gcond *stmt)
+ { vr_values.simplify_cond_using_ranges_2 (stmt); }
+ void vrp_visit_cond_stmt (gcond *cond, edge *e)
+ { vr_values.vrp_visit_cond_stmt (cond, e); }
+};
+
+/* Find new range for NAME such that (OP CODE LIMIT) is true. */
+
+value_range *
+evrp_dom_walker::try_find_new_range (tree name,
+ tree op, tree_code code, tree limit)
+{
+ value_range vr = VR_INITIALIZER;
+ value_range *old_vr = get_value_range (name);
+
+ /* Discover VR when condition is true. */
+ extract_range_for_var_from_comparison_expr (name, code, op,
+ limit, &vr);
+ /* If we found any usable VR, set the VR to ssa_name and create a
+ PUSH old value in the stack with the old VR. */
+ if (vr.type == VR_RANGE || vr.type == VR_ANTI_RANGE)
+ {
+ if (old_vr->type == vr.type
+ && vrp_operand_equal_p (old_vr->min, vr.min)
+ && vrp_operand_equal_p (old_vr->max, vr.max))
+ return NULL;
+ value_range *new_vr = vr_values.vrp_value_range_pool.allocate ();
+ *new_vr = vr;
+ return new_vr;
+ }
+ return NULL;
+}
+
+/* See if there is any new scope is entered with new VR and set that VR to
+ ssa_name before visiting the statements in the scope. */
+
+edge
+evrp_dom_walker::before_dom_children (basic_block bb)
+{
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, "Visiting BB%d\n", bb->index);
+
+ stack.safe_push (std::make_pair (NULL_TREE, (value_range *)NULL));
+
+ edge pred_e = single_pred_edge_ignoring_loop_edges (bb, false);
+ if (pred_e)
+ {
+ gimple *stmt = last_stmt (pred_e->src);
+ tree op0 = NULL_TREE;
+
+ if (stmt
+ && gimple_code (stmt) == GIMPLE_COND
+ && (op0 = gimple_cond_lhs (stmt))
+ && TREE_CODE (op0) == SSA_NAME
+ && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)))
+ || POINTER_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)))))
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Visiting controlling predicate ");
+ print_gimple_stmt (dump_file, stmt, 0);
+ }
+ /* Entering a new scope. Try to see if we can find a VR
+ here. */
+ tree op1 = gimple_cond_rhs (stmt);
+ if (TREE_OVERFLOW_P (op1))
+ op1 = drop_tree_overflow (op1);
+ tree_code code = gimple_cond_code (stmt);
+
+ auto_vec<assert_info, 8> asserts;
+ register_edge_assert_for (op0, pred_e, code, op0, op1, asserts);
+ if (TREE_CODE (op1) == SSA_NAME)
+ register_edge_assert_for (op1, pred_e, code, op0, op1, asserts);
+
+ auto_vec<std::pair<tree, value_range *>, 8> vrs;
+ for (unsigned i = 0; i < asserts.length (); ++i)
+ {
+ value_range *vr = try_find_new_range (asserts[i].name,
+ asserts[i].expr,
+ asserts[i].comp_code,
+ asserts[i].val);
+ if (vr)
+ vrs.safe_push (std::make_pair (asserts[i].name, vr));
+ }
+ /* Push updated ranges only after finding all of them to avoid
+ ordering issues that can lead to worse ranges. */
+ for (unsigned i = 0; i < vrs.length (); ++i)
+ push_value_range (vrs[i].first, vrs[i].second);
+ }
+ }
+
+ /* Visit PHI stmts and discover any new VRs possible. */
+ bool has_unvisited_preds = false;
+ edge_iterator ei;
+ edge e;
+ FOR_EACH_EDGE (e, ei, bb->preds)
+ if (e->flags & EDGE_EXECUTABLE
+ && !(e->src->flags & BB_VISITED))
+ {
+ has_unvisited_preds = true;
+ break;
+ }
+
+ for (gphi_iterator gpi = gsi_start_phis (bb);
+ !gsi_end_p (gpi); gsi_next (&gpi))
+ {
+ gphi *phi = gpi.phi ();
+ tree lhs = PHI_RESULT (phi);
+ if (virtual_operand_p (lhs))
+ continue;
+ value_range vr_result = VR_INITIALIZER;
+ bool interesting = stmt_interesting_for_vrp (phi);
+ if (interesting && dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Visiting PHI node ");
+ print_gimple_stmt (dump_file, phi, 0);
+ }
+ if (!has_unvisited_preds
+ && interesting)
+ extract_range_from_phi_node (phi, &vr_result);
+ else
+ {
+ set_value_range_to_varying (&vr_result);
+ /* When we have an unvisited executable predecessor we can't
+ use PHI arg ranges which may be still UNDEFINED but have
+ to use VARYING for them. But we can still resort to
+ SCEV for loop header PHIs. */
+ struct loop *l;
+ if (interesting
+ && (l = loop_containing_stmt (phi))
+ && l->header == gimple_bb (phi))
+ adjust_range_with_scev (&vr_result, l, phi, lhs);
+ }
+ update_value_range (lhs, &vr_result);
+
+ /* Mark PHIs whose lhs we fully propagate for removal. */
+ tree val = op_with_constant_singleton_value_range (lhs);
+ if (val && may_propagate_copy (lhs, val))
+ {
+ stmts_to_remove.safe_push (phi);
+ continue;
+ }
+
+ /* Set the SSA with the value range. */
+ if (INTEGRAL_TYPE_P (TREE_TYPE (lhs)))
+ {
+ if ((vr_result.type == VR_RANGE
+ || vr_result.type == VR_ANTI_RANGE)
+ && (TREE_CODE (vr_result.min) == INTEGER_CST)
+ && (TREE_CODE (vr_result.max) == INTEGER_CST))
+ set_range_info (lhs, vr_result.type,
+ wi::to_wide (vr_result.min),
+ wi::to_wide (vr_result.max));
+ }
+ else if (POINTER_TYPE_P (TREE_TYPE (lhs))
+ && ((vr_result.type == VR_RANGE
+ && range_includes_zero_p (vr_result.min,
+ vr_result.max) == 0)
+ || (vr_result.type == VR_ANTI_RANGE
+ && range_includes_zero_p (vr_result.min,
+ vr_result.max) == 1)))
+ set_ptr_nonnull (lhs);
+ }
+
+ edge taken_edge = NULL;
+
+ /* Visit all other stmts and discover any new VRs possible. */
+ for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
+ !gsi_end_p (gsi); gsi_next (&gsi))
+ {
+ gimple *stmt = gsi_stmt (gsi);
+ tree output = NULL_TREE;
+ gimple *old_stmt = stmt;
+ bool was_noreturn = (is_gimple_call (stmt)
+ && gimple_call_noreturn_p (stmt));
+
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Visiting stmt ");
+ print_gimple_stmt (dump_file, stmt, 0);
+ }
+
+ if (gcond *cond = dyn_cast <gcond *> (stmt))
+ {
+ vrp_visit_cond_stmt (cond, &taken_edge);
+ if (taken_edge)
+ {
+ if (taken_edge->flags & EDGE_TRUE_VALUE)
+ gimple_cond_make_true (cond);
+ else if (taken_edge->flags & EDGE_FALSE_VALUE)
+ gimple_cond_make_false (cond);
+ else
+ gcc_unreachable ();
+ update_stmt (stmt);
+ }
+ }
+ else if (stmt_interesting_for_vrp (stmt))
+ {
+ edge taken_edge;
+ value_range vr = VR_INITIALIZER;
+ extract_range_from_stmt (stmt, &taken_edge, &output, &vr);
+ if (output
+ && (vr.type == VR_RANGE || vr.type == VR_ANTI_RANGE))
+ {
+ update_value_range (output, &vr);
+ vr = *get_value_range (output);
+
+ /* Mark stmts whose output we fully propagate for removal. */
+ tree val;
+ if ((val = op_with_constant_singleton_value_range (output))
+ && may_propagate_copy (output, val)
+ && !stmt_could_throw_p (stmt)
+ && !gimple_has_side_effects (stmt))
+ {
+ stmts_to_remove.safe_push (stmt);
+ continue;
+ }
+
+ /* Set the SSA with the value range. */
+ if (INTEGRAL_TYPE_P (TREE_TYPE (output)))
+ {
+ if ((vr.type == VR_RANGE
+ || vr.type == VR_ANTI_RANGE)
+ && (TREE_CODE (vr.min) == INTEGER_CST)
+ && (TREE_CODE (vr.max) == INTEGER_CST))
+ set_range_info (output, vr.type,
+ wi::to_wide (vr.min),
+ wi::to_wide (vr.max));
+ }
+ else if (POINTER_TYPE_P (TREE_TYPE (output))
+ && ((vr.type == VR_RANGE
+ && range_includes_zero_p (vr.min,
+ vr.max) == 0)
+ || (vr.type == VR_ANTI_RANGE
+ && range_includes_zero_p (vr.min,
+ vr.max) == 1)))
+ set_ptr_nonnull (output);
+ }
+ else
+ set_defs_to_varying (stmt);
+ }
+ else
+ set_defs_to_varying (stmt);
+
+ /* See if we can derive a range for any of STMT's operands. */
+ tree op;
+ ssa_op_iter i;
+ FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
+ {
+ tree value;
+ enum tree_code comp_code;
+
+ /* If OP is used in such a way that we can infer a value
+ range for it, and we don't find a previous assertion for
+ it, create a new assertion location node for OP. */
+ if (infer_value_range (stmt, op, &comp_code, &value))
+ {
+ /* If we are able to infer a nonzero value range for OP,
+ then walk backwards through the use-def chain to see if OP
+ was set via a typecast.
+ If so, then we can also infer a nonzero value range
+ for the operand of the NOP_EXPR. */
+ if (comp_code == NE_EXPR && integer_zerop (value))
+ {
+ tree t = op;
+ gimple *def_stmt = SSA_NAME_DEF_STMT (t);
+ while (is_gimple_assign (def_stmt)
+ && CONVERT_EXPR_CODE_P
+ (gimple_assign_rhs_code (def_stmt))
+ && TREE_CODE
+ (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
+ && POINTER_TYPE_P
+ (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
+ {
+ t = gimple_assign_rhs1 (def_stmt);
+ def_stmt = SSA_NAME_DEF_STMT (t);
+
+ /* Add VR when (T COMP_CODE value) condition is
+ true. */
+ value_range *op_range
+ = try_find_new_range (t, t, comp_code, value);
+ if (op_range)
+ push_value_range (t, op_range);
+ }
+ }
+ /* Add VR when (OP COMP_CODE value) condition is true. */
+ value_range *op_range = try_find_new_range (op, op,
+ comp_code, value);
+ if (op_range)
+ push_value_range (op, op_range);
+ }
+ }
+
+ /* Try folding stmts with the VR discovered. */
+ class evrp_folder evrp_folder;
+ evrp_folder.vr_values = &vr_values;
+ bool did_replace = evrp_folder.replace_uses_in (stmt);
+ if (fold_stmt (&gsi, follow_single_use_edges)
+ || did_replace)
+ {
+ stmt = gsi_stmt (gsi);
+ update_stmt (stmt);
+ did_replace = true;
+ }
+
+ if (did_replace)
+ {
+ /* If we cleaned up EH information from the statement,
+ remove EH edges. */
+ if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
+ bitmap_set_bit (need_eh_cleanup, bb->index);
+
+ /* If we turned a not noreturn call into a noreturn one
+ schedule it for fixup. */
+ if (!was_noreturn
+ && is_gimple_call (stmt)
+ && gimple_call_noreturn_p (stmt))
+ stmts_to_fixup.safe_push (stmt);
+
+ if (gimple_assign_single_p (stmt))
+ {
+ tree rhs = gimple_assign_rhs1 (stmt);
+ if (TREE_CODE (rhs) == ADDR_EXPR)
+ recompute_tree_invariant_for_addr_expr (rhs);
+ }
+ }
+ }
+
+ /* Visit BB successor PHI nodes and replace PHI args. */
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ {
+ for (gphi_iterator gpi = gsi_start_phis (e->dest);
+ !gsi_end_p (gpi); gsi_next (&gpi))
+ {
+ gphi *phi = gpi.phi ();
+ use_operand_p use_p = PHI_ARG_DEF_PTR_FROM_EDGE (phi, e);
+ tree arg = USE_FROM_PTR (use_p);
+ if (TREE_CODE (arg) != SSA_NAME
+ || virtual_operand_p (arg))
+ continue;
+ tree val = op_with_constant_singleton_value_range (arg);
+ if (val && may_propagate_copy (arg, val))
+ propagate_value (use_p, val);
+ }
+ }
+
+ bb->flags |= BB_VISITED;
+
+ return taken_edge;
+}
+
+/* Restore/pop VRs valid only for BB when we leave BB. */
+
+void
+evrp_dom_walker::after_dom_children (basic_block bb ATTRIBUTE_UNUSED)
+{
+ gcc_checking_assert (!stack.is_empty ());
+ while (stack.last ().first != NULL_TREE)
+ pop_value_range (stack.last ().first);
+ stack.pop ();
+}
+
+/* Push the Value Range of VAR to the stack and update it with new VR. */
+
+void
+evrp_dom_walker::push_value_range (tree var, value_range *vr)
+{
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "pushing new range for ");
+ print_generic_expr (dump_file, var);
+ fprintf (dump_file, ": ");
+ dump_value_range (dump_file, vr);
+ fprintf (dump_file, "\n");
+ }
+ stack.safe_push (std::make_pair (var, get_value_range (var)));
+ set_vr_value (var, vr);
+}
+
+/* Pop the Value Range from the vrp_stack and update VAR with it. */
+
+value_range *
+evrp_dom_walker::pop_value_range (tree var)
+{
+ value_range *vr = stack.last ().second;
+ gcc_checking_assert (var == stack.last ().first);
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "popping range for ");
+ print_generic_expr (dump_file, var);
+ fprintf (dump_file, ", restoring ");
+ dump_value_range (dump_file, vr);
+ fprintf (dump_file, "\n");
+ }
+ set_vr_value (var, vr);
+ stack.pop ();
+ return vr;
+}
+
+
+/* Main entry point for the early vrp pass which is a simplified non-iterative
+ version of vrp where basic blocks are visited in dominance order. Value
+ ranges discovered in early vrp will also be used by ipa-vrp. */
+
+static unsigned int
+execute_early_vrp ()
+{
+ edge e;
+ edge_iterator ei;
+ basic_block bb;
+
+ loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
+ rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
+ scev_initialize ();
+ calculate_dominance_info (CDI_DOMINATORS);
+ FOR_EACH_BB_FN (bb, cfun)
+ {
+ bb->flags &= ~BB_VISITED;
+ FOR_EACH_EDGE (e, ei, bb->preds)
+ e->flags |= EDGE_EXECUTABLE;
+ }
+
+ /* Walk stmts in dominance order and propagate VRP. */
+ evrp_dom_walker walker;
+ walker.walk (ENTRY_BLOCK_PTR_FOR_FN (cfun));
+
+ if (dump_file)
+ {
+ fprintf (dump_file, "\nValue ranges after Early VRP:\n\n");
+ walker.vr_values.dump_all_value_ranges (dump_file);
+ fprintf (dump_file, "\n");
+ }
+
+ /* Remove stmts in reverse order to make debug stmt creation possible. */
+ while (! walker.stmts_to_remove.is_empty ())
+ {
+ gimple *stmt = walker.stmts_to_remove.pop ();
+ if (dump_file && dump_flags & TDF_DETAILS)
+ {
+ fprintf (dump_file, "Removing dead stmt ");
+ print_gimple_stmt (dump_file, stmt, 0);
+ fprintf (dump_file, "\n");
+ }
+ gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
+ if (gimple_code (stmt) == GIMPLE_PHI)
+ remove_phi_node (&gsi, true);
+ else
+ {
+ unlink_stmt_vdef (stmt);
+ gsi_remove (&gsi, true);
+ release_defs (stmt);
+ }
+ }
+
+ if (!bitmap_empty_p (walker.need_eh_cleanup))
+ gimple_purge_all_dead_eh_edges (walker.need_eh_cleanup);
+
+ /* Fixup stmts that became noreturn calls. This may require splitting
+ blocks and thus isn't possible during the dominator walk. Do this
+ in reverse order so we don't inadvertedly remove a stmt we want to
+ fixup by visiting a dominating now noreturn call first. */
+ while (!walker.stmts_to_fixup.is_empty ())
+ {
+ gimple *stmt = walker.stmts_to_fixup.pop ();
+ fixup_noreturn_call (stmt);
+ }
+
+ scev_finalize ();
+ loop_optimizer_finalize ();
+ return 0;
+}
+
+namespace {
+
+const pass_data pass_data_early_vrp =
+{
+ GIMPLE_PASS, /* type */
+ "evrp", /* name */
+ OPTGROUP_NONE, /* optinfo_flags */
+ TV_TREE_EARLY_VRP, /* tv_id */
+ PROP_ssa, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ ( TODO_cleanup_cfg | TODO_update_ssa | TODO_verify_all ),
+};
+
+class pass_early_vrp : public gimple_opt_pass
+{
+public:
+ pass_early_vrp (gcc::context *ctxt)
+ : gimple_opt_pass (pass_data_early_vrp, ctxt)
+ {}
+
+ /* opt_pass methods: */
+ opt_pass * clone () { return new pass_early_vrp (m_ctxt); }
+ virtual bool gate (function *)
+ {
+ return flag_tree_vrp != 0;
+ }
+ virtual unsigned int execute (function *)
+ { return execute_early_vrp (); }
+
+}; // class pass_vrp
+} // anon namespace
+
+gimple_opt_pass *
+make_pass_early_vrp (gcc::context *ctxt)
+{
+ return new pass_early_vrp (ctxt);
+}
+
diff --git a/gcc/gimple-ssa-store-merging.c b/gcc/gimple-ssa-store-merging.c
index c5f01d774f5..d31c4b0829b 100644
--- a/gcc/gimple-ssa-store-merging.c
+++ b/gcc/gimple-ssa-store-merging.c
@@ -178,17 +178,18 @@ struct store_operand_info
{
tree val;
tree base_addr;
- unsigned HOST_WIDE_INT bitsize;
- unsigned HOST_WIDE_INT bitpos;
- unsigned HOST_WIDE_INT bitregion_start;
- unsigned HOST_WIDE_INT bitregion_end;
+ poly_uint64 bitsize;
+ poly_uint64 bitpos;
+ poly_uint64 bitregion_start;
+ poly_uint64 bitregion_end;
gimple *stmt;
+ bool bit_not_p;
store_operand_info ();
};
store_operand_info::store_operand_info ()
: val (NULL_TREE), base_addr (NULL_TREE), bitsize (0), bitpos (0),
- bitregion_start (0), bitregion_end (0), stmt (NULL)
+ bitregion_start (0), bitregion_end (0), stmt (NULL), bit_not_p (false)
{
}
@@ -208,12 +209,17 @@ struct store_immediate_info
/* INTEGER_CST for constant stores, MEM_REF for memory copy or
BIT_*_EXPR for logical bitwise operation. */
enum tree_code rhs_code;
+ /* True if BIT_{AND,IOR,XOR}_EXPR result is inverted before storing. */
+ bool bit_not_p;
+ /* True if ops have been swapped and thus ops[1] represents
+ rhs1 of BIT_{AND,IOR,XOR}_EXPR and ops[0] represents rhs2. */
+ bool ops_swapped_p;
/* Operands. For BIT_*_EXPR rhs_code both operands are used, otherwise
just the first one. */
store_operand_info ops[2];
store_immediate_info (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
- gimple *, unsigned int, enum tree_code,
+ gimple *, unsigned int, enum tree_code, bool,
const store_operand_info &,
const store_operand_info &);
};
@@ -225,10 +231,12 @@ store_immediate_info::store_immediate_info (unsigned HOST_WIDE_INT bs,
gimple *st,
unsigned int ord,
enum tree_code rhscode,
+ bool bitnotp,
const store_operand_info &op0r,
const store_operand_info &op1r)
: bitsize (bs), bitpos (bp), bitregion_start (brs), bitregion_end (bre),
- stmt (st), order (ord), rhs_code (rhscode)
+ stmt (st), order (ord), rhs_code (rhscode), bit_not_p (bitnotp),
+ ops_swapped_p (false)
#if __cplusplus >= 201103L
, ops { op0r, op1r }
{
@@ -253,7 +261,7 @@ struct merged_store_group
/* The size of the allocated memory for val and mask. */
unsigned HOST_WIDE_INT buf_size;
unsigned HOST_WIDE_INT align_base;
- unsigned HOST_WIDE_INT load_align_base[2];
+ poly_uint64 load_align_base[2];
unsigned int align;
unsigned int load_align[2];
@@ -910,8 +918,7 @@ private:
void process_store (gimple *);
bool terminate_and_process_all_chains ();
- bool terminate_all_aliasing_chains (imm_store_chain_info **,
- gimple *);
+ bool terminate_all_aliasing_chains (imm_store_chain_info **, gimple *);
bool terminate_and_release_chain (imm_store_chain_info *);
}; // class pass_store_merging
@@ -930,13 +937,9 @@ pass_store_merging::terminate_and_process_all_chains ()
return ret;
}
-/* Terminate all chains that are affected by the assignment to DEST, appearing
- in statement STMT and ultimately points to the object BASE. Return true if
- at least one aliasing chain was terminated. BASE and DEST are allowed to
- be NULL_TREE. In that case the aliasing checks are performed on the whole
- statement rather than a particular operand in it. VAR_OFFSET_P signifies
- whether STMT represents a store to BASE offset by a variable amount.
- If that is the case we have to terminate any chain anchored at BASE. */
+/* Terminate all chains that are affected by the statement STMT.
+ CHAIN_INFO is the chain we should ignore from the checks if
+ non-NULL. */
bool
pass_store_merging::terminate_all_aliasing_chains (imm_store_chain_info
@@ -949,54 +952,37 @@ pass_store_merging::terminate_all_aliasing_chains (imm_store_chain_info
if (!gimple_vuse (stmt))
return false;
- /* Check if the assignment destination (BASE) is part of a store chain.
- This is to catch non-constant stores to destinations that may be part
- of a chain. */
- if (chain_info)
+ tree store_lhs = gimple_store_p (stmt) ? gimple_get_lhs (stmt) : NULL_TREE;
+ for (imm_store_chain_info *next = m_stores_head, *cur = next; cur; cur = next)
{
+ next = cur->next;
+
+ /* We already checked all the stores in chain_info and terminated the
+ chain if necessary. Skip it here. */
+ if (chain_info && *chain_info == cur)
+ continue;
+
store_immediate_info *info;
unsigned int i;
- FOR_EACH_VEC_ELT ((*chain_info)->m_store_info, i, info)
+ FOR_EACH_VEC_ELT (cur->m_store_info, i, info)
{
- if (ref_maybe_used_by_stmt_p (stmt, gimple_assign_lhs (info->stmt))
- || stmt_may_clobber_ref_p (stmt, gimple_assign_lhs (info->stmt)))
+ tree lhs = gimple_assign_lhs (info->stmt);
+ if (ref_maybe_used_by_stmt_p (stmt, lhs)
+ || stmt_may_clobber_ref_p (stmt, lhs)
+ || (store_lhs && refs_output_dependent_p (store_lhs, lhs)))
{
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "stmt causes chain termination:\n");
print_gimple_stmt (dump_file, stmt, 0);
}
- terminate_and_release_chain (*chain_info);
+ terminate_and_release_chain (cur);
ret = true;
break;
}
}
}
- /* Check for aliasing with all other store chains. */
- for (imm_store_chain_info *next = m_stores_head, *cur = next; cur; cur = next)
- {
- next = cur->next;
-
- /* We already checked all the stores in chain_info and terminated the
- chain if necessary. Skip it here. */
- if (chain_info && (*chain_info) == cur)
- continue;
-
- /* We can't use the base object here as that does not reliably exist.
- Build a ao_ref from the base object address (if we know the
- minimum and maximum offset and the maximum size we could improve
- things here). */
- ao_ref chain_ref;
- ao_ref_init_from_ptr_and_size (&chain_ref, cur->base_addr, NULL_TREE);
- if (ref_maybe_used_by_stmt_p (stmt, &chain_ref)
- || stmt_may_clobber_ref_p_1 (stmt, &chain_ref))
- {
- terminate_and_release_chain (cur);
- ret = true;
- }
- }
-
return ret;
}
@@ -1053,8 +1039,8 @@ compatible_load_p (merged_store_group *merged_store,
{
store_immediate_info *infof = merged_store->stores[0];
if (!info->ops[idx].base_addr
- || (info->ops[idx].bitpos - infof->ops[idx].bitpos
- != info->bitpos - infof->bitpos)
+ || may_ne (info->ops[idx].bitpos - infof->ops[idx].bitpos,
+ info->bitpos - infof->bitpos)
|| !operand_equal_p (info->ops[idx].base_addr,
infof->ops[idx].base_addr, 0))
return false;
@@ -1084,7 +1070,7 @@ compatible_load_p (merged_store_group *merged_store,
the construction of the immediate chain info guarantees no intervening
stores, so no further checks are needed. Example:
_1 = s.a; _2 = _1 & -7; s.a = _2; _3 = s.b; _4 = _3 & -7; s.b = _4; */
- if (info->ops[idx].bitpos == info->bitpos
+ if (must_eq (info->ops[idx].bitpos, info->bitpos)
&& operand_equal_p (info->ops[idx].base_addr, base_addr, 0))
return true;
@@ -1202,15 +1188,20 @@ imm_store_chain_info::coalesce_immediate_stores ()
&& infof->ops[1].base_addr
&& info->ops[0].base_addr
&& info->ops[1].base_addr
- && (info->ops[1].bitpos - infof->ops[0].bitpos
- == info->bitpos - infof->bitpos)
+ && must_eq (info->ops[1].bitpos - infof->ops[0].bitpos,
+ info->bitpos - infof->bitpos)
&& operand_equal_p (info->ops[1].base_addr,
infof->ops[0].base_addr, 0))
- std::swap (info->ops[0], info->ops[1]);
- if ((!infof->ops[0].base_addr
- || compatible_load_p (merged_store, info, base_addr, 0))
- && (!infof->ops[1].base_addr
- || compatible_load_p (merged_store, info, base_addr, 1)))
+ {
+ std::swap (info->ops[0], info->ops[1]);
+ info->ops_swapped_p = true;
+ }
+ if ((infof->ops[0].base_addr
+ ? compatible_load_p (merged_store, info, base_addr, 0)
+ : !info->ops[0].base_addr)
+ && (infof->ops[1].base_addr
+ ? compatible_load_p (merged_store, info, base_addr, 1)
+ : !info->ops[1].base_addr))
{
merged_store->merge_into (info);
continue;
@@ -1393,6 +1384,79 @@ find_constituent_stores (struct merged_store_group *group,
return ret;
}
+/* Return how many SSA_NAMEs used to compute value to store in the INFO
+ store have multiple uses. If any SSA_NAME has multiple uses, also
+ count statements needed to compute it. */
+
+static unsigned
+count_multiple_uses (store_immediate_info *info)
+{
+ gimple *stmt = info->stmt;
+ unsigned ret = 0;
+ switch (info->rhs_code)
+ {
+ case INTEGER_CST:
+ return 0;
+ case BIT_AND_EXPR:
+ case BIT_IOR_EXPR:
+ case BIT_XOR_EXPR:
+ if (info->bit_not_p)
+ {
+ if (!has_single_use (gimple_assign_rhs1 (stmt)))
+ ret = 1; /* Fall through below to return
+ the BIT_NOT_EXPR stmt and then
+ BIT_{AND,IOR,XOR}_EXPR and anything it
+ uses. */
+ else
+ /* stmt is after this the BIT_NOT_EXPR. */
+ stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
+ }
+ if (!has_single_use (gimple_assign_rhs1 (stmt)))
+ {
+ ret += 1 + info->ops[0].bit_not_p;
+ if (info->ops[1].base_addr)
+ ret += 1 + info->ops[1].bit_not_p;
+ return ret + 1;
+ }
+ stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
+ /* stmt is now the BIT_*_EXPR. */
+ if (!has_single_use (gimple_assign_rhs1 (stmt)))
+ ret += 1 + info->ops[info->ops_swapped_p].bit_not_p;
+ else if (info->ops[info->ops_swapped_p].bit_not_p)
+ {
+ gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
+ if (!has_single_use (gimple_assign_rhs1 (stmt2)))
+ ++ret;
+ }
+ if (info->ops[1].base_addr == NULL_TREE)
+ {
+ gcc_checking_assert (!info->ops_swapped_p);
+ return ret;
+ }
+ if (!has_single_use (gimple_assign_rhs2 (stmt)))
+ ret += 1 + info->ops[1 - info->ops_swapped_p].bit_not_p;
+ else if (info->ops[1 - info->ops_swapped_p].bit_not_p)
+ {
+ gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt));
+ if (!has_single_use (gimple_assign_rhs1 (stmt2)))
+ ++ret;
+ }
+ return ret;
+ case MEM_REF:
+ if (!has_single_use (gimple_assign_rhs1 (stmt)))
+ return 1 + info->ops[0].bit_not_p;
+ else if (info->ops[0].bit_not_p)
+ {
+ stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
+ if (!has_single_use (gimple_assign_rhs1 (stmt)))
+ return 1;
+ }
+ return 0;
+ default:
+ gcc_unreachable ();
+ }
+}
+
/* Split a merged store described by GROUP by populating the SPLIT_STORES
vector (if non-NULL) with split_store structs describing the byte offset
(from the base), the bit size and alignment of each store as well as the
@@ -1408,7 +1472,9 @@ find_constituent_stores (struct merged_store_group *group,
static unsigned int
split_group (merged_store_group *group, bool allow_unaligned_store,
bool allow_unaligned_load,
- vec<struct split_store *> *split_stores)
+ vec<struct split_store *> *split_stores,
+ unsigned *total_orig,
+ unsigned *total_new)
{
unsigned HOST_WIDE_INT pos = group->bitregion_start;
unsigned HOST_WIDE_INT size = group->bitregion_end - pos;
@@ -1416,6 +1482,7 @@ split_group (merged_store_group *group, bool allow_unaligned_store,
unsigned HOST_WIDE_INT group_align = group->align;
unsigned HOST_WIDE_INT align_base = group->align_base;
unsigned HOST_WIDE_INT group_load_align = group_align;
+ bool any_orig = false;
gcc_assert ((size % BITS_PER_UNIT == 0) && (pos % BITS_PER_UNIT == 0));
@@ -1423,6 +1490,39 @@ split_group (merged_store_group *group, bool allow_unaligned_store,
unsigned HOST_WIDE_INT try_pos = bytepos;
group->stores.qsort (sort_by_bitpos);
+ if (total_orig)
+ {
+ unsigned int i;
+ store_immediate_info *info = group->stores[0];
+
+ total_new[0] = 0;
+ total_orig[0] = 1; /* The orig store. */
+ info = group->stores[0];
+ if (info->ops[0].base_addr)
+ total_orig[0]++;
+ if (info->ops[1].base_addr)
+ total_orig[0]++;
+ switch (info->rhs_code)
+ {
+ case BIT_AND_EXPR:
+ case BIT_IOR_EXPR:
+ case BIT_XOR_EXPR:
+ total_orig[0]++; /* The orig BIT_*_EXPR stmt. */
+ break;
+ default:
+ break;
+ }
+ total_orig[0] *= group->stores.length ();
+
+ FOR_EACH_VEC_ELT (group->stores, i, info)
+ {
+ total_new[0] += count_multiple_uses (info);
+ total_orig[0] += (info->bit_not_p
+ + info->ops[0].bit_not_p
+ + info->ops[1].bit_not_p);
+ }
+ }
+
if (!allow_unaligned_load)
for (int i = 0; i < 2; ++i)
if (group->load_align[i])
@@ -1460,11 +1560,12 @@ split_group (merged_store_group *group, bool allow_unaligned_store,
for (int i = 0; i < 2; ++i)
if (group->load_align[i])
{
- align_bitpos = try_bitpos - group->stores[0]->bitpos;
- align_bitpos += group->stores[0]->ops[i].bitpos;
- align_bitpos -= group->load_align_base[i];
- align_bitpos &= (group_load_align - 1);
- if (align_bitpos)
+ align_bitpos
+ = known_alignment (try_bitpos
+ - group->stores[0]->bitpos
+ + group->stores[0]->ops[i].bitpos
+ - group->load_align_base[i]);
+ if (align_bitpos & (group_load_align - 1))
{
unsigned HOST_WIDE_INT a = least_bit_hwi (align_bitpos);
load_align = MIN (load_align, a);
@@ -1547,7 +1648,10 @@ split_group (merged_store_group *group, bool allow_unaligned_store,
if (info
&& info->bitpos >= try_bitpos
&& info->bitpos + info->bitsize <= try_bitpos + try_size)
- store->orig = true;
+ {
+ store->orig = true;
+ any_orig = true;
+ }
split_stores->safe_push (store);
}
@@ -1555,9 +1659,121 @@ split_group (merged_store_group *group, bool allow_unaligned_store,
size -= try_size;
}
+ if (total_orig)
+ {
+ unsigned int i;
+ struct split_store *store;
+ /* If we are reusing some original stores and any of the
+ original SSA_NAMEs had multiple uses, we need to subtract
+ those now before we add the new ones. */
+ if (total_new[0] && any_orig)
+ {
+ FOR_EACH_VEC_ELT (*split_stores, i, store)
+ if (store->orig)
+ total_new[0] -= count_multiple_uses (store->orig_stores[0]);
+ }
+ total_new[0] += ret; /* The new store. */
+ store_immediate_info *info = group->stores[0];
+ if (info->ops[0].base_addr)
+ total_new[0] += ret;
+ if (info->ops[1].base_addr)
+ total_new[0] += ret;
+ switch (info->rhs_code)
+ {
+ case BIT_AND_EXPR:
+ case BIT_IOR_EXPR:
+ case BIT_XOR_EXPR:
+ total_new[0] += ret; /* The new BIT_*_EXPR stmt. */
+ break;
+ default:
+ break;
+ }
+ FOR_EACH_VEC_ELT (*split_stores, i, store)
+ {
+ unsigned int j;
+ bool bit_not_p[3] = { false, false, false };
+ /* If all orig_stores have certain bit_not_p set, then
+ we'd use a BIT_NOT_EXPR stmt and need to account for it.
+ If some orig_stores have certain bit_not_p set, then
+ we'd use a BIT_XOR_EXPR with a mask and need to account for
+ it. */
+ FOR_EACH_VEC_ELT (store->orig_stores, j, info)
+ {
+ if (info->ops[0].bit_not_p)
+ bit_not_p[0] = true;
+ if (info->ops[1].bit_not_p)
+ bit_not_p[1] = true;
+ if (info->bit_not_p)
+ bit_not_p[2] = true;
+ }
+ total_new[0] += bit_not_p[0] + bit_not_p[1] + bit_not_p[2];
+ }
+
+ }
+
return ret;
}
+/* Return the operation through which the operand IDX (if < 2) or
+ result (IDX == 2) should be inverted. If NOP_EXPR, no inversion
+ is done, if BIT_NOT_EXPR, all bits are inverted, if BIT_XOR_EXPR,
+ the bits should be xored with mask. */
+
+static enum tree_code
+invert_op (split_store *split_store, int idx, tree int_type, tree &mask)
+{
+ unsigned int i;
+ store_immediate_info *info;
+ unsigned int cnt = 0;
+ FOR_EACH_VEC_ELT (split_store->orig_stores, i, info)
+ {
+ bool bit_not_p = idx < 2 ? info->ops[idx].bit_not_p : info->bit_not_p;
+ if (bit_not_p)
+ ++cnt;
+ }
+ mask = NULL_TREE;
+ if (cnt == 0)
+ return NOP_EXPR;
+ if (cnt == split_store->orig_stores.length ())
+ return BIT_NOT_EXPR;
+
+ unsigned HOST_WIDE_INT try_bitpos = split_store->bytepos * BITS_PER_UNIT;
+ unsigned buf_size = split_store->size / BITS_PER_UNIT;
+ unsigned char *buf
+ = XALLOCAVEC (unsigned char, buf_size);
+ memset (buf, ~0U, buf_size);
+ FOR_EACH_VEC_ELT (split_store->orig_stores, i, info)
+ {
+ bool bit_not_p = idx < 2 ? info->ops[idx].bit_not_p : info->bit_not_p;
+ if (!bit_not_p)
+ continue;
+ /* Clear regions with bit_not_p and invert afterwards, rather than
+ clear regions with !bit_not_p, so that gaps in between stores aren't
+ set in the mask. */
+ unsigned HOST_WIDE_INT bitsize = info->bitsize;
+ unsigned int pos_in_buffer = 0;
+ if (info->bitpos < try_bitpos)
+ {
+ gcc_assert (info->bitpos + bitsize > try_bitpos);
+ bitsize -= (try_bitpos - info->bitpos);
+ }
+ else
+ pos_in_buffer = info->bitpos - try_bitpos;
+ if (pos_in_buffer + bitsize > split_store->size)
+ bitsize = split_store->size - pos_in_buffer;
+ unsigned char *p = buf + (pos_in_buffer / BITS_PER_UNIT);
+ if (BYTES_BIG_ENDIAN)
+ clear_bit_region_be (p, (BITS_PER_UNIT - 1
+ - (pos_in_buffer % BITS_PER_UNIT)), bitsize);
+ else
+ clear_bit_region (p, pos_in_buffer % BITS_PER_UNIT, bitsize);
+ }
+ for (unsigned int i = 0; i < buf_size; ++i)
+ buf[i] = ~buf[i];
+ mask = native_interpret_expr (int_type, buf, buf_size);
+ return BIT_XOR_EXPR;
+}
+
/* Given a merged store group GROUP output the widened version of it.
The store chain is against the base object BASE.
Try store sizes of at most MAX_STORE_BITSIZE bits wide and don't output
@@ -1587,26 +1803,35 @@ imm_store_chain_info::output_merged_store (merged_store_group *group)
for unaligned and how many stores we'd emit for aligned stores.
Only use unaligned stores if it allows fewer stores than aligned. */
unsigned aligned_cnt
- = split_group (group, false, allow_unaligned_load, NULL);
+ = split_group (group, false, allow_unaligned_load, NULL, NULL, NULL);
unsigned unaligned_cnt
- = split_group (group, true, allow_unaligned_load, NULL);
+ = split_group (group, true, allow_unaligned_load, NULL, NULL, NULL);
if (aligned_cnt <= unaligned_cnt)
allow_unaligned_store = false;
}
+ unsigned total_orig, total_new;
split_group (group, allow_unaligned_store, allow_unaligned_load,
- &split_stores);
+ &split_stores, &total_orig, &total_new);
if (split_stores.length () >= orig_num_stmts)
{
/* We didn't manage to reduce the number of statements. Bail out. */
if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "Exceeded original number of stmts (%u)."
- " Not profitable to emit new sequence.\n",
- orig_num_stmts);
- }
+ fprintf (dump_file, "Exceeded original number of stmts (%u)."
+ " Not profitable to emit new sequence.\n",
+ orig_num_stmts);
return false;
}
+ if (total_orig <= total_new)
+ {
+ /* If number of estimated new statements is above estimated original
+ statements, bail out too. */
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, "Estimated number of original stmts (%u)"
+ " not larger than estimated number of new"
+ " stmts (%u).\n",
+ total_orig, total_new);
+ }
gimple_stmt_iterator last_gsi = gsi_for_stmt (group->last_stmt);
gimple_seq seq = NULL;
@@ -1642,10 +1867,14 @@ imm_store_chain_info::output_merged_store (merged_store_group *group)
else if (operand_equal_p (base_addr, op.base_addr, 0))
load_addr[j] = addr;
else
- load_addr[j]
- = force_gimple_operand_1 (unshare_expr (op.base_addr),
- &seq, is_gimple_mem_ref_addr,
- NULL_TREE);
+ {
+ gimple_seq this_seq;
+ load_addr[j]
+ = force_gimple_operand_1 (unshare_expr (op.base_addr),
+ &this_seq, is_gimple_mem_ref_addr,
+ NULL_TREE);
+ gimple_seq_add_seq_without_update (&seq, this_seq);
+ }
}
FOR_EACH_VEC_ELT (split_stores, i, split_store)
@@ -1709,10 +1938,10 @@ imm_store_chain_info::output_merged_store (merged_store_group *group)
unsigned HOST_WIDE_INT load_align = group->load_align[j];
unsigned HOST_WIDE_INT align_bitpos
- = (try_pos * BITS_PER_UNIT
- - split_store->orig_stores[0]->bitpos
- + op.bitpos) & (load_align - 1);
- if (align_bitpos)
+ = known_alignment (try_pos * BITS_PER_UNIT
+ - split_store->orig_stores[0]->bitpos
+ + op.bitpos);
+ if (align_bitpos & (load_align - 1))
load_align = least_bit_hwi (align_bitpos);
tree load_int_type
@@ -1720,10 +1949,11 @@ imm_store_chain_info::output_merged_store (merged_store_group *group)
load_int_type
= build_aligned_type (load_int_type, load_align);
- unsigned HOST_WIDE_INT load_pos
- = (try_pos * BITS_PER_UNIT
- - split_store->orig_stores[0]->bitpos
- + op.bitpos) / BITS_PER_UNIT;
+ poly_uint64 load_pos
+ = exact_div (try_pos * BITS_PER_UNIT
+ - split_store->orig_stores[0]->bitpos
+ + op.bitpos,
+ BITS_PER_UNIT);
ops[j] = fold_build2 (MEM_REF, load_int_type, load_addr[j],
build_int_cst (offset_type, load_pos));
if (TREE_CODE (ops[j]) == MEM_REF)
@@ -1751,6 +1981,22 @@ imm_store_chain_info::output_merged_store (merged_store_group *group)
gimple_seq_add_stmt_without_update (&seq, stmt);
}
ops[j] = gimple_assign_lhs (stmt);
+ tree xor_mask;
+ enum tree_code inv_op
+ = invert_op (split_store, j, int_type, xor_mask);
+ if (inv_op != NOP_EXPR)
+ {
+ stmt = gimple_build_assign (make_ssa_name (int_type),
+ inv_op, ops[j], xor_mask);
+ gimple_set_location (stmt, load_loc);
+ ops[j] = gimple_assign_lhs (stmt);
+
+ if (gsi_bb (load_gsi[j]))
+ gimple_seq_add_stmt_without_update (&load_seq[j],
+ stmt);
+ else
+ gimple_seq_add_stmt_without_update (&seq, stmt);
+ }
}
else
ops[j] = native_interpret_expr (int_type,
@@ -1791,6 +2037,20 @@ imm_store_chain_info::output_merged_store (merged_store_group *group)
else
gimple_seq_add_stmt_without_update (&seq, stmt);
src = gimple_assign_lhs (stmt);
+ tree xor_mask;
+ enum tree_code inv_op;
+ inv_op = invert_op (split_store, 2, int_type, xor_mask);
+ if (inv_op != NOP_EXPR)
+ {
+ stmt = gimple_build_assign (make_ssa_name (int_type),
+ inv_op, src, xor_mask);
+ gimple_set_location (stmt, bit_loc);
+ if (load_addr[1] == NULL_TREE && gsi_bb (load_gsi[0]))
+ gimple_seq_add_stmt_without_update (&load_seq[0], stmt);
+ else
+ gimple_seq_add_stmt_without_update (&seq, stmt);
+ src = gimple_assign_lhs (stmt);
+ }
break;
default:
src = ops[0];
@@ -1982,33 +2242,28 @@ rhs_valid_for_store_merging_p (tree rhs)
case. */
static tree
-mem_valid_for_store_merging (tree mem, unsigned HOST_WIDE_INT *pbitsize,
- unsigned HOST_WIDE_INT *pbitpos,
- unsigned HOST_WIDE_INT *pbitregion_start,
- unsigned HOST_WIDE_INT *pbitregion_end)
+mem_valid_for_store_merging (tree mem, poly_uint64 *pbitsize,
+ poly_uint64 *pbitpos,
+ poly_uint64 *pbitregion_start,
+ poly_uint64 *pbitregion_end)
{
- poly_int64 var_bitsize, var_bitpos;
- poly_uint64 var_bitregion_start = 0, var_bitregion_end = 0;
+ poly_int64 bitsize, bitpos;
+ poly_uint64 bitregion_start = 0, bitregion_end = 0;
machine_mode mode;
int unsignedp = 0, reversep = 0, volatilep = 0;
tree offset;
- tree base_addr = get_inner_reference (mem, &var_bitsize, &var_bitpos,
- &offset, &mode, &unsignedp, &reversep,
- &volatilep);
- if (must_eq (var_bitsize, 0))
- {
- *pbitsize = 0;
- return NULL_TREE;
- }
+ tree base_addr = get_inner_reference (mem, &bitsize, &bitpos, &offset, &mode,
+ &unsignedp, &reversep, &volatilep);
+ *pbitsize = bitsize;
+ if (must_eq (bitsize, 0))
+ return NULL_TREE;
- *pbitsize = -1;
if (TREE_CODE (mem) == COMPONENT_REF
&& DECL_BIT_FIELD_TYPE (TREE_OPERAND (mem, 1)))
{
- get_bit_range (&var_bitregion_start, &var_bitregion_end, mem,
- &var_bitpos, &offset);
- if (may_ne (var_bitregion_end, 0U))
- var_bitregion_end += 1;
+ get_bit_range (&bitregion_start, &bitregion_end, mem, &bitpos, &offset);
+ if (may_ne (bitregion_end, 0U))
+ bitregion_end += 1;
}
if (reversep)
@@ -2026,22 +2281,22 @@ mem_valid_for_store_merging (tree mem, unsigned HOST_WIDE_INT *pbitsize,
{
poly_offset_int byte_off = mem_ref_offset (base_addr);
poly_offset_int bit_off = byte_off << LOG2_BITS_PER_UNIT;
- bit_off += var_bitpos;
- if (bit_off.to_shwi (&var_bitpos))
+ bit_off += bitpos;
+ if (must_ge (bit_off, 0) && bit_off.to_shwi (&bitpos))
{
- if (may_ne (var_bitregion_end, 0U))
+ if (may_ne (bitregion_end, 0U))
{
bit_off = byte_off << LOG2_BITS_PER_UNIT;
- bit_off += var_bitregion_start;
- if (bit_off.to_uhwi (&var_bitregion_start))
+ bit_off += bitregion_start;
+ if (bit_off.to_uhwi (&bitregion_start))
{
bit_off = byte_off << LOG2_BITS_PER_UNIT;
- bit_off += var_bitregion_end;
- if (!bit_off.to_uhwi (&var_bitregion_end))
- var_bitregion_end = 0;
+ bit_off += bitregion_end;
+ if (!bit_off.to_uhwi (&bitregion_end))
+ bitregion_end = 0;
}
else
- var_bitregion_end = 0;
+ bitregion_end = 0;
}
}
else
@@ -2052,25 +2307,15 @@ mem_valid_for_store_merging (tree mem, unsigned HOST_WIDE_INT *pbitsize,
address now. */
else
{
- if (may_lt (var_bitpos, 0))
+ if (may_lt (bitpos, 0))
return NULL_TREE;
base_addr = build_fold_addr_expr (base_addr);
}
- HOST_WIDE_INT bitsize, bitpos;
- if (!var_bitsize.is_constant (&bitsize)
- || !var_bitpos.is_constant (&bitpos))
- return NULL_TREE;
-
- unsigned HOST_WIDE_INT bitregion_start, bitregion_end;
- if (!var_bitregion_start.is_constant (&bitregion_start)
- || !var_bitregion_end.is_constant (&bitregion_end))
- return NULL_TREE;
-
- if (!bitregion_end)
+ if (must_eq (bitregion_end, 0U))
{
- bitregion_start = ROUND_DOWN (bitpos, BITS_PER_UNIT);
- bitregion_end = ROUND_UP (bitpos + bitsize, BITS_PER_UNIT);
+ bitregion_start = round_down_to_byte_boundary (bitpos);
+ bitregion_end = round_up_to_byte_boundary (bitpos + bitsize);
}
if (offset != NULL_TREE)
@@ -2102,13 +2347,30 @@ mem_valid_for_store_merging (tree mem, unsigned HOST_WIDE_INT *pbitsize,
static bool
handled_load (gimple *stmt, store_operand_info *op,
- unsigned HOST_WIDE_INT bitsize, unsigned HOST_WIDE_INT bitpos,
- unsigned HOST_WIDE_INT bitregion_start,
- unsigned HOST_WIDE_INT bitregion_end)
+ poly_uint64 bitsize, poly_uint64 bitpos,
+ poly_uint64 bitregion_start, poly_uint64 bitregion_end)
{
- if (!is_gimple_assign (stmt) || !gimple_vuse (stmt))
+ if (!is_gimple_assign (stmt))
return false;
- if (gimple_assign_load_p (stmt)
+ if (gimple_assign_rhs_code (stmt) == BIT_NOT_EXPR)
+ {
+ tree rhs1 = gimple_assign_rhs1 (stmt);
+ if (TREE_CODE (rhs1) == SSA_NAME
+ && handled_load (SSA_NAME_DEF_STMT (rhs1), op, bitsize, bitpos,
+ bitregion_start, bitregion_end))
+ {
+ /* Don't allow _1 = load; _2 = ~1; _3 = ~_2; which should have
+ been optimized earlier, but if allowed here, would confuse the
+ multiple uses counting. */
+ if (op->bit_not_p)
+ return false;
+ op->bit_not_p = !op->bit_not_p;
+ return true;
+ }
+ return false;
+ }
+ if (gimple_vuse (stmt)
+ && gimple_assign_load_p (stmt)
&& !stmt_can_throw_internal (stmt)
&& !gimple_has_volatile_ops (stmt))
{
@@ -2118,13 +2380,16 @@ handled_load (gimple *stmt, store_operand_info *op,
&op->bitregion_start,
&op->bitregion_end);
if (op->base_addr != NULL_TREE
- && op->bitsize == bitsize
- && ((op->bitpos - bitpos) % BITS_PER_UNIT) == 0
- && op->bitpos - op->bitregion_start >= bitpos - bitregion_start
- && op->bitregion_end - op->bitpos >= bitregion_end - bitpos)
+ && must_eq (op->bitsize, bitsize)
+ && multiple_p (op->bitpos - bitpos, BITS_PER_UNIT)
+ && must_ge (op->bitpos - op->bitregion_start,
+ bitpos - bitregion_start)
+ && must_ge (op->bitregion_end - op->bitpos,
+ bitregion_end - bitpos))
{
op->stmt = stmt;
op->val = mem;
+ op->bit_not_p = false;
return true;
}
}
@@ -2139,19 +2404,19 @@ pass_store_merging::process_store (gimple *stmt)
{
tree lhs = gimple_assign_lhs (stmt);
tree rhs = gimple_assign_rhs1 (stmt);
- unsigned HOST_WIDE_INT bitsize, bitpos;
- unsigned HOST_WIDE_INT bitregion_start;
- unsigned HOST_WIDE_INT bitregion_end;
+ poly_uint64 bitsize, bitpos;
+ poly_uint64 bitregion_start, bitregion_end;
tree base_addr
= mem_valid_for_store_merging (lhs, &bitsize, &bitpos,
&bitregion_start, &bitregion_end);
- if (bitsize == 0)
+ if (must_eq (bitsize, 0U))
return;
bool invalid = (base_addr == NULL_TREE
- || ((bitsize > MAX_BITSIZE_MODE_ANY_INT)
- && (TREE_CODE (rhs) != INTEGER_CST)));
+ || (may_gt (bitsize, (unsigned int) MAX_BITSIZE_MODE_ANY_INT)
+ && (TREE_CODE (rhs) != INTEGER_CST)));
enum tree_code rhs_code = ERROR_MARK;
+ bool bit_not_p = false;
store_operand_info ops[2];
if (invalid)
;
@@ -2160,7 +2425,7 @@ pass_store_merging::process_store (gimple *stmt)
rhs_code = INTEGER_CST;
ops[0].val = rhs;
}
- else if (TREE_CODE (rhs) != SSA_NAME || !has_single_use (rhs))
+ else if (TREE_CODE (rhs) != SSA_NAME)
invalid = true;
else
{
@@ -2170,7 +2435,17 @@ pass_store_merging::process_store (gimple *stmt)
else if (handled_load (def_stmt, &ops[0], bitsize, bitpos,
bitregion_start, bitregion_end))
rhs_code = MEM_REF;
- else
+ else if (gimple_assign_rhs_code (def_stmt) == BIT_NOT_EXPR)
+ {
+ tree rhs1 = gimple_assign_rhs1 (def_stmt);
+ if (TREE_CODE (rhs1) == SSA_NAME
+ && is_gimple_assign (SSA_NAME_DEF_STMT (rhs1)))
+ {
+ bit_not_p = true;
+ def_stmt = SSA_NAME_DEF_STMT (rhs1);
+ }
+ }
+ if (rhs_code == ERROR_MARK && !invalid)
switch ((rhs_code = gimple_assign_rhs_code (def_stmt)))
{
case BIT_AND_EXPR:
@@ -2180,7 +2455,7 @@ pass_store_merging::process_store (gimple *stmt)
rhs1 = gimple_assign_rhs1 (def_stmt);
rhs2 = gimple_assign_rhs2 (def_stmt);
invalid = true;
- if (TREE_CODE (rhs1) != SSA_NAME || !has_single_use (rhs1))
+ if (TREE_CODE (rhs1) != SSA_NAME)
break;
def_stmt1 = SSA_NAME_DEF_STMT (rhs1);
if (!is_gimple_assign (def_stmt1)
@@ -2189,7 +2464,7 @@ pass_store_merging::process_store (gimple *stmt)
break;
if (rhs_valid_for_store_merging_p (rhs2))
ops[1].val = rhs2;
- else if (TREE_CODE (rhs2) != SSA_NAME || !has_single_use (rhs2))
+ else if (TREE_CODE (rhs2) != SSA_NAME)
break;
else
{
@@ -2208,22 +2483,30 @@ pass_store_merging::process_store (gimple *stmt)
}
}
- struct imm_store_chain_info **chain_info = NULL;
- if (base_addr)
- chain_info = m_stores.get (base_addr);
-
- if (invalid)
+ unsigned HOST_WIDE_INT const_bitsize, const_bitpos;
+ unsigned HOST_WIDE_INT const_bitregion_start, const_bitregion_end;
+ if (invalid
+ || !bitsize.is_constant (&const_bitsize)
+ || !bitpos.is_constant (&const_bitpos)
+ || !bitregion_start.is_constant (&const_bitregion_start)
+ || !bitregion_end.is_constant (&const_bitregion_end))
{
- terminate_all_aliasing_chains (chain_info, stmt);
+ terminate_all_aliasing_chains (NULL, stmt);
return;
}
+ struct imm_store_chain_info **chain_info = NULL;
+ if (base_addr)
+ chain_info = m_stores.get (base_addr);
+
store_immediate_info *info;
if (chain_info)
{
unsigned int ord = (*chain_info)->m_store_info.length ();
- info = new store_immediate_info (bitsize, bitpos, bitregion_start,
- bitregion_end, stmt, ord, rhs_code,
+ info = new store_immediate_info (const_bitsize, const_bitpos,
+ const_bitregion_start,
+ const_bitregion_end,
+ stmt, ord, rhs_code, bit_not_p,
ops[0], ops[1]);
if (dump_file && (dump_flags & TDF_DETAILS))
{
@@ -2231,6 +2514,7 @@ pass_store_merging::process_store (gimple *stmt)
print_gimple_stmt (dump_file, stmt, 0);
}
(*chain_info)->m_store_info.safe_push (info);
+ terminate_all_aliasing_chains (chain_info, stmt);
/* If we reach the limit of stores to merge in a chain terminate and
process the chain now. */
if ((*chain_info)->m_store_info.length ()
@@ -2245,12 +2529,13 @@ pass_store_merging::process_store (gimple *stmt)
}
/* Store aliases any existing chain? */
- terminate_all_aliasing_chains (chain_info, stmt);
+ terminate_all_aliasing_chains (NULL, stmt);
/* Start a new chain. */
struct imm_store_chain_info *new_chain
= new imm_store_chain_info (m_stores_head, base_addr);
- info = new store_immediate_info (bitsize, bitpos, bitregion_start,
- bitregion_end, stmt, 0, rhs_code,
+ info = new store_immediate_info (const_bitsize, const_bitpos,
+ const_bitregion_start, const_bitregion_end,
+ stmt, 0, rhs_code, bit_not_p,
ops[0], ops[1]);
new_chain->m_store_info.safe_push (info);
m_stores.put (base_addr, new_chain);
diff --git a/gcc/gimple-streamer-in.c b/gcc/gimple-streamer-in.c
index 0dabe1adcf6..56f748a23ae 100644
--- a/gcc/gimple-streamer-in.c
+++ b/gcc/gimple-streamer-in.c
@@ -264,8 +264,11 @@ input_bb (struct lto_input_block *ib, enum LTO_tags tag,
index = streamer_read_uhwi (ib);
bb = BASIC_BLOCK_FOR_FN (fn, index);
- bb->count = profile_count::stream_in (ib).apply_scale
- (count_materialization_scale, REG_BR_PROB_BASE);
+ bb->count = profile_count::stream_in (ib);
+ if (count_materialization_scale != REG_BR_PROB_BASE
+ && bb->count.ipa ().nonzero_p ())
+ bb->count
+ = bb->count.apply_scale (count_materialization_scale, REG_BR_PROB_BASE);
bb->flags = streamer_read_hwi (ib);
/* LTO_bb1 has statements. LTO_bb0 does not. */
diff --git a/gcc/gimple.c b/gcc/gimple.c
index af49405929a..58499bae7f7 100644
--- a/gcc/gimple.c
+++ b/gcc/gimple.c
@@ -361,6 +361,7 @@ gimple_build_call_from_tree (tree t, tree fnptrtype)
gimple_call_set_arg (call, i, CALL_EXPR_ARG (t, i));
gimple_set_block (call, TREE_BLOCK (t));
+ gimple_set_location (call, EXPR_LOCATION (t));
/* Carry all the CALL_EXPR flags to the new GIMPLE_CALL. */
gimple_call_set_chain (call, CALL_EXPR_STATIC_CHAIN (t));
diff --git a/gcc/gimple.h b/gcc/gimple.h
index 334def89398..eef5a75fe76 100644
--- a/gcc/gimple.h
+++ b/gcc/gimple.h
@@ -6319,11 +6319,18 @@ gimple_expr_type (const gimple *stmt)
if (code == GIMPLE_CALL)
{
const gcall *call_stmt = as_a <const gcall *> (stmt);
- if (gimple_call_internal_p (call_stmt)
- && gimple_call_internal_fn (call_stmt) == IFN_MASK_STORE)
- return TREE_TYPE (gimple_call_arg (call_stmt, 3));
- else
- return gimple_call_return_type (call_stmt);
+ if (gimple_call_internal_p (call_stmt))
+ switch (gimple_call_internal_fn (call_stmt))
+ {
+ case IFN_MASK_STORE:
+ case IFN_SCATTER_STORE:
+ return TREE_TYPE (gimple_call_arg (call_stmt, 3));
+ case IFN_MASK_SCATTER_STORE:
+ return TREE_TYPE (gimple_call_arg (call_stmt, 4));
+ default:
+ break;
+ }
+ return gimple_call_return_type (call_stmt);
}
else if (code == GIMPLE_ASSIGN)
{
diff --git a/gcc/gimplify.c b/gcc/gimplify.c
index 540d128a70d..f312fb5e261 100644
--- a/gcc/gimplify.c
+++ b/gcc/gimplify.c
@@ -2234,8 +2234,10 @@ expand_FALLTHROUGH_r (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
break;
}
}
+ else if (gimple_call_internal_p (stmt, IFN_ASAN_MARK))
+ ;
else
- /* Something other than a label. That's not expected. */
+ /* Something other is not expected. */
break;
gsi_next (&gsi2);
}
diff --git a/gcc/ginclude/tgmath.h b/gcc/ginclude/tgmath.h
index be3f5be9df1..97968ad2302 100644
--- a/gcc/ginclude/tgmath.h
+++ b/gcc/ginclude/tgmath.h
@@ -38,68 +38,24 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
__TGMATH_CPLX*, __TGMATH_REAL*, and __TGMATH_CPLX_ONLY. _CPLX
means the generic argument(s) may be real or complex, _REAL means
real only, _CPLX means complex only. If there is no suffix, we are
- defining a function of one generic argument. If the suffix is _n
- it is a function of n generic arguments. If the suffix is _m_n it
- is a function of n arguments, the first m of which are generic. We
- only define these macros for values of n and/or m that are needed. */
-
-/* The general rules for generic macros are given in 7.22 paragraphs 1 and 2.
- If any generic parameter is complex, we use a complex version. Otherwise
- we use a real version. If the real part of any generic parameter is long
- double, we use the long double version. Otherwise if the real part of any
- generic parameter is double or of integer type, we use the double version.
- Otherwise we use the float version. */
-
-#define __tg_cplx(expr) \
- __builtin_classify_type(expr) == 9
-
-#define __tg_ldbl(expr) \
- __builtin_types_compatible_p(__typeof__(expr), long double)
-
-#define __tg_dbl(expr) \
- (__builtin_types_compatible_p(__typeof__(expr), double) \
- || __builtin_classify_type(expr) == 1)
-
-#define __tg_choose(x,f,d,l) \
- __builtin_choose_expr(__tg_ldbl(x), l, \
- __builtin_choose_expr(__tg_dbl(x), d, \
- f))
-
-#define __tg_choose_2(x,y,f,d,l) \
- __builtin_choose_expr(__tg_ldbl(x) || __tg_ldbl(y), l, \
- __builtin_choose_expr(__tg_dbl(x) || __tg_dbl(y), d, \
- f))
-
-#define __tg_choose_3(x,y,z,f,d,l) \
- __builtin_choose_expr(__tg_ldbl(x) || __tg_ldbl(y) || __tg_ldbl(z), l, \
- __builtin_choose_expr(__tg_dbl(x) || __tg_dbl(y) \
- || __tg_dbl(z), d, \
- f))
-
-#define __TGMATH_CPLX(z,R,C) \
- __builtin_choose_expr (__tg_cplx(z), \
- __tg_choose (__real__(z), C##f(z), (C)(z), C##l(z)), \
- __tg_choose (z, R##f(z), (R)(z), R##l(z)))
-
-#define __TGMATH_CPLX_2(z1,z2,R,C) \
- __builtin_choose_expr (__tg_cplx(z1) || __tg_cplx(z2), \
- __tg_choose_2 (__real__(z1), __real__(z2), \
- C##f(z1,z2), (C)(z1,z2), C##l(z1,z2)), \
- __tg_choose_2 (z1, z2, \
- R##f(z1,z2), (R)(z1,z2), R##l(z1,z2)))
+ defining a function of one argument. If the suffix is _n
+ it is a function of n arguments. We only define these macros for
+ values of n that are needed. */
+
+#define __TGMATH_CPLX(z,R,C) \
+ __builtin_tgmath (R##f, R, R##l, C##f, C, C##l, (z))
+
+#define __TGMATH_CPLX_2(z1,z2,R,C) \
+ __builtin_tgmath (R##f, R, R##l, C##f, C, C##l, (z1), (z2))
#define __TGMATH_REAL(x,R) \
- __tg_choose (x, R##f(x), (R)(x), R##l(x))
+ __builtin_tgmath (R##f, R, R##l, (x))
#define __TGMATH_REAL_2(x,y,R) \
- __tg_choose_2 (x, y, R##f(x,y), (R)(x,y), R##l(x,y))
+ __builtin_tgmath (R##f, R, R##l, (x), (y))
#define __TGMATH_REAL_3(x,y,z,R) \
- __tg_choose_3 (x, y, z, R##f(x,y,z), (R)(x,y,z), R##l(x,y,z))
-#define __TGMATH_REAL_1_2(x,y,R) \
- __tg_choose (x, R##f(x,y), (R)(x,y), R##l(x,y))
-#define __TGMATH_REAL_2_3(x,y,z,R) \
- __tg_choose_2 (x, y, R##f(x,y,z), (R)(x,y,z), R##l(x,y,z))
+ __builtin_tgmath (R##f, R, R##l, (x), (y), (z))
#define __TGMATH_CPLX_ONLY(z,C) \
- __tg_choose (__real__(z), C##f(z), (C)(z), C##l(z))
+ __builtin_tgmath (C##f, C, C##l, (z))
/* Functions defined in both <math.h> and <complex.h> (7.22p4) */
#define acos(z) __TGMATH_CPLX(z, acos, cacos)
@@ -135,10 +91,10 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
#define fmax(x,y) __TGMATH_REAL_2(x, y, fmax)
#define fmin(x,y) __TGMATH_REAL_2(x, y, fmin)
#define fmod(x,y) __TGMATH_REAL_2(x, y, fmod)
-#define frexp(x,y) __TGMATH_REAL_1_2(x, y, frexp)
+#define frexp(x,y) __TGMATH_REAL_2(x, y, frexp)
#define hypot(x,y) __TGMATH_REAL_2(x, y, hypot)
#define ilogb(x) __TGMATH_REAL(x, ilogb)
-#define ldexp(x,y) __TGMATH_REAL_1_2(x, y, ldexp)
+#define ldexp(x,y) __TGMATH_REAL_2(x, y, ldexp)
#define lgamma(x) __TGMATH_REAL(x, lgamma)
#define llrint(x) __TGMATH_REAL(x, llrint)
#define llround(x) __TGMATH_REAL(x, llround)
@@ -150,13 +106,13 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
#define lround(x) __TGMATH_REAL(x, lround)
#define nearbyint(x) __TGMATH_REAL(x, nearbyint)
#define nextafter(x,y) __TGMATH_REAL_2(x, y, nextafter)
-#define nexttoward(x,y) __TGMATH_REAL_1_2(x, y, nexttoward)
+#define nexttoward(x,y) __TGMATH_REAL_2(x, y, nexttoward)
#define remainder(x,y) __TGMATH_REAL_2(x, y, remainder)
-#define remquo(x,y,z) __TGMATH_REAL_2_3(x, y, z, remquo)
+#define remquo(x,y,z) __TGMATH_REAL_3(x, y, z, remquo)
#define rint(x) __TGMATH_REAL(x, rint)
#define round(x) __TGMATH_REAL(x, round)
-#define scalbn(x,y) __TGMATH_REAL_1_2(x, y, scalbn)
-#define scalbln(x,y) __TGMATH_REAL_1_2(x, y, scalbln)
+#define scalbn(x,y) __TGMATH_REAL_2(x, y, scalbn)
+#define scalbln(x,y) __TGMATH_REAL_2(x, y, scalbln)
#define tgamma(x) __TGMATH_REAL(x, tgamma)
#define trunc(x) __TGMATH_REAL(x, trunc)
diff --git a/gcc/go/ChangeLog b/gcc/go/ChangeLog
index 1c0ef932914..947f49cd48b 100644
--- a/gcc/go/ChangeLog
+++ b/gcc/go/ChangeLog
@@ -1,3 +1,7 @@
+2017-11-14 Than McIntosh <thanm@google.com>
+
+ * go-gcc.cc (var_expression): Remove Varexpr_context parameter.
+
2017-10-11 Tony Reix <tony.reix@atos.net>
* go-system.h (__STDC_FORMAT_MACROS): Define before including any
diff --git a/gcc/go/go-gcc.cc b/gcc/go/go-gcc.cc
index 04912f0ed01..a50abdd856b 100644
--- a/gcc/go/go-gcc.cc
+++ b/gcc/go/go-gcc.cc
@@ -276,7 +276,7 @@ class Gcc_backend : public Backend
{ return this->make_expression(null_pointer_node); }
Bexpression*
- var_expression(Bvariable* var, Varexpr_context, Location);
+ var_expression(Bvariable* var, Location);
Bexpression*
indirect_expression(Btype*, Bexpression* expr, bool known_valid, Location);
@@ -1256,7 +1256,7 @@ Gcc_backend::zero_expression(Btype* btype)
// An expression that references a variable.
Bexpression*
-Gcc_backend::var_expression(Bvariable* var, Varexpr_context, Location location)
+Gcc_backend::var_expression(Bvariable* var, Location location)
{
tree ret = var->get_tree(location);
if (ret == error_mark_node)
diff --git a/gcc/go/gofrontend/MERGE b/gcc/go/gofrontend/MERGE
index 0fa2cccebfe..4832c782946 100644
--- a/gcc/go/gofrontend/MERGE
+++ b/gcc/go/gofrontend/MERGE
@@ -1,4 +1,4 @@
-64d570c590a76921cbdca4efb22e4675e19cc809
+cb5dc1ce98857884a2215c461dd1d7de530f9f5e
The first line of this file holds the git revision number of the last
merge done from the gofrontend repository.
diff --git a/gcc/go/gofrontend/backend.h b/gcc/go/gofrontend/backend.h
index 9951a72e45c..48dbe7eb0da 100644
--- a/gcc/go/gofrontend/backend.h
+++ b/gcc/go/gofrontend/backend.h
@@ -254,7 +254,7 @@ class Backend
// Create a reference to a variable.
virtual Bexpression*
- var_expression(Bvariable* var, Varexpr_context in_lvalue_pos, Location) = 0;
+ var_expression(Bvariable* var, Location) = 0;
// Create an expression that indirects through the pointer expression EXPR
// (i.e., return the expression for *EXPR). KNOWN_VALID is true if the pointer
diff --git a/gcc/go/gofrontend/escape.cc b/gcc/go/gofrontend/escape.cc
index dea21188c92..bae8c924b85 100644
--- a/gcc/go/gofrontend/escape.cc
+++ b/gcc/go/gofrontend/escape.cc
@@ -692,6 +692,12 @@ Gogo::analyze_escape()
if (!optimize_allocation_flag.is_enabled() || saw_errors())
return;
+ // Currently runtime is hard-coded to non-escape in various places.
+ // Don't run escape analysis for runtime.
+ // TODO: remove this once it works for runtime.
+ if (this->compiling_runtime() && this->package_name() == "runtime")
+ return;
+
// Discover strongly connected groups of functions to analyze for escape
// information in this package.
this->discover_analysis_sets();
diff --git a/gcc/go/gofrontend/expressions.cc b/gcc/go/gofrontend/expressions.cc
index dad22ebd2c9..7f816110d62 100644
--- a/gcc/go/gofrontend/expressions.cc
+++ b/gcc/go/gofrontend/expressions.cc
@@ -771,7 +771,7 @@ Var_expression::do_get_backend(Translate_context* context)
go_unreachable();
Bexpression* ret =
- context->backend()->var_expression(bvar, this->in_lvalue_pos_, loc);
+ context->backend()->var_expression(bvar, loc);
if (is_in_heap)
ret = context->backend()->indirect_expression(btype, ret, true, loc);
return ret;
@@ -898,10 +898,7 @@ Temporary_reference_expression::do_get_backend(Translate_context* context)
{
Gogo* gogo = context->gogo();
Bvariable* bvar = this->statement_->get_backend_variable(context);
- Varexpr_context ve_ctxt = (this->is_lvalue_ ? VE_lvalue : VE_rvalue);
-
- Bexpression* ret = gogo->backend()->var_expression(bvar, ve_ctxt,
- this->location());
+ Bexpression* ret = gogo->backend()->var_expression(bvar, this->location());
// The backend can't always represent the same set of recursive types
// that the Go frontend can. In some cases this means that a
@@ -972,7 +969,7 @@ Set_and_use_temporary_expression::do_get_backend(Translate_context* context)
Location loc = this->location();
Gogo* gogo = context->gogo();
Bvariable* bvar = this->statement_->get_backend_variable(context);
- Bexpression* lvar_ref = gogo->backend()->var_expression(bvar, VE_lvalue, loc);
+ Bexpression* lvar_ref = gogo->backend()->var_expression(bvar, loc);
Named_object* fn = context->function();
go_assert(fn != NULL);
@@ -980,7 +977,7 @@ Set_and_use_temporary_expression::do_get_backend(Translate_context* context)
Bexpression* bexpr = this->expr_->get_backend(context);
Bstatement* set = gogo->backend()->assignment_statement(bfn, lvar_ref,
bexpr, loc);
- Bexpression* var_ref = gogo->backend()->var_expression(bvar, VE_rvalue, loc);
+ Bexpression* var_ref = gogo->backend()->var_expression(bvar, loc);
Bexpression* ret = gogo->backend()->compound_expression(set, var_ref, loc);
return ret;
}
@@ -1084,11 +1081,11 @@ Sink_expression::do_get_backend(Translate_context* context)
gogo->backend()->temporary_variable(fn_ctx, context->bblock(), bt, NULL,
false, loc, &decl);
Bexpression* var_ref =
- gogo->backend()->var_expression(this->bvar_, VE_lvalue, loc);
+ gogo->backend()->var_expression(this->bvar_, loc);
var_ref = gogo->backend()->compound_expression(decl, var_ref, loc);
return var_ref;
}
- return gogo->backend()->var_expression(this->bvar_, VE_lvalue, loc);
+ return gogo->backend()->var_expression(this->bvar_, loc);
}
// Ast dump for sink expression.
@@ -1302,7 +1299,7 @@ Func_descriptor_expression::do_get_backend(Translate_context* context)
Named_object* no = this->fn_;
Location loc = no->location();
if (this->dvar_ != NULL)
- return context->backend()->var_expression(this->dvar_, VE_rvalue, loc);
+ return context->backend()->var_expression(this->dvar_, loc);
Gogo* gogo = context->gogo();
std::string var_name(gogo->function_descriptor_name(no));
@@ -1340,7 +1337,7 @@ Func_descriptor_expression::do_get_backend(Translate_context* context)
}
this->dvar_ = bvar;
- return gogo->backend()->var_expression(bvar, VE_rvalue, loc);
+ return gogo->backend()->var_expression(bvar, loc);
}
// Print a function descriptor expression.
@@ -4286,7 +4283,7 @@ Unary_expression::do_get_backend(Translate_context* context)
Temporary_statement* temp = sut->temporary();
Bvariable* bvar = temp->get_backend_variable(context);
Bexpression* bvar_expr =
- gogo->backend()->var_expression(bvar, VE_lvalue, loc);
+ gogo->backend()->var_expression(bvar, loc);
Bexpression* bval = sut->expression()->get_backend(context);
Named_object* fn = context->function();
@@ -4373,7 +4370,7 @@ Unary_expression::do_get_backend(Translate_context* context)
gogo->backend()->implicit_variable_set_init(implicit, var_name, btype,
true, copy_to_heap, false,
bexpr);
- bexpr = gogo->backend()->var_expression(implicit, VE_rvalue, loc);
+ bexpr = gogo->backend()->var_expression(implicit, loc);
// If we are not copying a slice initializer to the heap,
// then it can be changed by the program, so if it can
@@ -4383,7 +4380,7 @@ Unary_expression::do_get_backend(Translate_context* context)
&& this->expr_->type()->has_pointer())
{
Bexpression* root =
- gogo->backend()->var_expression(implicit, VE_rvalue, loc);
+ gogo->backend()->var_expression(implicit, loc);
root = gogo->backend()->address_expression(root, loc);
Type* type = Type::make_pointer_type(this->expr_->type());
gogo->add_gc_root(Expression::make_backend(root, type, loc));
@@ -4400,7 +4397,7 @@ Unary_expression::do_get_backend(Translate_context* context)
true, false, btype, loc);
gogo->backend()->immutable_struct_set_init(decl, var_name, true,
false, btype, loc, bexpr);
- bexpr = gogo->backend()->var_expression(decl, VE_rvalue, loc);
+ bexpr = gogo->backend()->var_expression(decl, loc);
}
go_assert(!this->create_temp_ || this->expr_->is_variable());
@@ -14309,7 +14306,7 @@ Heap_expression::do_get_backend(Translate_context* context)
Bstatement* assn;
if (!etype->has_pointer())
{
- space = gogo->backend()->var_expression(space_temp, VE_lvalue, loc);
+ space = gogo->backend()->var_expression(space_temp, loc);
Bexpression* ref =
gogo->backend()->indirect_expression(expr_btype, space, true, loc);
assn = gogo->backend()->assignment_statement(fndecl, ref, bexpr, loc);
@@ -14322,12 +14319,12 @@ Heap_expression::do_get_backend(Translate_context* context)
expr_btype, bexpr, true, loc,
&edecl);
Bexpression* btempref = gogo->backend()->var_expression(btemp,
- VE_lvalue, loc);
+ loc);
Bexpression* addr = gogo->backend()->address_expression(btempref, loc);
Expression* td = Expression::make_type_descriptor(etype, loc);
Type* etype_ptr = Type::make_pointer_type(etype);
- space = gogo->backend()->var_expression(space_temp, VE_rvalue, loc);
+ space = gogo->backend()->var_expression(space_temp, loc);
Expression* elhs = Expression::make_backend(space, etype_ptr, loc);
Expression* erhs = Expression::make_backend(addr, etype_ptr, loc);
Expression* call = Runtime::make_call(Runtime::TYPEDMEMMOVE, loc, 3,
@@ -14337,7 +14334,7 @@ Heap_expression::do_get_backend(Translate_context* context)
assn = gogo->backend()->compound_statement(edecl, s);
}
decl = gogo->backend()->compound_statement(decl, assn);
- space = gogo->backend()->var_expression(space_temp, VE_rvalue, loc);
+ space = gogo->backend()->var_expression(space_temp, loc);
return gogo->backend()->compound_expression(decl, space, loc);
}
@@ -14661,7 +14658,7 @@ Ptrmask_symbol_expression::do_get_backend(Translate_context* context)
Bvariable* bvar = this->type_->gc_ptrmask_var(gogo, ptrsize, ptrdata);
Location bloc = Linemap::predeclared_location();
- Bexpression* bref = gogo->backend()->var_expression(bvar, VE_rvalue, bloc);
+ Bexpression* bref = gogo->backend()->var_expression(bvar, bloc);
Bexpression* baddr = gogo->backend()->address_expression(bref, bloc);
Type* uint8_type = Type::lookup_integer_type("uint8");
@@ -15380,8 +15377,7 @@ Interface_mtable_expression::do_get_backend(Translate_context* context)
Gogo* gogo = context->gogo();
Location loc = Linemap::predeclared_location();
if (this->bvar_ != NULL)
- return gogo->backend()->var_expression(this->bvar_, VE_rvalue,
- this->location());
+ return gogo->backend()->var_expression(this->bvar_, this->location());
const Typed_identifier_list* interface_methods = this->itype_->methods();
go_assert(!interface_methods->empty());
@@ -15421,8 +15417,7 @@ Interface_mtable_expression::do_get_backend(Translate_context* context)
this->bvar_ =
gogo->backend()->immutable_struct_reference(mangled_name, asm_name,
btype, loc);
- return gogo->backend()->var_expression(this->bvar_, VE_rvalue,
- this->location());
+ return gogo->backend()->var_expression(this->bvar_, this->location());
}
// The first element is the type descriptor.
@@ -15487,7 +15482,7 @@ Interface_mtable_expression::do_get_backend(Translate_context* context)
!is_public, btype, loc);
gogo->backend()->immutable_struct_set_init(this->bvar_, mangled_name, false,
!is_public, btype, loc, ctor);
- return gogo->backend()->var_expression(this->bvar_, VE_lvalue, loc);
+ return gogo->backend()->var_expression(this->bvar_, loc);
}
void
diff --git a/gcc/go/gofrontend/expressions.h b/gcc/go/gofrontend/expressions.h
index 0c742fd92df..9f58f497f8e 100644
--- a/gcc/go/gofrontend/expressions.h
+++ b/gcc/go/gofrontend/expressions.h
@@ -1298,7 +1298,7 @@ class Var_expression : public Expression
public:
Var_expression(Named_object* variable, Location location)
: Expression(EXPRESSION_VAR_REFERENCE, location),
- variable_(variable), in_lvalue_pos_(VE_rvalue)
+ variable_(variable)
{ }
// Return the variable.
@@ -1306,16 +1306,6 @@ class Var_expression : public Expression
named_object() const
{ return this->variable_; }
- // Does this var expression appear in an lvalue (assigned-to) context?
- bool
- in_lvalue_pos() const
- { return this->in_lvalue_pos_ == VE_lvalue; }
-
- // Mark a var_expression as appearing in an lvalue context.
- void
- set_in_lvalue_pos()
- { this->in_lvalue_pos_ = VE_lvalue; }
-
protected:
Expression*
do_lower(Gogo*, Named_object*, Statement_inserter*, int);
@@ -1346,8 +1336,6 @@ class Var_expression : public Expression
private:
// The variable we are referencing.
Named_object* variable_;
- // Set to TRUE if var expression appears in lvalue context
- Varexpr_context in_lvalue_pos_;
};
// A reference to a variable within an enclosing function.
diff --git a/gcc/go/gofrontend/gogo.cc b/gcc/go/gofrontend/gogo.cc
index c986963f1b2..54617360060 100644
--- a/gcc/go/gofrontend/gogo.cc
+++ b/gcc/go/gofrontend/gogo.cc
@@ -1394,7 +1394,7 @@ Gogo::write_globals()
{
Location loc = var->location();
Bexpression* var_expr =
- this->backend()->var_expression(bvar, VE_lvalue, loc);
+ this->backend()->var_expression(bvar, loc);
var_init_stmt =
this->backend()->assignment_statement(init_bfn, var_expr,
var_binit, loc);
@@ -5798,8 +5798,7 @@ Function::return_value(Gogo* gogo, Named_object* named_function,
{
Named_object* no = (*this->results_)[i];
Bvariable* bvar = no->get_backend_variable(gogo, named_function);
- Bexpression* val = gogo->backend()->var_expression(bvar, VE_rvalue,
- location);
+ Bexpression* val = gogo->backend()->var_expression(bvar, location);
if (no->result_var_value()->is_in_heap())
{
Btype* bt = no->result_var_value()->type()->get_backend(gogo);
@@ -6632,7 +6631,7 @@ Variable::get_init_block(Gogo* gogo, Named_object* function,
Expression::make_cast(this->type(), this->init_, loc);
Bexpression* val = val_expr->get_backend(&context);
Bexpression* var_ref =
- gogo->backend()->var_expression(var_decl, VE_lvalue, loc);
+ gogo->backend()->var_expression(var_decl, loc);
decl_init = gogo->backend()->assignment_statement(bfunction, var_ref,
val, loc);
}
diff --git a/gcc/go/gofrontend/names.cc b/gcc/go/gofrontend/names.cc
index 20f7c57ee49..4353a00ef6a 100644
--- a/gcc/go/gofrontend/names.cc
+++ b/gcc/go/gofrontend/names.cc
@@ -238,8 +238,8 @@ int
Gogo::nested_function_num(const std::string& name)
{
std::string n(Gogo::unpack_hidden_name(name));
- go_assert(n.compare(0, 8, ".$nested") == 0);
- return strtol(n.substr(8).c_str(), NULL, 0);
+ go_assert(n.compare(0, 7, "$nested") == 0);
+ return strtol(n.substr(7).c_str(), NULL, 0);
}
// Return the name to use for a sink function, a function whose name
diff --git a/gcc/go/gofrontend/operator.h b/gcc/go/gofrontend/operator.h
index e0a97d05f31..f3e0fd07434 100644
--- a/gcc/go/gofrontend/operator.h
+++ b/gcc/go/gofrontend/operator.h
@@ -63,10 +63,4 @@ enum Operator
OPERATOR_RSQUARE // ]
};
-// Whether a variable expression appears in lvalue (assignment) context.
-enum Varexpr_context {
- VE_rvalue,
- VE_lvalue
-};
-
#endif // !defined(GO_OPERATOR_H)
diff --git a/gcc/go/gofrontend/statements.cc b/gcc/go/gofrontend/statements.cc
index bea57cd6836..b22f690b3d6 100644
--- a/gcc/go/gofrontend/statements.cc
+++ b/gcc/go/gofrontend/statements.cc
@@ -836,100 +836,6 @@ Assignment_statement::do_flatten(Gogo*, Named_object*, Block*,
return this;
}
-
-// Helper class to locate a root Var_expression within an expression
-// tree and mark it as being in an "lvalue" or assignment
-// context. Examples:
-//
-// x, y = 40, foo(w)
-// x[2] = bar(v)
-// x.z.w[blah(v + u)], y.another = 2, 3
-//
-// In the code above, vars "x" and "y" appear in lvalue / assignment
-// context, whereas the other vars "v", "u", etc are in rvalue context.
-//
-// Note: at the moment the Var_expression version of "do_copy()"
-// defaults to returning the original object, not a new object,
-// meaning that a given Var_expression can be referenced from more
-// than one place in the tree. This means that when we want to mark a
-// Var_expression as having lvalue semantics, we need to make a copy
-// of it. Example:
-//
-// mystruct.myfield += 42
-//
-// When this is lowered to eliminate the += operator, we get a tree
-//
-// mystruct.myfield = mystruct.field + 42
-//
-// in which the "mystruct" same Var_expression is referenced on both
-// LHS and RHS subtrees. This in turn means that if we try to mark the
-// LHS Var_expression the RHS Var_expression will also be marked. To
-// address this issue, the code below clones any var_expression before
-// applying an lvalue marking.
-//
-
-class Mark_lvalue_varexprs : public Traverse
-{
- public:
- Mark_lvalue_varexprs()
- : Traverse(traverse_expressions)
- { }
-
- protected:
- int
- expression(Expression**);
-
- private:
-};
-
-int Mark_lvalue_varexprs::expression(Expression** ppexpr)
-{
- Expression* e = *ppexpr;
-
- Var_expression* ve = e->var_expression();
- if (ve)
- {
- ve = new Var_expression(ve->named_object(), ve->location());
- ve->set_in_lvalue_pos();
- *ppexpr = ve;
- return TRAVERSE_EXIT;
- }
-
- Field_reference_expression* fre = e->field_reference_expression();
- if (fre != NULL)
- return TRAVERSE_CONTINUE;
-
- Array_index_expression* aie = e->array_index_expression();
- if (aie != NULL)
- {
- Mark_lvalue_varexprs mlve;
- aie->set_is_lvalue();
- aie->array()->traverse_subexpressions(&mlve);
- return TRAVERSE_EXIT;
- }
-
- Unary_expression* ue = e->unary_expression();
- if (ue && ue->op() == OPERATOR_MULT)
- return TRAVERSE_CONTINUE;
-
- Type_conversion_expression* ce = e->conversion_expression();
- if (ce)
- return TRAVERSE_CONTINUE;
-
- Temporary_reference_expression* tre =
- e->temporary_reference_expression();
- if (tre)
- {
- tre = new Temporary_reference_expression(tre->statement(),
- tre->location());
- *ppexpr = tre;
- tre->set_is_lvalue();
- return TRAVERSE_EXIT;
- }
-
- return TRAVERSE_EXIT;
-}
-
// Convert an assignment statement to the backend representation.
Bstatement*
@@ -942,9 +848,6 @@ Assignment_statement::do_get_backend(Translate_context* context)
return context->backend()->expression_statement(bfunction, rhs);
}
- Mark_lvalue_varexprs mlve;
- Expression::traverse(&this->lhs_, &mlve);
-
Bexpression* lhs = this->lhs_->get_backend(context);
Expression* conv =
Expression::convert_for_assignment(context->gogo(), this->lhs_->type(),
diff --git a/gcc/go/gofrontend/types.cc b/gcc/go/gofrontend/types.cc
index 5b0c84a0f56..247f40a5202 100644
--- a/gcc/go/gofrontend/types.cc
+++ b/gcc/go/gofrontend/types.cc
@@ -1206,8 +1206,7 @@ Type::type_descriptor_pointer(Gogo* gogo, Location location)
go_assert(t->type_descriptor_var_ != NULL);
}
Bexpression* var_expr =
- gogo->backend()->var_expression(t->type_descriptor_var_,
- VE_rvalue, location);
+ gogo->backend()->var_expression(t->type_descriptor_var_, location);
Bexpression* var_addr =
gogo->backend()->address_expression(var_expr, location);
Type* td_type = Type::make_type_descriptor_type();
@@ -2385,7 +2384,7 @@ Type::gc_symbol_pointer(Gogo* gogo)
}
Location bloc = Linemap::predeclared_location();
Bexpression* var_expr =
- gogo->backend()->var_expression(t->gc_symbol_var_, VE_rvalue, bloc);
+ gogo->backend()->var_expression(t->gc_symbol_var_, bloc);
Bexpression* addr_expr =
gogo->backend()->address_expression(var_expr, bloc);
@@ -7395,7 +7394,6 @@ Array_type::get_value_pointer(Gogo*, Expression* array, bool is_lvalue) const
else if (ve != NULL)
{
ve = new Var_expression(ve->named_object(), ve->location());
- ve->set_in_lvalue_pos();
array = ve;
}
}
diff --git a/gcc/go/gofrontend/wb.cc b/gcc/go/gofrontend/wb.cc
index cbefc11c816..d0226fca6bb 100644
--- a/gcc/go/gofrontend/wb.cc
+++ b/gcc/go/gofrontend/wb.cc
@@ -175,7 +175,6 @@ Write_barriers::variable(Named_object* no)
// Replace the initializer.
Location loc = init->location();
Expression* ref = Expression::make_var_reference(no, loc);
- ref->var_expression()->set_in_lvalue_pos();
Statement_inserter inserter(this->gogo_, var);
Statement* s = this->gogo_->assign_with_write_barrier(NULL, NULL, &inserter,
diff --git a/gcc/graphite-scop-detection.c b/gcc/graphite-scop-detection.c
index 1bef380b32a..0c7d626bf05 100644
--- a/gcc/graphite-scop-detection.c
+++ b/gcc/graphite-scop-detection.c
@@ -81,7 +81,7 @@ public:
#define DEBUG_PRINT(args) do \
{ \
if (dump_file && (dump_flags & TDF_DETAILS)) { args; } \
- } while (0);
+ } while (0)
/* Pretty print to FILE all the SCoPs in DOT format and mark them with
different colors. If there are not enough colors, paint the
diff --git a/gcc/hash-map-traits.h b/gcc/hash-map-traits.h
index 2b5fddf2d09..a92f0cb00f4 100644
--- a/gcc/hash-map-traits.h
+++ b/gcc/hash-map-traits.h
@@ -32,6 +32,7 @@ template <typename H, typename Value>
struct simple_hashmap_traits
{
typedef typename H::value_type key_type;
+ static const bool maybe_mx = true;
static inline hashval_t hash (const key_type &);
static inline bool equal_keys (const key_type &, const key_type &);
template <typename T> static inline void remove (T &);
@@ -97,6 +98,12 @@ simple_hashmap_traits <H, Value>::mark_deleted (T &entry)
H::mark_deleted (entry.m_key);
}
+template <typename H, typename Value>
+struct simple_cache_map_traits: public simple_hashmap_traits<H,Value>
+{
+ static const bool maybe_mx = false;
+};
+
/* Implement traits for a hash_map with values of type Value for cases
in which the key cannot represent empty and deleted slots. Instead
record empty and deleted entries in Value. Derived classes must
diff --git a/gcc/hash-map.h b/gcc/hash-map.h
index 73f1c5427a0..6b8365a9d0a 100644
--- a/gcc/hash-map.h
+++ b/gcc/hash-map.h
@@ -62,6 +62,12 @@ class GTY((user)) hash_map
gt_ggc_mx (e.m_value);
}
+ static void ggc_maybe_mx (hash_entry &e)
+ {
+ if (Traits::maybe_mx)
+ ggc_mx (e);
+ }
+
static void pch_nx (hash_entry &e)
{
gt_pch_nx (e.m_key);
@@ -74,6 +80,11 @@ class GTY((user)) hash_map
pch_nx_helper (e.m_value, op, c);
}
+ static int keep_cache_entry (hash_entry &e)
+ {
+ return ggc_marked_p (e.m_key);
+ }
+
private:
template<typename T>
static void
@@ -237,7 +248,8 @@ private:
template<typename T, typename U, typename V> friend void gt_ggc_mx (hash_map<T, U, V> *);
template<typename T, typename U, typename V> friend void gt_pch_nx (hash_map<T, U, V> *);
- template<typename T, typename U, typename V> friend void gt_pch_nx (hash_map<T, U, V> *, gt_pointer_operator, void *);
+ template<typename T, typename U, typename V> friend void gt_pch_nx (hash_map<T, U, V> *, gt_pointer_operator, void *);
+ template<typename T, typename U, typename V> friend void gt_cleare_cache (hash_map<T, U, V> *);
hash_table<hash_entry> m_table;
};
@@ -260,6 +272,13 @@ gt_pch_nx (hash_map<K, V, H> *h)
template<typename K, typename V, typename H>
static inline void
+gt_cleare_cache (hash_map<K, V, H> *h)
+{
+ gt_cleare_cache (&h->m_table);
+}
+
+template<typename K, typename V, typename H>
+static inline void
gt_pch_nx (hash_map<K, V, H> *h, gt_pointer_operator op, void *cookie)
{
op (&h->m_table.m_entries, cookie);
diff --git a/gcc/hash-table.h b/gcc/hash-table.h
index 64d3157953c..b86a1d1b278 100644
--- a/gcc/hash-table.h
+++ b/gcc/hash-table.h
@@ -1044,7 +1044,9 @@ gt_ggc_mx (hash_table<E> *h)
|| table::is_deleted (h->m_entries[i]))
continue;
- E::ggc_mx (h->m_entries[i]);
+ /* Use ggc_maxbe_mx so we don't mark right away for cache tables; we'll
+ mark in gt_cleare_cache if appropriate. */
+ E::ggc_maybe_mx (h->m_entries[i]);
}
}
@@ -1094,7 +1096,6 @@ template<typename H>
inline void
gt_cleare_cache (hash_table<H> *h)
{
- extern void gt_ggc_mx (typename H::value_type &t);
typedef hash_table<H> table;
if (!h)
return;
@@ -1106,7 +1107,7 @@ gt_cleare_cache (hash_table<H> *h)
if (res == 0)
h->clear_slot (&*iter);
else if (res != -1)
- gt_ggc_mx (*iter);
+ H::ggc_mx (*iter);
}
}
diff --git a/gcc/hash-traits.h b/gcc/hash-traits.h
index a5c4f103474..6a613c45811 100644
--- a/gcc/hash-traits.h
+++ b/gcc/hash-traits.h
@@ -235,6 +235,13 @@ struct ggc_remove
gt_ggc_mx (p);
}
+ /* Overridden in ggc_cache_remove. */
+ static void
+ ggc_maybe_mx (T &p)
+ {
+ ggc_mx (p);
+ }
+
static void
pch_nx (T &p)
{
@@ -256,7 +263,7 @@ template<typename T>
struct ggc_cache_remove : ggc_remove<T>
{
/* Entries are weakly held because this is for caches. */
- static void ggc_mx (T &) {}
+ static void ggc_maybe_mx (T &) {}
static int
keep_cache_entry (T &e)
diff --git a/gcc/hooks.c b/gcc/hooks.c
index e69b7a03c2c..408110758f2 100644
--- a/gcc/hooks.c
+++ b/gcc/hooks.c
@@ -525,18 +525,8 @@ hook_bool_mode_reg_class_t_reg_class_t_false (machine_mode, reg_class_t,
return false;
}
-/* Generic hook that takes a bool and two unsigned ints and returns false. */
-
-bool
-hook_bool_bool_uint_uint_false (bool a ATTRIBUTE_UNUSED,
- unsigned int b ATTRIBUTE_UNUSED,
- unsigned int c ATTRIBUTE_UNUSED)
-{
- return false;
-}
-
/* Generic hook that takes a mode and an unsigned HOST_WIDE_INT and
- returns BLKmode. */
+ returns no mode. */
opt_machine_mode
hook_optmode_mode_uhwi_none (machine_mode, unsigned HOST_WIDE_INT)
diff --git a/gcc/hooks.h b/gcc/hooks.h
index a211a637307..acb0847d674 100644
--- a/gcc/hooks.h
+++ b/gcc/hooks.h
@@ -124,7 +124,6 @@ extern const char *hook_constcharptr_const_rtx_insn_null (const rtx_insn *);
extern const char *hook_constcharptr_const_tree_const_tree_null (const_tree, const_tree);
extern const char *hook_constcharptr_int_const_tree_null (int, const_tree);
extern const char *hook_constcharptr_int_const_tree_const_tree_null (int, const_tree, const_tree);
-extern bool hook_bool_bool_uint_uint_false (bool, unsigned int, unsigned int);
extern opt_machine_mode hook_optmode_mode_uhwi_none (machine_mode,
unsigned HOST_WIDE_INT);
diff --git a/gcc/internal-fn.c b/gcc/internal-fn.c
index c4dcb7fb13e..08c3fff4efd 100644
--- a/gcc/internal-fn.c
+++ b/gcc/internal-fn.c
@@ -46,6 +46,9 @@ along with GCC; see the file COPYING3. If not see
#include "recog.h"
#include "builtins.h"
#include "optabs-tree.h"
+#include "gimple-ssa.h"
+#include "tree-phinodes.h"
+#include "ssa-iterators.h"
/* The names of each internal function, indexed by function number. */
const char *const internal_fn_name_array[] = {
@@ -76,7 +79,6 @@ init_internal_fns ()
/* Create static initializers for the information returned by
direct_internal_fn. */
-
#define NOT_VECTORIZABLE false, false, 0
#define VECTORIZABLE true, false, 0
#define VECTORIZABLE_COND true, true, 0
@@ -85,18 +87,19 @@ init_internal_fns ()
#define mask_load_direct { -1, 2, NOT_VECTORIZABLE }
#define load_lanes_direct { -1, -1, NOT_VECTORIZABLE }
#define mask_load_lanes_direct { -1, -1, NOT_VECTORIZABLE }
-#define gather_load_direct { -1, -1, NOT_VECTORIZABLE }
+#define gather_load_direct { -1, 1, NOT_VECTORIZABLE }
#define mask_store_direct { 3, 2, NOT_VECTORIZABLE }
#define store_lanes_direct { 0, 0, NOT_VECTORIZABLE }
#define mask_store_lanes_direct { 0, 0, NOT_VECTORIZABLE }
-#define scatter_store_direct { 3, 3, NOT_VECTORIZABLE }
+#define scatter_store_direct { 3, 1, NOT_VECTORIZABLE }
#define unary_direct { 0, 0, VECTORIZABLE }
#define binary_direct { 0, 0, VECTORIZABLE }
-#define cond_binary_direct { 1, 1, VECTORIZABLE_COND }
#define ternary_direct { 0, 0, VECTORIZABLE }
+#define cond_unary_direct { 1, 1, VECTORIZABLE_COND }
+#define cond_binary_direct { 1, 1, VECTORIZABLE_COND }
#define cond_ternary_direct { 1, 1, VECTORIZABLE_COND }
#define while_direct { 0, 2, NOT_VECTORIZABLE }
-#define clastb_direct { 2, 2, NOT_VECTORIZABLE }
+#define fold_extract_direct { 2, 2, NOT_VECTORIZABLE }
#define firstfault_load_direct { -1, -1, NOT_VECTORIZABLE }
#define read_nf_direct { -1, -1, NOT_VECTORIZABLE }
#define write_nf_direct { 1, 1, NOT_VECTORIZABLE }
@@ -1193,6 +1196,35 @@ expand_neg_overflow (location_t loc, tree lhs, tree arg1, bool is_ubsan,
}
}
+/* Return true if UNS WIDEN_MULT_EXPR with result mode WMODE and operand
+ mode MODE can be expanded without using a libcall. */
+
+static bool
+can_widen_mult_without_libcall (scalar_int_mode wmode, scalar_int_mode mode,
+ rtx op0, rtx op1, bool uns)
+{
+ if (find_widening_optab_handler (umul_widen_optab, wmode, mode)
+ != CODE_FOR_nothing)
+ return true;
+
+ if (find_widening_optab_handler (smul_widen_optab, wmode, mode)
+ != CODE_FOR_nothing)
+ return true;
+
+ rtx_insn *last = get_last_insn ();
+ if (CONSTANT_P (op0))
+ op0 = convert_modes (wmode, mode, op0, uns);
+ else
+ op0 = gen_raw_REG (wmode, LAST_VIRTUAL_REGISTER + 1);
+ if (CONSTANT_P (op1))
+ op1 = convert_modes (wmode, mode, op1, uns);
+ else
+ op1 = gen_raw_REG (wmode, LAST_VIRTUAL_REGISTER + 2);
+ rtx ret = expand_mult (wmode, op0, op1, NULL_RTX, uns, true);
+ delete_insns_since (last);
+ return ret != NULL_RTX;
+}
+
/* Add mul overflow checking to the statement STMT. */
static void
@@ -1486,9 +1518,29 @@ expand_mul_overflow (location_t loc, tree lhs, tree arg0, tree arg1,
ops.op1 = make_tree (type, op1);
ops.op2 = NULL_TREE;
ops.location = loc;
+
+ /* Optimize unsigned overflow check where we don't use the
+ multiplication result, just whether overflow happened.
+ If we can do MULT_HIGHPART_EXPR, that followed by
+ comparison of the result against zero is cheapest.
+ We'll still compute res, but it should be DCEd later. */
+ use_operand_p use;
+ gimple *use_stmt;
+ if (!is_ubsan
+ && lhs
+ && uns
+ && !(uns0_p && uns1_p && !unsr_p)
+ && can_mult_highpart_p (mode, uns) == 1
+ && single_imm_use (lhs, &use, &use_stmt)
+ && is_gimple_assign (use_stmt)
+ && gimple_assign_rhs_code (use_stmt) == IMAGPART_EXPR)
+ goto highpart;
+
if (GET_MODE_2XWIDER_MODE (mode).exists (&wmode)
- && targetm.scalar_mode_supported_p (wmode))
+ && targetm.scalar_mode_supported_p (wmode)
+ && can_widen_mult_without_libcall (wmode, mode, op0, op1, uns))
{
+ twoxwider:
ops.code = WIDEN_MULT_EXPR;
ops.type
= build_nonstandard_integer_type (GET_MODE_PRECISION (wmode), uns);
@@ -1516,6 +1568,35 @@ expand_mul_overflow (location_t loc, tree lhs, tree arg0, tree arg1,
profile_probability::very_likely ());
}
}
+ else if (can_mult_highpart_p (mode, uns) == 1)
+ {
+ highpart:
+ ops.code = MULT_HIGHPART_EXPR;
+ ops.type = type;
+
+ rtx hipart = expand_expr_real_2 (&ops, NULL_RTX, mode,
+ EXPAND_NORMAL);
+ ops.code = MULT_EXPR;
+ res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
+ if (uns)
+ /* For the unsigned multiplication, there was overflow if
+ HIPART is non-zero. */
+ do_compare_rtx_and_jump (hipart, const0_rtx, EQ, true, mode,
+ NULL_RTX, NULL, done_label,
+ profile_probability::very_likely ());
+ else
+ {
+ rtx signbit = expand_shift (RSHIFT_EXPR, mode, res, prec - 1,
+ NULL_RTX, 0);
+ /* RES is low half of the double width result, HIPART
+ the high half. There was overflow if
+ HIPART is different from RES < 0 ? -1 : 0. */
+ do_compare_rtx_and_jump (signbit, hipart, EQ, true, mode,
+ NULL_RTX, NULL, done_label,
+ profile_probability::very_likely ());
+ }
+
+ }
else if (int_mode_for_size (prec / 2, 1).exists (&hmode)
&& 2 * GET_MODE_PRECISION (hmode) == prec)
{
@@ -1821,6 +1902,11 @@ expand_mul_overflow (location_t loc, tree lhs, tree arg0, tree arg1,
tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
emit_move_insn (res, tem);
}
+ else if (GET_MODE_2XWIDER_MODE (mode).exists (&wmode)
+ && targetm.scalar_mode_supported_p (wmode))
+ /* Even emitting a libcall is better than not detecting overflow
+ at all. */
+ goto twoxwider;
else
{
gcc_assert (!is_ubsan);
@@ -2299,14 +2385,13 @@ expand_LOOP_DIST_ALIAS (internal_fn, gcall *)
}
/* Return a memory reference of type TYPE for argument INDEX of STMT.
- If we need to create a new MEM_REF, use argument INDEX + 1 to derive
- the second (TBAA) operand. */
+ Use argument INDEX + 1 to derive the second (TBAA) operand. */
static tree
expand_call_mem_ref (tree type, gcall *stmt, int index)
{
tree addr = gimple_call_arg (stmt, index);
- tree ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, index + 1)), 0);
+ tree alias_ptr_type = TREE_TYPE (gimple_call_arg (stmt, index + 1));
unsigned int align = tree_to_shwi (gimple_call_arg (stmt, index + 1));
if (TYPE_ALIGN (type) != align)
type = build_aligned_type (type, align);
@@ -2324,11 +2409,12 @@ expand_call_mem_ref (tree type, gcall *stmt, int index)
tree mem = TREE_OPERAND (tmp, 0);
if (TREE_CODE (mem) == TARGET_MEM_REF
&& types_compatible_p (TREE_TYPE (mem), type)
- && operand_equal_p (TMR_OFFSET (mem), ptr, 0))
+ && alias_ptr_type == TREE_TYPE (TMR_OFFSET (mem))
+ && integer_zerop (TMR_OFFSET (mem)))
return mem;
}
- return fold_build2 (MEM_REF, type, addr, ptr);
+ return fold_build2 (MEM_REF, type, addr, build_int_cst (alias_ptr_type, 0));
}
/* Expand MASK_LOAD{,_LANES} call STMT using optab OPTAB. */
@@ -2663,84 +2749,66 @@ expand_LAUNDER (internal_fn, gcall *call)
static void
expand_scatter_store_optab_fn (internal_fn, gcall *stmt, direct_optab optab)
{
- struct expand_operand ops[5];
- tree idxtype, rhs[4];
- rtx addr, indexes;
- enum insn_code icode;
-
- rhs[0] = gimple_call_arg (stmt, 0);
- rhs[1] = gimple_call_arg (stmt, 1);
- rhs[2] = gimple_call_arg (stmt, 2);
- rhs[3] = gimple_call_arg (stmt, 3);
-
- addr = expand_normal (rhs[0]);
-
- idxtype = TREE_TYPE (rhs[1]);
- indexes = expand_normal (rhs[1]);
-
- gcc_assert (TREE_CODE (rhs[2]) == INTEGER_CST);
+ internal_fn ifn = gimple_call_internal_fn (stmt);
+ int rhs_index = internal_fn_stored_value_index (ifn);
+ int mask_index = internal_fn_mask_index (ifn);
+ tree base = gimple_call_arg (stmt, 0);
+ tree offset = gimple_call_arg (stmt, 1);
+ tree scale = gimple_call_arg (stmt, 2);
+ tree rhs = gimple_call_arg (stmt, rhs_index);
+
+ rtx base_rtx = expand_normal (base);
+ rtx offset_rtx = expand_normal (offset);
+ HOST_WIDE_INT scale_int = tree_to_shwi (scale);
+ rtx rhs_rtx = expand_normal (rhs);
+ struct expand_operand ops[6];
int i = 0;
- create_address_operand (&ops[i++], addr);
- create_input_operand (&ops[i++], indexes, TYPE_MODE (idxtype));
- create_integer_operand (&ops[i++], TREE_INT_CST_LOW (rhs[2]));
-
- machine_mode mode = TYPE_MODE (TREE_TYPE (rhs[3]));
- rtx src = expand_normal (rhs[3]);
- create_input_operand (&ops[i++], src, mode);
-
- if (optab == vec_mask_scatter_stores_optab
- || optab == vec_mask_scatter_storeu_optab)
+ create_address_operand (&ops[i++], base_rtx);
+ create_input_operand (&ops[i++], offset_rtx, TYPE_MODE (TREE_TYPE (offset)));
+ create_integer_operand (&ops[i++], TYPE_UNSIGNED (TREE_TYPE (offset)));
+ create_integer_operand (&ops[i++], scale_int);
+ create_input_operand (&ops[i++], rhs_rtx, TYPE_MODE (TREE_TYPE (rhs)));
+ if (mask_index >= 0)
{
- tree mask_tree = gimple_call_arg (stmt, 4);
- rtx mask_rtx = expand_normal (mask_tree);
- create_input_operand (&ops[i++], mask_rtx,
- TYPE_MODE (TREE_TYPE (mask_tree)));
+ tree mask = gimple_call_arg (stmt, mask_index);
+ rtx mask_rtx = expand_normal (mask);
+ create_input_operand (&ops[i++], mask_rtx, TYPE_MODE (TREE_TYPE (mask)));
}
- icode = direct_optab_handler (optab, mode);
+ insn_code icode = direct_optab_handler (optab, TYPE_MODE (TREE_TYPE (rhs)));
expand_insn (icode, i, ops);
}
-/* Expand {MASK_,}GATHER_LOAD{S,U} call CALL using optab OPTAB. */
+/* Expand {MASK_,}GATHER_LOAD call CALL using optab OPTAB. */
static void
expand_gather_load_optab_fn (internal_fn, gcall *stmt, direct_optab optab)
{
- struct expand_operand ops[5];
- tree type, idxtype, lhs, rhs[3];
- rtx target, addr, indexes;
- enum insn_code icode;
-
- lhs = gimple_call_lhs (stmt);
- rhs[0] = gimple_call_arg (stmt, 0);
- rhs[1] = gimple_call_arg (stmt, 1);
- rhs[2] = gimple_call_arg (stmt, 2);
-
- type = TREE_TYPE (lhs);
- target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
-
- addr = expand_normal (rhs[0]);
-
- idxtype = TREE_TYPE (rhs[1]);
- indexes = expand_normal (rhs[1]);
+ tree lhs = gimple_call_lhs (stmt);
+ tree base = gimple_call_arg (stmt, 0);
+ tree offset = gimple_call_arg (stmt, 1);
+ tree scale = gimple_call_arg (stmt, 2);
- gcc_assert (TREE_CODE (rhs[2]) == INTEGER_CST);
+ rtx lhs_rtx = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
+ rtx base_rtx = expand_normal (base);
+ rtx offset_rtx = expand_normal (offset);
+ HOST_WIDE_INT scale_int = tree_to_shwi (scale);
int i = 0;
- create_output_operand (&ops[i++], target, TYPE_MODE (type));
- create_address_operand (&ops[i++], addr);
- create_input_operand (&ops[i++], indexes, TYPE_MODE (idxtype));
- create_integer_operand (&ops[i++], TREE_INT_CST_LOW (rhs[2]));
- if (optab == vec_mask_gather_loads_optab
- || optab == vec_mask_gather_loadu_optab)
- {
- tree mask_tree = gimple_call_arg (stmt, 3);
- rtx mask_rtx = expand_normal (mask_tree);
- create_input_operand (&ops[i++], mask_rtx,
- TYPE_MODE (TREE_TYPE (mask_tree)));
- }
- icode = direct_optab_handler (optab, TYPE_MODE (type));
+ struct expand_operand ops[6];
+ create_output_operand (&ops[i++], lhs_rtx, TYPE_MODE (TREE_TYPE (lhs)));
+ create_address_operand (&ops[i++], base_rtx);
+ create_input_operand (&ops[i++], offset_rtx, TYPE_MODE (TREE_TYPE (offset)));
+ create_integer_operand (&ops[i++], TYPE_UNSIGNED (TREE_TYPE (offset)));
+ create_integer_operand (&ops[i++], scale_int);
+ if (optab == mask_gather_load_optab)
+ {
+ tree mask = gimple_call_arg (stmt, 3);
+ rtx mask_rtx = expand_normal (mask);
+ create_input_operand (&ops[i++], mask_rtx, TYPE_MODE (TREE_TYPE (mask)));
+ }
+ insn_code icode = direct_optab_handler (optab, TYPE_MODE (TREE_TYPE (lhs)));
expand_insn (icode, i, ops);
}
@@ -2788,7 +2856,7 @@ expand_DIVMOD (internal_fn, gcall *call_stmt)
expand_expr (build2 (COMPLEX_EXPR, TREE_TYPE (lhs),
make_tree (TREE_TYPE (arg0), quotient),
make_tree (TREE_TYPE (arg1), remainder)),
- target, VOIDmode, EXPAND_NORMAL);
+ target, VOIDmode, EXPAND_NORMAL);
}
/* Expand a call to FN using the operands in STMT. FN has a single
@@ -2899,13 +2967,16 @@ expand_while_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
#define expand_ternary_optab_fn(FN, STMT, OPTAB) \
expand_direct_optab_fn (FN, STMT, OPTAB, 3)
+#define expand_cond_unary_optab_fn(FN, STMT, OPTAB) \
+ expand_direct_optab_fn (FN, STMT, OPTAB, 2)
+
#define expand_cond_binary_optab_fn(FN, STMT, OPTAB) \
expand_direct_optab_fn (FN, STMT, OPTAB, 3)
#define expand_cond_ternary_optab_fn(FN, STMT, OPTAB) \
expand_direct_optab_fn (FN, STMT, OPTAB, 4)
-#define expand_clastb_optab_fn(FN, STMT, OPTAB) \
+#define expand_fold_extract_optab_fn(FN, STMT, OPTAB) \
expand_direct_optab_fn (FN, STMT, OPTAB, 3)
/* RETURN_TYPE and ARGS are a return type and argument list that are
@@ -2961,7 +3032,7 @@ convert_optab_supported_p (convert_optab optab, tree_pair types,
optimization_type opt_type)
{
return (convert_optab_handler (optab, TYPE_MODE (types.first),
- TYPE_MODE (types.second), opt_type)
+ TYPE_MODE (types.second), opt_type)
!= CODE_FOR_nothing);
}
@@ -2982,6 +3053,7 @@ multi_vector_optab_supported_p (convert_optab optab, tree_pair types,
#define direct_unary_optab_supported_p direct_optab_supported_p
#define direct_binary_optab_supported_p direct_optab_supported_p
#define direct_ternary_optab_supported_p direct_optab_supported_p
+#define direct_cond_unary_optab_supported_p direct_optab_supported_p
#define direct_cond_binary_optab_supported_p direct_optab_supported_p
#define direct_cond_ternary_optab_supported_p direct_optab_supported_p
#define direct_mask_load_optab_supported_p direct_optab_supported_p
@@ -2993,11 +3065,30 @@ multi_vector_optab_supported_p (convert_optab optab, tree_pair types,
#define direct_mask_store_lanes_optab_supported_p multi_vector_optab_supported_p
#define direct_scatter_store_optab_supported_p direct_optab_supported_p
#define direct_while_optab_supported_p convert_optab_supported_p
-#define direct_clastb_optab_supported_p direct_optab_supported_p
+#define direct_fold_extract_optab_supported_p direct_optab_supported_p
#define direct_firstfault_load_optab_supported_p direct_optab_supported_p
#define direct_read_nf_optab_supported_p direct_optab_supported_p
#define direct_write_nf_optab_supported_p direct_optab_supported_p
+/* Return the optab used by internal function FN. */
+
+static optab
+direct_internal_fn_optab (internal_fn fn)
+{
+ switch (fn)
+ {
+#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
+ case IFN_##CODE: break;
+#define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) \
+ case IFN_##CODE: return OPTAB##_optab;
+#include "internal-fn.def"
+
+ case IFN_LAST:
+ break;
+ }
+ gcc_unreachable ();
+}
+
/* Return true if FN is supported for the types in TYPES when the
optimization type is OPT_TYPE. The types are those associated with
the "type0" and "type1" fields of FN's direct_internal_fn_info
@@ -3115,62 +3206,146 @@ get_conditional_internal_fn (tree_code code, tree type)
}
}
-/* Expand STMT as though it were a call to internal function FN. */
+/* Return true if IFN is some form of load from memory. */
-void
-expand_internal_call (internal_fn fn, gcall *stmt)
+bool
+internal_load_fn_p (internal_fn fn)
{
- internal_fn_expanders[fn] (fn, stmt);
+ switch (fn)
+ {
+ case IFN_MASK_LOAD:
+ case IFN_LOAD_LANES:
+ case IFN_MASK_LOAD_LANES:
+ case IFN_GATHER_LOAD:
+ case IFN_MASK_GATHER_LOAD:
+ return true;
+
+ default:
+ return false;
+ }
}
-/* Expand STMT, which is a call to internal function FN. */
+/* Return true if IFN is some form of store to memory. */
-void
-expand_internal_call (gcall *stmt)
+bool
+internal_store_fn_p (internal_fn fn)
{
- expand_internal_call (gimple_call_internal_fn (stmt), stmt);
+ switch (fn)
+ {
+ case IFN_MASK_STORE:
+ case IFN_STORE_LANES:
+ case IFN_MASK_STORE_LANES:
+ case IFN_SCATTER_STORE:
+ case IFN_MASK_SCATTER_STORE:
+ return true;
+
+ default:
+ return false;
+ }
}
-/* Determine whether the target can perform a gather load (if GATHER_P)
- or scatter store (if !GATHER_P) in cases where:
- - the data being loaded or stored has type TYPE
- - the individual offsets have type OFFSET_TYPE and
- - individual operations are conditional if HAS_MASK_P
- Return the function to use if so, otherwise return IFN_LAST. */
+/* Return true if IFN is some form of gather load or scatter store. */
-internal_fn
-get_gather_scatter_internal_fn (bool gather_p, tree type, tree offset_type,
- bool has_mask_p)
+bool
+internal_gather_scatter_fn_p (internal_fn fn)
{
- internal_fn ifn;
- addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (type));
- machine_mode address_mode = targetm.addr_space.address_mode (as);
- scalar_int_mode offset_mode = SCALAR_INT_TYPE_MODE (offset_type);
- bool offset_unsigned = TYPE_UNSIGNED (offset_type);
+ switch (fn)
+ {
+ case IFN_GATHER_LOAD:
+ case IFN_MASK_GATHER_LOAD:
+ case IFN_SCATTER_STORE:
+ case IFN_MASK_SCATTER_STORE:
+ return true;
+
+ default:
+ return false;
+ }
+}
- /* Always used signed when the offset does not need extending. */
- if (GET_MODE_BITSIZE (offset_mode) >= GET_MODE_UNIT_BITSIZE (address_mode))
- offset_unsigned = false;
+/* If FN takes a vector mask argument, return the index of that argument,
+ otherwise return -1. */
- if (gather_p)
+int
+internal_fn_mask_index (internal_fn fn)
+{
+ switch (fn)
{
- if (offset_unsigned)
- ifn = has_mask_p ? IFN_MASK_GATHER_LOADU : IFN_GATHER_LOADU;
- else
- ifn = has_mask_p ? IFN_MASK_GATHER_LOADS : IFN_GATHER_LOADS;
+ case IFN_MASK_LOAD:
+ case IFN_MASK_LOAD_LANES:
+ case IFN_MASK_STORE:
+ case IFN_MASK_STORE_LANES:
+ return 2;
+
+ case IFN_MASK_GATHER_LOAD:
+ return 3;
+
+ case IFN_MASK_SCATTER_STORE:
+ return 4;
+
+ default:
+ return -1;
}
- else
+}
+
+/* If FN takes a value that should be stored to memory, return the index
+ of that argument, otherwise return -1. */
+
+int
+internal_fn_stored_value_index (internal_fn fn)
+{
+ switch (fn)
{
- if (offset_unsigned)
- ifn = has_mask_p ? IFN_MASK_SCATTER_STOREU : IFN_SCATTER_STOREU;
- else
- ifn = has_mask_p ? IFN_MASK_SCATTER_STORES : IFN_SCATTER_STORES;
+ case IFN_MASK_STORE:
+ case IFN_SCATTER_STORE:
+ case IFN_MASK_SCATTER_STORE:
+ return 3;
+
+ default:
+ return -1;
}
+}
+
+/* Return true if the target supports gather load or scatter store function
+ IFN. For loads, VECTOR_TYPE is the vector type of the load result,
+ while for stores it is the vector type of the stored data argument.
+ MEMORY_ELEMENT_TYPE is the type of the memory elements being loaded
+ or stored. OFFSET_SIGN is the sign of the offset argument, which is
+ only relevant when the offset is narrower than an address. SCALE is
+ the amount by which the offset should be multiplied *after* it has
+ been extended to address width. */
+
+bool
+internal_gather_scatter_fn_supported_p (internal_fn ifn, tree vector_type,
+ tree memory_element_type,
+ signop offset_sign, int scale)
+{
+ if (!tree_int_cst_equal (TYPE_SIZE (TREE_TYPE (vector_type)),
+ TYPE_SIZE (memory_element_type)))
+ return false;
+ optab optab = direct_internal_fn_optab (ifn);
+ insn_code icode = direct_optab_handler (optab, TYPE_MODE (vector_type));
+ int output_ops = internal_load_fn_p (ifn) ? 1 : 0;
+ return (icode != CODE_FOR_nothing
+ && insn_operand_matches (icode, 2 + output_ops,
+ GEN_INT (offset_sign == UNSIGNED))
+ && insn_operand_matches (icode, 3 + output_ops,
+ GEN_INT (scale)));
+}
- if (!direct_internal_fn_supported_p (ifn, type, OPTIMIZE_FOR_SPEED))
- return IFN_LAST;
+/* Expand STMT as though it were a call to internal function FN. */
- return ifn;
+void
+expand_internal_call (internal_fn fn, gcall *stmt)
+{
+ internal_fn_expanders[fn] (fn, stmt);
+}
+
+/* Expand STMT, which is a call to internal function FN. */
+
+void
+expand_internal_call (gcall *stmt)
+{
+ expand_internal_call (gimple_call_internal_fn (stmt), stmt);
}
/* If MODE is a vector mode, return true if IFN is a direct internal
diff --git a/gcc/internal-fn.def b/gcc/internal-fn.def
index 43885e7988d..d9a0216de9c 100644
--- a/gcc/internal-fn.def
+++ b/gcc/internal-fn.def
@@ -47,13 +47,15 @@ along with GCC; see the file COPYING3. If not see
- mask_load: currently just maskload
- load_lanes: currently just vec_load_lanes
- mask_load_lanes: currently just vec_mask_load_lanes
- - gather_load: currently just vec_{,mask_}gather_load
+ - gather_load: used for {mask_,}gather_load
- mask_store: currently just maskstore
- store_lanes: currently just vec_store_lanes
- mask_store_lanes: currently just vec_mask_store_lanes
+ - scatter_store: used for {mask_,}scatter_store
- - binary: a normal binary optab, such as add<mode>3
+ - unary: a normal unary optab, such as vec_reverse_<mode>
+ - binary: a normal binary optab, such as vec_interleave_lo_<mode>
- ternary: a normal ternary optab, such as fma<mode>4
- cond_binary: a conditional binary optab, such as cond_add<mode>
@@ -111,44 +113,33 @@ DEF_INTERNAL_OPTAB_FN (MASK_LOAD, ECF_PURE, maskload, mask_load)
DEF_INTERNAL_OPTAB_FN (FIRSTFAULT_LOAD, ECF_PURE, firstfault_load, firstfault_load)
DEF_INTERNAL_OPTAB_FN (READ_NF, ECF_NOTHROW, read_nf, read_nf)
DEF_INTERNAL_OPTAB_FN (WRITE_NF, ECF_NOTHROW, write_nf, write_nf)
-DEF_INTERNAL_OPTAB_FN (MASK_POPCOUNT, ECF_CONST | ECF_NOTHROW,
- mask_popcount, unary)
DEF_INTERNAL_OPTAB_FN (LOAD_LANES, ECF_CONST, vec_load_lanes, load_lanes)
DEF_INTERNAL_OPTAB_FN (MASK_LOAD_LANES, ECF_PURE,
vec_mask_load_lanes, mask_load_lanes)
-DEF_INTERNAL_OPTAB_FN (GATHER_LOADS, ECF_PURE,
- vec_gather_loads, gather_load)
-DEF_INTERNAL_OPTAB_FN (MASK_GATHER_LOADS, ECF_PURE,
- vec_mask_gather_loads, gather_load)
-DEF_INTERNAL_OPTAB_FN (GATHER_LOADU, ECF_PURE,
- vec_gather_loadu, gather_load)
-DEF_INTERNAL_OPTAB_FN (MASK_GATHER_LOADU, ECF_PURE,
- vec_mask_gather_loadu, gather_load)
-
-DEF_INTERNAL_OPTAB_FN (SCATTER_STORES, 0,
- vec_scatter_stores, scatter_store)
-DEF_INTERNAL_OPTAB_FN (MASK_SCATTER_STORES, 0,
- vec_mask_scatter_stores, scatter_store)
-DEF_INTERNAL_OPTAB_FN (SCATTER_STOREU, 0,
- vec_scatter_storeu, scatter_store)
-DEF_INTERNAL_OPTAB_FN (MASK_SCATTER_STOREU, 0,
- vec_mask_scatter_storeu, scatter_store)
+DEF_INTERNAL_OPTAB_FN (GATHER_LOAD, ECF_PURE, gather_load, gather_load)
+DEF_INTERNAL_OPTAB_FN (MASK_GATHER_LOAD, ECF_PURE,
+ mask_gather_load, gather_load)
+
+DEF_INTERNAL_OPTAB_FN (SCATTER_STORE, 0, scatter_store, scatter_store)
+DEF_INTERNAL_OPTAB_FN (MASK_SCATTER_STORE, 0,
+ mask_scatter_store, scatter_store)
DEF_INTERNAL_OPTAB_FN (MASK_STORE, 0, maskstore, mask_store)
DEF_INTERNAL_OPTAB_FN (STORE_LANES, ECF_CONST, vec_store_lanes, store_lanes)
DEF_INTERNAL_OPTAB_FN (MASK_STORE_LANES, 0,
vec_mask_store_lanes, mask_store_lanes)
-DEF_INTERNAL_OPTAB_FN (WHILE_ULT, ECF_CONST | ECF_NOTHROW, while_ult, while)
+DEF_INTERNAL_OPTAB_FN (MASK_POPCOUNT, ECF_CONST | ECF_NOTHROW,
+ mask_popcount, unary)
-DEF_INTERNAL_OPTAB_FN (CLASTB, ECF_CONST | ECF_NOTHROW, clastb, clastb)
+DEF_INTERNAL_OPTAB_FN (WHILE_ULT, ECF_CONST | ECF_NOTHROW, while_ult, while)
-DEF_INTERNAL_OPTAB_FN (VEC_INTERLEAVE_HI, ECF_CONST | ECF_NOTHROW,
- vec_interleave_hi, binary)
DEF_INTERNAL_OPTAB_FN (VEC_INTERLEAVE_LO, ECF_CONST | ECF_NOTHROW,
vec_interleave_lo, binary)
+DEF_INTERNAL_OPTAB_FN (VEC_INTERLEAVE_HI, ECF_CONST | ECF_NOTHROW,
+ vec_interleave_hi, binary)
DEF_INTERNAL_OPTAB_FN (VEC_EXTRACT_EVEN, ECF_CONST | ECF_NOTHROW,
vec_extract_even, binary)
DEF_INTERNAL_OPTAB_FN (VEC_EXTRACT_ODD, ECF_CONST | ECF_NOTHROW,
@@ -194,7 +185,12 @@ DEF_INTERNAL_OPTAB_FN (BREAK_AFTER, ECF_CONST | ECF_NOTHROW,
/* Extract the last active element from a vector. */
DEF_INTERNAL_OPTAB_FN (EXTRACT_LAST, ECF_CONST | ECF_NOTHROW,
- extract_last, binary)
+ extract_last, cond_unary)
+
+/* Same, but return the first argument if no elements are active. */
+DEF_INTERNAL_OPTAB_FN (FOLD_EXTRACT_LAST, ECF_CONST | ECF_NOTHROW,
+ fold_extract_last, fold_extract)
+
/* Unary math functions. */
DEF_INTERNAL_FLT_FN (ACOS, ECF_CONST, acos, unary)
diff --git a/gcc/internal-fn.h b/gcc/internal-fn.h
index fff1cb4204a..951a6ed865b 100644
--- a/gcc/internal-fn.h
+++ b/gcc/internal-fn.h
@@ -203,7 +203,6 @@ direct_internal_fn (internal_fn fn)
extern tree_pair direct_internal_fn_types (internal_fn, tree, tree *);
extern tree_pair direct_internal_fn_types (internal_fn, gcall *);
-
extern bool direct_internal_fn_supported_p (internal_fn, tree_pair,
optimization_type);
extern bool direct_internal_fn_supported_p (internal_fn, tree,
@@ -226,12 +225,18 @@ extern bool set_edom_supported_p (void);
extern internal_fn get_conditional_internal_fn (tree_code, tree);
+extern bool internal_load_fn_p (internal_fn);
+extern bool internal_store_fn_p (internal_fn);
+extern bool internal_gather_scatter_fn_p (internal_fn);
+extern int internal_fn_mask_index (internal_fn);
+extern int internal_fn_stored_value_index (internal_fn);
+extern bool internal_gather_scatter_fn_supported_p (internal_fn, tree,
+ tree, signop, int);
+
extern void expand_internal_call (gcall *);
extern void expand_internal_call (internal_fn, gcall *);
extern void expand_PHI (internal_fn, gcall *);
-extern internal_fn get_gather_scatter_internal_fn (bool, tree, tree, bool);
-
extern bool vectorized_internal_fn_supported_p (internal_fn, machine_mode);
#endif
diff --git a/gcc/ipa-chkp.c b/gcc/ipa-chkp.c
index 704ef6e4550..30684baa3f8 100644
--- a/gcc/ipa-chkp.c
+++ b/gcc/ipa-chkp.c
@@ -717,7 +717,7 @@ chkp_produce_thunks (bool early)
node->thunk.thunk_p = true;
node->thunk.add_pointer_bounds_args = true;
node->create_edge (node->instrumented_version, NULL,
- node->count, CGRAPH_FREQ_BASE);
+ node->count);
node->create_reference (node->instrumented_version,
IPA_REF_CHKP, NULL);
/* Thunk shouldn't be a cdtor. */
diff --git a/gcc/ipa-cp.c b/gcc/ipa-cp.c
index 24d2be79103..bc1e3ae799d 100644
--- a/gcc/ipa-cp.c
+++ b/gcc/ipa-cp.c
@@ -498,7 +498,7 @@ ipcp_lattice<valtype>::print (FILE * f, bool dump_sources, bool dump_benefits)
fprintf (f, " [from:");
for (s = val->sources; s; s = s->next)
fprintf (f, " %i(%i)", s->cs->caller->order,
- s->cs->frequency);
+ s->cs->frequency ());
fprintf (f, "]");
}
@@ -677,9 +677,9 @@ gather_caller_stats (struct cgraph_node *node, void *data)
for (cs = node->callers; cs; cs = cs->next_caller)
if (!cs->caller->thunk.thunk_p)
{
- if (cs->count.initialized_p ())
- stats->count_sum += cs->count;
- stats->freq_sum += cs->frequency;
+ if (cs->count.ipa ().initialized_p ())
+ stats->count_sum += cs->count.ipa ();
+ stats->freq_sum += cs->frequency ();
stats->n_calls++;
if (cs->maybe_hot_p ())
stats->n_hot_calls ++;
@@ -731,7 +731,7 @@ ipcp_cloning_candidate_p (struct cgraph_node *node)
significantly. */
if (max_count > profile_count::zero ())
{
- if (stats.count_sum > node->count.apply_scale (90, 100))
+ if (stats.count_sum > node->count.ipa ().apply_scale (90, 100))
{
if (dump_file)
fprintf (dump_file, "Considering %s for cloning; "
@@ -3272,7 +3272,7 @@ ipcp_propagate_stage (struct ipa_topo_info *topo)
}
if (node->definition && !node->alias)
overall_size += ipa_fn_summaries->get (node)->self_size;
- max_count = max_count.max (node->count);
+ max_count = max_count.max (node->count.ipa ());
}
max_new_size = overall_size;
@@ -3550,9 +3550,9 @@ get_info_about_necessary_edges (ipcp_value<valtype> *val, cgraph_node *dest,
if (cgraph_edge_brings_value_p (cs, src, dest))
{
count++;
- freq += cs->frequency;
- if (cs->count.initialized_p ())
- cnt += cs->count;
+ freq += cs->frequency ();
+ if (cs->count.ipa ().initialized_p ())
+ cnt += cs->count.ipa ();
hot |= cs->maybe_hot_p ();
}
cs = get_next_cgraph_edge_clone (cs);
@@ -3662,7 +3662,7 @@ update_profiling_info (struct cgraph_node *orig_node,
profile_count new_sum, orig_sum;
profile_count remainder, orig_node_count = orig_node->count;
- if (!(orig_node_count > profile_count::zero ()))
+ if (!(orig_node_count.ipa () > profile_count::zero ()))
return;
init_caller_stats (&stats);
@@ -3701,7 +3701,7 @@ update_profiling_info (struct cgraph_node *orig_node,
for (cs = new_node->callees; cs; cs = cs->next_callee)
/* FIXME: why we care about non-zero frequency here? */
- if (cs->frequency)
+ if (cs->frequency ())
cs->count = cs->count.apply_scale (new_sum, orig_node_count);
else
cs->count = profile_count::zero ();
@@ -3741,7 +3741,7 @@ update_specialized_profile (struct cgraph_node *new_node,
orig_node->count -= redirected_sum;
for (cs = new_node->callees; cs; cs = cs->next_callee)
- if (cs->frequency)
+ if (cs->frequency ())
cs->count += cs->count.apply_scale (redirected_sum, new_node_count);
else
cs->count = profile_count::zero ();
@@ -4463,8 +4463,8 @@ perhaps_add_new_callers (cgraph_node *node, ipcp_value<valtype> *val)
cs->redirect_callee_duplicating_thunks (val->spec_node);
val->spec_node->expand_all_artificial_thunks ();
- if (cs->count.initialized_p ())
- redirected_sum = redirected_sum + cs->count;
+ if (cs->count.ipa ().initialized_p ())
+ redirected_sum = redirected_sum + cs->count.ipa ();
}
cs = get_next_cgraph_edge_clone (cs);
}
diff --git a/gcc/ipa-devirt.c b/gcc/ipa-devirt.c
index f03c7f099f7..540f038a2d5 100644
--- a/gcc/ipa-devirt.c
+++ b/gcc/ipa-devirt.c
@@ -3566,7 +3566,7 @@ ipa_devirt (void)
bool final;
if (final_warning_records)
- final_warning_records->dyn_count = e->count;
+ final_warning_records->dyn_count = e->count.ipa ();
vec <cgraph_node *>targets
= possible_polymorphic_call_targets
@@ -3727,8 +3727,7 @@ ipa_devirt (void)
nconverted++;
update = true;
e->make_speculative
- (likely_target, e->count.apply_scale (8, 10),
- e->frequency * 8 / 10);
+ (likely_target, e->count.apply_scale (8, 10));
}
}
if (update)
diff --git a/gcc/ipa-fnsummary.c b/gcc/ipa-fnsummary.c
index f6841104a32..8e26e7e257a 100644
--- a/gcc/ipa-fnsummary.c
+++ b/gcc/ipa-fnsummary.c
@@ -244,7 +244,6 @@ redirect_to_unreachable (struct cgraph_edge *e)
e->redirect_callee (target);
struct ipa_call_summary *es = ipa_call_summaries->get (e);
e->inline_failed = CIF_UNREACHABLE;
- e->frequency = 0;
e->count = profile_count::zero ();
es->call_stmt_size = 0;
es->call_stmt_time = 0;
@@ -818,12 +817,12 @@ dump_ipa_call_summary (FILE *f, int indent, struct cgraph_node *node,
int i;
fprintf (f,
- "%*s%s/%i %s\n%*s loop depth:%2i freq:%4i size:%2i"
+ "%*s%s/%i %s\n%*s loop depth:%2i freq:%4.2f size:%2i"
" time: %2i callee size:%2i stack:%2i",
indent, "", callee->name (), callee->order,
!edge->inline_failed
? "inlined" : cgraph_inline_failed_string (edge-> inline_failed),
- indent, "", es->loop_depth, edge->frequency,
+ indent, "", es->loop_depth, edge->sreal_frequency ().to_double (),
es->call_stmt_size, es->call_stmt_time,
(int) ipa_fn_summaries->get (callee)->size / ipa_fn_summary::size_scale,
(int) ipa_fn_summaries->get (callee)->estimated_stack_size);
@@ -861,11 +860,12 @@ dump_ipa_call_summary (FILE *f, int indent, struct cgraph_node *node,
for (edge = node->indirect_calls; edge; edge = edge->next_callee)
{
struct ipa_call_summary *es = ipa_call_summaries->get (edge);
- fprintf (f, "%*sindirect call loop depth:%2i freq:%4i size:%2i"
+ fprintf (f, "%*sindirect call loop depth:%2i freq:%4.2f size:%2i"
" time: %2i",
indent, "",
es->loop_depth,
- edge->frequency, es->call_stmt_size, es->call_stmt_time);
+ edge->sreal_frequency ().to_double (), es->call_stmt_size,
+ es->call_stmt_time);
if (es->predicate)
{
fprintf (f, "predicate: ");
@@ -2579,10 +2579,9 @@ estimate_edge_size_and_time (struct cgraph_edge *e, int *size, int *min_size,
if (min_size)
*min_size += cur_size;
if (prob == REG_BR_PROB_BASE)
- *time += ((sreal)(call_time * e->frequency)) / CGRAPH_FREQ_BASE;
+ *time += ((sreal)call_time) * e->sreal_frequency ();
else
- *time += ((sreal)call_time) * (prob * e->frequency)
- / (CGRAPH_FREQ_BASE * REG_BR_PROB_BASE);
+ *time += ((sreal)call_time * prob) * e->sreal_frequency ();
}
@@ -3059,7 +3058,7 @@ ipa_merge_fn_summary_after_inlining (struct cgraph_edge *edge)
toplev_predicate);
if (p != false && nonconstp != false)
{
- sreal add_time = ((sreal)e->time * edge->frequency) / CGRAPH_FREQ_BASE;
+ sreal add_time = ((sreal)e->time * edge->sreal_frequency ());
int prob = e->nonconst_predicate.probability (callee_info->conds,
clause, es->param);
add_time = add_time * prob / REG_BR_PROB_BASE;
@@ -3619,3 +3618,12 @@ make_pass_ipa_fn_summary (gcc::context *ctxt)
{
return new pass_ipa_fn_summary (ctxt);
}
+
+/* Reset all state within ipa-fnsummary.c so that we can rerun the compiler
+ within the same process. For use by toplev::finalize. */
+
+void
+ipa_fnsummary_c_finalize (void)
+{
+ ipa_free_fn_summary ();
+}
diff --git a/gcc/ipa-fnsummary.h b/gcc/ipa-fnsummary.h
index a794bd09318..b345bbc4fd7 100644
--- a/gcc/ipa-fnsummary.h
+++ b/gcc/ipa-fnsummary.h
@@ -266,4 +266,6 @@ void estimate_node_size_and_time (struct cgraph_node *node,
vec<inline_param_summary>
inline_param_summary);
+void ipa_fnsummary_c_finalize (void);
+
#endif /* GCC_IPA_FNSUMMARY_H */
diff --git a/gcc/ipa-inline-analysis.c b/gcc/ipa-inline-analysis.c
index 38f3d2e762a..54a441f142d 100644
--- a/gcc/ipa-inline-analysis.c
+++ b/gcc/ipa-inline-analysis.c
@@ -175,11 +175,11 @@ do_estimate_edge_time (struct cgraph_edge *edge)
edges and for those we disable size limits. Don't do that when
probability that caller will call the callee is low however, since it
may hurt optimization of the caller's hot path. */
- if (edge->count.initialized_p () && edge->maybe_hot_p ()
- && (edge->count.apply_scale (2, 1)
+ if (edge->count.ipa ().initialized_p () && edge->maybe_hot_p ()
+ && (edge->count.ipa ().apply_scale (2, 1)
> (edge->caller->global.inlined_to
- ? edge->caller->global.inlined_to->count
- : edge->caller->count)))
+ ? edge->caller->global.inlined_to->count.ipa ()
+ : edge->caller->count.ipa ())))
hints |= INLINE_HINT_known_hot;
known_vals.release ();
diff --git a/gcc/ipa-inline-transform.c b/gcc/ipa-inline-transform.c
index 886e8edd473..8e66483016a 100644
--- a/gcc/ipa-inline-transform.c
+++ b/gcc/ipa-inline-transform.c
@@ -51,39 +51,25 @@ along with GCC; see the file COPYING3. If not see
int ncalls_inlined;
int nfunctions_inlined;
-/* Scale frequency of NODE edges by FREQ_SCALE. */
+/* Scale counts of NODE edges by NUM/DEN. */
static void
-update_noncloned_frequencies (struct cgraph_node *node,
- int freq_scale, profile_count num,
- profile_count den)
+update_noncloned_counts (struct cgraph_node *node,
+ profile_count num, profile_count den)
{
struct cgraph_edge *e;
- bool scale = (num == profile_count::zero () || den > 0);
- /* We do not want to ignore high loop nest after freq drops to 0. */
- if (!freq_scale)
- freq_scale = 1;
+ profile_count::adjust_for_ipa_scaling (&num, &den);
+
for (e = node->callees; e; e = e->next_callee)
{
- e->frequency = e->frequency * (gcov_type) freq_scale / CGRAPH_FREQ_BASE;
- if (e->frequency > CGRAPH_FREQ_MAX)
- e->frequency = CGRAPH_FREQ_MAX;
if (!e->inline_failed)
- update_noncloned_frequencies (e->callee, freq_scale, num, den);
- if (scale)
- e->count = e->count.apply_scale (num, den);
+ update_noncloned_counts (e->callee, num, den);
+ e->count = e->count.apply_scale (num, den);
}
for (e = node->indirect_calls; e; e = e->next_callee)
- {
- e->frequency = e->frequency * (gcov_type) freq_scale / CGRAPH_FREQ_BASE;
- if (e->frequency > CGRAPH_FREQ_MAX)
- e->frequency = CGRAPH_FREQ_MAX;
- if (scale)
- e->count = e->count.apply_scale (num, den);
- }
- if (scale)
- node->count = node->count.apply_scale (num, den);
+ e->count = e->count.apply_scale (num, den);
+ node->count = node->count.apply_scale (num, den);
}
/* We removed or are going to remove the last call to NODE.
@@ -171,12 +157,11 @@ master_clone_with_noninline_clones_p (struct cgraph_node *node)
By default the offline copy is removed, when it appears dead after inlining.
UPDATE_ORIGINAL prevents this transformation.
If OVERALL_SIZE is non-NULL, the size is updated to reflect the
- transformation.
- FREQ_SCALE specify the scaling of frequencies of call sites. */
+ transformation. */
void
clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
- bool update_original, int *overall_size, int freq_scale)
+ bool update_original, int *overall_size)
{
struct cgraph_node *inlining_into;
struct cgraph_edge *next;
@@ -220,8 +205,7 @@ clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
}
duplicate = false;
e->callee->externally_visible = false;
- update_noncloned_frequencies (e->callee, e->frequency,
- e->count, e->callee->count);
+ update_noncloned_counts (e->callee, e->count, e->callee->count);
dump_callgraph_transformation (e->callee, inlining_into,
"inlining to");
@@ -230,11 +214,8 @@ clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
{
struct cgraph_node *n;
- if (freq_scale == -1)
- freq_scale = e->frequency;
n = e->callee->create_clone (e->callee->decl,
- MIN (e->count, e->callee->count),
- freq_scale,
+ e->count,
update_original, vNULL, true,
inlining_into,
NULL);
@@ -252,7 +233,7 @@ clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
{
next = e->next_callee;
if (!e->inline_failed)
- clone_inlined_nodes (e, duplicate, update_original, overall_size, freq_scale);
+ clone_inlined_nodes (e, duplicate, update_original, overall_size);
}
}
@@ -459,7 +440,7 @@ inline_call (struct cgraph_edge *e, bool update_original,
}
}
- clone_inlined_nodes (e, true, update_original, overall_size, e->frequency);
+ clone_inlined_nodes (e, true, update_original, overall_size);
gcc_assert (curr->callee->global.inlined_to == to);
@@ -692,7 +673,10 @@ inline_transform (struct cgraph_node *node)
basic_block bb;
FOR_ALL_BB_FN (bb, cfun)
- bb->count = bb->count.apply_scale (num, den);
+ if (num == profile_count::zero ())
+ bb->count = bb->count.global0 ();
+ else
+ bb->count = bb->count.apply_scale (num, den);
ENTRY_BLOCK_PTR_FOR_FN (cfun)->count = node->count;
}
todo = optimize_inline_calls (current_function_decl);
diff --git a/gcc/ipa-inline.c b/gcc/ipa-inline.c
index 687996876ce..8d9ecb26d23 100644
--- a/gcc/ipa-inline.c
+++ b/gcc/ipa-inline.c
@@ -129,8 +129,8 @@ static int overall_size;
static profile_count max_count;
static profile_count spec_rem;
-/* Pre-computed constants 1/CGRAPH_FREQ_BASE and 1/100. */
-static sreal cgraph_freq_base_rec, percent_rec;
+/* Pre-computed constant 1/100. */
+static sreal percent_rec;
/* Return false when inlining edge E would lead to violating
limits on function unit growth or stack usage growth.
@@ -640,12 +640,9 @@ compute_uninlined_call_time (struct cgraph_edge *edge,
? edge->caller->global.inlined_to
: edge->caller);
- if (edge->count.nonzero_p ()
- && caller->count.nonzero_p ())
- uninlined_call_time *= (sreal)edge->count.to_gcov_type ()
- / caller->count.to_gcov_type ();
- if (edge->frequency)
- uninlined_call_time *= cgraph_freq_base_rec * edge->frequency;
+ sreal freq = edge->sreal_frequency ();
+ if (freq != 0)
+ uninlined_call_time *= freq;
else
uninlined_call_time = uninlined_call_time >> 11;
@@ -665,18 +662,15 @@ compute_inlined_call_time (struct cgraph_edge *edge,
: edge->caller);
sreal caller_time = ipa_fn_summaries->get (caller)->time;
- if (edge->count.nonzero_p ()
- && caller->count.nonzero_p ())
- time *= (sreal)edge->count.to_gcov_type () / caller->count.to_gcov_type ();
- if (edge->frequency)
- time *= cgraph_freq_base_rec * edge->frequency;
+ sreal freq = edge->sreal_frequency ();
+ if (freq != 0)
+ time *= freq;
else
time = time >> 11;
/* This calculation should match one in ipa-inline-analysis.c
(estimate_edge_size_and_time). */
- time -= (sreal) edge->frequency
- * ipa_call_summaries->get (edge)->call_stmt_time / CGRAPH_FREQ_BASE;
+ time -= (sreal)ipa_call_summaries->get (edge)->call_stmt_time * freq;
time += caller_time;
if (time <= 0)
time = ((sreal) 1) >> 8;
@@ -724,7 +718,7 @@ want_inline_small_function_p (struct cgraph_edge *e, bool report)
promote non-inline functions to inline and we increase
MAX_INLINE_INSNS_SINGLE 16-fold for inline functions. */
else if ((!DECL_DECLARED_INLINE_P (callee->decl)
- && (!e->count.initialized_p () || !e->maybe_hot_p ()))
+ && (!e->count.ipa ().initialized_p () || !e->maybe_hot_p ()))
&& ipa_fn_summaries->get (callee)->min_size
- ipa_call_summaries->get (e)->call_stmt_size
> MAX (MAX_INLINE_INSNS_SINGLE, MAX_INLINE_INSNS_AUTO))
@@ -733,7 +727,7 @@ want_inline_small_function_p (struct cgraph_edge *e, bool report)
want_inline = false;
}
else if ((DECL_DECLARED_INLINE_P (callee->decl)
- || e->count.nonzero_p ())
+ || e->count.ipa ().nonzero_p ())
&& ipa_fn_summaries->get (callee)->min_size
- ipa_call_summaries->get (e)->call_stmt_size
> 16 * MAX_INLINE_INSNS_SINGLE)
@@ -843,7 +837,7 @@ want_inline_self_recursive_call_p (struct cgraph_edge *edge,
reason = "recursive call is cold";
want_inline = false;
}
- else if (!outer_node->count.nonzero_p ())
+ else if (!outer_node->count.ipa ().nonzero_p ())
{
reason = "not executed in profile";
want_inline = false;
@@ -855,7 +849,7 @@ want_inline_self_recursive_call_p (struct cgraph_edge *edge,
}
if (outer_node->global.inlined_to)
- caller_freq = outer_node->callers->frequency;
+ caller_freq = outer_node->callers->frequency ();
if (!caller_freq)
{
@@ -881,16 +875,16 @@ want_inline_self_recursive_call_p (struct cgraph_edge *edge,
int i;
for (i = 1; i < depth; i++)
max_prob = max_prob * max_prob / CGRAPH_FREQ_BASE;
- if (max_count.nonzero_p () && edge->count.nonzero_p ()
- && (edge->count.to_gcov_type () * CGRAPH_FREQ_BASE
- / outer_node->count.to_gcov_type ()
+ if (max_count.nonzero_p () && edge->count.ipa ().nonzero_p ()
+ && (edge->count.ipa ().to_gcov_type () * CGRAPH_FREQ_BASE
+ / outer_node->count.ipa ().to_gcov_type ()
>= max_prob))
{
reason = "profile of recursive call is too large";
want_inline = false;
}
if (!max_count.nonzero_p ()
- && (edge->frequency * CGRAPH_FREQ_BASE / caller_freq
+ && (edge->frequency () * CGRAPH_FREQ_BASE / caller_freq
>= max_prob))
{
reason = "frequency of recursive call is too large";
@@ -915,17 +909,17 @@ want_inline_self_recursive_call_p (struct cgraph_edge *edge,
methods. */
else
{
- if (max_count.nonzero_p () && edge->count.initialized_p ()
- && (edge->count.to_gcov_type () * 100
- / outer_node->count.to_gcov_type ()
+ if (max_count.nonzero_p () && edge->count.ipa ().initialized_p ()
+ && (edge->count.ipa ().to_gcov_type () * 100
+ / outer_node->count.ipa ().to_gcov_type ()
<= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
{
reason = "profile of recursive call is too small";
want_inline = false;
}
else if ((!max_count.nonzero_p ()
- || !edge->count.initialized_p ())
- && (edge->frequency * 100 / caller_freq
+ || !edge->count.ipa ().initialized_p ())
+ && (edge->frequency () * 100 / caller_freq
<= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
{
reason = "frequency of recursive call is too small";
@@ -1023,8 +1017,11 @@ edge_badness (struct cgraph_edge *edge, bool dump)
edge_time = estimate_edge_time (edge, &unspec_edge_time);
hints = estimate_edge_hints (edge);
gcc_checking_assert (edge_time >= 0);
- /* Check that inlined time is better, but tolerate some roundoff issues. */
- gcc_checking_assert ((edge_time - callee_info->time).to_int () <= 0);
+ /* Check that inlined time is better, but tolerate some roundoff issues.
+ FIXME: When callee profile drops to 0 we account calls more. This
+ should be fixed by never doing that. */
+ gcc_checking_assert ((edge_time - callee_info->time).to_int () <= 0
+ || callee->count.ipa ().initialized_p ());
gcc_checking_assert (growth <= callee_info->size);
if (dump)
@@ -1070,7 +1067,7 @@ edge_badness (struct cgraph_edge *edge, bool dump)
then calls without.
*/
else if (opt_for_fn (caller->decl, flag_guess_branch_prob)
- || caller->count.nonzero_p ())
+ || caller->count.ipa ().nonzero_p ())
{
sreal numerator, denominator;
int overall_growth;
@@ -1080,9 +1077,9 @@ edge_badness (struct cgraph_edge *edge, bool dump)
- inlined_time);
if (numerator == 0)
numerator = ((sreal) 1 >> 8);
- if (caller->count.nonzero_p ())
- numerator *= caller->count.to_gcov_type ();
- else if (caller->count.initialized_p ())
+ if (caller->count.ipa ().nonzero_p ())
+ numerator *= caller->count.ipa ().to_gcov_type ();
+ else if (caller->count.ipa ().initialized_p ())
numerator = numerator >> 11;
denominator = growth;
@@ -1108,14 +1105,14 @@ edge_badness (struct cgraph_edge *edge, bool dump)
&& callee_info->single_caller
&& !edge->caller->global.inlined_to
/* ... and edges executed only conditionally ... */
- && edge->frequency < CGRAPH_FREQ_BASE
+ && edge->frequency () < CGRAPH_FREQ_BASE
/* ... consider case where callee is not inline but caller is ... */
&& ((!DECL_DECLARED_INLINE_P (edge->callee->decl)
&& DECL_DECLARED_INLINE_P (caller->decl))
/* ... or when early optimizers decided to split and edge
frequency still indicates splitting is a win ... */
|| (callee->split_part && !caller->split_part
- && edge->frequency
+ && edge->frequency ()
< CGRAPH_FREQ_BASE
* PARAM_VALUE
(PARAM_PARTIAL_INLINING_ENTRY_PROBABILITY) / 100
@@ -1166,9 +1163,9 @@ edge_badness (struct cgraph_edge *edge, bool dump)
" overall growth %i (current) %i (original)"
" %i (compensated)\n",
badness.to_double (),
- (double)edge->frequency / CGRAPH_FREQ_BASE,
- edge->count.initialized_p () ? edge->count.to_gcov_type () : -1,
- caller->count.initialized_p () ? caller->count.to_gcov_type () : -1,
+ edge->sreal_frequency ().to_double (),
+ edge->count.ipa ().initialized_p () ? edge->count.ipa ().to_gcov_type () : -1,
+ caller->count.ipa ().initialized_p () ? caller->count.ipa ().to_gcov_type () : -1,
compute_uninlined_call_time (edge,
unspec_edge_time).to_double (),
compute_inlined_call_time (edge, edge_time).to_double (),
@@ -1430,8 +1427,8 @@ lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
{
/* When profile feedback is available, prioritize by expected number
of calls. */
- heap->insert (!(max_count > 0) || !e->count.initialized_p () ? -e->frequency
- : -(e->count.to_gcov_type ()
+ heap->insert (!(max_count > 0) || !e->count.ipa ().initialized_p () ? -e->frequency ()
+ : -(e->count.ipa ().to_gcov_type ()
/ ((max_count.to_gcov_type () + (1<<24) - 1)
/ (1<<24))),
e);
@@ -1533,11 +1530,10 @@ recursive_inlining (struct cgraph_edge *edge,
{
/* We need original clone to copy around. */
master_clone = node->create_clone (node->decl, node->count,
- CGRAPH_FREQ_BASE, false, vNULL,
- true, NULL, NULL);
+ false, vNULL, true, NULL, NULL);
for (e = master_clone->callees; e; e = e->next_callee)
if (!e->inline_failed)
- clone_inlined_nodes (e, true, false, NULL, CGRAPH_FREQ_BASE);
+ clone_inlined_nodes (e, true, false, NULL);
curr->redirect_callee (master_clone);
reset_edge_growth_cache (curr);
}
@@ -1684,8 +1680,8 @@ resolve_noninline_speculation (edge_heap_t *edge_heap, struct cgraph_edge *edge)
? node->global.inlined_to : node;
auto_bitmap updated_nodes;
- if (edge->count.initialized_p ())
- spec_rem += edge->count;
+ if (edge->count.ipa ().initialized_p ())
+ spec_rem += edge->count.ipa ();
edge->resolve_speculation ();
reset_edge_caches (where);
ipa_update_overall_fn_summary (where);
@@ -1790,7 +1786,7 @@ inline_small_functions (void)
}
for (edge = node->callers; edge; edge = edge->next_caller)
- max_count = max_count.max (edge->count);
+ max_count = max_count.max (edge->count.ipa ());
}
ipa_free_postorder_info ();
initialize_growth_caches ();
@@ -1874,35 +1870,40 @@ inline_small_functions (void)
continue;
#if CHECKING_P
- /* Be sure that caches are maintained consistent. */
- sreal cached_badness = edge_badness (edge, false);
-
- int old_size_est = estimate_edge_size (edge);
- sreal old_time_est = estimate_edge_time (edge);
- int old_hints_est = estimate_edge_hints (edge);
-
- reset_edge_growth_cache (edge);
- gcc_assert (old_size_est == estimate_edge_size (edge));
- gcc_assert (old_time_est == estimate_edge_time (edge));
- /* FIXME:
-
- gcc_assert (old_hints_est == estimate_edge_hints (edge));
-
- fails with profile feedback because some hints depends on
- maybe_hot_edge_p predicate and because callee gets inlined to other
- calls, the edge may become cold.
- This ought to be fixed by computing relative probabilities
- for given invocation but that will be better done once whole
- code is converted to sreals. Disable for now and revert to "wrong"
- value so enable/disable checking paths agree. */
- edge_growth_cache[edge->uid].hints = old_hints_est + 1;
-
- /* When updating the edge costs, we only decrease badness in the keys.
- Increases of badness are handled lazilly; when we see key with out
- of date value on it, we re-insert it now. */
- current_badness = edge_badness (edge, false);
- gcc_assert (cached_badness == current_badness);
- gcc_assert (current_badness >= badness);
+ /* Be sure that caches are maintained consistent.
+ This check is affected by scaling roundoff errors when compiling for
+ IPA this we skip it in that case. */
+ if (!edge->callee->count.ipa_p ())
+ {
+ sreal cached_badness = edge_badness (edge, false);
+
+ int old_size_est = estimate_edge_size (edge);
+ sreal old_time_est = estimate_edge_time (edge);
+ int old_hints_est = estimate_edge_hints (edge);
+
+ reset_edge_growth_cache (edge);
+ gcc_assert (old_size_est == estimate_edge_size (edge));
+ gcc_assert (old_time_est == estimate_edge_time (edge));
+ /* FIXME:
+
+ gcc_assert (old_hints_est == estimate_edge_hints (edge));
+
+ fails with profile feedback because some hints depends on
+ maybe_hot_edge_p predicate and because callee gets inlined to other
+ calls, the edge may become cold.
+ This ought to be fixed by computing relative probabilities
+ for given invocation but that will be better done once whole
+ code is converted to sreals. Disable for now and revert to "wrong"
+ value so enable/disable checking paths agree. */
+ edge_growth_cache[edge->uid].hints = old_hints_est + 1;
+
+ /* When updating the edge costs, we only decrease badness in the keys.
+ Increases of badness are handled lazilly; when we see key with out
+ of date value on it, we re-insert it now. */
+ current_badness = edge_badness (edge, false);
+ gcc_assert (cached_badness == current_badness);
+ gcc_assert (current_badness >= badness);
+ }
#else
current_badness = edge_badness (edge, false);
#endif
@@ -1945,11 +1946,11 @@ inline_small_functions (void)
? gimple_lineno ((const gimple *) edge->call_stmt)
: -1,
badness.to_double (),
- edge->frequency / (double)CGRAPH_FREQ_BASE);
- if (edge->count.initialized_p ())
+ edge->sreal_frequency ().to_double ());
+ if (edge->count.ipa ().initialized_p ())
{
fprintf (dump_file, " Called ");
- edge->count.dump (dump_file);
+ edge->count.ipa ().dump (dump_file);
fprintf (dump_file, "times\n");
}
if (dump_flags & TDF_DETAILS)
@@ -2255,8 +2256,8 @@ dump_overall_stats (void)
{
sreal time = ipa_fn_summaries->get (node)->time;
sum += time;
- if (node->count.initialized_p ())
- sum_weighted += time * node->count.to_gcov_type ();
+ if (node->count.ipa ().initialized_p ())
+ sum_weighted += time * node->count.ipa ().to_gcov_type ();
}
fprintf (dump_file, "Overall time estimate: "
"%f weighted by profile: "
@@ -2286,57 +2287,57 @@ dump_inline_stats (void)
{
if (e->inline_failed)
{
- if (e->count.initialized_p ())
- reason[(int) e->inline_failed][0] += e->count.to_gcov_type ();
- reason[(int) e->inline_failed][1] += e->frequency;
+ if (e->count.ipa ().initialized_p ())
+ reason[(int) e->inline_failed][0] += e->count.ipa ().to_gcov_type ();
+ reason[(int) e->inline_failed][1] += e->frequency ();
reason[(int) e->inline_failed][2] ++;
if (DECL_VIRTUAL_P (e->callee->decl)
- && e->count.initialized_p ())
+ && e->count.ipa ().initialized_p ())
{
if (e->indirect_inlining_edge)
- noninlined_virt_indir_cnt += e->count.to_gcov_type ();
+ noninlined_virt_indir_cnt += e->count.ipa ().to_gcov_type ();
else
- noninlined_virt_cnt += e->count.to_gcov_type ();
+ noninlined_virt_cnt += e->count.ipa ().to_gcov_type ();
}
- else if (e->count.initialized_p ())
+ else if (e->count.ipa ().initialized_p ())
{
if (e->indirect_inlining_edge)
- noninlined_indir_cnt += e->count.to_gcov_type ();
+ noninlined_indir_cnt += e->count.ipa ().to_gcov_type ();
else
- noninlined_cnt += e->count.to_gcov_type ();
+ noninlined_cnt += e->count.ipa ().to_gcov_type ();
}
}
- else if (e->count.initialized_p ())
+ else if (e->count.ipa ().initialized_p ())
{
if (e->speculative)
{
if (DECL_VIRTUAL_P (e->callee->decl))
- inlined_speculative_ply += e->count.to_gcov_type ();
+ inlined_speculative_ply += e->count.ipa ().to_gcov_type ();
else
- inlined_speculative += e->count.to_gcov_type ();
+ inlined_speculative += e->count.ipa ().to_gcov_type ();
}
else if (DECL_VIRTUAL_P (e->callee->decl))
{
if (e->indirect_inlining_edge)
- inlined_virt_indir_cnt += e->count.to_gcov_type ();
+ inlined_virt_indir_cnt += e->count.ipa ().to_gcov_type ();
else
- inlined_virt_cnt += e->count.to_gcov_type ();
+ inlined_virt_cnt += e->count.ipa ().to_gcov_type ();
}
else
{
if (e->indirect_inlining_edge)
- inlined_indir_cnt += e->count.to_gcov_type ();
+ inlined_indir_cnt += e->count.ipa ().to_gcov_type ();
else
- inlined_cnt += e->count.to_gcov_type ();
+ inlined_cnt += e->count.ipa ().to_gcov_type ();
}
}
}
for (e = node->indirect_calls; e; e = e->next_callee)
if (e->indirect_info->polymorphic
- & e->count.initialized_p ())
- indirect_poly_cnt += e->count.to_gcov_type ();
- else if (e->count.initialized_p ())
- indirect_cnt += e->count.to_gcov_type ();
+ & e->count.ipa ().initialized_p ())
+ indirect_poly_cnt += e->count.ipa ().to_gcov_type ();
+ else if (e->count.ipa ().initialized_p ())
+ indirect_cnt += e->count.ipa ().to_gcov_type ();
}
if (max_count.initialized_p ())
{
@@ -2383,7 +2384,6 @@ ipa_inline (void)
int cold;
bool remove_functions = false;
- cgraph_freq_base_rec = (sreal) 1 / (sreal) CGRAPH_FREQ_BASE;
percent_rec = (sreal) 1 / (sreal) 100;
order = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
@@ -2488,8 +2488,8 @@ ipa_inline (void)
next = edge->next_callee;
if (edge->speculative && !speculation_useful_p (edge, false))
{
- if (edge->count.initialized_p ())
- spec_rem += edge->count;
+ if (edge->count.ipa ().initialized_p ())
+ spec_rem += edge->count.ipa ();
edge->resolve_speculation ();
update = true;
remove_functions = true;
diff --git a/gcc/ipa-inline.h b/gcc/ipa-inline.h
index 4b9a1c2191a..ebf5d9718fb 100644
--- a/gcc/ipa-inline.h
+++ b/gcc/ipa-inline.h
@@ -59,8 +59,7 @@ bool inline_account_function_p (struct cgraph_node *node);
bool inline_call (struct cgraph_edge *, bool, vec<cgraph_edge *> *, int *, bool,
bool *callee_removed = NULL);
unsigned int inline_transform (struct cgraph_node *);
-void clone_inlined_nodes (struct cgraph_edge *e, bool, bool, int *,
- int freq_scale);
+void clone_inlined_nodes (struct cgraph_edge *e, bool, bool, int *);
extern int ncalls_inlined;
extern int nfunctions_inlined;
diff --git a/gcc/ipa-param-manipulation.c b/gcc/ipa-param-manipulation.c
new file mode 100644
index 00000000000..4f8f805b18e
--- /dev/null
+++ b/gcc/ipa-param-manipulation.c
@@ -0,0 +1,766 @@
+/* Manipulation of formal and actual parameters of functions and function
+ calls.
+ Copyright (C) 2017 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "backend.h"
+#include "rtl.h"
+#include "tree.h"
+#include "gimple.h"
+#include "ssa.h"
+#include "cgraph.h"
+#include "fold-const.h"
+#include "stor-layout.h"
+#include "gimplify.h"
+#include "gimple-iterator.h"
+#include "gimplify-me.h"
+#include "tree-dfa.h"
+#include "ipa-param-manipulation.h"
+#include "print-tree.h"
+#include "gimple-pretty-print.h"
+#include "builtins.h"
+
+/* Return a heap allocated vector containing formal parameters of FNDECL. */
+
+vec<tree>
+ipa_get_vector_of_formal_parms (tree fndecl)
+{
+ vec<tree> args;
+ int count;
+ tree parm;
+
+ gcc_assert (!flag_wpa);
+ count = 0;
+ for (parm = DECL_ARGUMENTS (fndecl); parm; parm = DECL_CHAIN (parm))
+ count++;
+
+ args.create (count);
+ for (parm = DECL_ARGUMENTS (fndecl); parm; parm = DECL_CHAIN (parm))
+ args.quick_push (parm);
+
+ return args;
+}
+
+/* Return a heap allocated vector containing types of formal parameters of
+ function type FNTYPE. */
+
+vec<tree>
+ipa_get_vector_of_formal_parm_types (tree fntype)
+{
+ vec<tree> types;
+ int count = 0;
+ tree t;
+
+ for (t = TYPE_ARG_TYPES (fntype); t; t = TREE_CHAIN (t))
+ count++;
+
+ types.create (count);
+ for (t = TYPE_ARG_TYPES (fntype); t; t = TREE_CHAIN (t))
+ types.quick_push (TREE_VALUE (t));
+
+ return types;
+}
+
+/* Modify the function declaration FNDECL and its type according to the plan in
+ ADJUSTMENTS. It also sets base fields of individual adjustments structures
+ to reflect the actual parameters being modified which are determined by the
+ base_index field. */
+
+void
+ipa_modify_formal_parameters (tree fndecl, ipa_parm_adjustment_vec adjustments)
+{
+ vec<tree> oparms = ipa_get_vector_of_formal_parms (fndecl);
+ tree orig_type = TREE_TYPE (fndecl);
+ tree old_arg_types = TYPE_ARG_TYPES (orig_type);
+
+ /* The following test is an ugly hack, some functions simply don't have any
+ arguments in their type. This is probably a bug but well... */
+ bool care_for_types = (old_arg_types != NULL_TREE);
+ bool last_parm_void;
+ vec<tree> otypes;
+ if (care_for_types)
+ {
+ last_parm_void = (TREE_VALUE (tree_last (old_arg_types))
+ == void_type_node);
+ otypes = ipa_get_vector_of_formal_parm_types (orig_type);
+ if (last_parm_void)
+ gcc_assert (oparms.length () + 1 == otypes.length ());
+ else
+ gcc_assert (oparms.length () == otypes.length ());
+ }
+ else
+ {
+ last_parm_void = false;
+ otypes.create (0);
+ }
+
+ int len = adjustments.length ();
+ tree *link = &DECL_ARGUMENTS (fndecl);
+ tree new_arg_types = NULL;
+ for (int i = 0; i < len; i++)
+ {
+ struct ipa_parm_adjustment *adj;
+ gcc_assert (link);
+
+ adj = &adjustments[i];
+ tree parm;
+ if (adj->op == IPA_PARM_OP_NEW)
+ parm = NULL;
+ else
+ parm = oparms[adj->base_index];
+ adj->base = parm;
+
+ if (adj->op == IPA_PARM_OP_COPY)
+ {
+ if (care_for_types)
+ new_arg_types = tree_cons (NULL_TREE, otypes[adj->base_index],
+ new_arg_types);
+ *link = parm;
+ link = &DECL_CHAIN (parm);
+ }
+ else if (adj->op != IPA_PARM_OP_REMOVE)
+ {
+ tree new_parm;
+ tree ptype;
+
+ if (adj->by_ref)
+ ptype = build_pointer_type (adj->type);
+ else
+ {
+ ptype = adj->type;
+ if (is_gimple_reg_type (ptype)
+ && TYPE_MODE (ptype) != BLKmode)
+ {
+ unsigned malign = GET_MODE_ALIGNMENT (TYPE_MODE (ptype));
+ if (TYPE_ALIGN (ptype) != malign)
+ ptype = build_aligned_type (ptype, malign);
+ }
+ }
+
+ if (care_for_types)
+ new_arg_types = tree_cons (NULL_TREE, ptype, new_arg_types);
+
+ new_parm = build_decl (UNKNOWN_LOCATION, PARM_DECL, NULL_TREE,
+ ptype);
+ const char *prefix = adj->arg_prefix ? adj->arg_prefix : "SYNTH";
+ DECL_NAME (new_parm) = create_tmp_var_name (prefix);
+ DECL_ARTIFICIAL (new_parm) = 1;
+ DECL_ARG_TYPE (new_parm) = ptype;
+ DECL_CONTEXT (new_parm) = fndecl;
+ TREE_USED (new_parm) = 1;
+ DECL_IGNORED_P (new_parm) = 1;
+ layout_decl (new_parm, 0);
+
+ if (adj->op == IPA_PARM_OP_NEW)
+ adj->base = NULL;
+ else
+ adj->base = parm;
+ adj->new_decl = new_parm;
+
+ *link = new_parm;
+ link = &DECL_CHAIN (new_parm);
+ }
+ }
+
+ *link = NULL_TREE;
+
+ tree new_reversed = NULL;
+ if (care_for_types)
+ {
+ new_reversed = nreverse (new_arg_types);
+ if (last_parm_void)
+ {
+ if (new_reversed)
+ TREE_CHAIN (new_arg_types) = void_list_node;
+ else
+ new_reversed = void_list_node;
+ }
+ }
+
+ /* Use copy_node to preserve as much as possible from original type
+ (debug info, attribute lists etc.)
+ Exception is METHOD_TYPEs must have THIS argument.
+ When we are asked to remove it, we need to build new FUNCTION_TYPE
+ instead. */
+ tree new_type = NULL;
+ if (TREE_CODE (orig_type) != METHOD_TYPE
+ || (adjustments[0].op == IPA_PARM_OP_COPY
+ && adjustments[0].base_index == 0))
+ {
+ new_type = build_distinct_type_copy (orig_type);
+ TYPE_ARG_TYPES (new_type) = new_reversed;
+ }
+ else
+ {
+ new_type
+ = build_distinct_type_copy (build_function_type (TREE_TYPE (orig_type),
+ new_reversed));
+ TYPE_CONTEXT (new_type) = TYPE_CONTEXT (orig_type);
+ DECL_VINDEX (fndecl) = NULL_TREE;
+ }
+
+ /* When signature changes, we need to clear builtin info. */
+ if (DECL_BUILT_IN (fndecl))
+ {
+ DECL_BUILT_IN_CLASS (fndecl) = NOT_BUILT_IN;
+ DECL_FUNCTION_CODE (fndecl) = (enum built_in_function) 0;
+ }
+
+ TREE_TYPE (fndecl) = new_type;
+ DECL_VIRTUAL_P (fndecl) = 0;
+ DECL_LANG_SPECIFIC (fndecl) = NULL;
+ otypes.release ();
+ oparms.release ();
+}
+
+/* Modify actual arguments of a function call CS as indicated in ADJUSTMENTS.
+ If this is a directly recursive call, CS must be NULL. Otherwise it must
+ contain the corresponding call graph edge. */
+
+void
+ipa_modify_call_arguments (struct cgraph_edge *cs, gcall *stmt,
+ ipa_parm_adjustment_vec adjustments)
+{
+ struct cgraph_node *current_node = cgraph_node::get (current_function_decl);
+ vec<tree> vargs;
+ vec<tree, va_gc> **debug_args = NULL;
+ gcall *new_stmt;
+ gimple_stmt_iterator gsi, prev_gsi;
+ tree callee_decl;
+ int i, len;
+
+ len = adjustments.length ();
+ vargs.create (len);
+ callee_decl = !cs ? gimple_call_fndecl (stmt) : cs->callee->decl;
+ current_node->remove_stmt_references (stmt);
+
+ gsi = gsi_for_stmt (stmt);
+ prev_gsi = gsi;
+ gsi_prev (&prev_gsi);
+ for (i = 0; i < len; i++)
+ {
+ struct ipa_parm_adjustment *adj;
+
+ adj = &adjustments[i];
+
+ if (adj->op == IPA_PARM_OP_COPY)
+ {
+ tree arg = gimple_call_arg (stmt, adj->base_index);
+
+ vargs.quick_push (arg);
+ }
+ else if (adj->op != IPA_PARM_OP_REMOVE)
+ {
+ tree expr, base, off;
+ location_t loc;
+ unsigned int deref_align = 0;
+ bool deref_base = false;
+
+ /* We create a new parameter out of the value of the old one, we can
+ do the following kind of transformations:
+
+ - A scalar passed by reference is converted to a scalar passed by
+ value. (adj->by_ref is false and the type of the original
+ actual argument is a pointer to a scalar).
+
+ - A part of an aggregate is passed instead of the whole aggregate.
+ The part can be passed either by value or by reference, this is
+ determined by value of adj->by_ref. Moreover, the code below
+ handles both situations when the original aggregate is passed by
+ value (its type is not a pointer) and when it is passed by
+ reference (it is a pointer to an aggregate).
+
+ When the new argument is passed by reference (adj->by_ref is true)
+ it must be a part of an aggregate and therefore we form it by
+ simply taking the address of a reference inside the original
+ aggregate. */
+
+ poly_int64 byte_offset = exact_div (adj->offset, BITS_PER_UNIT);
+ base = gimple_call_arg (stmt, adj->base_index);
+ loc = DECL_P (base) ? DECL_SOURCE_LOCATION (base)
+ : EXPR_LOCATION (base);
+
+ if (TREE_CODE (base) != ADDR_EXPR
+ && POINTER_TYPE_P (TREE_TYPE (base)))
+ off = build_int_cst (adj->alias_ptr_type, byte_offset);
+ else
+ {
+ poly_int64 base_offset;
+ tree prev_base;
+ bool addrof;
+
+ if (TREE_CODE (base) == ADDR_EXPR)
+ {
+ base = TREE_OPERAND (base, 0);
+ addrof = true;
+ }
+ else
+ addrof = false;
+ prev_base = base;
+ base = get_addr_base_and_unit_offset (base, &base_offset);
+ /* Aggregate arguments can have non-invariant addresses. */
+ if (!base)
+ {
+ base = build_fold_addr_expr (prev_base);
+ off = build_int_cst (adj->alias_ptr_type, byte_offset);
+ }
+ else if (TREE_CODE (base) == MEM_REF)
+ {
+ if (!addrof)
+ {
+ deref_base = true;
+ deref_align = TYPE_ALIGN (TREE_TYPE (base));
+ }
+ off = build_int_cst (adj->alias_ptr_type,
+ base_offset + byte_offset);
+ off = int_const_binop (PLUS_EXPR, TREE_OPERAND (base, 1),
+ off);
+ base = TREE_OPERAND (base, 0);
+ }
+ else
+ {
+ off = build_int_cst (adj->alias_ptr_type,
+ base_offset + byte_offset);
+ base = build_fold_addr_expr (base);
+ }
+ }
+
+ if (!adj->by_ref)
+ {
+ tree type = adj->type;
+ unsigned int align;
+ unsigned HOST_WIDE_INT misalign;
+
+ if (deref_base)
+ {
+ align = deref_align;
+ misalign = 0;
+ }
+ else
+ {
+ get_pointer_alignment_1 (base, &align, &misalign);
+ if (TYPE_ALIGN (type) > align)
+ align = TYPE_ALIGN (type);
+ }
+ misalign += (offset_int::from (wi::to_wide (off),
+ SIGNED).to_short_addr ()
+ * BITS_PER_UNIT);
+ misalign = misalign & (align - 1);
+ if (misalign != 0)
+ align = least_bit_hwi (misalign);
+ if (align < TYPE_ALIGN (type))
+ type = build_aligned_type (type, align);
+ base = force_gimple_operand_gsi (&gsi, base,
+ true, NULL, true, GSI_SAME_STMT);
+ expr = fold_build2_loc (loc, MEM_REF, type, base, off);
+ REF_REVERSE_STORAGE_ORDER (expr) = adj->reverse;
+ /* If expr is not a valid gimple call argument emit
+ a load into a temporary. */
+ if (is_gimple_reg_type (TREE_TYPE (expr)))
+ {
+ gimple *tem = gimple_build_assign (NULL_TREE, expr);
+ if (gimple_in_ssa_p (cfun))
+ {
+ gimple_set_vuse (tem, gimple_vuse (stmt));
+ expr = make_ssa_name (TREE_TYPE (expr), tem);
+ }
+ else
+ expr = create_tmp_reg (TREE_TYPE (expr));
+ gimple_assign_set_lhs (tem, expr);
+ gsi_insert_before (&gsi, tem, GSI_SAME_STMT);
+ }
+ }
+ else
+ {
+ expr = fold_build2_loc (loc, MEM_REF, adj->type, base, off);
+ REF_REVERSE_STORAGE_ORDER (expr) = adj->reverse;
+ expr = build_fold_addr_expr (expr);
+ expr = force_gimple_operand_gsi (&gsi, expr,
+ true, NULL, true, GSI_SAME_STMT);
+ }
+ vargs.quick_push (expr);
+ }
+ if (adj->op != IPA_PARM_OP_COPY && MAY_HAVE_DEBUG_STMTS)
+ {
+ unsigned int ix;
+ tree ddecl = NULL_TREE, origin = DECL_ORIGIN (adj->base), arg;
+ gimple *def_temp;
+
+ arg = gimple_call_arg (stmt, adj->base_index);
+ if (!useless_type_conversion_p (TREE_TYPE (origin), TREE_TYPE (arg)))
+ {
+ if (!fold_convertible_p (TREE_TYPE (origin), arg))
+ continue;
+ arg = fold_convert_loc (gimple_location (stmt),
+ TREE_TYPE (origin), arg);
+ }
+ if (debug_args == NULL)
+ debug_args = decl_debug_args_insert (callee_decl);
+ for (ix = 0; vec_safe_iterate (*debug_args, ix, &ddecl); ix += 2)
+ if (ddecl == origin)
+ {
+ ddecl = (**debug_args)[ix + 1];
+ break;
+ }
+ if (ddecl == NULL)
+ {
+ ddecl = make_node (DEBUG_EXPR_DECL);
+ DECL_ARTIFICIAL (ddecl) = 1;
+ TREE_TYPE (ddecl) = TREE_TYPE (origin);
+ SET_DECL_MODE (ddecl, DECL_MODE (origin));
+
+ vec_safe_push (*debug_args, origin);
+ vec_safe_push (*debug_args, ddecl);
+ }
+ def_temp = gimple_build_debug_bind (ddecl, unshare_expr (arg), stmt);
+ gsi_insert_before (&gsi, def_temp, GSI_SAME_STMT);
+ }
+ }
+
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "replacing stmt:");
+ print_gimple_stmt (dump_file, gsi_stmt (gsi), 0);
+ }
+
+ new_stmt = gimple_build_call_vec (callee_decl, vargs);
+ vargs.release ();
+ if (gimple_call_lhs (stmt))
+ gimple_call_set_lhs (new_stmt, gimple_call_lhs (stmt));
+
+ gimple_set_block (new_stmt, gimple_block (stmt));
+ if (gimple_has_location (stmt))
+ gimple_set_location (new_stmt, gimple_location (stmt));
+ gimple_call_set_chain (new_stmt, gimple_call_chain (stmt));
+ gimple_call_copy_flags (new_stmt, stmt);
+ if (gimple_in_ssa_p (cfun))
+ {
+ gimple_set_vuse (new_stmt, gimple_vuse (stmt));
+ if (gimple_vdef (stmt))
+ {
+ gimple_set_vdef (new_stmt, gimple_vdef (stmt));
+ SSA_NAME_DEF_STMT (gimple_vdef (new_stmt)) = new_stmt;
+ }
+ }
+
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "with stmt:");
+ print_gimple_stmt (dump_file, new_stmt, 0);
+ fprintf (dump_file, "\n");
+ }
+ gsi_replace (&gsi, new_stmt, true);
+ if (cs)
+ cs->set_call_stmt (new_stmt);
+ do
+ {
+ current_node->record_stmt_references (gsi_stmt (gsi));
+ gsi_prev (&gsi);
+ }
+ while (gsi_stmt (gsi) != gsi_stmt (prev_gsi));
+}
+
+/* Return true iff BASE_INDEX is in ADJUSTMENTS more than once. */
+
+static bool
+index_in_adjustments_multiple_times_p (int base_index,
+ ipa_parm_adjustment_vec adjustments)
+{
+ int i, len = adjustments.length ();
+ bool one = false;
+
+ for (i = 0; i < len; i++)
+ {
+ struct ipa_parm_adjustment *adj;
+ adj = &adjustments[i];
+
+ if (adj->base_index == base_index)
+ {
+ if (one)
+ return true;
+ else
+ one = true;
+ }
+ }
+ return false;
+}
+
+/* Return adjustments that should have the same effect on function parameters
+ and call arguments as if they were first changed according to adjustments in
+ INNER and then by adjustments in OUTER. */
+
+ipa_parm_adjustment_vec
+ipa_combine_adjustments (ipa_parm_adjustment_vec inner,
+ ipa_parm_adjustment_vec outer)
+{
+ int i, outlen = outer.length ();
+ int inlen = inner.length ();
+ int removals = 0;
+ ipa_parm_adjustment_vec adjustments, tmp;
+
+ tmp.create (inlen);
+ for (i = 0; i < inlen; i++)
+ {
+ struct ipa_parm_adjustment *n;
+ n = &inner[i];
+
+ if (n->op == IPA_PARM_OP_REMOVE)
+ removals++;
+ else
+ {
+ /* FIXME: Handling of new arguments are not implemented yet. */
+ gcc_assert (n->op != IPA_PARM_OP_NEW);
+ tmp.quick_push (*n);
+ }
+ }
+
+ adjustments.create (outlen + removals);
+ for (i = 0; i < outlen; i++)
+ {
+ struct ipa_parm_adjustment r;
+ struct ipa_parm_adjustment *out = &outer[i];
+ struct ipa_parm_adjustment *in = &tmp[out->base_index];
+
+ memset (&r, 0, sizeof (r));
+ gcc_assert (in->op != IPA_PARM_OP_REMOVE);
+ if (out->op == IPA_PARM_OP_REMOVE)
+ {
+ if (!index_in_adjustments_multiple_times_p (in->base_index, tmp))
+ {
+ r.op = IPA_PARM_OP_REMOVE;
+ adjustments.quick_push (r);
+ }
+ continue;
+ }
+ else
+ {
+ /* FIXME: Handling of new arguments are not implemented yet. */
+ gcc_assert (out->op != IPA_PARM_OP_NEW);
+ }
+
+ r.base_index = in->base_index;
+ r.type = out->type;
+
+ /* FIXME: Create nonlocal value too. */
+
+ if (in->op == IPA_PARM_OP_COPY && out->op == IPA_PARM_OP_COPY)
+ r.op = IPA_PARM_OP_COPY;
+ else if (in->op == IPA_PARM_OP_COPY)
+ r.offset = out->offset;
+ else if (out->op == IPA_PARM_OP_COPY)
+ r.offset = in->offset;
+ else
+ r.offset = in->offset + out->offset;
+ adjustments.quick_push (r);
+ }
+
+ for (i = 0; i < inlen; i++)
+ {
+ struct ipa_parm_adjustment *n = &inner[i];
+
+ if (n->op == IPA_PARM_OP_REMOVE)
+ adjustments.quick_push (*n);
+ }
+
+ tmp.release ();
+ return adjustments;
+}
+
+/* If T is an SSA_NAME, return NULL if it is not a default def or
+ return its base variable if it is. If IGNORE_DEFAULT_DEF is true,
+ the base variable is always returned, regardless if it is a default
+ def. Return T if it is not an SSA_NAME. */
+
+static tree
+get_ssa_base_param (tree t, bool ignore_default_def)
+{
+ if (TREE_CODE (t) == SSA_NAME)
+ {
+ if (ignore_default_def || SSA_NAME_IS_DEFAULT_DEF (t))
+ return SSA_NAME_VAR (t);
+ else
+ return NULL_TREE;
+ }
+ return t;
+}
+
+/* Given an expression, return an adjustment entry specifying the
+ transformation to be done on EXPR. If no suitable adjustment entry
+ was found, returns NULL.
+
+ If IGNORE_DEFAULT_DEF is set, consider SSA_NAMEs which are not a
+ default def, otherwise bail on them.
+
+ If CONVERT is non-NULL, this function will set *CONVERT if the
+ expression provided is a component reference. ADJUSTMENTS is the
+ adjustments vector. */
+
+ipa_parm_adjustment *
+ipa_get_adjustment_candidate (tree **expr, bool *convert,
+ ipa_parm_adjustment_vec adjustments,
+ bool ignore_default_def)
+{
+ if (TREE_CODE (**expr) == BIT_FIELD_REF
+ || TREE_CODE (**expr) == IMAGPART_EXPR
+ || TREE_CODE (**expr) == REALPART_EXPR)
+ {
+ *expr = &TREE_OPERAND (**expr, 0);
+ if (convert)
+ *convert = true;
+ }
+
+ poly_int64 offset, size, max_size;
+ bool reverse;
+ tree base
+ = get_ref_base_and_extent (**expr, &offset, &size, &max_size, &reverse);
+ if (!base || !known_size_p (size) || !known_size_p (max_size))
+ return NULL;
+
+ if (TREE_CODE (base) == MEM_REF)
+ {
+ offset += mem_ref_offset (base).force_shwi () * BITS_PER_UNIT;
+ base = TREE_OPERAND (base, 0);
+ }
+
+ base = get_ssa_base_param (base, ignore_default_def);
+ if (!base || TREE_CODE (base) != PARM_DECL)
+ return NULL;
+
+ struct ipa_parm_adjustment *cand = NULL;
+ unsigned int len = adjustments.length ();
+ for (unsigned i = 0; i < len; i++)
+ {
+ struct ipa_parm_adjustment *adj = &adjustments[i];
+
+ if (adj->base == base
+ && (must_eq (adj->offset, offset) || adj->op == IPA_PARM_OP_REMOVE))
+ {
+ cand = adj;
+ break;
+ }
+ }
+
+ if (!cand || cand->op == IPA_PARM_OP_COPY || cand->op == IPA_PARM_OP_REMOVE)
+ return NULL;
+ return cand;
+}
+
+/* If the expression *EXPR should be replaced by a reduction of a parameter, do
+ so. ADJUSTMENTS is a pointer to a vector of adjustments. CONVERT
+ specifies whether the function should care about type incompatibility the
+ current and new expressions. If it is false, the function will leave
+ incompatibility issues to the caller. Return true iff the expression
+ was modified. */
+
+bool
+ipa_modify_expr (tree *expr, bool convert,
+ ipa_parm_adjustment_vec adjustments)
+{
+ struct ipa_parm_adjustment *cand
+ = ipa_get_adjustment_candidate (&expr, &convert, adjustments, false);
+ if (!cand)
+ return false;
+
+ tree src;
+ if (cand->by_ref)
+ {
+ src = build_simple_mem_ref (cand->new_decl);
+ REF_REVERSE_STORAGE_ORDER (src) = cand->reverse;
+ }
+ else
+ src = cand->new_decl;
+
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "About to replace expr ");
+ print_generic_expr (dump_file, *expr);
+ fprintf (dump_file, " with ");
+ print_generic_expr (dump_file, src);
+ fprintf (dump_file, "\n");
+ }
+
+ if (convert && !useless_type_conversion_p (TREE_TYPE (*expr), cand->type))
+ {
+ tree vce = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (*expr), src);
+ *expr = vce;
+ }
+ else
+ *expr = src;
+ return true;
+}
+
+/* Dump the adjustments in the vector ADJUSTMENTS to dump_file in a human
+ friendly way, assuming they are meant to be applied to FNDECL. */
+
+void
+ipa_dump_param_adjustments (FILE *file, ipa_parm_adjustment_vec adjustments,
+ tree fndecl)
+{
+ int i, len = adjustments.length ();
+ bool first = true;
+ vec<tree> parms = ipa_get_vector_of_formal_parms (fndecl);
+
+ fprintf (file, "IPA param adjustments: ");
+ for (i = 0; i < len; i++)
+ {
+ struct ipa_parm_adjustment *adj;
+ adj = &adjustments[i];
+
+ if (!first)
+ fprintf (file, " ");
+ else
+ first = false;
+
+ fprintf (file, "%i. base_index: %i - ", i, adj->base_index);
+ print_generic_expr (file, parms[adj->base_index]);
+ if (adj->base)
+ {
+ fprintf (file, ", base: ");
+ print_generic_expr (file, adj->base);
+ }
+ if (adj->new_decl)
+ {
+ fprintf (file, ", new_decl: ");
+ print_generic_expr (file, adj->new_decl);
+ }
+ if (adj->new_ssa_base)
+ {
+ fprintf (file, ", new_ssa_base: ");
+ print_generic_expr (file, adj->new_ssa_base);
+ }
+
+ if (adj->op == IPA_PARM_OP_COPY)
+ fprintf (file, ", copy_param");
+ else if (adj->op == IPA_PARM_OP_REMOVE)
+ fprintf (file, ", remove_param");
+ else
+ {
+ fprintf (file, ", offset ");
+ print_dec (adj->offset, file);
+ }
+ if (adj->by_ref)
+ fprintf (file, ", by_ref");
+ print_node_brief (file, ", type: ", adj->type, 0);
+ fprintf (file, "\n");
+ }
+ parms.release ();
+}
+
diff --git a/gcc/ipa-param-manipulation.h b/gcc/ipa-param-manipulation.h
new file mode 100644
index 00000000000..7bf942fe2c3
--- /dev/null
+++ b/gcc/ipa-param-manipulation.h
@@ -0,0 +1,120 @@
+/* Manipulation of formal and actual parameters of functions and function
+ calls.
+ Copyright (C) 2017 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef IPA_PARAM_MANIPULATION_H
+#define IPA_PARAM_MANIPULATION_H
+
+/* Operation to be performed for the parameter in ipa_parm_adjustment
+ below. */
+enum ipa_parm_op {
+ IPA_PARM_OP_NONE,
+
+ /* This describes a brand new parameter.
+
+ The field `type' should be set to the new type, `arg_prefix'
+ should be set to the string prefix for the new DECL_NAME, and
+ `new_decl' will ultimately hold the newly created argument. */
+ IPA_PARM_OP_NEW,
+
+ /* This new parameter is an unmodified parameter at index base_index. */
+ IPA_PARM_OP_COPY,
+
+ /* This adjustment describes a parameter that is about to be removed
+ completely. Most users will probably need to book keep those so that they
+ don't leave behinfd any non default def ssa names belonging to them. */
+ IPA_PARM_OP_REMOVE
+};
+
+/* Structure to describe transformations of formal parameters and actual
+ arguments. Each instance describes one new parameter and they are meant to
+ be stored in a vector. Additionally, most users will probably want to store
+ adjustments about parameters that are being removed altogether so that SSA
+ names belonging to them can be replaced by SSA names of an artificial
+ variable. */
+struct ipa_parm_adjustment
+{
+ /* The original PARM_DECL itself, helpful for processing of the body of the
+ function itself. Intended for traversing function bodies.
+ ipa_modify_formal_parameters, ipa_modify_call_arguments and
+ ipa_combine_adjustments ignore this and use base_index.
+ ipa_modify_formal_parameters actually sets this. */
+ tree base;
+
+ /* Type of the new parameter. However, if by_ref is true, the real type will
+ be a pointer to this type. */
+ tree type;
+
+ /* Alias refrerence type to be used in MEM_REFs when adjusting caller
+ arguments. */
+ tree alias_ptr_type;
+
+ /* The new declaration when creating/replacing a parameter. Created
+ by ipa_modify_formal_parameters, useful for functions modifying
+ the body accordingly. For brand new arguments, this is the newly
+ created argument. */
+ tree new_decl;
+
+ /* New declaration of a substitute variable that we may use to replace all
+ non-default-def ssa names when a parm decl is going away. */
+ tree new_ssa_base;
+
+ /* If non-NULL and the original parameter is to be removed (copy_param below
+ is NULL), this is going to be its nonlocalized vars value. */
+ tree nonlocal_value;
+
+ /* This holds the prefix to be used for the new DECL_NAME. */
+ const char *arg_prefix;
+
+ /* Offset into the original parameter (for the cases when the new parameter
+ is a component of an original one). */
+ poly_int64_pod offset;
+
+ /* Zero based index of the original parameter this one is based on. */
+ int base_index;
+
+ /* Whether this parameter is a new parameter, a copy of an old one,
+ or one about to be removed. */
+ enum ipa_parm_op op;
+
+ /* Storage order of the original parameter (for the cases when the new
+ parameter is a component of an original one). */
+ unsigned reverse : 1;
+
+ /* The parameter is to be passed by reference. */
+ unsigned by_ref : 1;
+};
+
+typedef vec<ipa_parm_adjustment> ipa_parm_adjustment_vec;
+
+vec<tree> ipa_get_vector_of_formal_parms (tree fndecl);
+vec<tree> ipa_get_vector_of_formal_parm_types (tree fntype);
+void ipa_modify_formal_parameters (tree fndecl, ipa_parm_adjustment_vec);
+void ipa_modify_call_arguments (struct cgraph_edge *, gcall *,
+ ipa_parm_adjustment_vec);
+ipa_parm_adjustment_vec ipa_combine_adjustments (ipa_parm_adjustment_vec,
+ ipa_parm_adjustment_vec);
+void ipa_dump_param_adjustments (FILE *, ipa_parm_adjustment_vec, tree);
+
+bool ipa_modify_expr (tree *, bool, ipa_parm_adjustment_vec);
+ipa_parm_adjustment *ipa_get_adjustment_candidate (tree **, bool *,
+ ipa_parm_adjustment_vec,
+ bool);
+
+#endif /* IPA_PARAM_MANIPULATION_H */
diff --git a/gcc/ipa-profile.c b/gcc/ipa-profile.c
index 8eb03dd7c24..cdcd0505f03 100644
--- a/gcc/ipa-profile.c
+++ b/gcc/ipa-profile.c
@@ -340,7 +340,7 @@ ipa_propagate_frequency_1 (struct cgraph_node *node, void *data)
&& edge->caller->global.inlined_to->frequency
!= NODE_FREQUENCY_UNLIKELY_EXECUTED)))
d->maybe_unlikely_executed = false;
- if (!edge->frequency)
+ if (!edge->frequency ())
continue;
switch (edge->caller->frequency)
{
@@ -431,11 +431,11 @@ ipa_propagate_frequency (struct cgraph_node *node)
}
/* With profile we can decide on hot/normal based on count. */
- if (node->count.initialized_p ())
+ if (node->count. ipa().initialized_p ())
{
bool hot = false;
- if (!(node->count == profile_count::zero ())
- && node->count >= get_hot_bb_threshold ())
+ if (!(node->count. ipa() == profile_count::zero ())
+ && node->count. ipa() >= get_hot_bb_threshold ())
hot = true;
if (!hot)
hot |= contains_hot_call_p (node);
@@ -667,9 +667,7 @@ ipa_profile (void)
e->make_speculative
(n2,
e->count.apply_probability
- (e->indirect_info->common_target_probability),
- apply_scale (e->frequency,
- e->indirect_info->common_target_probability));
+ (e->indirect_info->common_target_probability));
update = true;
}
}
diff --git a/gcc/ipa-prop.c b/gcc/ipa-prop.c
index c535c6b3924..a3394709ee3 100644
--- a/gcc/ipa-prop.c
+++ b/gcc/ipa-prop.c
@@ -2973,7 +2973,7 @@ ipa_make_edge_direct_to_target (struct cgraph_edge *ie, tree target,
}
/* make_speculative will update ie's cost to direct call cost. */
ie = ie->make_speculative
- (callee, ie->count.apply_scale (8, 10), ie->frequency * 8 / 10);
+ (callee, ie->count.apply_scale (8, 10));
}
return ie;
@@ -4041,730 +4041,6 @@ ipa_print_all_params (FILE * f)
ipa_print_node_params (f, node);
}
-/* Return a heap allocated vector containing formal parameters of FNDECL. */
-
-vec<tree>
-ipa_get_vector_of_formal_parms (tree fndecl)
-{
- vec<tree> args;
- int count;
- tree parm;
-
- gcc_assert (!flag_wpa);
- count = count_formal_params (fndecl);
- args.create (count);
- for (parm = DECL_ARGUMENTS (fndecl); parm; parm = DECL_CHAIN (parm))
- args.quick_push (parm);
-
- return args;
-}
-
-/* Return a heap allocated vector containing types of formal parameters of
- function type FNTYPE. */
-
-vec<tree>
-ipa_get_vector_of_formal_parm_types (tree fntype)
-{
- vec<tree> types;
- int count = 0;
- tree t;
-
- for (t = TYPE_ARG_TYPES (fntype); t; t = TREE_CHAIN (t))
- count++;
-
- types.create (count);
- for (t = TYPE_ARG_TYPES (fntype); t; t = TREE_CHAIN (t))
- types.quick_push (TREE_VALUE (t));
-
- return types;
-}
-
-/* Modify the function declaration FNDECL and its type according to the plan in
- ADJUSTMENTS. It also sets base fields of individual adjustments structures
- to reflect the actual parameters being modified which are determined by the
- base_index field. */
-
-void
-ipa_modify_formal_parameters (tree fndecl, ipa_parm_adjustment_vec adjustments)
-{
- vec<tree> oparms = ipa_get_vector_of_formal_parms (fndecl);
- tree orig_type = TREE_TYPE (fndecl);
- tree old_arg_types = TYPE_ARG_TYPES (orig_type);
-
- /* The following test is an ugly hack, some functions simply don't have any
- arguments in their type. This is probably a bug but well... */
- bool care_for_types = (old_arg_types != NULL_TREE);
- bool last_parm_void;
- vec<tree> otypes;
- if (care_for_types)
- {
- last_parm_void = (TREE_VALUE (tree_last (old_arg_types))
- == void_type_node);
- otypes = ipa_get_vector_of_formal_parm_types (orig_type);
- if (last_parm_void)
- gcc_assert (oparms.length () + 1 == otypes.length ());
- else
- gcc_assert (oparms.length () == otypes.length ());
- }
- else
- {
- last_parm_void = false;
- otypes.create (0);
- }
-
- int len = adjustments.length ();
- tree *link = &DECL_ARGUMENTS (fndecl);
- tree new_arg_types = NULL;
- for (int i = 0; i < len; i++)
- {
- struct ipa_parm_adjustment *adj;
- gcc_assert (link);
-
- adj = &adjustments[i];
- tree parm;
- if (adj->op == IPA_PARM_OP_NEW)
- parm = NULL;
- else
- parm = oparms[adj->base_index];
- adj->base = parm;
-
- if (adj->op == IPA_PARM_OP_COPY)
- {
- if (care_for_types)
- new_arg_types = tree_cons (NULL_TREE, otypes[adj->base_index],
- new_arg_types);
- *link = parm;
- link = &DECL_CHAIN (parm);
- }
- else if (adj->op != IPA_PARM_OP_REMOVE)
- {
- tree new_parm;
- tree ptype;
-
- if (adj->by_ref)
- ptype = build_pointer_type (adj->type);
- else
- {
- ptype = adj->type;
- if (is_gimple_reg_type (ptype)
- && TYPE_MODE (ptype) != BLKmode)
- {
- unsigned malign = GET_MODE_ALIGNMENT (TYPE_MODE (ptype));
- if (TYPE_ALIGN (ptype) != malign)
- ptype = build_aligned_type (ptype, malign);
- }
- }
-
- if (care_for_types)
- new_arg_types = tree_cons (NULL_TREE, ptype, new_arg_types);
-
- new_parm = build_decl (UNKNOWN_LOCATION, PARM_DECL, NULL_TREE,
- ptype);
- const char *prefix = adj->arg_prefix ? adj->arg_prefix : "SYNTH";
- DECL_NAME (new_parm) = create_tmp_var_name (prefix);
- DECL_ARTIFICIAL (new_parm) = 1;
- DECL_ARG_TYPE (new_parm) = ptype;
- DECL_CONTEXT (new_parm) = fndecl;
- TREE_USED (new_parm) = 1;
- DECL_IGNORED_P (new_parm) = 1;
- layout_decl (new_parm, 0);
-
- if (adj->op == IPA_PARM_OP_NEW)
- adj->base = NULL;
- else
- adj->base = parm;
- adj->new_decl = new_parm;
-
- *link = new_parm;
- link = &DECL_CHAIN (new_parm);
- }
- }
-
- *link = NULL_TREE;
-
- tree new_reversed = NULL;
- if (care_for_types)
- {
- new_reversed = nreverse (new_arg_types);
- if (last_parm_void)
- {
- if (new_reversed)
- TREE_CHAIN (new_arg_types) = void_list_node;
- else
- new_reversed = void_list_node;
- }
- }
-
- /* Use copy_node to preserve as much as possible from original type
- (debug info, attribute lists etc.)
- Exception is METHOD_TYPEs must have THIS argument.
- When we are asked to remove it, we need to build new FUNCTION_TYPE
- instead. */
- tree new_type = NULL;
- if (TREE_CODE (orig_type) != METHOD_TYPE
- || (adjustments[0].op == IPA_PARM_OP_COPY
- && adjustments[0].base_index == 0))
- {
- new_type = build_distinct_type_copy (orig_type);
- TYPE_ARG_TYPES (new_type) = new_reversed;
- }
- else
- {
- new_type
- = build_distinct_type_copy (build_function_type (TREE_TYPE (orig_type),
- new_reversed));
- TYPE_CONTEXT (new_type) = TYPE_CONTEXT (orig_type);
- DECL_VINDEX (fndecl) = NULL_TREE;
- }
-
- /* When signature changes, we need to clear builtin info. */
- if (DECL_BUILT_IN (fndecl))
- {
- DECL_BUILT_IN_CLASS (fndecl) = NOT_BUILT_IN;
- DECL_FUNCTION_CODE (fndecl) = (enum built_in_function) 0;
- }
-
- TREE_TYPE (fndecl) = new_type;
- DECL_VIRTUAL_P (fndecl) = 0;
- DECL_LANG_SPECIFIC (fndecl) = NULL;
- otypes.release ();
- oparms.release ();
-}
-
-/* Modify actual arguments of a function call CS as indicated in ADJUSTMENTS.
- If this is a directly recursive call, CS must be NULL. Otherwise it must
- contain the corresponding call graph edge. */
-
-void
-ipa_modify_call_arguments (struct cgraph_edge *cs, gcall *stmt,
- ipa_parm_adjustment_vec adjustments)
-{
- struct cgraph_node *current_node = cgraph_node::get (current_function_decl);
- vec<tree> vargs;
- vec<tree, va_gc> **debug_args = NULL;
- gcall *new_stmt;
- gimple_stmt_iterator gsi, prev_gsi;
- tree callee_decl;
- int i, len;
-
- len = adjustments.length ();
- vargs.create (len);
- callee_decl = !cs ? gimple_call_fndecl (stmt) : cs->callee->decl;
- current_node->remove_stmt_references (stmt);
-
- gsi = gsi_for_stmt (stmt);
- prev_gsi = gsi;
- gsi_prev (&prev_gsi);
- for (i = 0; i < len; i++)
- {
- struct ipa_parm_adjustment *adj;
-
- adj = &adjustments[i];
-
- if (adj->op == IPA_PARM_OP_COPY)
- {
- tree arg = gimple_call_arg (stmt, adj->base_index);
-
- vargs.quick_push (arg);
- }
- else if (adj->op != IPA_PARM_OP_REMOVE)
- {
- tree expr, base, off;
- location_t loc;
- unsigned int deref_align = 0;
- bool deref_base = false;
-
- /* We create a new parameter out of the value of the old one, we can
- do the following kind of transformations:
-
- - A scalar passed by reference is converted to a scalar passed by
- value. (adj->by_ref is false and the type of the original
- actual argument is a pointer to a scalar).
-
- - A part of an aggregate is passed instead of the whole aggregate.
- The part can be passed either by value or by reference, this is
- determined by value of adj->by_ref. Moreover, the code below
- handles both situations when the original aggregate is passed by
- value (its type is not a pointer) and when it is passed by
- reference (it is a pointer to an aggregate).
-
- When the new argument is passed by reference (adj->by_ref is true)
- it must be a part of an aggregate and therefore we form it by
- simply taking the address of a reference inside the original
- aggregate. */
-
- poly_int64 byte_offset = exact_div (adj->offset, BITS_PER_UNIT);
- base = gimple_call_arg (stmt, adj->base_index);
- loc = DECL_P (base) ? DECL_SOURCE_LOCATION (base)
- : EXPR_LOCATION (base);
-
- if (TREE_CODE (base) != ADDR_EXPR
- && POINTER_TYPE_P (TREE_TYPE (base)))
- off = build_int_cst (adj->alias_ptr_type, byte_offset);
- else
- {
- poly_int64 base_offset;
- tree prev_base;
- bool addrof;
-
- if (TREE_CODE (base) == ADDR_EXPR)
- {
- base = TREE_OPERAND (base, 0);
- addrof = true;
- }
- else
- addrof = false;
- prev_base = base;
- base = get_addr_base_and_unit_offset (base, &base_offset);
- /* Aggregate arguments can have non-invariant addresses. */
- if (!base)
- {
- base = build_fold_addr_expr (prev_base);
- off = build_int_cst (adj->alias_ptr_type, byte_offset);
- }
- else if (TREE_CODE (base) == MEM_REF)
- {
- if (!addrof)
- {
- deref_base = true;
- deref_align = TYPE_ALIGN (TREE_TYPE (base));
- }
- off = build_int_cst (adj->alias_ptr_type,
- base_offset + byte_offset);
- off = int_const_binop (PLUS_EXPR, TREE_OPERAND (base, 1),
- off);
- base = TREE_OPERAND (base, 0);
- }
- else
- {
- off = build_int_cst (adj->alias_ptr_type,
- base_offset + byte_offset);
- base = build_fold_addr_expr (base);
- }
- }
-
- if (!adj->by_ref)
- {
- tree type = adj->type;
- unsigned int align;
- unsigned HOST_WIDE_INT misalign;
-
- if (deref_base)
- {
- align = deref_align;
- misalign = 0;
- }
- else
- {
- get_pointer_alignment_1 (base, &align, &misalign);
- if (TYPE_ALIGN (type) > align)
- align = TYPE_ALIGN (type);
- }
- misalign += (offset_int::from (wi::to_wide (off),
- SIGNED).to_short_addr ()
- * BITS_PER_UNIT);
- misalign = misalign & (align - 1);
- if (misalign != 0)
- align = least_bit_hwi (misalign);
- if (align < TYPE_ALIGN (type))
- type = build_aligned_type (type, align);
- base = force_gimple_operand_gsi (&gsi, base,
- true, NULL, true, GSI_SAME_STMT);
- expr = fold_build2_loc (loc, MEM_REF, type, base, off);
- REF_REVERSE_STORAGE_ORDER (expr) = adj->reverse;
- /* If expr is not a valid gimple call argument emit
- a load into a temporary. */
- if (is_gimple_reg_type (TREE_TYPE (expr)))
- {
- gimple *tem = gimple_build_assign (NULL_TREE, expr);
- if (gimple_in_ssa_p (cfun))
- {
- gimple_set_vuse (tem, gimple_vuse (stmt));
- expr = make_ssa_name (TREE_TYPE (expr), tem);
- }
- else
- expr = create_tmp_reg (TREE_TYPE (expr));
- gimple_assign_set_lhs (tem, expr);
- gsi_insert_before (&gsi, tem, GSI_SAME_STMT);
- }
- }
- else
- {
- expr = fold_build2_loc (loc, MEM_REF, adj->type, base, off);
- REF_REVERSE_STORAGE_ORDER (expr) = adj->reverse;
- expr = build_fold_addr_expr (expr);
- expr = force_gimple_operand_gsi (&gsi, expr,
- true, NULL, true, GSI_SAME_STMT);
- }
- vargs.quick_push (expr);
- }
- if (adj->op != IPA_PARM_OP_COPY && MAY_HAVE_DEBUG_STMTS)
- {
- unsigned int ix;
- tree ddecl = NULL_TREE, origin = DECL_ORIGIN (adj->base), arg;
- gimple *def_temp;
-
- arg = gimple_call_arg (stmt, adj->base_index);
- if (!useless_type_conversion_p (TREE_TYPE (origin), TREE_TYPE (arg)))
- {
- if (!fold_convertible_p (TREE_TYPE (origin), arg))
- continue;
- arg = fold_convert_loc (gimple_location (stmt),
- TREE_TYPE (origin), arg);
- }
- if (debug_args == NULL)
- debug_args = decl_debug_args_insert (callee_decl);
- for (ix = 0; vec_safe_iterate (*debug_args, ix, &ddecl); ix += 2)
- if (ddecl == origin)
- {
- ddecl = (**debug_args)[ix + 1];
- break;
- }
- if (ddecl == NULL)
- {
- ddecl = make_node (DEBUG_EXPR_DECL);
- DECL_ARTIFICIAL (ddecl) = 1;
- TREE_TYPE (ddecl) = TREE_TYPE (origin);
- SET_DECL_MODE (ddecl, DECL_MODE (origin));
-
- vec_safe_push (*debug_args, origin);
- vec_safe_push (*debug_args, ddecl);
- }
- def_temp = gimple_build_debug_bind (ddecl, unshare_expr (arg), stmt);
- gsi_insert_before (&gsi, def_temp, GSI_SAME_STMT);
- }
- }
-
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "replacing stmt:");
- print_gimple_stmt (dump_file, gsi_stmt (gsi), 0);
- }
-
- new_stmt = gimple_build_call_vec (callee_decl, vargs);
- vargs.release ();
- if (gimple_call_lhs (stmt))
- gimple_call_set_lhs (new_stmt, gimple_call_lhs (stmt));
-
- gimple_set_block (new_stmt, gimple_block (stmt));
- if (gimple_has_location (stmt))
- gimple_set_location (new_stmt, gimple_location (stmt));
- gimple_call_set_chain (new_stmt, gimple_call_chain (stmt));
- gimple_call_copy_flags (new_stmt, stmt);
- if (gimple_in_ssa_p (cfun))
- {
- gimple_set_vuse (new_stmt, gimple_vuse (stmt));
- if (gimple_vdef (stmt))
- {
- gimple_set_vdef (new_stmt, gimple_vdef (stmt));
- SSA_NAME_DEF_STMT (gimple_vdef (new_stmt)) = new_stmt;
- }
- }
-
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "with stmt:");
- print_gimple_stmt (dump_file, new_stmt, 0);
- fprintf (dump_file, "\n");
- }
- gsi_replace (&gsi, new_stmt, true);
- if (cs)
- cs->set_call_stmt (new_stmt);
- do
- {
- current_node->record_stmt_references (gsi_stmt (gsi));
- gsi_prev (&gsi);
- }
- while (gsi_stmt (gsi) != gsi_stmt (prev_gsi));
-}
-
-/* If the expression *EXPR should be replaced by a reduction of a parameter, do
- so. ADJUSTMENTS is a pointer to a vector of adjustments. CONVERT
- specifies whether the function should care about type incompatibility the
- current and new expressions. If it is false, the function will leave
- incompatibility issues to the caller. Return true iff the expression
- was modified. */
-
-bool
-ipa_modify_expr (tree *expr, bool convert,
- ipa_parm_adjustment_vec adjustments)
-{
- struct ipa_parm_adjustment *cand
- = ipa_get_adjustment_candidate (&expr, &convert, adjustments, false);
- if (!cand)
- return false;
-
- tree src;
- if (cand->by_ref)
- {
- src = build_simple_mem_ref (cand->new_decl);
- REF_REVERSE_STORAGE_ORDER (src) = cand->reverse;
- }
- else
- src = cand->new_decl;
-
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "About to replace expr ");
- print_generic_expr (dump_file, *expr);
- fprintf (dump_file, " with ");
- print_generic_expr (dump_file, src);
- fprintf (dump_file, "\n");
- }
-
- if (convert && !useless_type_conversion_p (TREE_TYPE (*expr), cand->type))
- {
- tree vce = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (*expr), src);
- *expr = vce;
- }
- else
- *expr = src;
- return true;
-}
-
-/* If T is an SSA_NAME, return NULL if it is not a default def or
- return its base variable if it is. If IGNORE_DEFAULT_DEF is true,
- the base variable is always returned, regardless if it is a default
- def. Return T if it is not an SSA_NAME. */
-
-static tree
-get_ssa_base_param (tree t, bool ignore_default_def)
-{
- if (TREE_CODE (t) == SSA_NAME)
- {
- if (ignore_default_def || SSA_NAME_IS_DEFAULT_DEF (t))
- return SSA_NAME_VAR (t);
- else
- return NULL_TREE;
- }
- return t;
-}
-
-/* Given an expression, return an adjustment entry specifying the
- transformation to be done on EXPR. If no suitable adjustment entry
- was found, returns NULL.
-
- If IGNORE_DEFAULT_DEF is set, consider SSA_NAMEs which are not a
- default def, otherwise bail on them.
-
- If CONVERT is non-NULL, this function will set *CONVERT if the
- expression provided is a component reference. ADJUSTMENTS is the
- adjustments vector. */
-
-ipa_parm_adjustment *
-ipa_get_adjustment_candidate (tree **expr, bool *convert,
- ipa_parm_adjustment_vec adjustments,
- bool ignore_default_def)
-{
- if (TREE_CODE (**expr) == BIT_FIELD_REF
- || TREE_CODE (**expr) == IMAGPART_EXPR
- || TREE_CODE (**expr) == REALPART_EXPR)
- {
- *expr = &TREE_OPERAND (**expr, 0);
- if (convert)
- *convert = true;
- }
-
- poly_int64 offset, size, max_size;
- bool reverse;
- tree base
- = get_ref_base_and_extent (**expr, &offset, &size, &max_size, &reverse);
- if (!base || !known_size_p (size) || !known_size_p (max_size))
- return NULL;
-
- if (TREE_CODE (base) == MEM_REF)
- {
- offset += mem_ref_offset (base).force_shwi () * BITS_PER_UNIT;
- base = TREE_OPERAND (base, 0);
- }
-
- base = get_ssa_base_param (base, ignore_default_def);
- if (!base || TREE_CODE (base) != PARM_DECL)
- return NULL;
-
- struct ipa_parm_adjustment *cand = NULL;
- unsigned int len = adjustments.length ();
- for (unsigned i = 0; i < len; i++)
- {
- struct ipa_parm_adjustment *adj = &adjustments[i];
-
- if (adj->base == base
- && (must_eq (adj->offset, offset) || adj->op == IPA_PARM_OP_REMOVE))
- {
- cand = adj;
- break;
- }
- }
-
- if (!cand || cand->op == IPA_PARM_OP_COPY || cand->op == IPA_PARM_OP_REMOVE)
- return NULL;
- return cand;
-}
-
-/* Return true iff BASE_INDEX is in ADJUSTMENTS more than once. */
-
-static bool
-index_in_adjustments_multiple_times_p (int base_index,
- ipa_parm_adjustment_vec adjustments)
-{
- int i, len = adjustments.length ();
- bool one = false;
-
- for (i = 0; i < len; i++)
- {
- struct ipa_parm_adjustment *adj;
- adj = &adjustments[i];
-
- if (adj->base_index == base_index)
- {
- if (one)
- return true;
- else
- one = true;
- }
- }
- return false;
-}
-
-
-/* Return adjustments that should have the same effect on function parameters
- and call arguments as if they were first changed according to adjustments in
- INNER and then by adjustments in OUTER. */
-
-ipa_parm_adjustment_vec
-ipa_combine_adjustments (ipa_parm_adjustment_vec inner,
- ipa_parm_adjustment_vec outer)
-{
- int i, outlen = outer.length ();
- int inlen = inner.length ();
- int removals = 0;
- ipa_parm_adjustment_vec adjustments, tmp;
-
- tmp.create (inlen);
- for (i = 0; i < inlen; i++)
- {
- struct ipa_parm_adjustment *n;
- n = &inner[i];
-
- if (n->op == IPA_PARM_OP_REMOVE)
- removals++;
- else
- {
- /* FIXME: Handling of new arguments are not implemented yet. */
- gcc_assert (n->op != IPA_PARM_OP_NEW);
- tmp.quick_push (*n);
- }
- }
-
- adjustments.create (outlen + removals);
- for (i = 0; i < outlen; i++)
- {
- struct ipa_parm_adjustment r;
- struct ipa_parm_adjustment *out = &outer[i];
- struct ipa_parm_adjustment *in = &tmp[out->base_index];
-
- memset (&r, 0, sizeof (r));
- gcc_assert (in->op != IPA_PARM_OP_REMOVE);
- if (out->op == IPA_PARM_OP_REMOVE)
- {
- if (!index_in_adjustments_multiple_times_p (in->base_index, tmp))
- {
- r.op = IPA_PARM_OP_REMOVE;
- adjustments.quick_push (r);
- }
- continue;
- }
- else
- {
- /* FIXME: Handling of new arguments are not implemented yet. */
- gcc_assert (out->op != IPA_PARM_OP_NEW);
- }
-
- r.base_index = in->base_index;
- r.type = out->type;
-
- /* FIXME: Create nonlocal value too. */
-
- if (in->op == IPA_PARM_OP_COPY && out->op == IPA_PARM_OP_COPY)
- r.op = IPA_PARM_OP_COPY;
- else if (in->op == IPA_PARM_OP_COPY)
- r.offset = out->offset;
- else if (out->op == IPA_PARM_OP_COPY)
- r.offset = in->offset;
- else
- r.offset = in->offset + out->offset;
- adjustments.quick_push (r);
- }
-
- for (i = 0; i < inlen; i++)
- {
- struct ipa_parm_adjustment *n = &inner[i];
-
- if (n->op == IPA_PARM_OP_REMOVE)
- adjustments.quick_push (*n);
- }
-
- tmp.release ();
- return adjustments;
-}
-
-/* Dump the adjustments in the vector ADJUSTMENTS to dump_file in a human
- friendly way, assuming they are meant to be applied to FNDECL. */
-
-void
-ipa_dump_param_adjustments (FILE *file, ipa_parm_adjustment_vec adjustments,
- tree fndecl)
-{
- int i, len = adjustments.length ();
- bool first = true;
- vec<tree> parms = ipa_get_vector_of_formal_parms (fndecl);
-
- fprintf (file, "IPA param adjustments: ");
- for (i = 0; i < len; i++)
- {
- struct ipa_parm_adjustment *adj;
- adj = &adjustments[i];
-
- if (!first)
- fprintf (file, " ");
- else
- first = false;
-
- fprintf (file, "%i. base_index: %i - ", i, adj->base_index);
- print_generic_expr (file, parms[adj->base_index]);
- if (adj->base)
- {
- fprintf (file, ", base: ");
- print_generic_expr (file, adj->base);
- }
- if (adj->new_decl)
- {
- fprintf (file, ", new_decl: ");
- print_generic_expr (file, adj->new_decl);
- }
- if (adj->new_ssa_base)
- {
- fprintf (file, ", new_ssa_base: ");
- print_generic_expr (file, adj->new_ssa_base);
- }
-
- if (adj->op == IPA_PARM_OP_COPY)
- fprintf (file, ", copy_param");
- else if (adj->op == IPA_PARM_OP_REMOVE)
- fprintf (file, ", remove_param");
- else
- {
- fprintf (file, ", offset ");
- print_dec (adj->offset, file);
- }
- if (adj->by_ref)
- fprintf (file, ", by_ref");
- print_node_brief (file, ", type: ", adj->type, 0);
- fprintf (file, "\n");
- }
- parms.release ();
-}
-
/* Dump the AV linked list. */
void
diff --git a/gcc/ipa-prop.h b/gcc/ipa-prop.h
index cf2906d2314..196bd63f5e3 100644
--- a/gcc/ipa-prop.h
+++ b/gcc/ipa-prop.h
@@ -765,96 +765,6 @@ class ipcp_agg_lattice;
extern object_allocator<ipcp_agg_lattice> ipcp_agg_lattice_pool;
-/* Operation to be performed for the parameter in ipa_parm_adjustment
- below. */
-enum ipa_parm_op {
- IPA_PARM_OP_NONE,
-
- /* This describes a brand new parameter.
-
- The field `type' should be set to the new type, `arg_prefix'
- should be set to the string prefix for the new DECL_NAME, and
- `new_decl' will ultimately hold the newly created argument. */
- IPA_PARM_OP_NEW,
-
- /* This new parameter is an unmodified parameter at index base_index. */
- IPA_PARM_OP_COPY,
-
- /* This adjustment describes a parameter that is about to be removed
- completely. Most users will probably need to book keep those so that they
- don't leave behinfd any non default def ssa names belonging to them. */
- IPA_PARM_OP_REMOVE
-};
-
-/* Structure to describe transformations of formal parameters and actual
- arguments. Each instance describes one new parameter and they are meant to
- be stored in a vector. Additionally, most users will probably want to store
- adjustments about parameters that are being removed altogether so that SSA
- names belonging to them can be replaced by SSA names of an artificial
- variable. */
-struct ipa_parm_adjustment
-{
- /* The original PARM_DECL itself, helpful for processing of the body of the
- function itself. Intended for traversing function bodies.
- ipa_modify_formal_parameters, ipa_modify_call_arguments and
- ipa_combine_adjustments ignore this and use base_index.
- ipa_modify_formal_parameters actually sets this. */
- tree base;
-
- /* Type of the new parameter. However, if by_ref is true, the real type will
- be a pointer to this type. */
- tree type;
-
- /* Alias refrerence type to be used in MEM_REFs when adjusting caller
- arguments. */
- tree alias_ptr_type;
-
- /* The new declaration when creating/replacing a parameter. Created
- by ipa_modify_formal_parameters, useful for functions modifying
- the body accordingly. For brand new arguments, this is the newly
- created argument. */
- tree new_decl;
-
- /* New declaration of a substitute variable that we may use to replace all
- non-default-def ssa names when a parm decl is going away. */
- tree new_ssa_base;
-
- /* If non-NULL and the original parameter is to be removed (copy_param below
- is NULL), this is going to be its nonlocalized vars value. */
- tree nonlocal_value;
-
- /* This holds the prefix to be used for the new DECL_NAME. */
- const char *arg_prefix;
-
- /* Offset into the original parameter (for the cases when the new parameter
- is a component of an original one). */
- poly_int64_pod offset;
-
- /* Zero based index of the original parameter this one is based on. */
- int base_index;
-
- /* Whether this parameter is a new parameter, a copy of an old one,
- or one about to be removed. */
- enum ipa_parm_op op;
-
- /* Storage order of the original parameter (for the cases when the new
- parameter is a component of an original one). */
- unsigned reverse : 1;
-
- /* The parameter is to be passed by reference. */
- unsigned by_ref : 1;
-};
-
-typedef vec<ipa_parm_adjustment> ipa_parm_adjustment_vec;
-
-vec<tree> ipa_get_vector_of_formal_parms (tree fndecl);
-vec<tree> ipa_get_vector_of_formal_parm_types (tree fntype);
-void ipa_modify_formal_parameters (tree fndecl, ipa_parm_adjustment_vec);
-void ipa_modify_call_arguments (struct cgraph_edge *, gcall *,
- ipa_parm_adjustment_vec);
-ipa_parm_adjustment_vec ipa_combine_adjustments (ipa_parm_adjustment_vec,
- ipa_parm_adjustment_vec);
-void ipa_dump_param_adjustments (FILE *, ipa_parm_adjustment_vec, tree);
void ipa_dump_agg_replacement_values (FILE *f,
struct ipa_agg_replacement_value *av);
void ipa_prop_write_jump_functions (void);
@@ -870,10 +780,6 @@ ipa_polymorphic_call_context ipa_context_from_jfunc (ipa_node_params *,
int,
ipa_jump_func *);
void ipa_dump_param (FILE *, struct ipa_node_params *info, int i);
-bool ipa_modify_expr (tree *, bool, ipa_parm_adjustment_vec);
-ipa_parm_adjustment *ipa_get_adjustment_candidate (tree **, bool *,
- ipa_parm_adjustment_vec,
- bool);
void ipa_release_body_info (struct ipa_func_body_info *);
tree ipa_get_callee_param_type (struct cgraph_edge *e, int i);
diff --git a/gcc/ipa-split.c b/gcc/ipa-split.c
index 252ea053e2a..9f893915c17 100644
--- a/gcc/ipa-split.c
+++ b/gcc/ipa-split.c
@@ -129,6 +129,10 @@ struct split_point
/* Basic block where we split (that will become entry point of new function. */
basic_block entry_bb;
+ /* Count for entering the split part.
+ This is not count of the entry_bb because it may be in loop. */
+ profile_count count;
+
/* Basic blocks we are splitting away. */
bitmap split_bbs;
@@ -426,7 +430,6 @@ consider_split (struct split_point *current, bitmap non_ssa_vars,
edge_iterator ei;
gphi_iterator bsi;
unsigned int i;
- int incoming_freq = 0;
tree retval;
tree retbnd;
bool back_edge = false;
@@ -434,18 +437,21 @@ consider_split (struct split_point *current, bitmap non_ssa_vars,
if (dump_file && (dump_flags & TDF_DETAILS))
dump_split_point (dump_file, current);
+ current->count = profile_count::zero ();
FOR_EACH_EDGE (e, ei, current->entry_bb->preds)
{
if (e->flags & EDGE_DFS_BACK)
back_edge = true;
if (!bitmap_bit_p (current->split_bbs, e->src->index))
- incoming_freq += EDGE_FREQUENCY (e);
+ current->count += e->count ();
}
- /* Do not split when we would end up calling function anyway. */
- if (incoming_freq
- >= (ENTRY_BLOCK_PTR_FOR_FN (cfun)->count.to_frequency (cfun)
- * PARAM_VALUE (PARAM_PARTIAL_INLINING_ENTRY_PROBABILITY) / 100))
+ /* Do not split when we would end up calling function anyway.
+ Compares are three state, use !(...<...) to also give up when outcome
+ is unknown. */
+ if (!(current->count
+ < (ENTRY_BLOCK_PTR_FOR_FN (cfun)->count.apply_scale
+ (PARAM_VALUE (PARAM_PARTIAL_INLINING_ENTRY_PROBABILITY), 100))))
{
/* When profile is guessed, we can not expect it to give us
realistic estimate on likelyness of function taking the
@@ -454,14 +460,17 @@ consider_split (struct split_point *current, bitmap non_ssa_vars,
is likely noticeable win. */
if (back_edge
&& profile_status_for_fn (cfun) != PROFILE_READ
- && incoming_freq
- < ENTRY_BLOCK_PTR_FOR_FN (cfun)->count.to_frequency (cfun))
+ && current->count
+ < ENTRY_BLOCK_PTR_FOR_FN (cfun)->count)
{
if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file,
- " Split before loop, accepting despite low frequencies %i %i.\n",
- incoming_freq,
- ENTRY_BLOCK_PTR_FOR_FN (cfun)->count.to_frequency (cfun));
+ {
+ fprintf (dump_file,
+ " Split before loop, accepting despite low counts");
+ current->count.dump (dump_file);
+ fprintf (dump_file, " ");
+ ENTRY_BLOCK_PTR_FOR_FN (cfun)->count.dump (dump_file);
+ }
}
else
{
@@ -711,14 +720,13 @@ consider_split (struct split_point *current, bitmap non_ssa_vars,
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, " Accepted!\n");
- /* At the moment chose split point with lowest frequency and that leaves
+ /* At the moment chose split point with lowest count and that leaves
out smallest size of header.
In future we might re-consider this heuristics. */
if (!best_split_point.split_bbs
- || best_split_point.entry_bb->count.to_frequency (cfun)
- > current->entry_bb->count.to_frequency (cfun)
- || (best_split_point.entry_bb->count.to_frequency (cfun)
- == current->entry_bb->count.to_frequency (cfun)
+ || best_split_point.count
+ > current->count
+ || (best_split_point.count == current->count
&& best_split_point.split_size < current->split_size))
{
@@ -1446,6 +1454,7 @@ split_function (basic_block return_bb, struct split_point *split_point,
}
else
break;
+ call_bb->count = split_point->count;
e = split_block (split_point->entry_bb, last_stmt);
remove_edge (e);
diff --git a/gcc/ipa-utils.c b/gcc/ipa-utils.c
index e9ab78cdabb..c991bddd7dd 100644
--- a/gcc/ipa-utils.c
+++ b/gcc/ipa-utils.c
@@ -404,17 +404,18 @@ ipa_merge_profiles (struct cgraph_node *dst,
/* FIXME when we merge in unknown profile, we ought to set counts as
unsafe. */
- if (!src->count.initialized_p ())
+ if (!src->count.initialized_p ()
+ || !(src->count.ipa () == src->count))
return;
if (symtab->dump_file)
{
fprintf (symtab->dump_file, "Merging profiles of %s to %s\n",
src->dump_name (), dst->dump_name ());
}
- if (dst->count.initialized_p ())
- dst->count += src->count;
- else
- dst->count = src->count;
+ if (dst->count.initialized_p () && dst->count.ipa () == dst->count)
+ dst->count += src->count.ipa ();
+ else
+ dst->count = src->count.ipa ();
/* This is ugly. We need to get both function bodies into memory.
If declaration is merged, we need to duplicate it to be able
@@ -557,25 +558,19 @@ ipa_merge_profiles (struct cgraph_node *dst,
}
}
push_cfun (dstcfun);
- counts_to_freqs ();
+ update_max_bb_count ();
compute_function_frequency ();
pop_cfun ();
for (e = dst->callees; e; e = e->next_callee)
{
if (e->speculative)
continue;
- e->count = gimple_bb (e->call_stmt)->count.ipa ();
- e->frequency = compute_call_stmt_bb_frequency
- (dst->decl,
- gimple_bb (e->call_stmt));
+ e->count = gimple_bb (e->call_stmt)->count;
}
for (e = dst->indirect_calls, e2 = src->indirect_calls; e;
e2 = (e2 ? e2->next_callee : NULL), e = e->next_callee)
{
profile_count count = gimple_bb (e->call_stmt)->count;
- int freq = compute_call_stmt_bb_frequency
- (dst->decl,
- gimple_bb (e->call_stmt));
/* When call is speculative, we need to re-distribute probabilities
the same way as they was. This is not really correct because
in the other copy the speculation may differ; but probably it
@@ -624,12 +619,6 @@ ipa_merge_profiles (struct cgraph_node *dst,
indirect->count += indirect2->count;
}
}
- int prob = direct->count.probability_in (direct->count
- + indirect->count).
- to_reg_br_prob_base ();
- direct->frequency = RDIV (freq * prob, REG_BR_PROB_BASE);
- indirect->frequency = RDIV (freq * (REG_BR_PROB_BASE - prob),
- REG_BR_PROB_BASE);
}
else
/* At the moment we should have only profile feedback based
@@ -642,18 +631,11 @@ ipa_merge_profiles (struct cgraph_node *dst,
ipa_ref *ref;
e2->speculative_call_info (direct, indirect, ref);
- e->count = count.ipa ();
- e->frequency = freq;
- int prob = direct->count.probability_in (e->count)
- .to_reg_br_prob_base ();
- e->make_speculative (direct->callee, direct->count,
- RDIV (freq * prob, REG_BR_PROB_BASE));
+ e->count = count;
+ e->make_speculative (direct->callee, direct->count);
}
else
- {
- e->count = count.ipa ();
- e->frequency = freq;
- }
+ e->count = count;
}
if (!preserve_body)
src->release_body ();
diff --git a/gcc/lto-cgraph.c b/gcc/lto-cgraph.c
index 15f0eaadf20..a19f8a13dfb 100644
--- a/gcc/lto-cgraph.c
+++ b/gcc/lto-cgraph.c
@@ -266,7 +266,6 @@ lto_output_edge (struct lto_simple_output_block *ob, struct cgraph_edge *edge,
bp_pack_enum (&bp, cgraph_inline_failed_t,
CIF_N_REASONS, edge->inline_failed);
bp_pack_var_len_unsigned (&bp, uid);
- bp_pack_var_len_unsigned (&bp, edge->frequency);
bp_pack_value (&bp, edge->indirect_inlining_edge, 1);
bp_pack_value (&bp, edge->speculative, 1);
bp_pack_value (&bp, edge->call_stmt_cannot_inline_p, 1);
@@ -1248,7 +1247,7 @@ input_node (struct lto_file_decl_data *file_data,
if (clone_ref != LCC_NOT_FOUND)
{
node = dyn_cast<cgraph_node *> (nodes[clone_ref])->create_clone (fn_decl,
- profile_count::uninitialized (), CGRAPH_FREQ_BASE, false,
+ profile_count::uninitialized (), false,
vNULL, false, NULL, NULL);
}
else
@@ -1464,7 +1463,6 @@ input_edge (struct lto_input_block *ib, vec<symtab_node *> nodes,
struct cgraph_edge *edge;
unsigned int stmt_id;
profile_count count;
- int freq;
cgraph_inline_failed_t inline_failed;
struct bitpack_d bp;
int ecf_flags = 0;
@@ -1487,12 +1485,11 @@ input_edge (struct lto_input_block *ib, vec<symtab_node *> nodes,
bp = streamer_read_bitpack (ib);
inline_failed = bp_unpack_enum (&bp, cgraph_inline_failed_t, CIF_N_REASONS);
stmt_id = bp_unpack_var_len_unsigned (&bp);
- freq = (int) bp_unpack_var_len_unsigned (&bp);
if (indirect)
- edge = caller->create_indirect_edge (NULL, 0, count, freq);
+ edge = caller->create_indirect_edge (NULL, 0, count);
else
- edge = caller->create_edge (callee, NULL, count, freq);
+ edge = caller->create_edge (callee, NULL, count);
edge->indirect_inlining_edge = bp_unpack_value (&bp, 1);
edge->speculative = bp_unpack_value (&bp, 1);
@@ -1823,8 +1820,13 @@ merge_profile_summaries (struct lto_file_decl_data **file_data_vec)
if (scale == REG_BR_PROB_BASE)
continue;
for (edge = node->callees; edge; edge = edge->next_callee)
- edge->count = edge->count.apply_scale (scale, REG_BR_PROB_BASE);
- node->count = node->count.apply_scale (scale, REG_BR_PROB_BASE);
+ if (edge->count.ipa ().nonzero_p ())
+ edge->count = edge->count.apply_scale (scale, REG_BR_PROB_BASE);
+ for (edge = node->indirect_calls; edge; edge = edge->next_callee)
+ if (edge->count.ipa ().nonzero_p ())
+ edge->count = edge->count.apply_scale (scale, REG_BR_PROB_BASE);
+ if (node->count.ipa ().nonzero_p ())
+ node->count = node->count.apply_scale (scale, REG_BR_PROB_BASE);
}
}
diff --git a/gcc/lto-opts.c b/gcc/lto-opts.c
index 641b2795b2c..e39f05d0e9c 100644
--- a/gcc/lto-opts.c
+++ b/gcc/lto-opts.c
@@ -70,73 +70,10 @@ lto_write_options (void)
obstack_init (&temporary_obstack);
- /* Output options that affect GIMPLE IL semantics and are implicitly
- enabled by the frontend.
- This for now includes an explicit set of options that we also handle
- explicitly in lto-wrapper.c. In the end the effects on GIMPLE IL
- semantics should be explicitely encoded in the IL or saved per
- function rather than per compilation unit. */
- /* -fexceptions causes the EH machinery to be initialized, enabling
- generation of unwind data so that explicit throw() calls work. */
- if (!global_options_set.x_flag_exceptions
- && global_options.x_flag_exceptions)
- append_to_collect_gcc_options (&temporary_obstack, &first_p,
- "-fexceptions");
- /* -fnon-call-exceptions changes the generation of exception
- regions. It is enabled implicitly by the Go frontend. */
- if (!global_options_set.x_flag_non_call_exceptions
- && global_options.x_flag_non_call_exceptions)
- append_to_collect_gcc_options (&temporary_obstack, &first_p,
- "-fnon-call-exceptions");
- /* The default -ffp-contract changes depending on the language
- standard. Pass thru conservative standard settings. */
- if (!global_options_set.x_flag_fp_contract_mode)
- switch (global_options.x_flag_fp_contract_mode)
- {
- case FP_CONTRACT_OFF:
- append_to_collect_gcc_options (&temporary_obstack, &first_p,
- "-ffp-contract=off");
- break;
- case FP_CONTRACT_ON:
- append_to_collect_gcc_options (&temporary_obstack, &first_p,
- "-ffp-contract=on");
- break;
- case FP_CONTRACT_FAST:
- /* Nothing. That merges conservatively and is the default for LTO. */
- break;
- default:
- gcc_unreachable ();
- }
- /* The default -fmath-errno, -fsigned-zeros and -ftrapping-math change
- depending on the language (they can be disabled by the Ada front-end).
- Pass thru conservative standard settings. */
- if (!global_options_set.x_flag_errno_math)
- append_to_collect_gcc_options (&temporary_obstack, &first_p,
- global_options.x_flag_errno_math
- ? "-fmath-errno"
- : "-fno-math-errno");
- if (!global_options_set.x_flag_signed_zeros)
- append_to_collect_gcc_options (&temporary_obstack, &first_p,
- global_options.x_flag_signed_zeros
- ? "-fsigned-zeros"
- : "-fno-signed-zeros");
- if (!global_options_set.x_flag_trapping_math)
- append_to_collect_gcc_options (&temporary_obstack, &first_p,
- global_options.x_flag_trapping_math
- ? "-ftrapping-math"
- : "-fno-trapping-math");
- /* We need to merge -f[no-]strict-overflow, -f[no-]wrapv and -f[no-]trapv
- conservatively, so stream out their defaults. */
- if (!global_options_set.x_flag_wrapv
- && global_options.x_flag_wrapv)
- append_to_collect_gcc_options (&temporary_obstack, &first_p, "-fwrapv");
- if (!global_options_set.x_flag_trapv
- && !global_options.x_flag_trapv)
- append_to_collect_gcc_options (&temporary_obstack, &first_p, "-fno-trapv");
-
if (!global_options_set.x_flag_openmp
&& !global_options.x_flag_openmp)
- append_to_collect_gcc_options (&temporary_obstack, &first_p, "-fno-openmp");
+ append_to_collect_gcc_options (&temporary_obstack, &first_p,
+ "-fno-openmp");
if (!global_options_set.x_flag_openacc
&& !global_options.x_flag_openacc)
append_to_collect_gcc_options (&temporary_obstack, &first_p,
diff --git a/gcc/lto-streamer-in.c b/gcc/lto-streamer-in.c
index 4682be089c4..3ef5e83d39b 100644
--- a/gcc/lto-streamer-in.c
+++ b/gcc/lto-streamer-in.c
@@ -1192,7 +1192,7 @@ input_function (tree fn_decl, struct data_in *data_in,
gimple_set_body (fn_decl, bb_seq (ei_edge (ei)->dest));
}
- counts_to_freqs ();
+ update_max_bb_count ();
fixup_call_stmt_edges (node, stmts);
execute_all_ipa_stmt_fixups (node, stmts);
diff --git a/gcc/lto/ChangeLog b/gcc/lto/ChangeLog
index 173cde67369..4edb153ed76 100644
--- a/gcc/lto/ChangeLog
+++ b/gcc/lto/ChangeLog
@@ -1,3 +1,7 @@
+2017-11-10 Jan Hubicka <hubicka@ucw.cz>
+
+ * lto-partition.c (lto_balanced_map): Use frequency accessor.
+
2017-10-13 Jan Hubicka <hubicka@ucw.cz>
* lto-lang.c (lto_post_options): Clean shlib flag when not doing PIC.
diff --git a/gcc/lto/lto-partition.c b/gcc/lto/lto-partition.c
index 3141ecec07c..2d0663eb93c 100644
--- a/gcc/lto/lto-partition.c
+++ b/gcc/lto/lto-partition.c
@@ -587,7 +587,7 @@ lto_balanced_map (int n_lto_partitions, int max_partition_size)
for (edge = node->callees; edge; edge = edge->next_callee)
if (edge->callee->definition)
{
- int edge_cost = edge->frequency;
+ int edge_cost = edge->frequency ();
int index;
if (!edge_cost)
@@ -603,7 +603,7 @@ lto_balanced_map (int n_lto_partitions, int max_partition_size)
}
for (edge = node->callers; edge; edge = edge->next_caller)
{
- int edge_cost = edge->frequency;
+ int edge_cost = edge->frequency ();
int index;
gcc_assert (edge->caller->definition);
diff --git a/gcc/machmode.def b/gcc/machmode.def
index dcf10565958..a78c722f8cf 100644
--- a/gcc/machmode.def
+++ b/gcc/machmode.def
@@ -142,11 +142,16 @@ along with GCC; see the file COPYING3. If not see
than two bytes (if CLASS is FLOAT). CLASS must be INT or
FLOAT. The names follow the same rule as VECTOR_MODE uses.
- VECTOR_BOOL_MODE (COUNT, BYTESIZE)
- Create a vector of booleans with COUNT elements and BYTESIZE bytes.
- Each boolean occupies (COUNT * BITS_PER_UNIT) / BYTESIZE bits,
- with the element at index 0 occupying the lsb of the first byte
- in memory. Only the lowest bit of each element is significant.
+ VECTOR_MODES_WITH_PREFIX (PREFIX, CLASS, WIDTH);
+ Like VECTOR_MODES, but start the mode names with PREFIX instead
+ of the usual "V".
+
+ VECTOR_BOOL_MODE (NAME, COUNT, BYTESIZE)
+ Create a vector mode called NAME that contains COUNT boolean
+ elements and occupies BYTESIZE bytes in total. Each boolean
+ element occupies (COUNT * BITS_PER_UNIT) / BYTESIZE bits, with
+ the element at index 0 occupying the lsb of the first byte in
+ memory. Only the lowest bit of each element is significant.
COMPLEX_MODES (CLASS);
For all modes presently declared in class CLASS, construct
diff --git a/gcc/machmode.h b/gcc/machmode.h
index 04c1e877e1c..419f08ad869 100644
--- a/gcc/machmode.h
+++ b/gcc/machmode.h
@@ -608,14 +608,14 @@ GET_MODE_SIZE (machine_mode mode)
}
template<typename T>
-ALWAYS_INLINE typename if_poly<typename T::measurement_type>::t
+ALWAYS_INLINE typename if_poly<typename T::measurement_type>::type
GET_MODE_SIZE (const T &mode)
{
return mode_to_bytes (mode);
}
template<typename T>
-ALWAYS_INLINE typename if_nonpoly<typename T::measurement_type>::t
+ALWAYS_INLINE typename if_nonpoly<typename T::measurement_type>::type
GET_MODE_SIZE (const T &mode)
{
return mode_to_bytes (mode).coeffs[0];
@@ -634,14 +634,14 @@ GET_MODE_BITSIZE (machine_mode mode)
}
template<typename T>
-ALWAYS_INLINE typename if_poly<typename T::measurement_type>::t
+ALWAYS_INLINE typename if_poly<typename T::measurement_type>::type
GET_MODE_BITSIZE (const T &mode)
{
return mode_to_bits (mode);
}
template<typename T>
-ALWAYS_INLINE typename if_nonpoly<typename T::measurement_type>::t
+ALWAYS_INLINE typename if_nonpoly<typename T::measurement_type>::type
GET_MODE_BITSIZE (const T &mode)
{
return mode_to_bits (mode).coeffs[0];
@@ -661,14 +661,14 @@ GET_MODE_PRECISION (machine_mode mode)
}
template<typename T>
-ALWAYS_INLINE typename if_poly<typename T::measurement_type>::t
+ALWAYS_INLINE typename if_poly<typename T::measurement_type>::type
GET_MODE_PRECISION (const T &mode)
{
return mode_to_precision (mode);
}
template<typename T>
-ALWAYS_INLINE typename if_nonpoly<typename T::measurement_type>::t
+ALWAYS_INLINE typename if_nonpoly<typename T::measurement_type>::type
GET_MODE_PRECISION (const T &mode)
{
return mode_to_precision (mode).coeffs[0];
@@ -719,14 +719,14 @@ GET_MODE_NUNITS (machine_mode mode)
}
template<typename T>
-ALWAYS_INLINE typename if_poly<typename T::measurement_type>::t
+ALWAYS_INLINE typename if_poly<typename T::measurement_type>::type
GET_MODE_NUNITS (const T &mode)
{
return mode_to_nunits (mode);
}
template<typename T>
-ALWAYS_INLINE typename if_nonpoly<typename T::measurement_type>::t
+ALWAYS_INLINE typename if_nonpoly<typename T::measurement_type>::type
GET_MODE_NUNITS (const T &mode)
{
return mode_to_nunits (mode).coeffs[0];
diff --git a/gcc/match.pd b/gcc/match.pd
index 63566df3205..f27073994b6 100644
--- a/gcc/match.pd
+++ b/gcc/match.pd
@@ -355,6 +355,11 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(rdiv @0 (rdiv:s @1 @2))
(mult (rdiv @0 @1) @2)))
+/* Simplify x / (- y) to -x / y. */
+(simplify
+ (rdiv @0 (negate @1))
+ (rdiv (negate @0) @1))
+
/* Optimize (X & (-A)) / A where A is a power of 2, to X >> log2(A) */
(for div (trunc_div ceil_div floor_div round_div exact_div)
(simplify
@@ -606,6 +611,19 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
&& tree_nop_conversion_p (type, TREE_TYPE (@1)))
(lshift @0 @2)))
+/* Fold (1 << (C - x)) where C = precision(type) - 1
+ into ((1 << C) >> x). */
+(simplify
+ (lshift integer_onep@0 (minus@1 INTEGER_CST@2 @3))
+ (if (INTEGRAL_TYPE_P (type)
+ && wi::eq_p (wi::to_wide (@2), TYPE_PRECISION (type) - 1)
+ && single_use (@1))
+ (if (TYPE_UNSIGNED (type))
+ (rshift (lshift @0 @2) @3)
+ (with
+ { tree utype = unsigned_type_for (type); }
+ (convert (rshift (lshift (convert:utype @0) @2) @3))))))
+
/* Fold (C1/X)*C2 into (C1*C2)/X. */
(simplify
(mult (rdiv@3 REAL_CST@0 @1) REAL_CST@2)
@@ -686,6 +704,42 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(bit_ior:c (bit_xor:c @0 @1) @0)
(bit_ior @0 @1))
+/* (a & ~b) | (a ^ b) --> a ^ b */
+(simplify
+ (bit_ior:c (bit_and:c @0 (bit_not @1)) (bit_xor:c@2 @0 @1))
+ @2)
+
+/* (a & ~b) ^ ~a --> ~(a & b) */
+(simplify
+ (bit_xor:c (bit_and:cs @0 (bit_not @1)) (bit_not @0))
+ (bit_not (bit_and @0 @1)))
+
+/* (a | b) & ~(a ^ b) --> a & b */
+(simplify
+ (bit_and:c (bit_ior @0 @1) (bit_not (bit_xor:c @0 @1)))
+ (bit_and @0 @1))
+
+/* a | ~(a ^ b) --> a | ~b */
+(simplify
+ (bit_ior:c @0 (bit_not:s (bit_xor:c @0 @1)))
+ (bit_ior @0 (bit_not @1)))
+
+/* (a | b) | (a &^ b) --> a | b */
+(for op (bit_and bit_xor)
+ (simplify
+ (bit_ior:c (bit_ior@2 @0 @1) (op:c @0 @1))
+ @2))
+
+/* (a & b) | ~(a ^ b) --> ~(a ^ b) */
+(simplify
+ (bit_ior:c (bit_and:c @0 @1) (bit_not@2 (bit_xor @0 @1)))
+ @2)
+
+/* ~(~a & b) --> a | ~b */
+(simplify
+ (bit_not (bit_and:cs (bit_not @0) @1))
+ (bit_ior @0 (bit_not @1)))
+
/* Simplify (~X & Y) to X ^ Y if we know that (X & ~Y) is 0. */
#if GIMPLE
(simplify
@@ -962,6 +1016,12 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(match negate_expr_p
VEC_DUPLICATE_CST
(if (FLOAT_TYPE_P (TREE_TYPE (type)) || TYPE_OVERFLOW_WRAPS (type))))
+(match negate_expr_p
+ (minus @0 @1)
+ (if ((ANY_INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_WRAPS (type))
+ || (FLOAT_TYPE_P (type)
+ && !HONOR_SIGN_DEPENDENT_ROUNDING (type)
+ && !HONOR_SIGNED_ZEROS (type)))))
/* (-A) * (-B) -> A * B */
(simplify
@@ -977,6 +1037,15 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
&& !HONOR_SIGNED_ZEROS (element_mode (type)))
(minus (negate @1) @0)))
+/* -(A - B) -> B - A. */
+(simplify
+ (negate (minus @0 @1))
+ (if ((ANY_INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_SANITIZED (type))
+ || (FLOAT_TYPE_P (type)
+ && !HONOR_SIGN_DEPENDENT_ROUNDING (type)
+ && !HONOR_SIGNED_ZEROS (type)))
+ (minus @1 @0)))
+
/* A - B -> A + (-B) if B is easily negatable. */
(simplify
(minus @0 negate_expr_p@1)
@@ -1093,6 +1162,11 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
|| !TYPE_UNSIGNED (TREE_TYPE (@0)))
(convert (minus @0 { build_each_one_cst (TREE_TYPE (@0)); }))))
+/* Convert - (~A) to A + 1. */
+(simplify
+ (negate (nop_convert (bit_not @0)))
+ (plus (view_convert @0) { build_each_one_cst (type); }))
+
/* Convert ~ (A - 1) or ~ (A + -1) to -A. */
(simplify
(bit_not (convert? (minus @0 integer_each_onep)))
@@ -1115,6 +1189,12 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
(convert (bit_xor @0 @1))))
+/* Otherwise prefer ~(X ^ Y) to ~X ^ Y as more canonical. */
+(simplify
+ (bit_xor:c (nop_convert:s (bit_not:s @0)) @1)
+ (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
+ (bit_not (bit_xor (view_convert @0) @1))))
+
/* (x & ~m) | (y & m) -> ((x ^ y) & m) ^ x */
(simplify
(bit_ior:c (bit_and:cs @0 (bit_not @2)) (bit_and:cs @1 @2))
@@ -1734,7 +1814,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
/* Simplifications of MIN_EXPR, MAX_EXPR, fmin() and fmax(). */
-(for minmax (min max FMIN FMAX)
+(for minmax (min max FMIN FMIN_FN FMAX FMAX_FN)
(simplify
(minmax @0 @0)
@0))
@@ -1812,7 +1892,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
&& TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type))
(minmax @1 (convert @2)))))
-(for minmax (FMIN FMAX)
+(for minmax (FMIN FMIN_FN FMAX FMAX_FN)
/* If either argument is NaN, return the other one. Avoid the
transformation if we get (and honor) a signalling NaN. */
(simplify
@@ -1830,11 +1910,17 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(FMIN @0 @1)
(min @0 @1))
(simplify
+ (FMIN_FN @0 @1)
+ (min @0 @1))
+ (simplify
(FMAX @0 @1)
+ (max @0 @1))
+ (simplify
+ (FMAX_FN @0 @1)
(max @0 @1)))
/* min (-A, -B) -> -max (A, B) */
-(for minmax (min max FMIN FMAX)
- maxmin (max min FMAX FMIN)
+(for minmax (min max FMIN FMIN_FN FMAX FMAX_FN)
+ maxmin (max min FMAX FMAX_FN FMIN FMAX_FN)
(simplify
(minmax (negate:s@2 @0) (negate:s@3 @1))
(if (FLOAT_TYPE_P (TREE_TYPE (@0))
diff --git a/gcc/objc/ChangeLog b/gcc/objc/ChangeLog
index 7d865928999..f87294be8f5 100644
--- a/gcc/objc/ChangeLog
+++ b/gcc/objc/ChangeLog
@@ -1,3 +1,8 @@
+2017-11-10 Martin Sebor <msebor@redhat.com>
+
+ PR c/81117
+ * objc-encoding.c (encode_type): Use memcpy instead of strncpy.
+
2017-10-31 David Malcolm <dmalcolm@redhat.com>
* objc-gnu-runtime-abi-01.c (objc_gnu_runtime_abi_01_init): Use
diff --git a/gcc/objc/objc-encoding.c b/gcc/objc/objc-encoding.c
index 9f46d57ac8c..f9d8d477d76 100644
--- a/gcc/objc/objc-encoding.c
+++ b/gcc/objc/objc-encoding.c
@@ -734,7 +734,7 @@ encode_type (tree type, int curtype, int format)
/* Rewrite "in const" from "nr" to "rn". */
if (curtype >= 1 && !strncmp (enc - 1, "nr", 2))
- strncpy (enc - 1, "rn", 2);
+ memcpy (enc - 1, "rn", 2);
}
}
}
diff --git a/gcc/omp-expand.c b/gcc/omp-expand.c
index 34a95aa15b6..fc61a45a22c 100644
--- a/gcc/omp-expand.c
+++ b/gcc/omp-expand.c
@@ -1399,7 +1399,7 @@ expand_omp_taskreg (struct omp_region *region)
if (optimize)
optimize_omp_library_calls (entry_stmt);
- counts_to_freqs ();
+ update_max_bb_count ();
cgraph_edge::rebuild_edges ();
/* Some EH regions might become dead, see PR34608. If
diff --git a/gcc/omp-simd-clone.c b/gcc/omp-simd-clone.c
index 9cd66e26c27..026c8e489eb 100644
--- a/gcc/omp-simd-clone.c
+++ b/gcc/omp-simd-clone.c
@@ -45,7 +45,7 @@ along with GCC; see the file COPYING3. If not see
#include "tree-dfa.h"
#include "cfgloop.h"
#include "symbol-summary.h"
-#include "ipa-prop.h"
+#include "ipa-param-manipulation.h"
#include "tree-eh.h"
#include "varasm.h"
#include "stringpool.h"
@@ -1398,10 +1398,8 @@ simd_clone_adjust (struct cgraph_node *node)
(single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)), seq);
entry_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
- int freq = compute_call_stmt_bb_frequency (current_function_decl,
- entry_bb);
node->create_edge (cgraph_node::get_create (fn),
- call, entry_bb->count, freq);
+ call, entry_bb->count);
imm_use_iterator iter;
use_operand_p use_p;
diff --git a/gcc/optabs-query.c b/gcc/optabs-query.c
index 6118c9671e7..2d4f018ba3d 100644
--- a/gcc/optabs-query.c
+++ b/gcc/optabs-query.c
@@ -622,7 +622,7 @@ lshift_cheap_p (bool speed_p)
return cheap[speed_p];
}
-/* Return TRUE if at least one mode is available for optab OP. */
+/* Return true if optab OP supports at least one mode. */
static bool
supports_at_least_one_mode_p (optab op)
@@ -634,7 +634,7 @@ supports_at_least_one_mode_p (optab op)
return false;
}
-/* Return TRUE if vec_gather_load is available for at least one vector
+/* Return true if vec_gather_load is available for at least one vector
mode. */
bool
@@ -646,13 +646,12 @@ supports_vec_gather_load_p ()
this_fn_optabs->supports_vec_gather_load_cached = true;
this_fn_optabs->supports_vec_gather_load
- = supports_at_least_one_mode_p (vec_gather_loads_optab)
- || supports_at_least_one_mode_p (vec_gather_loadu_optab);
+ = supports_at_least_one_mode_p (gather_load_optab);
return this_fn_optabs->supports_vec_gather_load;
}
-/* Return TRUE if vec_scatter_store is available for at least one vector
+/* Return true if vec_scatter_store is available for at least one vector
mode. */
bool
@@ -664,8 +663,7 @@ supports_vec_scatter_store_p ()
this_fn_optabs->supports_vec_scatter_store_cached = true;
this_fn_optabs->supports_vec_scatter_store
- = supports_at_least_one_mode_p (vec_scatter_stores_optab)
- || supports_at_least_one_mode_p (vec_scatter_storeu_optab);
+ = supports_at_least_one_mode_p (scatter_store_optab);
return this_fn_optabs->supports_vec_scatter_store;
}
diff --git a/gcc/optabs-tree.c b/gcc/optabs-tree.c
index 763b9e02645..6c49288c687 100644
--- a/gcc/optabs-tree.c
+++ b/gcc/optabs-tree.c
@@ -163,8 +163,8 @@ optab_for_tree_code (enum tree_code code, const_tree type,
case REDUC_XOR_EXPR:
return reduc_xor_scal_optab;
- case STRICT_REDUC_PLUS_EXPR:
- return strict_reduc_plus_scal_optab;
+ case FOLD_LEFT_PLUS_EXPR:
+ return fold_left_plus_optab;
case VEC_WIDEN_MULT_HI_EXPR:
return TYPE_UNSIGNED (type) ?
@@ -261,25 +261,6 @@ optab_for_tree_code (enum tree_code code, const_tree type,
}
}
-/* Return true if appropriate vector instructions are available to perform a
- strict reduction operation (i.e. in order) CODE for mode MODE. */
-bool
-strict_reduction_support (tree_code code, tree type)
-{
- optab optab;
-
- switch (code)
- {
- case STRICT_REDUC_PLUS_EXPR:
- optab = strict_reduc_plus_scal_optab;
- break;
- default:
- gcc_unreachable ();
- }
-
- return optab_handler (optab, TYPE_MODE (type)) != CODE_FOR_nothing;
-}
-
/* Function supportable_convert_operation
Check whether an operation represented by the code CODE is a
@@ -393,9 +374,10 @@ init_tree_optimization_optabs (tree optnode)
TREE_OPTIMIZATION_BASE_OPTABS (optnode) = this_target_optabs;
struct target_optabs *tmp_optabs = (struct target_optabs *)
TREE_OPTIMIZATION_OPTABS (optnode);
- if (!tmp_optabs)
- tmp_optabs = ggc_alloc<target_optabs> ();
- memset (tmp_optabs, 0, sizeof (struct target_optabs));
+ if (tmp_optabs)
+ memset (tmp_optabs, 0, sizeof (struct target_optabs));
+ else
+ tmp_optabs = ggc_cleared_alloc<target_optabs> ();
/* Generate a new set of optabs into tmp_optabs. */
init_all_optabs (tmp_optabs);
diff --git a/gcc/optabs-tree.h b/gcc/optabs-tree.h
index 03178fdd7a8..52e842bfb61 100644
--- a/gcc/optabs-tree.h
+++ b/gcc/optabs-tree.h
@@ -36,7 +36,6 @@ enum optab_subtype
the second argument. The third argument distinguishes between the types of
vector shifts and rotates. */
optab optab_for_tree_code (enum tree_code, const_tree, enum optab_subtype);
-bool strict_reduction_support (tree_code, tree);
bool supportable_convert_operation (enum tree_code, tree, tree, tree *,
enum tree_code *);
bool expand_vec_cmp_expr_p (tree, tree, enum tree_code);
diff --git a/gcc/optabs.c b/gcc/optabs.c
index 7b8c0f60c99..9f5165fedbb 100644
--- a/gcc/optabs.c
+++ b/gcc/optabs.c
@@ -375,7 +375,7 @@ expand_vector_broadcast (machine_mode vmode, rtx op)
gcc_checking_assert (VECTOR_MODE_P (vmode));
- if (CONSTANT_P (op))
+ if (valid_for_const_vec_duplicate_p (vmode, op))
return gen_const_vec_duplicate (vmode, op);
insn_code icode = optab_handler (vec_duplicate_optab, vmode);
@@ -1401,7 +1401,7 @@ expand_binop (machine_mode mode, optab binoptab, rtx op0, rtx op1,
shift_mask = targetm.shift_truncation_mask (word_mode);
op1_mode = (GET_MODE (op1) != VOIDmode
? as_a <scalar_int_mode> (GET_MODE (op1))
- : get_shift_amount_mode (word_mode));
+ : word_mode);
/* Apply the truncation to constant shifts. */
if (double_shift_mask > 0 && CONST_INT_P (op1))
diff --git a/gcc/optabs.def b/gcc/optabs.def
index b530c86d04a..58a2f3bd5f7 100644
--- a/gcc/optabs.def
+++ b/gcc/optabs.def
@@ -313,10 +313,11 @@ OPTAB_D (reduc_umin_scal_optab, "reduc_umin_scal_$a")
OPTAB_D (reduc_and_scal_optab, "reduc_and_scal_$a")
OPTAB_D (reduc_ior_scal_optab, "reduc_ior_scal_$a")
OPTAB_D (reduc_xor_scal_optab, "reduc_xor_scal_$a")
-OPTAB_D (strict_reduc_plus_scal_optab, "strict_reduc_plus_scal_$a")
+OPTAB_D (fold_left_plus_optab, "fold_left_plus_$a")
OPTAB_D (break_after_optab, "break_after_$a")
OPTAB_D (extract_last_optab, "extract_last_$a")
+OPTAB_D (fold_extract_last_optab, "fold_extract_last_$a")
OPTAB_D (sdot_prod_optab, "sdot_prod$I$a")
OPTAB_D (ssum_widen_optab, "widen_ssum$I$a3")
@@ -334,8 +335,8 @@ OPTAB_D (vec_perm_optab, "vec_perm$a")
OPTAB_D (vec_realign_load_optab, "vec_realign_load_$a")
OPTAB_D (vec_set_optab, "vec_set$a")
OPTAB_D (vec_shr_optab, "vec_shr_$a")
-OPTAB_D (vec_interleave_hi_optab, "vec_interleave_hi_$a")
OPTAB_D (vec_interleave_lo_optab, "vec_interleave_lo_$a")
+OPTAB_D (vec_interleave_hi_optab, "vec_interleave_hi_$a")
OPTAB_D (vec_extract_even_optab, "vec_extract_even_$a")
OPTAB_D (vec_extract_odd_optab, "vec_extract_odd_$a")
OPTAB_D (vec_reverse_optab, "vec_reverse_$a")
@@ -397,20 +398,17 @@ OPTAB_D (atomic_xor_optab, "atomic_xor$I$a")
OPTAB_D (get_thread_pointer_optab, "get_thread_pointer$I$a")
OPTAB_D (set_thread_pointer_optab, "set_thread_pointer$I$a")
-OPTAB_D (vec_gather_loads_optab, "vec_gather_loads$a")
-OPTAB_D (vec_mask_gather_loads_optab, "vec_mask_gather_loads$a")
-OPTAB_D (vec_gather_loadu_optab, "vec_gather_loadu$a")
-OPTAB_D (vec_mask_gather_loadu_optab, "vec_mask_gather_loadu$a")
-OPTAB_D (vec_scatter_stores_optab, "vec_scatter_stores$a")
-OPTAB_D (vec_mask_scatter_stores_optab, "vec_mask_scatter_stores$a")
-OPTAB_D (vec_scatter_storeu_optab, "vec_scatter_storeu$a")
-OPTAB_D (vec_mask_scatter_storeu_optab, "vec_mask_scatter_storeu$a")
+OPTAB_D (firstfault_load_optab, "firstfault_load$a")
+OPTAB_D (read_nf_optab, "read_nf$a")
+OPTAB_D (write_nf_optab, "write_nf$a")
+
+OPTAB_D (gather_load_optab, "gather_load$a")
+OPTAB_D (mask_gather_load_optab, "mask_gather_load$a")
+OPTAB_D (scatter_store_optab, "scatter_store$a")
+OPTAB_D (mask_scatter_store_optab, "mask_scatter_store$a")
+
OPTAB_DC (vec_duplicate_optab, "vec_duplicate$a", VEC_DUPLICATE)
OPTAB_DC (vec_series_optab, "vec_series$a", VEC_SERIES)
-OPTAB_D (clastb_optab, "clastb$a")
OPTAB_D (mask_popcount_optab, "mask_popcount$a")
-OPTAB_D (firstfault_load_optab, "firstfault_load$a")
-OPTAB_D (read_nf_optab, "read_nf$a")
-OPTAB_D (write_nf_optab, "write_nf$a")
OPTAB_D (vec_shl_insert_optab, "vec_shl_insert_$a")
diff --git a/gcc/passes.def b/gcc/passes.def
index 8a0740716fc..55226f7b751 100644
--- a/gcc/passes.def
+++ b/gcc/passes.def
@@ -289,6 +289,7 @@ along with GCC; see the file COPYING3. If not see
NEXT_PASS (pass_parallelize_loops, false /* oacc_kernels_p */);
NEXT_PASS (pass_expand_omp_ssa);
NEXT_PASS (pass_ch_vect);
+ NEXT_PASS (pass_early_predcom);
NEXT_PASS (pass_if_conversion);
/* pass_vectorize must immediately follow pass_if_conversion.
Please do not add any other passes in between. */
diff --git a/gcc/poly-int-types.h b/gcc/poly-int-types.h
index 1c62ff88ff7..d681c374fc1 100644
--- a/gcc/poly-int-types.h
+++ b/gcc/poly-int-types.h
@@ -60,6 +60,18 @@ typedef poly_int<NUM_POLY_INT_COEFFS, widest_int> poly_widest_int;
of bytes in size. */
#define num_trailing_bits(X) force_get_misalignment (X, BITS_PER_UNIT)
+/* Round bit quantity X down to the nearest byte boundary.
+
+ This is safe because non-constant mode sizes must be a whole number
+ of bytes in size. */
+#define round_down_to_byte_boundary(X) force_align_down (X, BITS_PER_UNIT)
+
+/* Round bit quantity X up the nearest byte boundary.
+
+ This is safe because non-constant mode sizes must be a whole number
+ of bytes in size. */
+#define round_up_to_byte_boundary(X) force_align_up (X, BITS_PER_UNIT)
+
/* Return the size of an element in a vector of size SIZE, given that
the vector has NELTS elements. The return value is in the same units
as SIZE (either bits or bytes).
diff --git a/gcc/poly-int.h b/gcc/poly-int.h
index 73c0efd47e4..a5b68188178 100644
--- a/gcc/poly-int.h
+++ b/gcc/poly-int.h
@@ -138,7 +138,7 @@ struct poly_coeff_pair_traits
#undef RANK
};
-/* SFINAE class that makes T3 available as t if T2 can represent all the
+/* SFINAE class that makes T3 available as "type" if T2 can represent all the
values in T1. */
template<typename T1, typename T2, typename T3,
bool lossless_p = poly_coeff_pair_traits<T1, T2>::lossless_p>
@@ -146,7 +146,7 @@ struct if_lossless;
template<typename T1, typename T2, typename T3>
struct if_lossless<T1, T2, T3, true>
{
- typedef T3 t;
+ typedef T3 type;
};
/* poly_int_traits<T> describes an integer type T that might be polynomial
@@ -184,7 +184,7 @@ struct poly_int_traits<poly_int<N, C> > : poly_int_traits<poly_int_pod<N, C> >
{
};
-/* SFINAE class that makes T2 available as t if T1 is a non-polynomial
+/* SFINAE class that makes T2 available as "type" if T1 is a non-polynomial
type. */
template<typename T1, typename T2 = T1,
bool is_poly = poly_int_traits<T1>::is_poly>
@@ -192,10 +192,10 @@ struct if_nonpoly {};
template<typename T1, typename T2>
struct if_nonpoly<T1, T2, false>
{
- typedef T2 t;
+ typedef T2 type;
};
-/* SFINAE class that makes T3 available as t if both T1 and T2 are
+/* SFINAE class that makes T3 available as "type" if both T1 and T2 are
non-polynomial types. */
template<typename T1, typename T2, typename T3,
bool is_poly1 = poly_int_traits<T1>::is_poly,
@@ -204,23 +204,24 @@ struct if_nonpoly2 {};
template<typename T1, typename T2, typename T3>
struct if_nonpoly2<T1, T2, T3, false, false>
{
- typedef T3 t;
+ typedef T3 type;
};
-/* SFINAE class that makes T2 available as t if T1 is a polynomial type. */
+/* SFINAE class that makes T2 available as "type" if T1 is a polynomial
+ type. */
template<typename T1, typename T2 = T1,
bool is_poly = poly_int_traits<T1>::is_poly>
struct if_poly {};
template<typename T1, typename T2>
struct if_poly<T1, T2, true>
{
- typedef T2 t;
+ typedef T2 type;
};
/* poly_result<T1, T2> describes the result of an operation on two
types T1 and T2, where at least one of the types is polynomial:
- - poly_result<T1, T2>::t gives the result type for the operation.
+ - poly_result<T1, T2>::type gives the result type for the operation.
The intention is to provide normal C-like rules for integer ranks,
except that everything smaller than HOST_WIDE_INT promotes to
HOST_WIDE_INT.
@@ -228,7 +229,7 @@ struct if_poly<T1, T2, true>
- poly_result<T1, T2>::cast is the type to which an operand of type
T1 should be cast before doing the operation, to ensure that
the operation is done at the right precision. Casting to
- poly_result<T1, T2>::t would also work, but casting to this
+ poly_result<T1, T2>::type would also work, but casting to this
type is more efficient. */
template<typename T1, typename T2 = T1,
int result_kind = poly_coeff_pair_traits<T1, T2>::result_kind>
@@ -238,27 +239,27 @@ struct poly_result;
template<typename T1, typename T2>
struct poly_result<T1, T2, 0>
{
- typedef HOST_WIDE_INT t;
+ typedef HOST_WIDE_INT type;
/* T1 and T2 are primitive types, so cast values to T before operating
on them. */
- typedef t cast;
+ typedef type cast;
};
/* Promote pair to unsigned HOST_WIDE_INT. */
template<typename T1, typename T2>
struct poly_result<T1, T2, 1>
{
- typedef unsigned HOST_WIDE_INT t;
+ typedef unsigned HOST_WIDE_INT type;
/* T1 and T2 are primitive types, so cast values to T before operating
on them. */
- typedef t cast;
+ typedef type cast;
};
/* Use normal wide-int rules. */
template<typename T1, typename T2>
struct poly_result<T1, T2, 2>
{
- typedef WI_BINARY_RESULT (T1, T2) t;
+ typedef WI_BINARY_RESULT (T1, T2) type;
/* Don't cast values before operating on them; leave the wi:: routines
to handle promotion as necessary. */
typedef const T1 &cast;
@@ -267,18 +268,18 @@ struct poly_result<T1, T2, 2>
/* The coefficient type for the result of a binary operation on two
poly_ints, the first of which has coefficients of type C1 and the
second of which has coefficients of type C2. */
-#define POLY_POLY_COEFF(C1, C2) typename poly_result<C1, C2>::t
+#define POLY_POLY_COEFF(C1, C2) typename poly_result<C1, C2>::type
/* Enforce that T2 is non-polynomial and provide the cofficient type of
the result of a binary operation in which the first operand is a
poly_int with coefficients of type C1 and the second operand is
a constant of type T2. */
#define POLY_CONST_COEFF(C1, T2) \
- POLY_POLY_COEFF (C1, typename if_nonpoly<T2>::t)
+ POLY_POLY_COEFF (C1, typename if_nonpoly<T2>::type)
/* Likewise in reverse. */
#define CONST_POLY_COEFF(T1, C2) \
- POLY_POLY_COEFF (typename if_nonpoly<T1>::t, C2)
+ POLY_POLY_COEFF (typename if_nonpoly<T1>::type, C2)
/* The result type for a binary operation on poly_int<N, C1> and
poly_int<N, C2>. */
@@ -295,7 +296,8 @@ struct poly_result<T1, T2, 2>
/* Enforce that T1 and T2 are non-polynomial and provide the result type
for a binary operation on T1 and T2. */
#define CONST_CONST_RESULT(N, T1, T2) \
- POLY_POLY_COEFF (typename if_nonpoly<T1>::t, typename if_nonpoly<T2>::t)
+ POLY_POLY_COEFF (typename if_nonpoly<T1>::type, \
+ typename if_nonpoly<T2>::type)
/* The type to which a coefficient of type C1 should be cast before
using it in a binary operation with a coefficient of type C2. */
@@ -305,7 +307,7 @@ struct poly_result<T1, T2, 2>
and T2 can be polynomial or non-polynomial. */
#define POLY_BINARY_COEFF(T1, T2) \
typename poly_result<typename poly_int_traits<T1>::coeff_type, \
- typename poly_int_traits<T2>::coeff_type>::t
+ typename poly_int_traits<T2>::coeff_type>::type
/* The type to which an integer constant should be cast before
comparing it with T. */
@@ -339,27 +341,27 @@ public:
template<typename Ca>
poly_int_pod &operator = (const poly_int_pod<N, Ca> &);
template<typename Ca>
- typename if_nonpoly<Ca, poly_int_pod>::t &operator = (const Ca &);
+ typename if_nonpoly<Ca, poly_int_pod>::type &operator = (const Ca &);
template<typename Ca>
poly_int_pod &operator += (const poly_int_pod<N, Ca> &);
template<typename Ca>
- typename if_nonpoly<Ca, poly_int_pod>::t &operator += (const Ca &);
+ typename if_nonpoly<Ca, poly_int_pod>::type &operator += (const Ca &);
template<typename Ca>
poly_int_pod &operator -= (const poly_int_pod<N, Ca> &);
template<typename Ca>
- typename if_nonpoly<Ca, poly_int_pod>::t &operator -= (const Ca &);
+ typename if_nonpoly<Ca, poly_int_pod>::type &operator -= (const Ca &);
template<typename Ca>
- typename if_nonpoly<Ca, poly_int_pod>::t &operator *= (const Ca &);
+ typename if_nonpoly<Ca, poly_int_pod>::type &operator *= (const Ca &);
poly_int_pod &operator <<= (unsigned int);
bool is_constant () const;
template<typename T>
- typename if_lossless<T, C, bool>::t is_constant (T *) const;
+ typename if_lossless<T, C, bool>::type is_constant (T *) const;
C to_constant () const;
@@ -386,16 +388,14 @@ template<typename Ca>
inline poly_int_pod<N, C>&
poly_int_pod<N, C>::operator = (const poly_int_pod<N, Ca> &a)
{
- POLY_SET_COEFF (C, *this, 0, a.coeffs[0]);
- if (N >= 2)
- for (unsigned int i = 1; i < N; i++)
- POLY_SET_COEFF (C, *this, i, a.coeffs[i]);
+ for (unsigned int i = 0; i < N; i++)
+ POLY_SET_COEFF (C, *this, i, a.coeffs[i]);
return *this;
}
template<unsigned int N, typename C>
template<typename Ca>
-inline typename if_nonpoly<Ca, poly_int_pod<N, C> >::t &
+inline typename if_nonpoly<Ca, poly_int_pod<N, C> >::type &
poly_int_pod<N, C>::operator = (const Ca &a)
{
POLY_SET_COEFF (C, *this, 0, a);
@@ -410,16 +410,14 @@ template<typename Ca>
inline poly_int_pod<N, C>&
poly_int_pod<N, C>::operator += (const poly_int_pod<N, Ca> &a)
{
- this->coeffs[0] += a.coeffs[0];
- if (N >= 2)
- for (unsigned int i = 1; i < N; i++)
- this->coeffs[i] += a.coeffs[i];
+ for (unsigned int i = 0; i < N; i++)
+ this->coeffs[i] += a.coeffs[i];
return *this;
}
template<unsigned int N, typename C>
template<typename Ca>
-inline typename if_nonpoly<Ca, poly_int_pod<N, C> >::t &
+inline typename if_nonpoly<Ca, poly_int_pod<N, C> >::type &
poly_int_pod<N, C>::operator += (const Ca &a)
{
this->coeffs[0] += a;
@@ -431,16 +429,14 @@ template<typename Ca>
inline poly_int_pod<N, C>&
poly_int_pod<N, C>::operator -= (const poly_int_pod<N, Ca> &a)
{
- this->coeffs[0] -= a.coeffs[0];
- if (N >= 2)
- for (unsigned int i = 1; i < N; i++)
- this->coeffs[i] -= a.coeffs[i];
+ for (unsigned int i = 0; i < N; i++)
+ this->coeffs[i] -= a.coeffs[i];
return *this;
}
template<unsigned int N, typename C>
template<typename Ca>
-inline typename if_nonpoly<Ca, poly_int_pod<N, C> >::t &
+inline typename if_nonpoly<Ca, poly_int_pod<N, C> >::type &
poly_int_pod<N, C>::operator -= (const Ca &a)
{
this->coeffs[0] -= a;
@@ -449,13 +445,11 @@ poly_int_pod<N, C>::operator -= (const Ca &a)
template<unsigned int N, typename C>
template<typename Ca>
-inline typename if_nonpoly<Ca, poly_int_pod<N, C> >::t &
+inline typename if_nonpoly<Ca, poly_int_pod<N, C> >::type &
poly_int_pod<N, C>::operator *= (const Ca &a)
{
- this->coeffs[0] *= a;
- if (N >= 2)
- for (unsigned int i = 1; i < N; i++)
- this->coeffs[i] *= a;
+ for (unsigned int i = 0; i < N; i++)
+ this->coeffs[i] *= a;
return *this;
}
@@ -463,10 +457,8 @@ template<unsigned int N, typename C>
inline poly_int_pod<N, C>&
poly_int_pod<N, C>::operator <<= (unsigned int a)
{
- POLY_SET_COEFF (C, *this, 0, this->coeffs[0] << a);
- if (N >= 2)
- for (unsigned int i = 1; i < N; i++)
- POLY_SET_COEFF (C, *this, i, this->coeffs[i] << a);
+ for (unsigned int i = 0; i < N; i++)
+ this->coeffs[i] <<= a;
return *this;
}
@@ -488,7 +480,7 @@ poly_int_pod<N, C>::is_constant () const
template<unsigned int N, typename C>
template<typename T>
-inline typename if_lossless<T, C, bool>::t
+inline typename if_lossless<T, C, bool>::type
poly_int_pod<N, C>::is_constant (T *const_value) const
{
if (is_constant ())
@@ -634,20 +626,20 @@ public:
template<typename Ca>
poly_int &operator = (const poly_int_pod<N, Ca> &);
template<typename Ca>
- typename if_nonpoly<Ca, poly_int>::t &operator = (const Ca &);
+ typename if_nonpoly<Ca, poly_int>::type &operator = (const Ca &);
template<typename Ca>
poly_int &operator += (const poly_int_pod<N, Ca> &);
template<typename Ca>
- typename if_nonpoly<Ca, poly_int>::t &operator += (const Ca &);
+ typename if_nonpoly<Ca, poly_int>::type &operator += (const Ca &);
template<typename Ca>
poly_int &operator -= (const poly_int_pod<N, Ca> &);
template<typename Ca>
- typename if_nonpoly<Ca, poly_int>::t &operator -= (const Ca &);
+ typename if_nonpoly<Ca, poly_int>::type &operator -= (const Ca &);
template<typename Ca>
- typename if_nonpoly<Ca, poly_int>::t &operator *= (const Ca &);
+ typename if_nonpoly<Ca, poly_int>::type &operator *= (const Ca &);
poly_int &operator <<= (unsigned int);
};
@@ -697,16 +689,14 @@ template<typename Ca>
inline poly_int<N, C>&
poly_int<N, C>::operator = (const poly_int_pod<N, Ca> &a)
{
- this->coeffs[0] = a.coeffs[0];
- if (N >= 2)
- for (unsigned int i = 1; i < N; i++)
- this->coeffs[i] = a.coeffs[i];
+ for (unsigned int i = 0; i < N; i++)
+ this->coeffs[i] = a.coeffs[i];
return *this;
}
template<unsigned int N, typename C>
template<typename Ca>
-inline typename if_nonpoly<Ca, poly_int<N, C> >::t &
+inline typename if_nonpoly<Ca, poly_int<N, C> >::type &
poly_int<N, C>::operator = (const Ca &a)
{
this->coeffs[0] = a;
@@ -721,16 +711,14 @@ template<typename Ca>
inline poly_int<N, C>&
poly_int<N, C>::operator += (const poly_int_pod<N, Ca> &a)
{
- this->coeffs[0] += a.coeffs[0];
- if (N >= 2)
- for (unsigned int i = 1; i < N; i++)
- this->coeffs[i] += a.coeffs[i];
+ for (unsigned int i = 0; i < N; i++)
+ this->coeffs[i] += a.coeffs[i];
return *this;
}
template<unsigned int N, typename C>
template<typename Ca>
-inline typename if_nonpoly<Ca, poly_int<N, C> >::t &
+inline typename if_nonpoly<Ca, poly_int<N, C> >::type &
poly_int<N, C>::operator += (const Ca &a)
{
this->coeffs[0] += a;
@@ -742,16 +730,14 @@ template<typename Ca>
inline poly_int<N, C>&
poly_int<N, C>::operator -= (const poly_int_pod<N, Ca> &a)
{
- this->coeffs[0] -= a.coeffs[0];
- if (N >= 2)
- for (unsigned int i = 1; i < N; i++)
- this->coeffs[i] -= a.coeffs[i];
+ for (unsigned int i = 0; i < N; i++)
+ this->coeffs[i] -= a.coeffs[i];
return *this;
}
template<unsigned int N, typename C>
template<typename Ca>
-inline typename if_nonpoly<Ca, poly_int<N, C> >::t &
+inline typename if_nonpoly<Ca, poly_int<N, C> >::type &
poly_int<N, C>::operator -= (const Ca &a)
{
this->coeffs[0] -= a;
@@ -760,13 +746,11 @@ poly_int<N, C>::operator -= (const Ca &a)
template<unsigned int N, typename C>
template<typename Ca>
-inline typename if_nonpoly<Ca, poly_int<N, C> >::t &
+inline typename if_nonpoly<Ca, poly_int<N, C> >::type &
poly_int<N, C>::operator *= (const Ca &a)
{
- this->coeffs[0] *= a;
- if (N >= 2)
- for (unsigned int i = 1; i < N; i++)
- this->coeffs[i] *= a;
+ for (unsigned int i = 0; i < N; i++)
+ this->coeffs[i] *= a;
return *this;
}
@@ -774,24 +758,22 @@ template<unsigned int N, typename C>
inline poly_int<N, C>&
poly_int<N, C>::operator <<= (unsigned int a)
{
- this->coeffs[0] = this->coeffs[0] << a;
- if (N >= 2)
- for (unsigned int i = 1; i < N; i++)
- this->coeffs[i] = this->coeffs[i] << a;
+ for (unsigned int i = 0; i < N; i++)
+ this->coeffs[i] <<= a;
return *this;
}
/* Return true if every coefficient of A is in the inclusive range [B, C]. */
template<typename Ca, typename Cb, typename Cc>
-inline typename if_nonpoly<Ca, bool>::t
+inline typename if_nonpoly<Ca, bool>::type
coeffs_in_range_p (const Ca &a, const Cb &b, const Cc &c)
{
return a >= b && a <= c;
}
template<unsigned int N, typename Ca, typename Cb, typename Cc>
-inline typename if_nonpoly<Ca, bool>::t
+inline typename if_nonpoly<Ca, bool>::type
coeffs_in_range_p (const poly_int_pod<N, Ca> &a, const Cb &b, const Cc &c)
{
for (unsigned int i = 0; i < N; i++)
@@ -859,10 +841,8 @@ operator + (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
typedef POLY_CAST (Ca, Cb) NCa;
typedef POLY_POLY_COEFF (Ca, Cb) C;
poly_int<N, C> r;
- POLY_SET_COEFF (C, r, 0, NCa (a.coeffs[0]) + b.coeffs[0]);
- if (N >= 2)
- for (unsigned int i = 1; i < N; i++)
- POLY_SET_COEFF (C, r, i, NCa (a.coeffs[i]) + b.coeffs[i]);
+ for (unsigned int i = 0; i < N; i++)
+ POLY_SET_COEFF (C, r, i, NCa (a.coeffs[i]) + b.coeffs[i]);
return r;
}
@@ -960,10 +940,8 @@ operator - (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
typedef POLY_CAST (Ca, Cb) NCa;
typedef POLY_POLY_COEFF (Ca, Cb) C;
poly_int<N, C> r;
- POLY_SET_COEFF (C, r, 0, NCa (a.coeffs[0]) - b.coeffs[0]);
- if (N >= 2)
- for (unsigned int i = 1; i < N; i++)
- POLY_SET_COEFF (C, r, i, NCa (a.coeffs[i]) - b.coeffs[i]);
+ for (unsigned int i = 0; i < N; i++)
+ POLY_SET_COEFF (C, r, i, NCa (a.coeffs[i]) - b.coeffs[i]);
return r;
}
@@ -1061,10 +1039,8 @@ operator - (const poly_int_pod<N, Ca> &a)
typedef POLY_CAST (Ca, Ca) NCa;
typedef POLY_POLY_COEFF (Ca, Ca) C;
poly_int<N, C> r;
- POLY_SET_COEFF (C, r, 0, -NCa (a.coeffs[0]));
- if (N >= 2)
- for (unsigned int i = 1; i < N; i++)
- POLY_SET_COEFF (C, r, i, -NCa (a.coeffs[i]));
+ for (unsigned int i = 0; i < N; i++)
+ POLY_SET_COEFF (C, r, i, -NCa (a.coeffs[i]));
return r;
}
@@ -1115,10 +1091,8 @@ operator * (const poly_int_pod<N, Ca> &a, const Cb &b)
typedef POLY_CAST (Ca, Cb) NCa;
typedef POLY_CONST_COEFF (Ca, Cb) C;
poly_int<N, C> r;
- POLY_SET_COEFF (C, r, 0, NCa (a.coeffs[0]) * b);
- if (N >= 2)
- for (unsigned int i = 1; i < N; i++)
- POLY_SET_COEFF (C, r, i, NCa (a.coeffs[i]) * b);
+ for (unsigned int i = 0; i < N; i++)
+ POLY_SET_COEFF (C, r, i, NCa (a.coeffs[i]) * b);
return r;
}
@@ -1129,10 +1103,8 @@ operator * (const Ca &a, const poly_int_pod<N, Cb> &b)
typedef POLY_CAST (Ca, Cb) NCa;
typedef CONST_POLY_COEFF (Ca, Cb) C;
poly_int<N, C> r;
- POLY_SET_COEFF (C, r, 0, NCa (a) * b.coeffs[0]);
- if (N >= 2)
- for (unsigned int i = 1; i < N; i++)
- POLY_SET_COEFF (C, r, i, NCa (a) * b.coeffs[i]);
+ for (unsigned int i = 0; i < N; i++)
+ POLY_SET_COEFF (C, r, i, NCa (a) * b.coeffs[i]);
return r;
}
@@ -1186,10 +1158,8 @@ operator << (const poly_int_pod<N, Ca> &a, const Cb &b)
typedef POLY_CAST (Ca, Ca) NCa;
typedef POLY_POLY_COEFF (Ca, Ca) C;
poly_int<N, C> r;
- POLY_SET_COEFF (C, r, 0, NCa (a.coeffs[0]) << b);
- if (N >= 2)
- for (unsigned int i = 1; i < N; i++)
- POLY_SET_COEFF (C, r, i, NCa (a.coeffs[i]) << b);
+ for (unsigned int i = 0; i < N; i++)
+ POLY_SET_COEFF (C, r, i, NCa (a.coeffs[i]) << b);
return r;
}
@@ -1262,7 +1232,7 @@ may_eq (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
}
template<unsigned int N, typename Ca, typename Cb>
-inline typename if_nonpoly<Cb, bool>::t
+inline typename if_nonpoly<Cb, bool>::type
may_eq (const poly_int_pod<N, Ca> &a, const Cb &b)
{
STATIC_ASSERT (N <= 2);
@@ -1272,7 +1242,7 @@ may_eq (const poly_int_pod<N, Ca> &a, const Cb &b)
}
template<unsigned int N, typename Ca, typename Cb>
-inline typename if_nonpoly<Ca, bool>::t
+inline typename if_nonpoly<Ca, bool>::type
may_eq (const Ca &a, const poly_int_pod<N, Cb> &b)
{
STATIC_ASSERT (N <= 2);
@@ -1282,7 +1252,7 @@ may_eq (const Ca &a, const poly_int_pod<N, Cb> &b)
}
template<typename Ca, typename Cb>
-inline typename if_nonpoly2<Ca, Cb, bool>::t
+inline typename if_nonpoly2<Ca, Cb, bool>::type
may_eq (const Ca &a, const Cb &b)
{
return a == b;
@@ -1302,7 +1272,7 @@ may_ne (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
}
template<unsigned int N, typename Ca, typename Cb>
-inline typename if_nonpoly<Cb, bool>::t
+inline typename if_nonpoly<Cb, bool>::type
may_ne (const poly_int_pod<N, Ca> &a, const Cb &b)
{
if (N >= 2)
@@ -1313,7 +1283,7 @@ may_ne (const poly_int_pod<N, Ca> &a, const Cb &b)
}
template<unsigned int N, typename Ca, typename Cb>
-inline typename if_nonpoly<Ca, bool>::t
+inline typename if_nonpoly<Ca, bool>::type
may_ne (const Ca &a, const poly_int_pod<N, Cb> &b)
{
if (N >= 2)
@@ -1324,7 +1294,7 @@ may_ne (const Ca &a, const poly_int_pod<N, Cb> &b)
}
template<typename Ca, typename Cb>
-inline typename if_nonpoly2<Ca, Cb, bool>::t
+inline typename if_nonpoly2<Ca, Cb, bool>::type
may_ne (const Ca &a, const Cb &b)
{
return a != b;
@@ -1351,7 +1321,7 @@ may_le (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
}
template<unsigned int N, typename Ca, typename Cb>
-inline typename if_nonpoly<Cb, bool>::t
+inline typename if_nonpoly<Cb, bool>::type
may_le (const poly_int_pod<N, Ca> &a, const Cb &b)
{
if (N >= 2)
@@ -1362,7 +1332,7 @@ may_le (const poly_int_pod<N, Ca> &a, const Cb &b)
}
template<unsigned int N, typename Ca, typename Cb>
-inline typename if_nonpoly<Ca, bool>::t
+inline typename if_nonpoly<Ca, bool>::type
may_le (const Ca &a, const poly_int_pod<N, Cb> &b)
{
if (N >= 2)
@@ -1373,7 +1343,7 @@ may_le (const Ca &a, const poly_int_pod<N, Cb> &b)
}
template<typename Ca, typename Cb>
-inline typename if_nonpoly2<Ca, Cb, bool>::t
+inline typename if_nonpoly2<Ca, Cb, bool>::type
may_le (const Ca &a, const Cb &b)
{
return a <= b;
@@ -1393,7 +1363,7 @@ may_lt (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
}
template<unsigned int N, typename Ca, typename Cb>
-inline typename if_nonpoly<Cb, bool>::t
+inline typename if_nonpoly<Cb, bool>::type
may_lt (const poly_int_pod<N, Ca> &a, const Cb &b)
{
if (N >= 2)
@@ -1404,7 +1374,7 @@ may_lt (const poly_int_pod<N, Ca> &a, const Cb &b)
}
template<unsigned int N, typename Ca, typename Cb>
-inline typename if_nonpoly<Ca, bool>::t
+inline typename if_nonpoly<Ca, bool>::type
may_lt (const Ca &a, const poly_int_pod<N, Cb> &b)
{
if (N >= 2)
@@ -1415,7 +1385,7 @@ may_lt (const Ca &a, const poly_int_pod<N, Cb> &b)
}
template<typename Ca, typename Cb>
-inline typename if_nonpoly2<Ca, Cb, bool>::t
+inline typename if_nonpoly2<Ca, Cb, bool>::type
may_lt (const Ca &a, const Cb &b)
{
return a < b;
@@ -1975,7 +1945,7 @@ known_alignment (const poly_int_pod<N, Ca> &a)
result in RES if so. */
template<unsigned int N, typename Ca, typename Cb, typename Cr>
-inline typename if_nonpoly<Cb, bool>::t
+inline typename if_nonpoly<Cb, bool>::type
can_ior_p (const poly_int_pod<N, Ca> &a, Cb b, Cr *result)
{
/* Coefficients 1 and above must be a multiple of something greater
@@ -1994,7 +1964,7 @@ can_ior_p (const poly_int_pod<N, Ca> &a, Cb b, Cr *result)
multiple in *MULTIPLE if so. */
template<unsigned int N, typename Ca, typename Cb, typename Cm>
-inline typename if_nonpoly<Cb, bool>::t
+inline typename if_nonpoly<Cb, bool>::type
constant_multiple_p (const poly_int_pod<N, Ca> &a, Cb b, Cm *multiple)
{
typedef POLY_CAST (Ca, Cb) NCa;
@@ -2009,7 +1979,7 @@ constant_multiple_p (const poly_int_pod<N, Ca> &a, Cb b, Cm *multiple)
}
template<unsigned int N, typename Ca, typename Cb, typename Cm>
-inline typename if_nonpoly<Ca, bool>::t
+inline typename if_nonpoly<Ca, bool>::type
constant_multiple_p (Ca a, const poly_int_pod<N, Cb> &b, Cm *multiple)
{
typedef POLY_CAST (Ca, Cb) NCa;
@@ -2054,7 +2024,7 @@ constant_multiple_p (const poly_int_pod<N, Ca> &a,
/* Return true if A is a multiple of B. */
template<typename Ca, typename Cb>
-inline typename if_nonpoly2<Ca, Cb, bool>::t
+inline typename if_nonpoly2<Ca, Cb, bool>::type
multiple_p (Ca a, Cb b)
{
return a % b != 0;
@@ -2063,7 +2033,7 @@ multiple_p (Ca a, Cb b)
/* Return true if A is a (polynomial) multiple of B. */
template<unsigned int N, typename Ca, typename Cb>
-inline typename if_nonpoly<Cb, bool>::t
+inline typename if_nonpoly<Cb, bool>::type
multiple_p (const poly_int_pod<N, Ca> &a, Cb b)
{
for (unsigned int i = 0; i < N; ++i)
@@ -2075,7 +2045,7 @@ multiple_p (const poly_int_pod<N, Ca> &a, Cb b)
/* Return true if A is a (constant) multiple of B. */
template<unsigned int N, typename Ca, typename Cb>
-inline typename if_nonpoly<Ca, bool>::t
+inline typename if_nonpoly<Ca, bool>::type
multiple_p (Ca a, const poly_int_pod<N, Cb> &b)
{
typedef POLY_INT_TYPE (Ca) int_type;
@@ -2102,7 +2072,7 @@ multiple_p (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
multiple in *MULTIPLE if so. */
template<typename Ca, typename Cb, typename Cm>
-inline typename if_nonpoly2<Ca, Cb, bool>::t
+inline typename if_nonpoly2<Ca, Cb, bool>::type
multiple_p (Ca a, Cb b, Cm *multiple)
{
if (a % b != 0)
@@ -2115,7 +2085,7 @@ multiple_p (Ca a, Cb b, Cm *multiple)
multiple in *MULTIPLE if so. */
template<unsigned int N, typename Ca, typename Cb, typename Cm>
-inline typename if_nonpoly<Cb, bool>::t
+inline typename if_nonpoly<Cb, bool>::type
multiple_p (const poly_int_pod<N, Ca> &a, Cb b, poly_int_pod<N, Cm> *multiple)
{
if (!multiple_p (a, b))
@@ -2129,7 +2099,7 @@ multiple_p (const poly_int_pod<N, Ca> &a, Cb b, poly_int_pod<N, Cm> *multiple)
storing the multiple in *MULTIPLE if so. */
template<unsigned int N, typename Ca, typename Cb, typename Cm>
-inline typename if_nonpoly<Ca, bool>::t
+inline typename if_nonpoly<Ca, bool>::type
multiple_p (Ca a, const poly_int_pod<N, Cb> &b, Cm *multiple)
{
typedef POLY_CAST (Ca, Cb) NCa;
@@ -2206,7 +2176,7 @@ exact_div (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
Store the value Q in *QUOTIENT if so. */
template<unsigned int N, typename Ca, typename Cb, typename Cq>
-inline typename if_nonpoly2<Cb, Cq, bool>::t
+inline typename if_nonpoly2<Cb, Cq, bool>::type
can_div_trunc_p (const poly_int_pod<N, Ca> &a, Cb b, Cq *quotient)
{
typedef POLY_CAST (Ca, Cb) NCa;
@@ -2222,7 +2192,7 @@ can_div_trunc_p (const poly_int_pod<N, Ca> &a, Cb b, Cq *quotient)
}
template<unsigned int N, typename Ca, typename Cb, typename Cq>
-inline typename if_nonpoly<Cq, bool>::t
+inline typename if_nonpoly<Cq, bool>::type
can_div_trunc_p (const poly_int_pod<N, Ca> &a,
const poly_int_pod<N, Cb> &b,
Cq *quotient)
@@ -2330,7 +2300,7 @@ can_div_trunc_p (const poly_int_pod<N, Ca> &a,
/* Likewise, but also store r in *REMAINDER. */
template<unsigned int N, typename Ca, typename Cb, typename Cq, typename Cr>
-inline typename if_nonpoly<Cq, bool>::t
+inline typename if_nonpoly<Cq, bool>::type
can_div_trunc_p (const poly_int_pod<N, Ca> &a,
const poly_int_pod<N, Cb> &b,
Cq *quotient, Cr *remainder)
@@ -2350,7 +2320,7 @@ can_div_trunc_p (const poly_int_pod<N, Ca> &a,
Store the value q in *QUOTIENT if so. */
template<unsigned int N, typename Ca, typename Cb, typename Cq>
-inline typename if_nonpoly<Cb, bool>::t
+inline typename if_nonpoly<Cb, bool>::type
can_div_trunc_p (const poly_int_pod<N, Ca> &a, Cb b,
poly_int_pod<N, Cq> *quotient)
{
@@ -2366,7 +2336,7 @@ can_div_trunc_p (const poly_int_pod<N, Ca> &a, Cb b,
/* Likewise, but also store R in *REMAINDER. */
template<unsigned int N, typename Ca, typename Cb, typename Cq, typename Cr>
-inline typename if_nonpoly<Cb, bool>::t
+inline typename if_nonpoly<Cb, bool>::type
can_div_trunc_p (const poly_int_pod<N, Ca> &a, Cb b,
poly_int_pod<N, Cq> *quotient, Cr *remainder)
{
@@ -2385,7 +2355,7 @@ can_div_trunc_p (const poly_int_pod<N, Ca> &a, Cb b,
Store the value Q in *QUOTIENT if so. */
template<unsigned int N, typename Ca, typename Cb, typename Cq>
-inline typename if_nonpoly<Cq, bool>::t
+inline typename if_nonpoly<Cq, bool>::type
can_div_away_from_zero_p (const poly_int_pod<N, Ca> &a,
const poly_int_pod<N, Cb> &b,
Cq *quotient)
@@ -2455,7 +2425,7 @@ template<typename T1, typename T2, typename T3>
struct poly_span_traits<T1, T2, T3, HOST_WIDE_INT, unsigned HOST_WIDE_INT>
{
template<typename T>
- static typename if_nonpoly<T, unsigned HOST_WIDE_INT>::t
+ static typename if_nonpoly<T, unsigned HOST_WIDE_INT>::type
cast (const T &x) { return x; }
template<unsigned int N, typename T>
@@ -2580,7 +2550,7 @@ known_subrange_p (const T1 &pos1, const T2 &size1,
range open-ended. */
template<typename T>
-inline typename if_nonpoly<T, bool>::t
+inline typename if_nonpoly<T, bool>::type
endpoint_representable_p (const T &pos, const T &size)
{
return (!known_size_p (size)
diff --git a/gcc/predict.c b/gcc/predict.c
index cf42ccbd903..f490ec116ad 100644
--- a/gcc/predict.c
+++ b/gcc/predict.c
@@ -121,32 +121,6 @@ static const struct predictor_info predictor_info[]= {
};
#undef DEF_PREDICTOR
-/* Return TRUE if frequency FREQ is considered to be hot. */
-
-static inline bool
-maybe_hot_frequency_p (struct function *fun, int freq)
-{
- struct cgraph_node *node = cgraph_node::get (fun->decl);
- if (!profile_info || profile_status_for_fn (fun) != PROFILE_READ)
- {
- if (node->frequency == NODE_FREQUENCY_UNLIKELY_EXECUTED)
- return false;
- if (node->frequency == NODE_FREQUENCY_HOT)
- return true;
- }
- if (profile_status_for_fn (fun) == PROFILE_ABSENT)
- return true;
- if (node->frequency == NODE_FREQUENCY_EXECUTED_ONCE
- && freq < (ENTRY_BLOCK_PTR_FOR_FN (fun)->count.to_frequency (cfun) * 2 / 3))
- return false;
- if (PARAM_VALUE (HOT_BB_FREQUENCY_FRACTION) == 0)
- return false;
- if (freq * PARAM_VALUE (HOT_BB_FREQUENCY_FRACTION)
- < ENTRY_BLOCK_PTR_FOR_FN (fun)->count.to_frequency (cfun))
- return false;
- return true;
-}
-
static gcov_type min_count = -1;
/* Determine the threshold for hot BB counts. */
@@ -179,10 +153,30 @@ maybe_hot_count_p (struct function *fun, profile_count count)
{
if (!count.initialized_p ())
return true;
- if (!count.ipa_p ())
- return maybe_hot_frequency_p (fun, count.to_frequency (fun));
if (count.ipa () == profile_count::zero ())
return false;
+ if (!count.ipa_p ())
+ {
+ struct cgraph_node *node = cgraph_node::get (fun->decl);
+ if (!profile_info || profile_status_for_fn (fun) != PROFILE_READ)
+ {
+ if (node->frequency == NODE_FREQUENCY_UNLIKELY_EXECUTED)
+ return false;
+ if (node->frequency == NODE_FREQUENCY_HOT)
+ return true;
+ }
+ if (profile_status_for_fn (fun) == PROFILE_ABSENT)
+ return true;
+ if (node->frequency == NODE_FREQUENCY_EXECUTED_ONCE
+ && count < (ENTRY_BLOCK_PTR_FOR_FN (fun)->count.apply_scale (2, 3)))
+ return false;
+ if (PARAM_VALUE (HOT_BB_FREQUENCY_FRACTION) == 0)
+ return false;
+ if (count.apply_scale (PARAM_VALUE (HOT_BB_FREQUENCY_FRACTION), 1)
+ < ENTRY_BLOCK_PTR_FOR_FN (fun)->count)
+ return false;
+ return true;
+ }
/* Code executed at most once is not hot. */
if (count <= MAX (profile_info ? profile_info->runs : 1, 1))
return false;
@@ -3222,11 +3216,10 @@ drop_profile (struct cgraph_node *node, profile_count call_count)
pop_cfun ();
struct cgraph_edge *e;
- for (e = node->callees; e; e = e->next_caller)
- {
- e->frequency = compute_call_stmt_bb_frequency (e->caller->decl,
- gimple_bb (e->call_stmt));
- }
+ for (e = node->callees; e; e = e->next_callee)
+ e->count = gimple_bb (e->call_stmt)->count;
+ for (e = node->indirect_calls; e; e = e->next_callee)
+ e->count = gimple_bb (e->call_stmt)->count;
profile_status_for_fn (fn)
= (flag_guess_branch_prob ? PROFILE_GUESSED : PROFILE_ABSENT);
@@ -3318,18 +3311,17 @@ handle_missing_profiles (void)
Return nonzero iff there was any nonzero execution count. */
bool
-counts_to_freqs (void)
+update_max_bb_count (void)
{
profile_count true_count_max = profile_count::uninitialized ();
basic_block bb;
FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
- if (!(bb->count < true_count_max))
- true_count_max = true_count_max.max (bb->count);
+ true_count_max = true_count_max.max (bb->count);
cfun->cfg->count_max = true_count_max;
- return true_count_max.nonzero_p ();
+ return true_count_max.ipa ().nonzero_p ();
}
/* Return true if function is likely to be expensive, so there is no point to
@@ -3340,30 +3332,37 @@ counts_to_freqs (void)
bool
expensive_function_p (int threshold)
{
- unsigned int sum = 0;
basic_block bb;
- unsigned int limit;
/* We can not compute accurately for large thresholds due to scaled
frequencies. */
gcc_assert (threshold <= BB_FREQ_MAX);
- /* Frequencies are out of range. This either means that function contains
- internal loop executing more than BB_FREQ_MAX times or profile feedback
- is available and function has not been executed at all. */
- if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->count.to_frequency (cfun) == 0)
+ /* If profile was scaled in a way entry block has count 0, then the function
+ is deifnitly taking a lot of time. */
+ if (!ENTRY_BLOCK_PTR_FOR_FN (cfun)->count.nonzero_p ())
return true;
/* Maximally BB_FREQ_MAX^2 so overflow won't happen. */
- limit = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count.to_frequency (cfun) * threshold;
+ profile_count limit = ENTRY_BLOCK_PTR_FOR_FN
+ (cfun)->count.apply_scale (threshold, 1);
+ profile_count sum = profile_count::zero ();
FOR_EACH_BB_FN (bb, cfun)
{
rtx_insn *insn;
+ if (!bb->count.initialized_p ())
+ {
+ if (dump_file)
+ fprintf (dump_file, "Function is considered expensive because"
+ " count of bb %i is not initialized\n", bb->index);
+ return true;
+ }
+
FOR_BB_INSNS (bb, insn)
if (active_insn_p (insn))
{
- sum += bb->count.to_frequency (cfun);
+ sum += bb->count;
if (sum > limit)
return true;
}
@@ -3523,7 +3522,7 @@ estimate_bb_frequencies (bool force)
determine_unlikely_bbs ();
if (force || profile_status_for_fn (cfun) != PROFILE_READ
- || !counts_to_freqs ())
+ || !update_max_bb_count ())
{
static int real_values_initialized = 0;
@@ -3875,7 +3874,7 @@ rebuild_frequencies (void)
loop_optimizer_finalize ();
}
else if (profile_status_for_fn (cfun) == PROFILE_READ)
- counts_to_freqs ();
+ update_max_bb_count ();
else
gcc_unreachable ();
timevar_pop (TV_REBUILD_FREQUENCIES);
diff --git a/gcc/predict.h b/gcc/predict.h
index 1b73ae28a49..24c604f5abf 100644
--- a/gcc/predict.h
+++ b/gcc/predict.h
@@ -89,7 +89,7 @@ extern void guess_outgoing_edge_probabilities (basic_block);
extern void tree_guess_outgoing_edge_probabilities (basic_block);
extern void tree_estimate_probability (bool);
extern void handle_missing_profiles (void);
-extern bool counts_to_freqs (void);
+extern bool update_max_bb_count (void);
extern bool expensive_function_p (int);
extern void estimate_bb_frequencies (bool);
extern void compute_function_frequency (void);
diff --git a/gcc/prefix.c b/gcc/prefix.c
index b40e9c48a0d..ae098589279 100644
--- a/gcc/prefix.c
+++ b/gcc/prefix.c
@@ -199,7 +199,7 @@ translate_name (char *name)
;
key = (char *) alloca (keylen + 1);
- strncpy (key, &name[1], keylen);
+ memcpy (key, &name[1], keylen);
key[keylen] = 0;
if (code == '@')
diff --git a/gcc/profile-count.c b/gcc/profile-count.c
index d7031404645..51c3b74fefa 100644
--- a/gcc/profile-count.c
+++ b/gcc/profile-count.c
@@ -31,6 +31,7 @@ along with GCC; see the file COPYING3. If not see
#include "data-streamer.h"
#include "cgraph.h"
#include "wide-int.h"
+#include "sreal.h"
/* Dump THIS to F. */
@@ -255,3 +256,55 @@ profile_count::to_cgraph_frequency (profile_count entry_bb_count) const
return CGRAPH_FREQ_MAX;
return MIN (scale, CGRAPH_FREQ_MAX);
}
+
+/* Return THIS/IN as sreal value. */
+
+sreal
+profile_count::to_sreal_scale (profile_count in, bool *known) const
+{
+ if (!initialized_p ())
+ {
+ if (known)
+ *known = false;
+ return CGRAPH_FREQ_BASE;
+ }
+ if (known)
+ *known = true;
+ if (*this == profile_count::zero ())
+ return 0;
+ gcc_checking_assert (in.initialized_p ());
+
+ if (!in.m_val)
+ {
+ if (!m_val)
+ return 1;
+ return m_val * 4;
+ }
+ return (sreal)m_val / (sreal)in.m_val;
+}
+
+/* We want to scale profile across function boundary from NUM to DEN.
+ Take care of the side case when DEN is zeros. We still want to behave
+ sanely here which means
+ - scale to profile_count::zero () if NUM is profile_count::zero
+ - do not affect anything if NUM == DEN
+ - preserve counter value but adjust quality in other cases. */
+
+void
+profile_count::adjust_for_ipa_scaling (profile_count *num,
+ profile_count *den)
+{
+ /* Scaling is no-op if NUM and DEN are the same. */
+ if (*num == *den)
+ return;
+ /* Scaling to zero is always zeor. */
+ if (*num == profile_count::zero ())
+ return;
+ /* If den is non-zero we are safe. */
+ if (den->force_nonzero () == *den)
+ return;
+ /* Force both to non-zero so we do not push profiles to 0 when
+ both num == 0 and den == 0. */
+ *den = den->force_nonzero ();
+ *num = num->force_nonzero ();
+}
diff --git a/gcc/profile-count.h b/gcc/profile-count.h
index d793d11c830..90d1bc747ee 100644
--- a/gcc/profile-count.h
+++ b/gcc/profile-count.h
@@ -601,6 +601,8 @@ public:
*/
+class sreal;
+
class GTY(()) profile_count
{
/* Use 62bit to hold basic block counters. Should be at least
@@ -949,9 +951,9 @@ public:
return num;
if (!initialized_p () || !num.initialized_p () || !den.initialized_p ())
return profile_count::uninitialized ();
- gcc_checking_assert (den.m_val);
if (num == den)
return *this;
+ gcc_checking_assert (den.m_val);
profile_count ret;
uint64_t val;
@@ -1034,6 +1036,7 @@ public:
int to_frequency (struct function *fun) const;
int to_cgraph_frequency (profile_count entry_bb_count) const;
+ sreal to_sreal_scale (profile_count in, bool *known = NULL) const;
/* Output THIS to F. */
void dump (FILE *f) const;
@@ -1044,6 +1047,11 @@ public:
/* Return true if THIS is known to differ significantly from OTHER. */
bool differs_from_p (profile_count other) const;
+ /* We want to scale profile across function boundary from NUM to DEN.
+ Take care of the side case when NUM and DEN are zeros of incompatible
+ kinds. */
+ static void adjust_for_ipa_scaling (profile_count *num, profile_count *den);
+
/* LTO streaming support. */
static profile_count stream_in (struct lto_input_block *);
void stream_out (struct output_block *);
diff --git a/gcc/profile.c b/gcc/profile.c
index 2b30a9e6754..11170066cae 100644
--- a/gcc/profile.c
+++ b/gcc/profile.c
@@ -497,7 +497,11 @@ compute_branch_probabilities (unsigned cfg_checksum, unsigned lineno_checksum)
/* Very simple sanity checks so we catch bugs in our profiling code. */
if (!profile_info)
- return;
+ {
+ if (dump_file)
+ fprintf (dump_file, "Profile info is missing; giving up\n");
+ return;
+ }
bb_gcov_counts.safe_grow_cleared (last_basic_block_for_fn (cfun));
edge_gcov_counts = new hash_map<edge,gcov_type>;
@@ -805,7 +809,7 @@ compute_branch_probabilities (unsigned cfg_checksum, unsigned lineno_checksum)
delete edge_gcov_counts;
edge_gcov_counts = NULL;
- counts_to_freqs ();
+ update_max_bb_count ();
if (dump_file)
{
diff --git a/gcc/reg-stack.c b/gcc/reg-stack.c
index 83fc4762671..4f67a7bfa50 100644
--- a/gcc/reg-stack.c
+++ b/gcc/reg-stack.c
@@ -2954,11 +2954,6 @@ better_edge (edge e1, edge e2)
if (!e1)
return e2;
- if (EDGE_FREQUENCY (e1) > EDGE_FREQUENCY (e2))
- return e1;
- if (EDGE_FREQUENCY (e1) < EDGE_FREQUENCY (e2))
- return e2;
-
if (e1->count () > e2->count ())
return e1;
if (e1->count () < e2->count ())
diff --git a/gcc/sbitmap.h b/gcc/sbitmap.h
index a5ff0685e43..3c58bf01fdc 100644
--- a/gcc/sbitmap.h
+++ b/gcc/sbitmap.h
@@ -171,8 +171,6 @@ static inline void
bmp_iter_set_init (sbitmap_iterator *i, const_sbitmap bmp,
unsigned int min, unsigned *bit_no ATTRIBUTE_UNUSED)
{
- bitmap_check_index (bmp, min);
-
i->word_num = min / (unsigned int) SBITMAP_ELT_BITS;
i->bit_num = min;
i->size = bmp->size;
diff --git a/gcc/shrink-wrap.c b/gcc/shrink-wrap.c
index 0e4ff6cd46a..ce2ddfc09f7 100644
--- a/gcc/shrink-wrap.c
+++ b/gcc/shrink-wrap.c
@@ -880,19 +880,18 @@ try_shrink_wrapping (edge *entry_edge, rtx_insn *prologue_seq)
the correct answer for reducible flow graphs; for irreducible flow graphs
our profile is messed up beyond repair anyway. */
- gcov_type num = 0;
- gcov_type den = 0;
+ profile_count num = profile_count::zero ();
+ profile_count den = profile_count::zero ();
FOR_EACH_EDGE (e, ei, pro->preds)
if (!dominated_by_p (CDI_DOMINATORS, e->src, pro))
{
- num += EDGE_FREQUENCY (e);
- den += e->src->count.to_frequency (cfun);
+ if (e->count ().initialized_p ())
+ num += e->count ();
+ if (e->src->count.initialized_p ())
+ den += e->src->count;
}
- if (den == 0)
- den = 1;
-
/* All is okay, so do it. */
crtl->shrink_wrapped = true;
@@ -919,8 +918,9 @@ try_shrink_wrapping (edge *entry_edge, rtx_insn *prologue_seq)
if (dump_file)
fprintf (dump_file, "Duplicated %d to %d\n", bb->index, dup->index);
-
- bb->count = bb->count.apply_scale (num, den);
+
+ if (num == profile_count::zero () || den.nonzero_p ())
+ bb->count = bb->count.apply_scale (num, den);
dup->count -= bb->count;
}
diff --git a/gcc/simplify-rtx.c b/gcc/simplify-rtx.c
index 212b5068cd0..a56ee29c11f 100644
--- a/gcc/simplify-rtx.c
+++ b/gcc/simplify-rtx.c
@@ -5836,6 +5836,59 @@ simplify_ternary_operation (enum rtx_code code, machine_mode mode,
return op1;
}
}
+ /* Replace (vec_merge (vec_duplicate (X)) (const_vector [A, B])
+ (const_int N))
+ with (vec_concat (X) (B)) if N == 1 or
+ (vec_concat (A) (X)) if N == 2. */
+ if (GET_CODE (op0) == VEC_DUPLICATE
+ && GET_CODE (op1) == CONST_VECTOR
+ && CONST_VECTOR_NUNITS (op1) == 2
+ && must_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
+ && IN_RANGE (sel, 1, 2))
+ {
+ rtx newop0 = XEXP (op0, 0);
+ rtx newop1 = CONST_VECTOR_ELT (op1, 2 - sel);
+ if (sel == 2)
+ std::swap (newop0, newop1);
+ return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
+ }
+ /* Replace (vec_merge (vec_duplicate x) (vec_concat (y) (z)) (const_int N))
+ with (vec_concat x z) if N == 1, or (vec_concat y x) if N == 2.
+ Only applies for vectors of two elements. */
+ if (GET_CODE (op0) == VEC_DUPLICATE
+ && GET_CODE (op1) == VEC_CONCAT
+ && must_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
+ && must_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
+ && IN_RANGE (sel, 1, 2))
+ {
+ rtx newop0 = XEXP (op0, 0);
+ rtx newop1 = XEXP (op1, 2 - sel);
+ rtx otherop = XEXP (op1, sel - 1);
+ if (sel == 2)
+ std::swap (newop0, newop1);
+ /* Don't want to throw away the other part of the vec_concat if
+ it has side-effects. */
+ if (!side_effects_p (otherop))
+ return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
+ }
+
+ /* Replace (vec_merge (vec_duplicate x) (vec_duplicate y)
+ (const_int n))
+ with (vec_concat x y) or (vec_concat y x) depending on value
+ of N. */
+ if (GET_CODE (op0) == VEC_DUPLICATE
+ && GET_CODE (op1) == VEC_DUPLICATE
+ && must_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
+ && must_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
+ && IN_RANGE (sel, 1, 2))
+ {
+ rtx newop0 = XEXP (op0, 0);
+ rtx newop1 = XEXP (op1, 0);
+ if (sel == 2)
+ std::swap (newop0, newop1);
+
+ return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
+ }
}
if (rtx_equal_p (op0, op1)
diff --git a/gcc/stor-layout.c b/gcc/stor-layout.c
index bd7d44471d8..008fd736abd 100644
--- a/gcc/stor-layout.c
+++ b/gcc/stor-layout.c
@@ -40,6 +40,7 @@ along with GCC; see the file COPYING3. If not see
#include "tree-inline.h"
#include "dumpfile.h"
#include "gimplify.h"
+#include "attribs.h"
#include "debug.h"
/* Data type for the expressions representing sizes of data types.
@@ -1132,7 +1133,7 @@ handle_warn_if_not_align (tree field, unsigned int record_align)
if (!warn_if_not_align
&& warn_packed_not_aligned
- && TYPE_USER_ALIGN (type))
+ && lookup_attribute ("aligned", TYPE_ATTRIBUTES (type)))
{
warn_if_not_align = TYPE_ALIGN (type);
opt_w = OPT_Wpacked_not_aligned;
diff --git a/gcc/target.def b/gcc/target.def
index 6593df14ee0..7347c67e87e 100644
--- a/gcc/target.def
+++ b/gcc/target.def
@@ -3415,14 +3415,15 @@ DEFHOOK
(array_mode,
"Return the mode that GCC should use for an array that has\n\
@var{nelems} elements, with each element having mode @var{mode}.\n\
-Return @code{BLKmode} if an integer mode of the appropriate size should\n\
-be used; it is the caller's reponsibility to find such a mode. Usually the\n\
-search for the integer mode is limited to @code{MAX_FIXED_MODE_SIZE},\n\
-but the @code{TARGET_ARRAY_MODE_SUPPORTED_P} hook allows a larger\n\
-mode to be used in specific cases.\n\
+Return no mode if the target has no special requirements. In the\n\
+latter case, GCC looks for an integer mode of the appropriate size\n\
+if available and uses BLKmode otherwise. Usually the search for the\n\
+integer mode is limited to @code{MAX_FIXED_MODE_SIZE}, but the\n\
+@code{TARGET_ARRAY_MODE_SUPPORTED_P} hook allows a larger mode to be\n\
+used in specific cases.\n\
\n\
The main use of this hook is to specify that an array of vectors should\n\
-also have a vector mode. The default implementation returns @code{BLKmode}.",
+also have a vector mode. The default implementation returns no mode.",
opt_machine_mode, (machine_mode mode, unsigned HOST_WIDE_INT nelems),
hook_optmode_mode_uhwi_none)
@@ -4318,15 +4319,6 @@ supported by the target.",
unsigned HOST_WIDE_INT, (void),
NULL)
-DEFHOOK
-(gather_scatter_supports_scale_p,
- "Return true if it is possible to plant a gather load or scatter store, with\n\
-@var{gather_p} choosing between them. @var{offset_bitsize} if the size in bits\n\
-of the offset type and @var{scale} is the amount by which the offset is\n\
-multiplied.",
- bool, (bool gather_p, unsigned int offset_bitsize, unsigned int scale),
- hook_bool_bool_uint_uint_false)
-
/* Functions relating to calls - argument passing, returns, etc. */
/* Members of struct call have no special macro prefix. */
HOOK_VECTOR (TARGET_CALLS, calls)
@@ -5539,6 +5531,19 @@ reload from using some alternatives, like @code{TARGET_PREFERRED_RELOAD_CLASS}."
default_preferred_output_reload_class)
DEFHOOK
+(select_early_remat_modes,
+ "On some targets, certain modes cannot be held in registers around a\n\
+standard ABI call and are relatively expensive to spill to the stack.\n\
+The early rematerialization pass can help in such cases by aggressively\n\
+recomputing values after calls, so that they don't need to be spilled.\n\
+\n\
+This hook returns the set of such modes by setting the associated bits\n\
+in @var{modes}. The default implementation selects no modes, which has\n\
+the effect of disabling the early rematerialization pass.",
+ void, (sbitmap modes),
+ default_select_early_remat_modes)
+
+DEFHOOK
(class_likely_spilled_p,
"A target hook which returns @code{true} if pseudos that have been assigned\n\
to registers of class @var{rclass} would likely be spilled because\n\
diff --git a/gcc/target.h b/gcc/target.h
index e7bdad33d34..2e19a5a7fae 100644
--- a/gcc/target.h
+++ b/gcc/target.h
@@ -218,17 +218,6 @@ typedef auto_vec<poly_uint64, 8> auto_vector_sizes;
extern struct gcc_target targetm;
-/* Return the mode that should be used to hold a scalar shift amount
- when shifting values of the given mode. */
-/* ??? This could in principle be generated automatically from the .md
- shift patterns, but for now word_mode should be universally OK. */
-
-inline scalar_int_mode
-get_shift_amount_mode (machine_mode)
-{
- return word_mode;
-}
-
/* Return an estimate of the runtime value of X, for use in things
like cost calculations or profiling frequencies. Note that this
function should never be used in situations where the actual
diff --git a/gcc/targhooks.c b/gcc/targhooks.c
index 5d8ecd31b8c..49b85d3a446 100644
--- a/gcc/targhooks.c
+++ b/gcc/targhooks.c
@@ -82,6 +82,7 @@ along with GCC; see the file COPYING3. If not see
#include "params.h"
#include "real.h"
#include "langhooks.h"
+#include "sbitmap.h"
bool
default_legitimate_address_p (machine_mode mode ATTRIBUTE_UNUSED,
@@ -2313,4 +2314,11 @@ default_stack_clash_protection_final_dynamic_probe (rtx residual ATTRIBUTE_UNUSE
return 0;
}
+/* The default implementation of TARGET_EARLY_REMAT_MODES. */
+
+void
+default_select_early_remat_modes (sbitmap)
+{
+}
+
#include "gt-targhooks.h"
diff --git a/gcc/targhooks.h b/gcc/targhooks.h
index 917431f17ee..0661c5f5f2b 100644
--- a/gcc/targhooks.h
+++ b/gcc/targhooks.h
@@ -284,5 +284,6 @@ extern unsigned int default_min_arithmetic_precision (void);
extern enum flt_eval_method
default_excess_precision (enum excess_precision_type ATTRIBUTE_UNUSED);
extern bool default_stack_clash_protection_final_dynamic_probe (rtx);
+extern void default_select_early_remat_modes (sbitmap);
#endif /* GCC_TARGHOOKS_H */
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index 10331b39929..29ddf92dc86 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,1758 @@
+2017-11-16 Julia Koval <julia.koval@intel.com>
+
+ * gcc.target/i386/avx512f-gf2p8mulb-2.c: New runtime tests.
+ * gcc.target/i386/avx512vl-gf2p8mulb-2.c: Ditto.
+ * gcc.target/i386/gfni-1.c: Add tests for GF2P8MUL.
+ * gcc.target/i386/gfni-2.c: Ditto.
+ * gcc.target/i386/gfni-3.c: Ditto.
+ * gcc.target/i386/gfni-4.c: Ditto.
+
+2017-11-15 Bill Schmidt <wschmidt@linux.vnet.ibm.com>
+
+ * gcc.target/powerpc/swaps-p8-26.c: Modify expected code
+ generation.
+
+2017-11-15 Martin Sebor <msebor@redhat.com>
+
+ PR testsuite/82988
+ * g++.dg/cpp0x/lambda/lambda-switch.C: Prune unimportant warning.
+
+2017-11-15 H.J. Lu <hongjiu.lu@intel.com>
+
+ PR target/82990
+ * gcc.target/i386/pr82942-2.c: Add -mtune=knl.
+ * gcc.target/i386/pr82990-1.c: New test.
+ * gcc.target/i386/pr82990-2.c: Likewise.
+ * gcc.target/i386/pr82990-3.c: Likewise.
+ * gcc.target/i386/pr82990-4.c: Likewise.
+ * gcc.target/i386/pr82990-5.c: Likewise.
+ * gcc.target/i386/pr82990-6.c: Likewise.
+ * gcc.target/i386/pr82990-7.c: Likewise.
+
+2017-11-15 Will Schmidt <will_schmidt@vnet.ibm.com>
+
+ * gcc.target/powerpc/builtins-3-p9.c: Add -O1, update
+ expected codegen checks.
+ * gcc.target/powerpc/vec-cmp-sel.c: Mark vars as volatile.
+ * gcc.target/powerpc/vsu/vec-cmpne-0.c: Add -O1.
+ * gcc.target/powerpc/vsu/vec-cmpne-1.c: Add -O1.
+ * gcc.target/powerpc/vsu/vec-cmpne-2.c: Add -O1.
+ * gcc.target/powerpc/vsu/vec-cmpne-3.c: Add -O1.
+ * gcc.target/powerpc/vsu/vec-cmpne-4.c: Add -O1.
+ * gcc.target/powerpc/vsu/vec-cmpne-5.c: Add -O1.
+ * gcc.target/powerpc/vsu/vec-cmpne-6.c: Add -O1.
+
+2017-11-15 Steven G. Kargl <kargl@gcc.gnu.org>
+
+ PR fortran/78240
+ gfortran.dg/pr78240.f90: Prune run-on errors.
+
+2017-11-15 Bin Cheng <bin.cheng@arm.com>
+
+ PR tree-optimization/82726
+ * gcc.dg/tree-ssa/pr82726.c: New test.
+
+2017-11-15 Sudakshina Das <sudi.das@arm.com>
+
+ * g++.dg/ext/pr57735.C: Add -Wno-return-type for test.
+ * gcc.target/arm/pr54300.C (main): Add return type and
+ return a value.
+
+2017-11-15 Tom de Vries <tom@codesourcery.com>
+
+ * gcc.dg/strncpy-fix-1.c: Add -Wno-stringop-truncation to dg-options.
+
+2017-11-15 Dominique d'Humieres <dominiq@lps.ens.fr>
+
+ * gcc.target/i386/pr81706.c: Adjust asm for darwin.
+ * g++.dg/ext/pr81706.C: Likewise.
+
+2017-11-15 Nathan Sidwell <nathan@acm.org>
+
+ PR c++/81574
+ * g++.dg/cpp1y/pr81574.C: New.
+
+2017-11-15 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/82985
+ * g++.dg/torture/pr82985.C: Likewise.
+
+2017-11-15 Sebastian Peryt <sebastian.peryt@intel.com>
+
+ PR target/82941
+ PR target/82942
+ * gcc.target/i386/pr82941-1.c: New test.
+ * gcc.target/i386/pr82941-2.c: New test.
+ * gcc.target/i386/pr82942-1.c: New test.
+ * gcc.target/i386/pr82942-2.c: New test.
+
+2017-11-15 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
+
+ * g++.dg/cpp0x/rv-trivial-bug.C (test2): Return a value.
+
+2017-11-15 Jakub Jelinek <jakub@redhat.com>
+
+ PR target/82981
+ * gcc.target/mips/pr82981.c: New test.
+
+2017-11-15 Martin Liska <mliska@suse.cz>
+
+ * g++.dg/ubsan/vptr-12.C: New test.
+
+2017-11-15 Joseph Myers <joseph@codesourcery.com>
+
+ PR c/81156
+ * gcc.dg/builtin-tgmath-1.c, gcc.dg/builtin-tgmath-2.c,
+ gcc.dg/builtin-tgmath-err-1.c, gcc.dg/builtin-tgmath-err-2.c,
+ gcc.dg/dfp/builtin-tgmath-dfp-err.c,
+ gcc.dg/dfp/builtin-tgmath-dfp.c: New tests.
+
+2017-11-14 Michael Meissner <meissner@linux.vnet.ibm.com>
+
+ * gcc.target/powerpc/float128-hw4.c: New test.
+
+2017-11-14 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
+
+ * lib/target-supports.exp (check_effective_target_pie): Adapt
+ comment for Solaris 12 renaming.
+
+ * gcc.dg/torture/pr60092.c: Remove *-*-solaris2.11* dg-xfail-run-if.
+
+2017-11-14 Carl Love <cel@us.ibm.com>
+
+ * builtins-revb-runnable.c (dg-do run): Add lp64 directive. Fix
+ indentation of printf and abort statements.
+ * p9-xxbr-1.c (dg-do compile): Add lp64 && p9vector_h directives.
+
+2017-11-14 James Greenhalgh <james.greenhalgh@arm.com>
+
+ * gcc.target/aarch64/bsl-idiom.c: New.
+ * gcc.target/aarch64/copysign-bsl.c: New.
+
+2017-11-14 Tom de Vries <tom@codesourcery.com>
+
+ * c-c++-common/Wstringop-truncation.c: Require effective target alloca.
+
+2017-11-13 Jan Hubicka <hubicka@ucw.cz>
+
+ * gcc.dg/tree-ssa/fnsplit-2.c: New testcase.
+
+2017-11-13 Fritz Reese <fritzoreese@gmail.com>
+
+ PR fortran/78240
+ * gfortran.dg/dec_structure_23.f90: New.
+ * gfortran.dg/pr78240.f90: New.
+
+2017-11-13 Carl Love <cel@us.ibm.com>
+
+ * gcc.target/powerpc/builtin-vec-sums-be-int.c: New test file.
+
+2017-11-13 Tom Tromey <tom@tromey.com>
+
+ * c-c++-common/cpp/va-opt-pedantic.c: New file.
+ * c-c++-common/cpp/va-opt.c: New file.
+ * c-c++-common/cpp/va-opt-error.c: New file.
+
+2017-11-13 Carl Love <cel@us.ibm.com>
+
+ * gcc.target/powerpc/builtins-6-p9-runnable.c: Add new runnable test.
+ * gcc.target/powerpc/vsu/vec-cnttz-lsbb-2.c: Update expected error
+ message.
+
+2017-11-13 Michael Meissner <meissner@linux.vnet.ibm.com>
+
+ * gcc.target/powerpc/float128-minmax.c: New test.
+
+2017-11-13 Christophe Lyon <christophe.lyon@linaro.org>
+
+ * gcc.target/arm/pr67989.C: Add -Wno-return-type to
+ dg-additional-options.
+
+2017-11-13 Nathan Sidwell <nathan@acm.org>
+
+ * lib/gcc-dg.exp (process-message): Use -: for no column.
+ * c-c++-common/cilk-plus/CK/cilk_for_grain_errors.c: Mark elided
+ column messages.
+ * c-c++-common/cpp/pr58844-1.c: Likewise.
+ * c-c++-common/cpp/pr58844-2.c: Likewise.
+ * c-c++-common/cpp/warning-zero-location.c: Likewise.
+ * g++.dg/diagnostic/pr77949.C: Likewise.
+ * g++.dg/gomp/macro-4.C: Likewise.
+ * gcc.dg/Wunknownprag.c: Likewise.
+ * gcc.dg/builtin-redefine.c: Likewise.
+ * gcc.dg/cpp/Wunknown-pragmas-1.c: Likewise.
+ * gcc.dg/cpp/Wunused.c: Likewise.
+ * gcc.dg/cpp/misspelled-directive-1.c: Likewise.
+ * gcc.dg/cpp/redef2.c: Likewise.
+ * gcc.dg/cpp/redef3.c: Likewise.
+ * gcc.dg/cpp/redef4.c: Likewise.
+ * gcc.dg/cpp/trad/Wunused.c: Likewise.
+ * gcc.dg/cpp/trad/argcount.c: Likewise.
+ * gcc.dg/cpp/trad/comment-3.c: Likewise.
+ * gcc.dg/cpp/trad/comment.c: Likewise.
+ * gcc.dg/cpp/trad/defined.c: Likewise.
+ * gcc.dg/cpp/trad/directive.c: Likewise.
+ * gcc.dg/cpp/trad/funlike-3.c: Likewise.
+ * gcc.dg/cpp/trad/funlike.c: Likewise.
+ * gcc.dg/cpp/trad/literals-2.c: Likewise.
+ * gcc.dg/cpp/trad/macro.c: Likewise.
+ * gcc.dg/cpp/trad/pr65238-4.c: Likewise.
+ * gcc.dg/cpp/trad/recurse-1.c: Likewise.
+ * gcc.dg/cpp/trad/recurse-2.c: Likewise.
+ * gcc.dg/cpp/trad/redef2.c: Likewise.
+ * gcc.dg/cpp/ucnid-11.c: Likewise.
+ * gcc.dg/cpp/unc1.c: Likewise.
+ * gcc.dg/cpp/unc2.c: Likewise.
+ * gcc.dg/cpp/unc3.c: Likewise.
+ * gcc.dg/cpp/unc4.c: Likewise.
+ * gcc.dg/cpp/undef2.c: Likewise.
+ * gcc.dg/cpp/warn-redefined-2.c: Likewise.
+ * gcc.dg/cpp/warn-redefined.c: Likewise.
+ * gcc.dg/cpp/warn-unused-macros-2.c: Likewise.
+ * gcc.dg/cpp/warn-unused-macros.c: Likewise.
+ * gcc.dg/empty-source-2.c: Likewise.
+ * gcc.dg/empty-source-3.c: Likewise.
+ * gcc.dg/gomp/macro-4.c: Likewise.
+ * gcc.dg/noncompile/pr35447-1.c: Likewise.
+ * gcc.dg/plugin/location-overflow-test-1.c: Likewise.
+ * gcc.dg/pr20245-1.c: Likewise.
+ * gcc.dg/pr28419.c: Likewise.
+ * gcc.dg/rtl/truncated-rtl-file.c: Likewise.
+ * gcc.dg/unclosed-init.c: Likewise.
+
+2017-11-13 Charles Baylis <charles.baylis@linaro.org>
+
+ * gfortran.dg/ieee/ieee_8.f90: xfail for aarch64*-*-gnu*
+
+2017-11-13 Jakub Jelinek <jakub@redhat.com>
+
+ PR tree-optimization/78821
+ * gcc.dg/store_merging_15.c: New test.
+
+ PR tree-optimization/82954
+ * gcc.c-torture/execute/pr82954.c: New test.
+
+2017-11-11 Janus Weil <janus@gcc.gnu.org>
+
+ PR fortran/82932
+ * gfortran.dg/typebound_call_29.f90: New test.
+
+2017-11-10 Fritz Reese <fritzoreese@gmail.com>
+
+ PR fortran/82886
+ * gfortran.dg/init_flag_16.f03: New testcase.
+
+2017-11-10 Michael Meissner <meissner@linux.vnet.ibm.com>
+
+ * gcc.target/powerpc/p9-xxbr-3.c: New test.
+
+2017-11-10 Uros Bizjak <ubizjak@gmail.com>
+
+ * gcc.target/i386/force-indirect-call-1.c: Merge scan strings.
+ * gcc.target/i386/force-indirect-call-2.c: Ditto.
+ Require fpic effective target.
+ * gcc.target/i386/force-indirect-call-3.c: Ditto.
+ Require lp64 effective target.
+
+2017-11-10 Julia Koval <julia.koval@intel.com>
+
+ * gcc.target/i386/avx-1.c: Handle new intrinsics.
+ * gcc.target/i386/avx512f-gf2p8affineqb-2.c: New runtime tests.
+ * gcc.target/i386/avx512vl-gf2p8affineqb-2.c: Ditto.
+ * gcc.target/i386/gfni-1.c: Add tests for GF2P8AFFINE.
+ * gcc.target/i386/gfni-2.c: Ditto.
+ * gcc.target/i386/gfni-3.c: Ditto.
+ * gcc.target/i386/gfni-4.c: Ditto.
+ * gcc.target/i386/sse-13.c: Handle new tests.
+ * gcc.target/i386/sse-14.c: Handle new tests.
+ * gcc.target/i386/sse-23.c: Handle new tests.
+
+2017-11-10 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * gcc.target/arm/cmse/bitfield-4.x: New file.
+ * gcc.target/arm/cmse/baseline/bitfield-4.c: Remove code and include
+ above file.
+ * gcc.target/arm/cmse/mainline/bitfield-4.c: Likewise.
+ * gcc.target/arm/cmse/bitfield-5.x: New file.
+ * gcc.target/arm/cmse/baseline/bitfield-5.c: Remove code and include
+ above file.
+ * gcc.target/arm/cmse/mainline/bitfield-5.c: Likewise.
+ * gcc.target/arm/cmse/bitfield-6.x: New file.
+ * gcc.target/arm/cmse/baseline/bitfield-6.c: Remove code and include
+ above file.
+ * gcc.target/arm/cmse/mainline/bitfield-6.c: Likewise.
+ * gcc.target/arm/cmse/bitfield-7.x: New file.
+ * gcc.target/arm/cmse/baseline/bitfield-7.c: Remove code and include
+ above file.
+ * gcc.target/arm/cmse/mainline/bitfield-7.c: Likewise.
+ * gcc.target/arm/cmse/bitfield-8.x: New file.
+ * gcc.target/arm/cmse/baseline/bitfield-8.c: Remove code and include
+ above file.
+ * gcc.target/arm/cmse/mainline/bitfield-8.c: Likewise.
+ * gcc.target/arm/cmse/bitfield-9.x: New file.
+ * gcc.target/arm/cmse/baseline/bitfield-9.c: Remove code and include
+ above file.
+ * gcc.target/arm/cmse/mainline/bitfield-9.c: Likewise.
+ * gcc.target/arm/cmse/bitfield-and-union.x: New file.
+ * gcc.target/arm/cmse/baseline/bitfield-and-union-1.c: Rename into ...
+ * gcc.target/arm/cmse/baseline/bitfield-and-union.c: This. Remove code
+ and include above bitfield-and-union.x file.
+ * gcc.target/arm/cmse/mainline/bitfield-and-union-1.c: Rename into ...
+ * gcc.target/arm/cmse/mainline/bitfield-and-union.c: this. Remove code
+ and include above bitfield-and-union.x file.
+ * gcc.target/arm/cmse/cmse-13.x: New file.
+ * gcc.target/arm/cmse/baseline/cmse-13.c: Remove code and include above
+ file.
+ * gcc.target/arm/cmse/mainline/hard-sp/cmse-13.c: Likewise.
+ * gcc.target/arm/cmse/mainline/hard/cmse-13.c: Likewise.
+ * gcc.target/arm/cmse/mainline/soft/cmse-13.c: Likewise.
+ * gcc.target/arm/cmse/mainline/softfp/cmse-13.c: Likewise.
+ * gcc.target/arm/cmse/cmse-5.x: New file.
+ * gcc.target/arm/cmse/mainline/hard-sp/cmse-5.c: Remove code and
+ include above file.
+ * gcc.target/arm/cmse/mainline/hard/cmse-5.c: Likewise.
+ * gcc.target/arm/cmse/mainline/soft/cmse-5.c: Likewise.
+ * gcc.target/arm/cmse/mainline/softfp-sp/cmse-5.c: Likewise.
+ * gcc.target/arm/cmse/mainline/softfp/cmse-5.c: Likewise.
+ * gcc.target/arm/cmse/cmse-7.x: New file.
+ * gcc.target/arm/cmse/mainline/hard-sp/cmse-7.c: Remove code and
+ include above file.
+ * gcc.target/arm/cmse/mainline/hard/cmse-7.c: Likewise.
+ * gcc.target/arm/cmse/mainline/soft/cmse-7.c: Likewise.
+ * gcc.target/arm/cmse/mainline/softfp-sp/cmse-7.c: Likewise.
+ * gcc.target/arm/cmse/mainline/softfp/cmse-7.c: Likewise.
+ * gcc.target/arm/cmse/cmse-8.x: New file.
+ * gcc.target/arm/cmse/mainline/hard-sp/cmse-8.c: Remove code and
+ include above file.
+ * gcc.target/arm/cmse/mainline/hard/cmse-8.c: Likewise.
+ * gcc.target/arm/cmse/mainline/soft/cmse-8.c: Likewise.
+ * gcc.target/arm/cmse/mainline/softfp-sp/cmse-8.c: Likewise.
+ * gcc.target/arm/cmse/mainline/softfp/cmse-8.c: Likewise.
+ * gcc.target/arm/cmse/union-1.x: New file.
+ * gcc.target/arm/cmse/baseline/union-1.c: Remove code and include above
+ file.
+ * gcc.target/arm/cmse/mainline/union-1.c: Likewise.
+ * gcc.target/arm/cmse/union-2.x: New file.
+ * gcc.target/arm/cmse/baseline/union-2.c: Remove code and include above
+ file.
+ * gcc.target/arm/cmse/mainline/union-2.c: Likewise.
+
+2017-11-10 Martin Sebor <msebor@redhat.com>
+
+ PR c/81117
+ * c-c++-common/Wsizeof-pointer-memaccess3.c: New test.
+ * c-c++-common/Wstringop-overflow.c: Same.
+ * c-c++-common/Wstringop-truncation.c: Same.
+ * c-c++-common/Wsizeof-pointer-memaccess2.c: Adjust.
+ * c-c++-common/attr-nonstring-2.c: New test.
+ * gcc/testsuite/gcc.dg/builtin-stpncpy.c: Adjust.
+ * g++.dg/torture/Wsizeof-pointer-memaccess1.C: Same.
+ * g++.dg/torture/Wsizeof-pointer-memaccess2.C: Same.
+ * gcc.dg/torture/pr63554.c: Same.
+ * gcc.dg/Walloca-1.c: Disable macro tracking.
+
+2017-11-10 Jakub Jelinek <jakub@redhat.com>
+
+ PR tree-optimization/82929
+ * gcc.dg/pr82929.c: New test.
+ * g++.dg/opt/pr82929.C: New test.
+
+2017-11-10 Christophe Lyon <christophe.lyon@linaro.org>
+
+ * lib/target-supports.exp (check_effective_target_arm_soft_ok):
+ New function.
+ * gcc.target/arm/copysign_softfloat_1.c: Require arm_soft_ok
+ effective target.
+
+2017-11-10 Paul Thomas <pault@gcc.gnu.org>
+
+ PR fortran/82934
+ * gfortran.dg/allocate_assumed_charlen_1.f90: New test.
+
+2017-11-10 Jakub Jelinek <jakub@redhat.com>
+
+ PR bootstrap/82916
+ * gcc.dg/store_merging_2.c: Only expect 2 successful mergings instead
+ of 3.
+ * gcc.dg/pr82916.c: New test.
+
+2017-11-10 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * lib/scanasm.exp (scan-assembler): Extract filename from testname used
+ in summary.
+ (scan-assembler-not): Likewise.
+ (scan-hidden): Likewise.
+ (scan-not-hidden): Likewise.
+ (scan-stack-usage): Likewise.
+ (scan-stack-usage-not): Likewise.
+ (scan-assembler-times): Likewise.
+ (scan-assembler-dem): Likewise.
+ (scan-assembler-dem-not): Likewise.
+ (object-size): Likewise.
+ (scan-lto-assembler): Likewise.
+ * lib/scandump.exp (scan-dump): Likewise.
+ (scan-dump-times): Likewise.
+ (scan-dump-not): Likewise.
+ (scan-dump-dem): Likewise.
+ (scan-dump-dem-not): Likewise
+
+2017-11-10 Eric Botcazou <ebotcazou@adacore.com>
+
+ * gnat.dg/opt69.adb: New test.
+
+2017-11-10 Jakub Jelinek <jakub@redhat.com>
+
+ PR rtl-optimization/82913
+ * gcc.c-torture/compile/pr82913.c: New test.
+
+2017-11-09 Paul Thomas <pault@gcc.gnu.org>
+
+ PR fortran/78619
+ * gfortran.dg/pr78619.f90: New test.
+
+2017-11-09 Steven G. Kargl <kargl@gcc.gnu.org>
+
+ PR fortran/78814
+ * gfortran.dg/interface_40.f90: New testcase.
+
+2017-11-09 Eric Botcazou <ebotcazou@adacore.com>
+
+ * gnat.dg/controlled2.adb, gnat.dg/controlled4.adb,
+ gnat.dg/finalized.adb: Disable all warnings.
+
+2017-11-09 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * lib/target-supports.exp (check_effective_target_vect_masked_store):
+ New proc.
+ * gcc.dg/vect/vect-cselim-1.c (foo): Mention that the second loop
+ is vectorizable with masked stores. Update scan-tree-dump-times
+ accordingly.
+
+2017-11-09 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * lib/target-supports.exp
+ (check_effective_target_vect_align_stack_vars): New proc.
+ * gcc.dg/vect/vect-23.c: Only expect the array to be aligned if
+ vect_align_stack_vars.
+ * gcc.dg/vect/vect-24.c: Likewise.
+ * gcc.dg/vect/vect-25.c: Likewise.
+ * gcc.dg/vect/vect-26.c: Likewise.
+ * gcc.dg/vect/vect-32-big-array.c: Likewise.
+ * gcc.dg/vect/vect-32.c: Likewise.
+ * gcc.dg/vect/vect-40.c: Likewise.
+ * gcc.dg/vect/vect-42.c: Likewise.
+ * gcc.dg/vect/vect-46.c: Likewise.
+ * gcc.dg/vect/vect-48.c: Likewise.
+ * gcc.dg/vect/vect-52.c: Likewise.
+ * gcc.dg/vect/vect-54.c: Likewise.
+ * gcc.dg/vect/vect-62.c: Likewise.
+ * gcc.dg/vect/vect-67.c: Likewise.
+ * gcc.dg/vect/vect-75-big-array.c: Likewise.
+ * gcc.dg/vect/vect-75.c: Likewise.
+ * gcc.dg/vect/vect-77-alignchecks.c: Likewise.
+ * gcc.dg/vect/vect-78-alignchecks.c: Likewise.
+ * gcc.dg/vect/vect-89-big-array.c: Likewise.
+ * gcc.dg/vect/vect-89.c: Likewise.
+ * gcc.dg/vect/vect-96.c: Likewise.
+ * gcc.dg/vect/vect-multitypes-3.c: Likewise.
+ * gcc.dg/vect/vect-multitypes-6.c: Likewise.
+
+2017-11-09 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * lib/target-supports.exp
+ (check_effective_target_vect_variable_length): New proc.
+ * gcc.dg/vect/pr60482.c: XFAIL test for no epilog loop if
+ vect_variable_length.
+ * gcc.dg/vect/slp-reduc-6.c: XFAIL two-operation SLP if
+ vect_variable_length.
+ * gcc.dg/vect/vect-alias-check-5.c: XFAIL alias optimization if
+ vect_variable_length.
+ * gfortran.dg/vect/fast-math-mgrid-resid.f: XFAIL predictive
+ commoning optimization if vect_variable_length.
+
+2017-11-09 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * lib/target-supports.exp
+ (check_effective_target_vect_unaligned_possible): New proc.
+ * gcc.dg/vect/slp-25.c: Extend XFAIL of peeling for alignment from
+ vect_no_align && { ! vect_hw_misalign } to ! vect_unaligned_possible.
+ * gcc.dg/vect/vect-multitypes-1.c: Likewise.
+ * gcc.dg/vect/vect-109.c: XFAIL vectorisation of an unaligned
+ access to ! vect_unaligned_possible.
+ * gcc.dg/vect/vect-33.c: Likewise.
+ * gcc.dg/vect/vect-42.c: Likewise.
+ * gcc.dg/vect/vect-56.c: Likewise.
+ * gcc.dg/vect/vect-60.c: Likewise.
+ * gcc.dg/vect/vect-96.c: Likewise.
+ * gcc.dg/vect/vect-peel-1.c: Likewise.
+ * gcc.dg/vect/vect-27.c: Extend XFAIL of unaligned vectorization from
+ vect_no_align && { ! vect_hw_misalign } to ! vect_unaligned_possible.
+ * gcc.dg/vect/vect-29.c: Likewise.
+ * gcc.dg/vect/vect-44.c: Likewise.
+ * gcc.dg/vect/vect-48.c: Likewise.
+ * gcc.dg/vect/vect-50.c: Likewise.
+ * gcc.dg/vect/vect-52.c: Likewise.
+ * gcc.dg/vect/vect-72.c: Likewise.
+ * gcc.dg/vect/vect-75-big-array.c: Likewise.
+ * gcc.dg/vect/vect-75.c: Likewise.
+ * gcc.dg/vect/vect-77-alignchecks.c: Likewise.
+ * gcc.dg/vect/vect-77-global.c: Likewise.
+ * gcc.dg/vect/vect-78-alignchecks.c: Likewise.
+ * gcc.dg/vect/vect-78-global.c: Likewise.
+ * gcc.dg/vect/vect-multitypes-3.c: Likewise.
+ * gcc.dg/vect/vect-multitypes-4.c: Likewise.
+ * gcc.dg/vect/vect-multitypes-6.c: Likewise.
+ * gcc.dg/vect/vect-peel-4.c: Likewise.
+ * gcc.dg/vect/vect-peel-3.c: Likewise, and also for peeling
+ for alignment.
+
+2017-11-09 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * lib/target-supports.exp
+ (check_effective_target_vect_element_align_preferred): New proc.
+ (check_effective_target_vect_peeling_profitable): Test it.
+ * gcc.dg/vect/no-section-anchors-vect-31.c: Don't expect peeling
+ if vect_element_align_preferred.
+ * gcc.dg/vect/no-section-anchors-vect-64.c: Likewise.
+ * gcc.dg/vect/pr65310.c: Likewise.
+ * gcc.dg/vect/vect-26.c: Likewise.
+ * gcc.dg/vect/vect-54.c: Likewise.
+ * gcc.dg/vect/vect-56.c: Likewise.
+ * gcc.dg/vect/vect-58.c: Likewise.
+ * gcc.dg/vect/vect-60.c: Likewise.
+ * gcc.dg/vect/vect-89-big-array.c: Likewise.
+ * gcc.dg/vect/vect-89.c: Likewise.
+ * gcc.dg/vect/vect-92.c: Likewise.
+ * gcc.dg/vect/vect-peel-1.c: Likewise.
+ * gcc.dg/vect/vect-outer-3a-big-array.c: Expect the step to
+ divide the alignment if vect_element_align_preferred.
+ * gcc.dg/vect/vect-outer-3a.c: Likewise.
+
+2017-11-09 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * lib/target-supports.exp (vect_perm_supported): New proc.
+ (check_effective_target_vect_perm3_int): Likewise.
+ (check_effective_target_vect_perm3_short): Likewise.
+ (check_effective_target_vect_perm3_byte): Likewise.
+ * gcc.dg/vect/slp-perm-1.c: Expect SLP load permutation to
+ succeed if vect_perm3_int.
+ * gcc.dg/vect/slp-perm-5.c: Likewise.
+ * gcc.dg/vect/slp-perm-6.c: Likewise.
+ * gcc.dg/vect/slp-perm-7.c: Likewise.
+ * gcc.dg/vect/slp-perm-8.c: Likewise vect_perm3_byte.
+ * gcc.dg/vect/slp-perm-9.c: Likewise vect_perm3_short.
+ Use vect_perm_short instead of vect_perm. Add a scan-tree-dump-not
+ test for vect_perm3_short targets.
+
+2017-11-09 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * gcc.dg/vect/no-vfa-vect-101.c: Use scan-tree-dump rather than
+ scan-tree-dump-times for vect_multiple_sizes.
+ * gcc.dg/vect/no-vfa-vect-102.c: Likewise.
+ * gcc.dg/vect/no-vfa-vect-102a.c: Likewise.
+ * gcc.dg/vect/no-vfa-vect-37.c: Likewise.
+ * gcc.dg/vect/no-vfa-vect-79.c: Likewise.
+ * gcc.dg/vect/vect-104.c: Likewise.
+
+2017-11-09 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * lib/target-supports.exp (available_vector_sizes): New proc.
+ (check_effective_target_vect_multiple_sizes): Use it.
+ (check_effective_target_vect64): Likewise.
+ (check_effective_target_vect_sizes_32B_16B): Likewise.
+
+2017-11-09 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * gcc.dg/vect/tree-vect.h (VECTOR_BITS): Define.
+ * gcc.dg/vect/bb-slp-pr69907.c: Include tree-vect.h.
+ (N): New macro.
+ (foo): Use it instead of hard-coded 320.
+ * gcc.dg/vect/no-scevccp-outer-7.c (N): Redefine if the default
+ value is too small for VECTOR_BITS.
+ * gcc.dg/vect/no-scevccp-vect-iv-3.c (N): Likewise.
+ * gcc.dg/vect/no-section-anchors-vect-31.c (N): Likewise.
+ * gcc.dg/vect/no-section-anchors-vect-36.c (N): Likewise.
+ * gcc.dg/vect/slp-perm-9.c (N): Likewise.
+ * gcc.dg/vect/vect-32.c (N): Likewise.
+ * gcc.dg/vect/vect-75.c (N, OFF): Likewise.
+ * gcc.dg/vect/vect-77-alignchecks.c (N, OFF): Likewise.
+ * gcc.dg/vect/vect-78-alignchecks.c (N, OFF): Likewise.
+ * gcc.dg/vect/vect-89.c (N): Likewise.
+ * gcc.dg/vect/vect-96.c (N): Likewise.
+ * gcc.dg/vect/vect-multitypes-3.c (N): Likewise.
+ * gcc.dg/vect/vect-multitypes-6.c (N): Likewise.
+ * gcc.dg/vect/vect-over-widen-1.c (N): Likewise.
+ * gcc.dg/vect/vect-over-widen-4.c (N): Likewise.
+ * gcc.dg/vect/vect-reduc-pattern-1a.c (N): Likewise.
+ * gcc.dg/vect/vect-reduc-pattern-1b.c (N): Likewise.
+ * gcc.dg/vect/vect-reduc-pattern-2a.c (N): Likewise.
+ * gcc.dg/vect/no-section-anchors-vect-64.c (NINTS): New macro.
+ (N): Redefine in terms of NINTS.
+ (ia, ib, ic): Use NINTS instead of hard-coded constants in the
+ array bounds.
+ * gcc.dg/vect/no-section-anchors-vect-69.c (NINTS): New macro.
+ (N): Redefine in terms of NINTS.
+ (test1): Replace a and b fields with NINTS - 2 ints of padding.
+ (main1): Use NINTS instead of hard-coded constants.
+ * gcc.dg/vect/section-anchors-vect-69.c (NINTS): New macro.
+ (N): Redefine in terms of NINTS.
+ (test1): Replace a and b fields with NINTS - 2 ints of padding.
+ (test2): Remove incorrect comments about alignment.
+ (main1): Use NINTS instead of hard-coded constants.
+ * gcc.dg/vect/pr45752.c (N): Redefine if the default value is
+ too small for VECTOR_BITS.
+ (main): Continue to use canned results for the default value of N,
+ but compute the expected results from scratch for other values.
+ * gcc.dg/vect/slp-perm-1.c (N, main): As for pr45752.c.
+ * gcc.dg/vect/slp-perm-4.c (N, main): Likewise.
+ * gcc.dg/vect/slp-perm-5.c (N, main): Likewise.
+ * gcc.dg/vect/slp-perm-6.c (N, main): Likewise.
+ * gcc.dg/vect/slp-perm-7.c (N, main): Likewise.
+ * gcc.dg/vect/pr65518.c (NINTS, N, RESULT): New macros.
+ (giga): Use NINTS as the array bound.
+ (main): Use NINTS, N and RESULT.
+ * gcc.dg/vect/pr65947-5.c (N): Redefine if the default value is
+ too small for VECTOR_BITS.
+ (main): Fill in any remaining elements of A programmatically.
+ * gcc.dg/vect/pr81136.c: Include tree-vect.h.
+ (a): Use VECTOR_BITS to set the alignment of the target structure.
+ * gcc.dg/vect/slp-19c.c (N): Redefine if the default value is
+ too small for VECTOR_BITS.
+ (main1): Continue to use the canned input for the default value of N,
+ but compute the input from scratch for other values.
+ * gcc.dg/vect/slp-28.c (N): Redefine if the default value is
+ too small for VECTOR_BITS.
+ (in1, in2, in3): Remove initialization.
+ (check1, check2): Delete.
+ (main1): Initialize in1, in2 and in3 here. Check every element
+ of the vectors and compute the expected values directly instead
+ of using an array.
+ * gcc.dg/vect/slp-perm-8.c (N): Redefine if the default value is
+ too small for VECTOR_BITS.
+ (foo, main): Change type of "i" to int.
+ * gcc.dg/vect/vect-103.c (NINTS): New macro.
+ (N): Redefine in terms of N.
+ (c): Delete.
+ (main1): Use NINTS. Check the result from a and b directly.
+ * gcc.dg/vect/vect-67.c (NINTS): New macro.
+ (N): Redefine in terms of N.
+ (main1): Use NINTS for the inner array bounds.
+ * gcc.dg/vect/vect-70.c (NINTS, OUTERN): New macros.
+ (N): Redefine in terms of NINTS.
+ (s): Keep the outer dimensions as 4 even if N is larger than 24.
+ (tmp1): New variable.
+ (main1): Only define a local tmp1 if NINTS is relatively small.
+ Use OUTERN for the outer loops and NINTS for the inner loops.
+ * gcc.dg/vect/vect-91.c (OFF): New macro.
+ (a, main3): Use it.
+ * gcc.dg/vect/vect-92.c (NITER): New macro.
+ (main1, main2): Use it.
+ * gcc.dg/vect/vect-93.c (N): Rename to...
+ (N1): ...this.
+ (main): Update accordingly.
+ (N2): New macro.
+ (main1): Use N1 instead of 3001 and N2 insteaed of 10.
+ * gcc.dg/vect/vect-multitypes-1.c (NSHORTS, NINTS): New macros.
+ (N): Redefine in terms of NSHORTS.
+ (main1): Use NINTS - 1 instead of 3 and NSHORTS - 1 instead of 7.
+ (main): Likewise.
+ * gcc.dg/vect/vect-over-widen-3-big-array.c (N): Define to VECTOR_BITS.
+ (foo): Truncate the expected value to the type of *d.
+ * gcc.dg/vect/vect-peel-3.c (NINTS, EXTRA): New macros.
+ (ia, ib, ic, main): Use EXTRA.
+ (main): Use NINTS.
+ (RES_A, RES_B, REC_C): New macros.
+ (RES): Redefine as their sum.
+ * gcc.dg/vect/vect-reduc-or_1.c (N): New macro.
+ (in): Change number of elements to N.
+ (main): Update accordingly. Calculate the expected result.
+ * gcc.dg/vect/vect-reduc-or_2.c (N, in, main): As for
+ vect-reduc-or-1.c.
+
+2017-11-09 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * gcc.dg/vect/bb-slp-cond-1.c (main): Add an asm volatile
+ to the set-up loop.
+ * gcc.dg/vect/slp-perm-7.c (main): Prevent vectorisation with
+ asm volatile ("" ::: "memory") instead of a conditional abort.
+ Update the expected vector loop count accordingly.
+ * gcc.dg/vect/slp-perm-9.c (main): Likewise.
+ * gcc.dg/vect/bb-slp-1.c (main1): Prevent vectorisation with
+ asm volatile ("" ::: "memory") instead of a conditional abort.
+ * gcc.dg/vect/slp-23.c (main): Likewise,
+ * gcc.dg/vect/slp-35.c (main): Likewise,
+ * gcc.dg/vect/slp-37.c (main): Likewise,
+ * gcc.dg/vect/slp-perm-4.c (main): Likewise.
+ * gcc.dg/vect/bb-slp-24.c (foo): Likewise. Remove dummy argument.
+ (main): Update call accordingly.
+ * gcc.dg/vect/bb-slp-25.c (foo, main): As for bb-slp-24.c.
+ * gcc.dg/vect/bb-slp-26.c (foo, main): Likewise.
+ * gcc.dg/vect/bb-slp-29.c (foo, main): Likewise.
+ * gcc.dg/vect/no-vfa-vect-102.c (foo): Delete.
+ (main): Don't initialize it.
+ (main1): Prevent vectorisation with asm volatile ("" ::: "memory")
+ instead of a conditional abort.
+ * gcc.dg/vect/no-vfa-vect-102a.c (foo, main1, main): As for
+ no-vfa-vect-102.c
+ * gcc.dg/vect/vect-103.c (foo, main1, main): Likewise.
+ * gcc.dg/vect/vect-104.c (foo, main1, main): Likewise.
+ * gcc.dg/vect/pr42709.c (main1): Remove dummy argument.
+ Prevent vectorisation with asm volatile ("" ::: "memory")
+ instead of a conditional abort.
+ * gcc.dg/vect/slp-13-big-array.c (y): Delete.
+ (main1): Prevent vectorisation with asm volatile ("" ::: "memory")
+ instead of a conditional abort.
+ * gcc.dg/vect/slp-3-big-array.c (y, main1): As for slp-13-big-array.c.
+ * gcc.dg/vect/slp-34-big-array.c (y, main1): Likewise.
+ * gcc.dg/vect/slp-4-big-array.c (y, main1): Likewise.
+ * gcc.dg/vect/slp-multitypes-11-big-array.c (y, main1): Likewise.
+ * gcc.dg/vect/vect-105.c (y, main1): Likewise.
+ * gcc.dg/vect/vect-105-big-array.c (y, main1): Likewise.
+ * gcc.dg/vect/vect-112-big-array.c (y, main1): Likewise.
+ * gcc.dg/vect/vect-15-big-array.c (y, main1): Likewise.
+ * gcc.dg/vect/vect-2-big-array.c (y, main1): Likewise.
+ * gcc.dg/vect/vect-34-big-array.c (y, main1): Likewise.
+ * gcc.dg/vect/vect-6-big-array.c (y, main1): Likewise.
+ * gcc.dg/vect/vect-73-big-array.c (y, main1): Likewise.
+ * gcc.dg/vect/vect-74-big-array.c (y, main1): Likewise.
+ * gcc.dg/vect/vect-75-big-array.c (y, main1): Likewise.
+ * gcc.dg/vect/vect-76-big-array.c (y, main1): Likewise.
+ * gcc.dg/vect/vect-80-big-array.c (y, main1): Likewise.
+ * gcc.dg/vect/vect-97-big-array.c (y, main1): Likewise.
+ * gcc.dg/vect/vect-all-big-array.c (y, main1): Likewise.
+ * gcc.dg/vect/vect-reduc-1char-big-array.c (y, main1): Likewise.
+ * gcc.dg/vect/vect-reduc-2char-big-array.c (y, main1): Likewise.
+ * gcc.dg/vect/vect-strided-a-mult.c (y, main1): Likewise.
+ * gcc.dg/vect/vect-strided-a-u16-i2.c (y, main1): Likewise.
+ * gcc.dg/vect/vect-strided-a-u16-i4.c (y, main1): Likewise.
+ * gcc.dg/vect/vect-strided-a-u16-mult.c (y, main1): Likewise.
+ * gcc.dg/vect/vect-strided-a-u8-i2-gap.c (y, main1): Likewise.
+ * gcc.dg/vect/vect-strided-a-u8-i8-gap2-big-array.c (y, main1):
+ Likewise.
+ * gcc.dg/vect/vect-strided-a-u8-i8-gap2.c (y, main1): Likewise.
+ * gcc.dg/vect/vect-strided-a-u8-i8-gap7-big-array.c (y, main1):
+ Likewise.
+ * gcc.dg/vect/vect-strided-a-u8-i8-gap7.c (y, main1): Likewise.
+ * gcc.dg/vect/slp-24.c (y): Delete.
+ (main): Prevent vectorisation with asm volatile ("" ::: "memory")
+ instead of a conditional abort.
+ * gcc.dg/vect/slp-24-big-array.c (y, main): As for slp-24.c.
+ * gcc.dg/vect/vect-98-big-array.c (y, main): Likewise.
+ * gcc.dg/vect/vect-bswap16.c (y, main): Likewise.
+ * gcc.dg/vect/vect-bswap32.c (y, main): Likewise.
+ * gcc.dg/vect/vect-bswap64.c (y, main): Likewise.
+ * gcc.dg/vect/vect-strided-mult-char-ls.c (y, main): Likewise.
+ * gcc.dg/vect/vect-strided-mult.c (y, main): Likewise.
+ * gcc.dg/vect/vect-strided-same-dr.c (y, main): Likewise.
+ * gcc.dg/vect/vect-strided-u16-i2.c (y, main): Likewise.
+ * gcc.dg/vect/vect-strided-u16-i4.c (y, main): Likewise.
+ * gcc.dg/vect/vect-strided-u32-i4.c (y, main): Likewise.
+ * gcc.dg/vect/vect-strided-u32-i8.c (y, main): Likewise.
+ * gcc.dg/vect/vect-strided-u8-i2-gap.c (y, main): Likewise.
+ * gcc.dg/vect/vect-strided-u8-i2.c (y, main): Likewise.
+ * gcc.dg/vect/vect-strided-u8-i8-gap2-big-array.c (y, main): Likewise.
+ * gcc.dg/vect/vect-strided-u8-i8-gap2.c (y, main): Likewise.
+ * gcc.dg/vect/vect-strided-u8-i8-gap4-big-array.c (y, main): Likewise.
+ * gcc.dg/vect/vect-strided-u8-i8-gap4-unknown.c (y, main): Likewise.
+ * gcc.dg/vect/vect-strided-u8-i8-gap4.c (y, main): Likewise.
+ * gcc.dg/vect/vect-strided-u8-i8-gap7-big-array.c (y, main): Likewise.
+ * gcc.dg/vect/vect-strided-u8-i8-gap7.c (y, main): Likewise.
+ * gcc.dg/vect/vect-strided-u8-i8.c (y, main): Likewise.
+ * gcc.dg/vect/vect-10-big-array.c (y): Delete.
+ (foo): Prevent vectorisation with asm volatile ("" ::: "memory")
+ instead of a conditional abort.
+ * gcc.dg/vect/vect-double-reduc-6-big-array.c (y, foo): As for
+ vect-10-big-array.c.
+ * gcc.dg/vect/vect-reduc-pattern-1b-big-array.c (y, foo): Likewise.
+ * gcc.dg/vect/vect-reduc-pattern-1c-big-array.c (y, foo): Likewise.
+ * gcc.dg/vect/vect-reduc-pattern-2b-big-array.c (y, foo): Likewise.
+ * gcc.dg/vect/vect-117.c (foo): Delete.
+ (main): Don't initalize it.
+
+2017-11-09 Jan Hubicka <hubicka@ucw.cz>
+
+ * gcc.c-torture/compile/pr82879.c: New testcase.
+
+2017-11-09 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/82902
+ * g++.dg/torture/pr82902.C: New testcase.
+
+2017-11-09 Martin Liska <mliska@suse.cz>
+
+ PR target/82863
+ * gcc.dg/pr82863.c: New test.
+
+2017-11-09 Hristian Kirtchev <kirtchev@adacore.com>
+
+ * gnat.dg/unreferenced.adb: New testcase.
+
+2017-11-09 Ed Schonberg <schonberg@adacore.com>
+
+ * gnat.dg/out_param.adb: New testcase.
+
+2017-11-09 Hristian Kirtchev <kirtchev@adacore.com>
+
+ * gnat.dg/elab3.adb, gnat.dg/elab3.ads, gnat.dg/elab3_pkg.adb,
+ gnat.dg/elab3_pkg.ads: New testcase.
+
+2017-11-09 Pierre-Marie de Rodat <derodat@adacore.com>
+
+ * gnat.dg/controlled2.adb, gnat.dg/controlled4.adb,
+ gnat.dg/finalized.adb: Disable new warning.
+
+2017-11-09 Jakub Jelinek <jakub@redhat.com>
+
+ PR debug/82837
+ * gcc.dg/debug/dwarf2/pr82837.c: New test.
+
+2017-11-08 Andi Kleen <ak@linux.intel.com>
+
+ * gcc.target/i386/force-indirect-call-1.c: New test.
+ * gcc.target/i386/force-indirect-call-2.c: New test.
+ * gcc.target/i386/force-indirect-call-3.c: New test.
+
+2017-11-08 Steven G. Kargl <kargl@kgcc.gnu.org>
+
+ PR Fortran/82841
+ * gfortran.dg/transfer_simplify_11.f90: New test.
+
+2017-11-08 Steven G. Kargl <kargl@gcc.gnu.org>
+
+ PR fortran/82884
+ * gfortran.dg/hollerith_character_array_constructor.f90: New test.
+
+2017-11-08 Kyrylo Tkachov <kyrylo.tkachov@arm.com>
+
+ * gcc.target/aarch64/store_v2vec_lanes.c: New test.
+
+2017-11-08 Kyrylo Tkachov <kyrylo.tkachov@arm.com>
+
+ * gcc.target/aarch64/load_v2vec_lanes_1.c: New test.
+
+2017-11-08 Kyrylo Tkachov <kyrylo.tkachov@arm.com>
+
+ * gcc.target/aarch64/construct_lane_zero_1.c: New test.
+
+2017-11-08 Ed Schonberg <schonberg@adacore.com>
+
+ * gnat.dg/delta_aggr.adb: New testcase.
+
+2017-11-08 Jakub Jelinek <jakub@redhat.com>
+
+ * g++.dg/pr57878.C (__sso_string_base::_M_get_allocator): Return
+ a value.
+
+ PR tree-optimization/78821
+ * gcc.dg/store_merging_2.c: Expect 3 store mergings instead of 2.
+ * gcc.dg/store_merging_13.c (f7, f8, f9, f10, f11, f12, f13): New
+ functions.
+ (main): Test also those. Expect 13 store mergings instead of 6.
+ * gcc.dg/store_merging_14.c (f7, f8, f9): New functions.
+ (main): Test also those. Expect 9 store mergings instead of 6.
+
+2017-11-08 Wilco Dijkstra <wdijkstr@arm.com>
+
+ * gcc.target/aarch64/dwarf-cfa-reg.c: Update.
+
+2017-11-08 Javier Miranda <miranda@adacore.com>
+
+ * gnat.dg/overriding_ops2.adb, gnat.dg/overriding_ops2.ads,
+ gnat.dg/overriding_ops2_pkg.ads, gnat.dg/overriding_ops2_pkg-high.ads:
+ New testcase.
+
+2017-11-08 Andreas Schwab <schwab@suse.de>
+
+ * c-c++-common/torture/aarch64-vect-lane-2.c (search_line_fast):
+ Change type to void.
+
+2017-11-08 Janne Blomqvist <jb@gcc.gnu.org>
+
+ PR 82869
+ * gfortran.dg/logical_temp_io.f90: New test.
+ * gfortran.dg/logical_temp_io_kind8.f90: New test.
+
+2017-11-08 Martin Liska <mliska@suse.cz>
+
+ * gcc.dg/tree-ssa/vrp101.c: Update expected pattern as
+ frequencies are not longer printed in dump output.
+
+2017-11-08 Eric Botcazou <ebotcazou@adacore.com>
+
+ * gcc.dg/strlenopt-33g.c: Remove duplicate dg-do command.
+
+2017-11-08 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
+
+ * g++.old-deja/g++.brendan/asm-extn1.C: Accept all sparc* targets.
+ (main): Add return type.
+
+2017-11-08 Martin Liska <mliska@suse.cz>
+
+ PR sanitizer/82792
+ * g++.dg/asan/pr82792.C: New test.
+
+2017-11-07 Jakub Jelinek <jakub@redhat.com>
+
+ PR target/82855
+ * gcc.target/i386/avx512dq-pr82855.c: New test.
+
+2017-11-07 Uros Bizjak <ubizjak@gmail.com>
+
+ PR target/80425
+ * gcc.target/i386/pr80425-3.c: New test.
+
+2017-11-07 Andreas Schwab <schwab@suse.de>
+
+ * g++.dg/pr50763-3.C (evalPoint): Return a value.
+
+2017-11-07 Wilco Dijkstra <wdijkstr@arm.com>
+ Jackson Woodruff <jackson.woodruff@arm.com>
+
+ PR tree-optimization/71026
+ * gcc.dg/div_neg: New test.
+
+2017-11-07 Sudakshina Das <sudi.das@arm.com>
+
+ PR middle-end/80131
+ * gcc.dg/pr80131-1.c: New Test.
+
+2017-11-07 Marc Glisse <marc.glisse@inria.fr>
+
+ * gcc.dg/tree-ssa/bitops-1.c: New file.
+
+2017-11-07 Marc Glisse <marc.glisse@inria.fr>
+
+ * gcc.dg/tree-ssa/negminus.c: New test.
+
+2017-11-06 Jeff Law <law@redhat.com>
+
+ * gcc.target/i386/stack-check-12.c: Revert to initial version. Then..
+ Add -fomit-frame-pointer.
+
+2017-11-06 Carl Love <cel@us.ibm.com>
+
+ * gcc.target/powerpc/builtins-revb-runnable.c: New runnable test file.
+
+2017-11-06 Michael Meissner <meissner@linux.vnet.ibm.com>
+
+ PR target/82748
+ * gcc.target/powerpc/pr82748-1.c: New test.
+ * gcc.target/powerpc/pr82748-2.c: Likewise.
+
+2017-11-06 Paolo Carlini <paolo.carlini@oracle.com>
+
+ PR c++/65579
+ * g++.dg/cpp0x/constexpr-template11.C: New.
+
+2017-11-06 Jakub Jelinek <jakub@redhat.com>
+
+ PR tree-optimization/82838
+ * gcc.c-torture/compile/pr82838.c: New test.
+
+2017-11-06 Jeff Law <law@redhat.com>
+
+ PR target/82788
+ * gcc.dg/pr82788.c: New test.
+
+2017-11-06 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * gcc.c-torture/compile/pr82816.c: New test.
+
+2017-11-06 Bill Schmidt <wschmidt@linux.vnet.ibm.com>
+
+ * gcc.target/powerpc/sad-vectorize-1.c: New file.
+ * gcc.target/powerpc/sad-vectorize-2.c: New file.
+ * gcc.target/powerpc/sad-vectorize-3.c: New file.
+ * gcc.target/powerpc/sad-vectorize-4.c: New file.
+
+2017-11-06 Martin Liska <mliska@suse.cz>
+
+ * c-c++-common/cilk-plus/AN/pr57541-2.c (foo1): Return a value
+ for functions with non-void return type, or change type to void,
+ or add -Wno-return-type for test.
+ (foo2): Likewise.
+ * c-c++-common/cilk-plus/AN/pr57541.c (foo): Likewise.
+ (foo1): Likewise.
+ * c-c++-common/cilk-plus/CK/errors.c: Likewise.
+ * c-c++-common/cilk-plus/CK/pr60197.c: Likewise.
+ * c-c++-common/cilk-plus/CK/spawn_in_return.c: Likewise.
+ * c-c++-common/fold-masked-cmp-1.c (test_pic): Likewise.
+ (test_exe): Likewise.
+ * c-c++-common/fold-masked-cmp-2.c (test_exe): Likewise.
+ * g++.dg/cilk-plus/AN/builtin_fn_mutating_tplt.cc (my_func): Likewise.
+ * g++.dg/cilk-plus/CK/pr68997.cc (fa2): Likewise.
+ * g++.dg/eh/sighandle.C (dosegv): Likewise.
+ * g++.dg/ext/vector14.C (foo): Likewise.
+ (main): Likewise.
+ * g++.dg/graphite/pr41305.C: Likewise.
+ * g++.dg/graphite/pr42930.C: Likewise.
+ * g++.dg/opt/pr46640.C (struct QBasicAtomicInt): Likewise.
+ (makeDir): Likewise.
+ * g++.dg/other/i386-8.C (foo): Likewise.
+ * g++.dg/pr45788.C: Likewise.
+ * g++.dg/pr64688.C (at_c): Likewise.
+ * g++.dg/pr65032.C (G::DecodeVorbis): Likewise.
+ * g++.dg/pr71633.C (c3::fn2): Likewise.
+ * g++.dg/stackprotectexplicit2.C (A): Likewise.
+ * g++.old-deja/g++.law/weak.C (main): Likewise.
+
+2017-11-06 Eric Botcazou <ebotcazou@adacore.com>
+
+ * gnat.dg/gcov: New directory.
+ * gnat.dg/gcov/gcov.exp: New driver.
+ * gnat.dg/gcov/check.adb: New test.
+
+2017-11-06 Christophe Lyon <christophe.lyon@linaro.org>
+
+ * gcc.c-torture/execute/pr23135.c: Move dg-add-options after
+ dg-options.
+ * gcc.dg/torture/pr78305.c: Move dg-do as first directive.
+ * gcc.misc-tests/gcov-3.c: Likewise.
+ * gcc.target/arm/cmse/baseline/cmse-11.c: Move dg-options before dg-add-options.
+ * gcc.target/arm/cmse/baseline/cmse-13.c: Likewise.
+ * gcc.target/arm/cmse/baseline/cmse-2.c: Likewise.
+ * gcc.target/arm/cmse/baseline/cmse-6.c: Likewise.
+ * gcc.target/arm/cmse/baseline/softfp.c: Likewise.
+ * gcc.target/arm/cmse/mainline/hard-sp/cmse-13.c: Likewise.
+ * gcc.target/arm/cmse/mainline/hard-sp/cmse-5.c: Likewise.
+ * gcc.target/arm/cmse/mainline/hard-sp/cmse-7.c: Likewise.
+ * gcc.target/arm/cmse/mainline/hard-sp/cmse-8.c: Likewise.
+ * gcc.target/arm/cmse/mainline/hard/cmse-13.c: Likewise.
+ * gcc.target/arm/cmse/mainline/hard/cmse-5.c: Likewise.
+ * gcc.target/arm/cmse/mainline/hard/cmse-7.c: Likewise.
+ * gcc.target/arm/cmse/mainline/hard/cmse-8.c: Likewise.
+ * gcc.target/arm/cmse/mainline/soft/cmse-13.c: Likewise.
+ * gcc.target/arm/cmse/mainline/soft/cmse-5.c: Likewise.
+ * gcc.target/arm/cmse/mainline/soft/cmse-7.c: Likewise.
+ * gcc.target/arm/cmse/mainline/soft/cmse-8.c: Likewise.
+ * gcc.target/arm/cmse/mainline/softfp-sp/cmse-5.c: Likewise.
+ * gcc.target/arm/cmse/mainline/softfp-sp/cmse-7.c: Likewise.
+ * gcc.target/arm/cmse/mainline/softfp-sp/cmse-8.c: Likewise.
+ * gcc.target/arm/cmse/mainline/softfp/cmse-13.c: Likewise.
+ * gcc.target/arm/cmse/mainline/softfp/cmse-5.c: Likewise.
+ * gcc.target/arm/cmse/mainline/softfp/cmse-7.c: Likewise.
+ * gcc.target/arm/cmse/mainline/softfp/cmse-8.c: Likewise.
+ * gcc.target/arm/lp1189445.c: Likewise.
+
+2017-11-06 Mukesh Kapoor <mukesh.kapoor@oracle.com>
+
+ PR c++/80955
+ * g++.dg/cpp0x/udlit-macros.C: New.
+
+2017-11-06 Paul Thomas <pault@gcc.gnu.org>
+
+ PR fortran/69739
+ * gfortran.dg/pr69739.f90: New test.
+
+2017-11-06 Martin Liska <mliska@suse.cz>
+
+ * c-c++-common/Wimplicit-fallthrough-8.c: Return a value for
+ functions with non-void return type, or change type to void, or
+ add -Wno-return-type for test.
+ * c-c++-common/asan/pr63638.c (f): Likewise.
+ * c-c++-common/goacc/parallel-1.c (firstprivate): Likewise.
+ * c-c++-common/gomp/sink-1.c (depend): Likewise.
+ * c-c++-common/missing-symbol.c: Likewise.
+ * c-c++-common/pr36513-2.c (main2): Likewise.
+ * c-c++-common/pr36513.c (main1): Likewise.
+ * c-c++-common/pr49706-2.c: Likewise.
+ * c-c++-common/pr65120.c: Likewise.
+ * c-c++-common/tm/volatile-1.c (f): Likewise.
+ * c-c++-common/vector-1.c (f): Likewise.
+ * c-c++-common/vector-2.c (f): Likewise.
+ * g++.dg/abi/abi-tag14.C (f): Likewise.
+ (g): Likewise.
+ * g++.dg/abi/abi-tag18.C (f): Likewise.
+ * g++.dg/abi/abi-tag18a.C (f): Likewise.
+ * g++.dg/abi/covariant2.C (struct c3): Likewise.
+ (struct c7): Likewise.
+ * g++.dg/abi/covariant3.C (c1::f6): Likewise.
+ * g++.dg/abi/mangle7.C (f1): Likewise.
+ * g++.dg/asan/pr81340.C (class e): Likewise.
+ (e::f): Likewise.
+ * g++.dg/concepts/fn8.C (struct S): Likewise.
+ * g++.dg/concepts/pr65575.C (f): Likewise.
+ * g++.dg/concepts/template-parm11.C (f): Likewise.
+ * g++.dg/conversion/op6.C: Likewise.
+ * g++.dg/cpp0x/Wunused-variable-1.C (foo): Likewise.
+ * g++.dg/cpp0x/access01.C: Likewise.
+ * g++.dg/cpp0x/alignas3.C (class alignas): Likewise.
+ * g++.dg/cpp0x/auto2.C (f): Likewise.
+ (struct A): Likewise.
+ (main): Likewise.
+ * g++.dg/cpp0x/constexpr-array17.C (struct D): Likewise.
+ * g++.dg/cpp0x/constexpr-defarg2.C (a): Likewise.
+ (B::foo): Likewise.
+ (B::bar): Likewise.
+ * g++.dg/cpp0x/constexpr-memfn1.C (struct Y): Likewise.
+ * g++.dg/cpp0x/dc1.C (struct D): Likewise.
+ * g++.dg/cpp0x/dc3.C (struct D): Likewise.
+ * g++.dg/cpp0x/decltype12.C: Likewise.
+ * g++.dg/cpp0x/decltype17.C (main): Likewise.
+ * g++.dg/cpp0x/decltype3.C: Likewise.
+ * g++.dg/cpp0x/decltype41.C (struct C): Likewise.
+ (struct D): Likewise.
+ * g++.dg/cpp0x/defaulted28.C (f): Likewise.
+ * g++.dg/cpp0x/enum_base3.C (struct D): Likewise.
+ * g++.dg/cpp0x/gen-attrs-4.C (five): Likewise.
+ * g++.dg/cpp0x/initlist96.C: Likewise.
+ * g++.dg/cpp0x/lambda/lambda-58566.C (struct A): Likewise.
+ * g++.dg/cpp0x/lambda/lambda-conv10.C: Likewise.
+ * g++.dg/cpp0x/lambda/lambda-conv12.C: Likewise.
+ * g++.dg/cpp0x/lambda/lambda-defarg3.C: Likewise.
+ * g++.dg/cpp0x/lambda/lambda-ice3.C (Klass::dostuff): Likewise.
+ * g++.dg/cpp0x/lambda/lambda-ice5.C (foo): Likewise.
+ * g++.dg/cpp0x/lambda/lambda-nested2.C (f1): Likewise.
+ * g++.dg/cpp0x/lambda/lambda-template12.C (class X): Likewise.
+ * g++.dg/cpp0x/lambda/lambda-template2.C (struct T): Likewise.
+ * g++.dg/cpp0x/lambda/lambda-this12.C (struct A): Likewise.
+ * g++.dg/cpp0x/nolinkage1.C (main): Likewise.
+ * g++.dg/cpp0x/nolinkage1a.cc (dummy): Likewise.
+ * g++.dg/cpp0x/nsdmi-template5.C: Likewise.
+ * g++.dg/cpp0x/parse1.C (B::B): Likewise.
+ * g++.dg/cpp0x/pr34054.C (foo): Likewise.
+ * g++.dg/cpp0x/pr47416.C: Likewise.
+ * g++.dg/cpp0x/pr58781.C: Likewise.
+ * g++.dg/cpp0x/pr70538.C: Likewise.
+ * g++.dg/cpp0x/pr81325.C: Likewise.
+ * g++.dg/cpp0x/range-for13.C (begin): Likewise.
+ (end): Likewise.
+ * g++.dg/cpp0x/range-for14.C (begin): Likewise.
+ (end): Likewise.
+ * g++.dg/cpp0x/rv2n.C (test2_18): Likewise.
+ (test2_28): Likewise.
+ (test2_38): Likewise.
+ (test2_58): Likewise.
+ (test2_68): Likewise.
+ (test2_78): Likewise.
+ * g++.dg/cpp0x/rv3n.C (test3_128): Likewise.
+ * g++.dg/cpp0x/static_assert10.C (foo): Likewise.
+ * g++.dg/cpp0x/static_assert11.C (struct A): Likewise.
+ * g++.dg/cpp0x/static_assert12.C: Likewise.
+ * g++.dg/cpp0x/static_assert13.C: Likewise.
+ * g++.dg/cpp0x/trailing1.C (struct A): Likewise.
+ * g++.dg/cpp0x/trailing5.C (foo): Likewise.
+ (bar): Likewise.
+ * g++.dg/cpp0x/variadic114.C: Likewise.
+ * g++.dg/cpp0x/variadic57.C (Dims...>::foo): Likewise.
+ (bar): Likewise.
+ * g++.dg/cpp0x/variadic65.C: Likewise.
+ * g++.dg/cpp0x/variadic66.C (bind): Likewise.
+ * g++.dg/cpp0x/variadic97.C: Likewise.
+ * g++.dg/cpp0x/variadic98.C (__attribute__): Likewise.
+ * g++.dg/cpp1y/auto-fn11.C: Likewise.
+ * g++.dg/cpp1y/auto-fn29.C: Likewise.
+ * g++.dg/cpp1y/auto-fn38.C: Likewise.
+ * g++.dg/cpp1y/constexpr-return2.C: Likewise.
+ * g++.dg/cpp1y/lambda-init7.C (foo): Likewise.
+ * g++.dg/cpp1y/pr63996.C: Likewise.
+ * g++.dg/cpp1y/pr65202.C: Likewise.
+ * g++.dg/cpp1y/pr66443-cxx14.C (Ok): Likewise.
+ * g++.dg/cpp1y/pr79253.C (struct D): Likewise.
+ * g++.dg/cpp1y/static_assert1.C: Likewise.
+ * g++.dg/cpp1y/static_assert2.C: Likewise.
+ * g++.dg/cpp1y/var-templ44.C: Likewise.
+ * g++.dg/cpp1z/fold6.C (f): Likewise.
+ * g++.dg/cpp1z/inline-var2.C (foo): Likewise.
+ * g++.dg/cpp1z/lambda-this1.C (struct B): Likewise.
+ * g++.dg/cpp1z/static_assert-nomsg.C: Likewise.
+ * g++.dg/debug/dwarf-eh-personality-1.C (foobar): Likewise.
+ * g++.dg/debug/dwarf2/dwarf4-typedef.C (struct B): Likewise.
+ * g++.dg/debug/dwarf2/icf.C: Likewise.
+ * g++.dg/debug/dwarf2/pr61433.C (main): Likewise.
+ * g++.dg/debug/nullptr01.C (g): Likewise.
+ * g++.dg/debug/pr16792.C (foo): Likewise.
+ * g++.dg/debug/pr46241.C (class btCollisionWorld): Likewise.
+ * g++.dg/debug/pr46338.C (struct S): Likewise.
+ * g++.dg/debug/pr47106.C (baz): Likewise.
+ (bar): Likewise.
+ (foo): Likewise.
+ * g++.dg/debug/pr71057.C (fn1): Likewise.
+ * g++.dg/debug/pr71432.C (class CLIParameterType): Likewise.
+ (CLIParameterType::checkSwitched): Likewise.
+ * g++.dg/debug/pr80461.C (struct B): Likewise.
+ * g++.dg/dfp/44473-1.C (bar): Likewise.
+ * g++.dg/dfp/44473-2.C (bar): Likewise.
+ (foo): Likewise.
+ * g++.dg/eh/builtin1.C: Likewise.
+ * g++.dg/eh/builtin2.C: Likewise.
+ * g++.dg/eh/builtin3.C: Likewise.
+ * g++.dg/eh/pr45569.C (j): Likewise.
+ * g++.dg/eh/unwind2.C: Likewise.
+ * g++.dg/expr/bitfield11.C: Likewise.
+ * g++.dg/expr/static_cast7.C (f): Likewise.
+ * g++.dg/ext/altivec-14.C: Likewise.
+ * g++.dg/ext/asm13.C (fn1): Likewise.
+ * g++.dg/ext/builtin-object-size3.C: Likewise.
+ * g++.dg/ext/has_nothrow_assign_odr.C (main): Likewise.
+ (S::operator=): Likewise.
+ * g++.dg/ext/label7.C (f): Likewise.
+ * g++.dg/ext/label8.C (f): Likewise.
+ * g++.dg/ext/tmplattr7.C (test): Likewise.
+ * g++.dg/ext/vector8.C (f): Likewise.
+ * g++.dg/ext/visibility/anon1.C: Likewise.
+ * g++.dg/ext/visibility/anon2.C (f): Likewise.
+ * g++.dg/ext/visibility/namespace1.C (__attribute): Likewise.
+ * g++.dg/ext/vla16.C (fn1): Likewise.
+ * g++.dg/goacc/reference.C: Likewise.
+ * g++.dg/gomp/pr37189.C: Likewise.
+ * g++.dg/gomp/pr39495-1.C: Likewise.
+ * g++.dg/gomp/pr39495-2.C: Likewise.
+ * g++.dg/gomp/pr82054.C: Likewise.
+ * g++.dg/inherit/covariant10.C (struct c6): Likewise.
+ (struct c17): Likewise.
+ * g++.dg/inherit/covariant11.C (struct c1): Likewise.
+ (struct c3): Likewise.
+ (struct c11): Likewise.
+ (struct c15): Likewise.
+ * g++.dg/inherit/protected1.C (A::operator==): Likewise.
+ * g++.dg/init/inline1.C (struct A): Likewise.
+ * g++.dg/init/new18.C: Likewise.
+ * g++.dg/init/reference2.C (f): Likewise.
+ * g++.dg/init/reference3.C: Likewise.
+ * g++.dg/init/switch1.C (f): Likewise.
+ * g++.dg/ipa/devirt-10.C (struct wxDCBase): Likewise.
+ * g++.dg/ipa/devirt-13.C (main): Likewise.
+ * g++.dg/ipa/devirt-14.C (main): Likewise.
+ * g++.dg/ipa/devirt-15.C (main): Likewise.
+ * g++.dg/ipa/devirt-16.C (main): Likewise.
+ * g++.dg/ipa/devirt-17.C (main): Likewise.
+ * g++.dg/ipa/devirt-18.C (main): Likewise.
+ * g++.dg/ipa/devirt-19.C: Likewise.
+ * g++.dg/ipa/devirt-21.C (main): Likewise.
+ * g++.dg/ipa/devirt-23.C (main): Likewise.
+ * g++.dg/ipa/devirt-38.C: Likewise.
+ * g++.dg/ipa/devirt-40.C (A::m_fn1): Likewise.
+ * g++.dg/ipa/devirt-41.C (main): Likewise.
+ * g++.dg/ipa/devirt-42.C (main): Likewise.
+ * g++.dg/ipa/devirt-44.C (struct A): Likewise.
+ (main): Likewise.
+ * g++.dg/ipa/devirt-45.C (struct A): Likewise.
+ (main): Likewise.
+ * g++.dg/ipa/devirt-48.C (struct B): Likewise.
+ (struct D): Likewise.
+ * g++.dg/ipa/devirt-52.C: Likewise.
+ * g++.dg/ipa/nothrow-1.C (main): Likewise.
+ * g++.dg/ipa/pr43812.C (LocalSurface::bbox): Likewise.
+ * g++.dg/ipa/pr44372.C: Likewise.
+ * g++.dg/ipa/pr45572-1.C (fgetc_unlocked): Likewise.
+ (putc_unlocked): Likewise.
+ (getline): Likewise.
+ (ferror_unlocked): Likewise.
+ * g++.dg/ipa/pr58371.C: Likewise.
+ * g++.dg/ipa/pr59176.C: Likewise.
+ * g++.dg/ipa/pr60640-1.C (class G): Likewise.
+ * g++.dg/ipa/pr61540.C (struct top): Likewise.
+ * g++.dg/ipa/pr63470.C (class FTjackSupport): Likewise.
+ * g++.dg/ipa/pr63587-1.C: Likewise.
+ * g++.dg/ipa/pr63587-2.C: Likewise.
+ * g++.dg/ipa/pr63838.C (__attribute__): Likewise.
+ * g++.dg/ipa/pr63894.C (J::m_fn3): Likewise.
+ * g++.dg/ipa/pr64068.C (class A): Likewise.
+ (A::m_fn2): Likewise.
+ (class C): Likewise.
+ * g++.dg/ipa/pr64896.C (struct D): Likewise.
+ * g++.dg/ipa/pr65002.C: Likewise.
+ * g++.dg/ipa/pr65008.C (__attribute__): Likewise.
+ * g++.dg/ipa/pr65465.C (struct D): Likewise.
+ * g++.dg/ipa/pr66896.C (struct A): Likewise.
+ * g++.dg/ipa/pr68851.C (class G): Likewise.
+ (C::checkPseudoClass): Likewise.
+ * g++.dg/ipa/pr78211.C: Likewise.
+ * g++.dg/ipa/pr79931.C (AttrImpl::insertBefore): Likewise.
+ * g++.dg/ipa/pure-const-1.C (main): Likewise.
+ * g++.dg/ipa/pure-const-2.C (main): Likewise.
+ * g++.dg/ipa/pure-const-3.C (main): Likewise.
+ * g++.dg/ipa/remref-1.C (main): Likewise.
+ * g++.dg/ipa/remref-2.C (main): Likewise.
+ * g++.dg/lookup/builtin2.C (f): Likewise.
+ * g++.dg/lookup/crash3.C (struct A): Likewise.
+ (struct B): Likewise.
+ (crash): Likewise.
+ * g++.dg/lookup/friend20.C: Likewise.
+ * g++.dg/lookup/pr80891-5.C (vf2_subgraph_iso): Likewise.
+ * g++.dg/lookup/struct2.C (A::c): Likewise.
+ * g++.dg/lto/20080709_0.C (f): Likewise.
+ * g++.dg/lto/20080907_0.C: Likewise.
+ * g++.dg/lto/20080915_0.C (struct Baz): Likewise.
+ * g++.dg/lto/20080916_0.C (g): Likewise.
+ * g++.dg/lto/20081022_0.C (main): Likewise.
+ * g++.dg/lto/20081023_0.C (main): Likewise.
+ * g++.dg/lto/20081118_0.C (foo::method): Likewise.
+ * g++.dg/lto/20081118_1.C (bar::method): Likewise.
+ * g++.dg/lto/20081120-1_0.C: Likewise.
+ * g++.dg/lto/20081120-1_1.C: Likewise.
+ * g++.dg/lto/20081127_1.C (main): Likewise.
+ * g++.dg/lto/20081217-2_0.C (struct A): Likewise.
+ * g++.dg/lto/20090303_0.C: Likewise.
+ * g++.dg/lto/20090311-1_0.C: Likewise.
+ * g++.dg/lto/20090312_0.C: Likewise.
+ * g++.dg/lto/20090315_0.C (main): Likewise.
+ * g++.dg/lto/20091002-1_0.C: Likewise.
+ * g++.dg/lto/20091002-2_0.C (class DataArray): Likewise.
+ * g++.dg/lto/20091002-3_0.C (class DataArray): Likewise.
+ * g++.dg/lto/20091004-1_0.C: Likewise.
+ * g++.dg/lto/20091004-2_0.C: Likewise.
+ * g++.dg/lto/20091004-3_1.C (All_Torus_Intersections): Likewise.
+ * g++.dg/lto/20100721-1_0.C (__gthread_active_p): Likewise.
+ * g++.dg/lto/20101010-1_0.C: Likewise.
+ * g++.dg/lto/20101010-2_0.C: Likewise.
+ * g++.dg/lto/pr45679-1_0.C: Likewise.
+ * g++.dg/lto/pr45679-1_1.C: Likewise.
+ * g++.dg/lto/pr45679-2_0.C: Likewise.
+ * g++.dg/lto/pr48042_0.C (B::x): Likewise.
+ * g++.dg/lto/pr51650-1_0.C (fn): Likewise.
+ (main): Likewise.
+ * g++.dg/lto/pr51650-3_0.C (fn): Likewise.
+ (main): Likewise.
+ * g++.dg/lto/pr63270_1.C: Likewise.
+ * g++.dg/lto/pr65193_0.C: Likewise.
+ * g++.dg/lto/pr65302_0.C: Likewise.
+ * g++.dg/lto/pr65316_0.C: Likewise.
+ * g++.dg/lto/pr65475c_0.C: Likewise.
+ * g++.dg/lto/pr65549_0.C (main): Likewise.
+ * g++.dg/lto/pr69077_0.C (cWeightedStdDev::netPack): Likewise.
+ * g++.dg/lto/pr69589_0.C: Likewise.
+ * g++.dg/opt/combine.C (qvariant_cast): Likewise.
+ (QScriptDebuggerBackendPrivate::trace): Likewise.
+ * g++.dg/opt/complex3.C (j): Likewise.
+ * g++.dg/opt/covariant1.C (struct T): Likewise.
+ * g++.dg/opt/declone3.C (Item::m_fn1): Likewise.
+ * g++.dg/opt/dump1.C (__attribute__): Likewise.
+ * g++.dg/opt/inline15.C (struct C): Likewise.
+ (fn2): Likewise.
+ * g++.dg/opt/local1.C (h): Likewise.
+ * g++.dg/opt/memcpy1.C (csBoxClipper::Clip): Likewise.
+ * g++.dg/opt/new1.C: Likewise.
+ * g++.dg/opt/nrv8.C (main): Likewise.
+ * g++.dg/opt/pr23299.C (struct A): Likewise.
+ (struct B): Likewise.
+ (struct C): Likewise.
+ * g++.dg/opt/pr27826.C (struct Geometry): Likewise.
+ * g++.dg/opt/pr44919.C (back_inserter): Likewise.
+ * g++.dg/opt/pr47615.C (main): Likewise.
+ * g++.dg/opt/pr55329.C (struct A): Likewise.
+ * g++.dg/opt/pr61456.C (Set): Likewise.
+ * g++.dg/opt/pr65003.C (D::foo): Likewise.
+ (F::foo): Likewise.
+ * g++.dg/opt/pr65554.C: Likewise.
+ * g++.dg/opt/pr69432.C (struct C): Likewise.
+ * g++.dg/opt/pr78373.C (struct D): Likewise.
+ (Traits>::m_fn4): Likewise.
+ * g++.dg/opt/pr79267.C (struct F): Likewise.
+ * g++.dg/opt/pr82159-2.C: Likewise.
+ * g++.dg/other/array3.C (reserve): Likewise.
+ * g++.dg/other/crash-5.C (f): Likewise.
+ * g++.dg/other/crash-8.C: Likewise.
+ * g++.dg/other/error34.C (S): Likewise.
+ * g++.dg/other/pr22003.C (c3::func): Likewise.
+ * g++.dg/other/pr24623.C (RefCountPointer): Likewise.
+ * g++.dg/other/pr29610.C (struct __normal_iterator): Likewise.
+ (Painter::for_each): Likewise.
+ (Painter::redraw_window): Likewise.
+ * g++.dg/other/pr42645-1.C (struct S): Likewise.
+ * g++.dg/other/pr42645-2.C (foo): Likewise.
+ (f3): Likewise.
+ * g++.dg/other/pr52048.C: Likewise.
+ * g++.dg/other/typedef3.C (XalanCProcessor::getParseOption): Likewise.
+ * g++.dg/overload/defarg4.C (class foo): Likewise.
+ (bar::Initialize): Likewise.
+ * g++.dg/overload/operator5.C (equalIgnoringCase): Likewise.
+ * g++.dg/overload/ref-conv1.C: Likewise.
+ * g++.dg/overload/template5.C (test): Likewise.
+ * g++.dg/parse/crash40.C (class AAA): Likewise.
+ * g++.dg/parse/crash61.C: Likewise.
+ * g++.dg/parse/crash67.C: Likewise.
+ * g++.dg/parse/ctor5.C: Likewise.
+ * g++.dg/parse/defarg4.C (Foo): Likewise.
+ * g++.dg/parse/defarg6.C: Likewise.
+ * g++.dg/parse/error5.C (class Foo): Likewise.
+ * g++.dg/parse/expr2.C (foo): Likewise.
+ * g++.dg/parse/friend7.C: Likewise.
+ * g++.dg/parse/namespace1.C (bar): Likewise.
+ * g++.dg/parse/namespace9.C (g): Likewise.
+ * g++.dg/parse/ret-type2.C: Likewise.
+ * g++.dg/parse/typedef8.C (foo): Likewise.
+ * g++.dg/pch/static-1.C (LocalStaticTest): Likewise.
+ (main): Likewise.
+ * g++.dg/plugin/diagnostic-test-expressions-1.C (test_structure_references): Likewise.
+ (test_postfix_incdec): Likewise.
+ (test_sizeof): Likewise.
+ (test_alignof): Likewise.
+ (test_prefix_incdec): Likewise.
+ * g++.dg/plugin/dumb-plugin-test-1.C (func): Likewise.
+ * g++.dg/plugin/self-assign-test-1.C (func): Likewise.
+ * g++.dg/plugin/self-assign-test-2.C (func): Likewise.
+ * g++.dg/plugin/self-assign-test-3.C (func): Likewise.
+ * g++.dg/pr55513.C (main): Likewise.
+ * g++.dg/pr55604.C (main): Likewise.
+ * g++.dg/pr57662.C: Likewise.
+ * g++.dg/pr58389.C (F::m_fn1): Likewise.
+ * g++.dg/pr59510.C: Likewise.
+ * g++.dg/pr67989.C: Likewise.
+ * g++.dg/pr70590-2.C: Likewise.
+ * g++.dg/pr70590.C: Likewise.
+ * g++.dg/pr70965.C (foo): Likewise.
+ * g++.dg/pr77550.C: Likewise.
+ * g++.dg/pr80287.C (struct A): Likewise.
+ * g++.dg/pr80707.C (A::m_fn1): Likewise.
+ * g++.dg/pr81194.C: Likewise.
+ * g++.dg/spellcheck-identifiers.C: Likewise.
+ * g++.dg/tc1/dr152.C: Likewise.
+ * g++.dg/template/aggr-init1.C (CreateA): Likewise.
+ * g++.dg/template/anon1.C (struct x): Likewise.
+ (struct vector): Likewise.
+ * g++.dg/template/array29.C: Likewise.
+ * g++.dg/template/array7.C (bar): Likewise.
+ * g++.dg/template/canon-type-8.C: Likewise.
+ * g++.dg/template/conv1.C (First::Foo): Likewise.
+ * g++.dg/template/crash107.C: Likewise.
+ * g++.dg/template/crash23.C (f): Likewise.
+ * g++.dg/template/crash8.C (struct bar): Likewise.
+ * g++.dg/template/defarg4.C (struct A): Likewise.
+ * g++.dg/template/dependent-expr9.C: Likewise.
+ * g++.dg/template/error10.C (Btest): Likewise.
+ * g++.dg/template/friend32.C (f): Likewise.
+ * g++.dg/template/init6.C (Graph::Inner::get): Likewise.
+ (main): Likewise.
+ * g++.dg/template/memfriend7.C (A::h): Likewise.
+ * g++.dg/template/new10.C (Analyzer::ReadDictionary): Likewise.
+ * g++.dg/template/nontype12.C (baz): Likewise.
+ * g++.dg/template/overload12.C (foo2): Likewise.
+ * g++.dg/template/overload5.C (foo::f): Likewise.
+ * g++.dg/template/overload8.C (struct A): Likewise.
+ * g++.dg/template/partial10.C (fn): Likewise.
+ (main): Likewise.
+ * g++.dg/template/partial9.C (f): Likewise.
+ * g++.dg/template/qual1.C (shift_compare): Likewise.
+ * g++.dg/template/show-template-tree-3.C: Likewise.
+ * g++.dg/template/sizeof8.C (S<sizeof): Likewise.
+ * g++.dg/template/sizeof9.C (d): Likewise.
+ * g++.dg/template/spec6.C: Likewise.
+ * g++.dg/template/spec7.C (h): Likewise.
+ * g++.dg/template/typedef8.C: Likewise.
+ * g++.dg/template/using20.C (f): Likewise.
+ * g++.dg/template/vla1.C (label): Likewise.
+ * g++.dg/tm/cgraph_edge.C: Likewise.
+ * g++.dg/tm/pr46646.C: Likewise.
+ * g++.dg/tm/pr47554.C (class list): Likewise.
+ * g++.dg/tm/pr47573.C (getStringHeight): Likewise.
+ * g++.dg/tm/unsafe1.C (f): Likewise.
+ * g++.dg/tm/unsafe2.C (g): Likewise.
+ * g++.dg/torture/pr70971.C: Likewise.
+ * g++.dg/torture/20070621-1.C: Likewise.
+ * g++.dg/torture/20090329-1.C: Likewise.
+ * g++.dg/torture/20141013.C: Likewise.
+ * g++.dg/torture/pr33134.C (fxsaveGIF): Likewise.
+ * g++.dg/torture/pr33340.C (new): Likewise.
+ * g++.dg/torture/pr33627.C (class pf_Frag): Likewise.
+ * g++.dg/torture/pr34222.C (readFloat): Likewise.
+ * g++.dg/torture/pr34241.C (test): Likewise.
+ * g++.dg/torture/pr34641.C: Likewise.
+ * g++.dg/torture/pr34850.C (OctetString::operator^=): Likewise.
+ * g++.dg/torture/pr35164-1.C: Likewise.
+ * g++.dg/torture/pr36745.C: Likewise.
+ * g++.dg/torture/pr38705.C (S::bar): Likewise.
+ * g++.dg/torture/pr38811.C (AbcAbcdTracer::TestIsoAbcde): Likewise.
+ * g++.dg/torture/pr39362.C: Likewise.
+ * g++.dg/torture/pr39732.C (f): Likewise.
+ * g++.dg/torture/pr40991.C: Likewise.
+ * g++.dg/torture/pr41775.C: Likewise.
+ * g++.dg/torture/pr42183.C: Likewise.
+ * g++.dg/torture/pr42450.C: Likewise.
+ * g++.dg/torture/pr42704.C: Likewise.
+ * g++.dg/torture/pr42760.C (baz): Likewise.
+ (bar): Likewise.
+ * g++.dg/torture/pr42773.C (Cell::obscuringCells): Likewise.
+ * g++.dg/torture/pr42883.C: Likewise.
+ * g++.dg/torture/pr43905.C (struct Matrix): Likewise.
+ * g++.dg/torture/pr44148.C: Likewise.
+ * g++.dg/torture/pr44295.C: Likewise.
+ * g++.dg/torture/pr44357.C: Likewise.
+ * g++.dg/torture/pr44813.C: Likewise.
+ * g++.dg/torture/pr45580.C: Likewise.
+ * g++.dg/torture/pr45874.C (Mpeg2FrameConstructor::ParsePictureHeader): Likewise.
+ * g++.dg/torture/pr45877.C: Likewise.
+ * g++.dg/torture/pr46383.C: Likewise.
+ * g++.dg/torture/pr46469.C (__attribute__): Likewise.
+ (identifierByPthreadHandle): Likewise.
+ * g++.dg/torture/pr47313.C: Likewise.
+ * g++.dg/torture/pr48271.C: Likewise.
+ * g++.dg/torture/pr49615.C (Dispatch): Likewise.
+ (C::f): Likewise.
+ * g++.dg/torture/pr49770.C (main): Likewise.
+ * g++.dg/torture/pr49938.C: Likewise.
+ * g++.dg/torture/pr51436.C: Likewise.
+ * g++.dg/torture/pr51482.C (anim_track_bez_wvect::tangent): Likewise.
+ * g++.dg/torture/pr51737.C (id_state::start_file): Likewise.
+ * g++.dg/torture/pr51959.C: Likewise.
+ * g++.dg/torture/pr52772.C (class c6): Likewise.
+ * g++.dg/torture/pr52918-2.C (__cxa_allocate_exception): Likewise.
+ * g++.dg/torture/pr53011.C: Likewise.
+ * g++.dg/torture/pr53602.C: Likewise.
+ * g++.dg/torture/pr53752.C: Likewise.
+ * g++.dg/torture/pr54838.C: Likewise.
+ * g++.dg/torture/pr54902.C: Likewise.
+ * g++.dg/torture/pr56029.C: Likewise.
+ * g++.dg/torture/pr56768.C (operator!=): Likewise.
+ * g++.dg/torture/pr57107.C: Likewise.
+ * g++.dg/torture/pr57140.C: Likewise.
+ * g++.dg/torture/pr57235.C: Likewise.
+ * g++.dg/torture/pr58252.C: Likewise.
+ * g++.dg/torture/pr58555.C: Likewise.
+ * g++.dg/torture/pr59208.C (get_dbx_doc): Likewise.
+ * g++.dg/torture/pr60438-1.C (foo): Likewise.
+ * g++.dg/torture/pr60746.C (Two::run): Likewise.
+ * g++.dg/torture/pr61554.C: Likewise.
+ * g++.dg/torture/pr63419.C: Likewise.
+ * g++.dg/torture/pr63476.C: Likewise.
+ * g++.dg/torture/pr63512.C (C::m_fn3): Likewise.
+ * g++.dg/torture/pr64282.C (class H): Likewise.
+ * g++.dg/torture/pr64378.C (struct top): Likewise.
+ * g++.dg/torture/pr64565.C: Likewise.
+ * g++.dg/torture/pr64568-2.C: Likewise.
+ * g++.dg/torture/pr64669.C (Lex::advance_one_char): Likewise.
+ * g++.dg/torture/pr64686.C (B::m_fn1): Likewise.
+ * g++.dg/torture/pr64978.C (B::m_fn2): Likewise.
+ * g++.dg/torture/pr64995.C (A::m_fn2): Likewise.
+ * g++.dg/torture/pr65655.C: Likewise.
+ * g++.dg/torture/pr65851.C: Likewise.
+ * g++.dg/torture/pr67055.C: Likewise.
+ * g++.dg/torture/pr67191.C: Likewise.
+ * g++.dg/torture/pr68852.C: Likewise.
+ * g++.dg/torture/pr69264.C: Likewise.
+ * g++.dg/torture/pr77674.C: Likewise.
+ * g++.dg/torture/pr77947.C (B::m_fn2): Likewise.
+ * g++.dg/torture/pr78268.C: Likewise.
+ * g++.dg/torture/pr78507.C: Likewise.
+ * g++.dg/torture/pr78692.C (F::g): Likewise.
+ * g++.dg/torture/pr80171.C: Likewise.
+ * g++.dg/torture/pr82154.C (class f): Likewise.
+ (f::k): Likewise.
+ * g++.dg/tree-ssa/copyprop.C: Likewise.
+ * g++.dg/tree-ssa/pr22444.C: Likewise.
+ * g++.dg/tree-ssa/pr23948.C (make_scheduler_request): Likewise.
+ * g++.dg/tree-ssa/pr24172.C (dummy): Likewise.
+ * g++.dg/tree-ssa/pr24351-3.C: Likewise.
+ * g++.dg/tree-ssa/pr27283.C: Likewise.
+ * g++.dg/tree-ssa/pr27291.C: Likewise.
+ * g++.dg/tree-ssa/pr27548.C: Likewise.
+ * g++.dg/tree-ssa/pr34355.C (Parse_Float): Likewise.
+ * g++.dg/tree-ssa/pr42337.C: Likewise.
+ * g++.dg/tree-ssa/pred-1.C (main): Likewise.
+ * g++.dg/ubsan/pr65019.C (C::foo): Likewise.
+ * g++.dg/ubsan/pr65583.C: Likewise.
+ * g++.dg/vect/pr60836.cc (norm_): Likewise.
+ * g++.dg/vect/pr68145.cc: Likewise.
+ * g++.dg/vect/pr70729-nest.cc (my_alloc): Likewise.
+ * g++.dg/vect/pr70729.cc (my_alloc): Likewise.
+ * g++.dg/warn/Waddress-3.C: Likewise.
+ * g++.dg/warn/Wconversion-null-2.C (warn_for___null): Likewise.
+ * g++.dg/warn/Wnull-conversion-2.C (main): Likewise.
+ * g++.dg/warn/Wparentheses-10.C: Likewise.
+ * g++.dg/warn/Wparentheses-11.C: Likewise.
+ * g++.dg/warn/Wparentheses-12.C: Likewise.
+ * g++.dg/warn/Wparentheses-25.C: Likewise.
+ * g++.dg/warn/Wparentheses-6.C: Likewise.
+ * g++.dg/warn/Wparentheses-7.C: Likewise.
+ * g++.dg/warn/Wparentheses-8.C: Likewise.
+ * g++.dg/warn/Wparentheses-9.C: Likewise.
+ * g++.dg/warn/Wshadow-5.C: Likewise.
+ * g++.dg/warn/Wtype-limits-Wextra.C (ff): Likewise.
+ (gg): Likewise.
+ * g++.dg/warn/Wtype-limits-no.C (ff): Likewise.
+ (gg): Likewise.
+ * g++.dg/warn/Wtype-limits.C (ff): Likewise.
+ (gg): Likewise.
+ * g++.dg/warn/Wunused-local-typedefs.C: Likewise.
+ * g++.dg/warn/Wzero-as-null-pointer-constant-5.C: Likewise.
+ * g++.dg/warn/pmf1.C (a::f): Likewise.
+ * g++.old-deja/g++.benjamin/p13417.C: Likewise.
+ * g++.old-deja/g++.brendan/crash24.C (main): Likewise.
+ * g++.old-deja/g++.ext/constructor.C: Likewise.
+ * g++.old-deja/g++.ext/namedret1.C (f): Likewise.
+ * g++.old-deja/g++.ext/namedret3.C: Likewise.
+ * g++.old-deja/g++.ext/return1.C: Likewise.
+ * g++.old-deja/g++.jason/anon4.C (main): Likewise.
+ * g++.old-deja/g++.jason/enum6.C: Likewise.
+ * g++.old-deja/g++.jason/lineno2.C (main): Likewise.
+ * g++.old-deja/g++.jason/lineno3.C: Likewise.
+ * g++.old-deja/g++.jason/lineno4.C: Likewise.
+ * g++.old-deja/g++.jason/new2.C (main): Likewise.
+ * g++.old-deja/g++.jason/new4.C (main): Likewise.
+ * g++.old-deja/g++.jason/shadow1.C (main): Likewise.
+ * g++.old-deja/g++.jason/tempcons.C (struct A): Likewise.
+ * g++.old-deja/g++.jason/thunk2.C (main): Likewise.
+ * g++.old-deja/g++.law/builtin1.C (main): Likewise.
+ * g++.old-deja/g++.law/enum9.C: Likewise.
+ * g++.old-deja/g++.law/except3.C: Likewise.
+ * g++.old-deja/g++.law/init6.C: Likewise.
+ * g++.old-deja/g++.law/profile1.C (main): Likewise.
+ * g++.old-deja/g++.law/shadow2.C (main): Likewise.
+ * g++.old-deja/g++.law/temps4.C (main): Likewise.
+ * g++.old-deja/g++.mike/bool2.C (main): Likewise.
+ * g++.old-deja/g++.mike/eh1.C: Likewise.
+ * g++.old-deja/g++.mike/eh10.C: Likewise.
+ * g++.old-deja/g++.mike/eh13.C (main): Likewise.
+ * g++.old-deja/g++.mike/eh16.C: Likewise.
+ * g++.old-deja/g++.mike/eh17.C: Likewise.
+ * g++.old-deja/g++.mike/eh2.C: Likewise.
+ * g++.old-deja/g++.mike/eh23.C: Likewise.
+ * g++.old-deja/g++.mike/eh24.C: Likewise.
+ * g++.old-deja/g++.mike/eh25.C: Likewise.
+ * g++.old-deja/g++.mike/eh26.C: Likewise.
+ * g++.old-deja/g++.mike/eh27.C: Likewise.
+ * g++.old-deja/g++.mike/eh28.C: Likewise.
+ * g++.old-deja/g++.mike/eh29.C: Likewise.
+ * g++.old-deja/g++.mike/eh30.C: Likewise.
+ * g++.old-deja/g++.mike/eh31.C: Likewise.
+ * g++.old-deja/g++.mike/eh35.C: Likewise.
+ * g++.old-deja/g++.mike/eh36.C: Likewise.
+ * g++.old-deja/g++.mike/eh37.C: Likewise.
+ * g++.old-deja/g++.mike/eh38.C: Likewise.
+ * g++.old-deja/g++.mike/eh39.C: Likewise.
+ * g++.old-deja/g++.mike/eh40.C: Likewise.
+ * g++.old-deja/g++.mike/eh47.C: Likewise.
+ * g++.old-deja/g++.mike/eh50.C: Likewise.
+ * g++.old-deja/g++.mike/eh51.C: Likewise.
+ * g++.old-deja/g++.mike/eh7.C: Likewise.
+ * g++.old-deja/g++.mike/eh8.C: Likewise.
+ * g++.old-deja/g++.mike/eh9.C: Likewise.
+ * g++.old-deja/g++.mike/mangle1.C: Likewise.
+ * g++.old-deja/g++.mike/p5958.C: Likewise.
+ * g++.old-deja/g++.mike/p6004.C: Likewise.
+ * g++.old-deja/g++.mike/p700.C: Likewise.
+ * g++.old-deja/g++.mike/p7912.C: Likewise.
+ * g++.old-deja/g++.mike/p811.C (main): Likewise.
+ * g++.old-deja/g++.mike/virt4.C (main): Likewise.
+ * g++.old-deja/g++.oliva/nameret1.C: Likewise.
+ * g++.old-deja/g++.oliva/nameret2.C: Likewise.
+ * g++.old-deja/g++.other/decl1.C (bar): Likewise.
+ * g++.old-deja/g++.other/expr1.C (struct T): Likewise.
+ (main): Likewise.
+ * g++.old-deja/g++.other/inline8.C (main): Likewise.
+ * g++.old-deja/g++.other/loop1.C: Likewise.
+ * g++.old-deja/g++.other/syntax1.C (main): Likewise.
+ * g++.old-deja/g++.pt/repo3.C (main): Likewise.
+ * g++.old-deja/g++.robertl/eb27.C (main): Likewise.
+ * g++.old-deja/g++.robertl/eb83.C (main): Likewise.
+ * gcc.dg/pr44545.c: Likewise.
+ * obj-c++.dg/comp-types-8.mm: Likewise.
+ * obj-c++.dg/demangle-3.mm: Likewise.
+ * obj-c++.dg/super-class-1.mm: Likewise.
+
+2017-11-06 Martin Liska <mliska@suse.cz>
+
+ * c-c++-common/dfp/call-by-value.c (foo32): Return a default
+ value of change return type to void.
+ (foo64): Likewise.
+ (foo128): Likewise.
+ * g++.dg/bprob/g++-bprob-1.C: Likewise.
+ * g++.dg/cpp0x/lambda/lambda-template.C (f): Likewise.
+ * g++.dg/cpp0x/range-for6.C (foo): Likewise.
+ * g++.dg/cpp0x/udlit-template.C: Likewise.
+ * g++.dg/cpp1z/eval-order3.C (struct A): Likewise.
+ (operator>>): Likewise.
+ * g++.dg/expr/cond12.C (struct X): Likewise.
+ (X::operator=): Likewise.
+ * g++.dg/gcov/gcov-1.C: Likewise.
+ * g++.dg/gcov/gcov-threads-1.C (ContentionNoDeadlock_thread): Likewise.
+ * g++.dg/ipa/devirt-21.C: Likewise.
+ * g++.dg/ipa/devirt-23.C: Likewise.
+ * g++.dg/ipa/devirt-34.C (t): Likewise.
+ * g++.dg/missing-return.C: New test. Likewise.
+ * g++.dg/opt/20050511-1.C (bar): Likewise.
+ * g++.dg/opt/const3.C (A::foo1): Likewise.
+ (A::foo2): Likewise.
+ * g++.dg/opt/pr23299.C (E::c): Likewise.
+ * g++.dg/other/copy2.C (A::operator=): Likewise.
+ * g++.dg/overload/addr1.C: Likewise.
+ * g++.dg/pr48484.C: Likewise.
+ * g++.dg/tls/thread_local3.C (thread_main): Likewise.
+ * g++.dg/tls/thread_local3g.C (thread_main): Likewise.
+ * g++.dg/tls/thread_local5.C (thread_main): Likewise.
+ * g++.dg/tls/thread_local5g.C (thread_main): Likewise.
+ * g++.dg/tls/thread_local6.C (thread_main): Likewise.
+ * g++.dg/tls/thread_local6g.C (thread_main): Likewise.
+ * g++.dg/torture/pr34850.C (OctetString::operator^=): Likewise.
+ * g++.dg/tree-prof/pr79259.C (fn2): Likewise.
+ * g++.dg/tree-ssa/pr33604.C (struct Value): Likewise.
+ * g++.dg/tree-ssa/pr81408.C (struct p): Likewise.
+ (av): Likewise.
+ * g++.dg/warn/string1.C (test): Likewise.
+
+2017-11-05 Thomas Koenig <tkoenig@gcc.gnu.org>
+
+ PR fortran/82471
+ * gfortran.dg/loop_interchange_1.f90: New test.
+
+2017-11-05 Paul Thomas <pault@gcc.gnu.org>
+
+ PR fortran/78641
+ * gfortran.dg/class_66.f90: New test.
+
+2017-11-05 Paul Thomas <pault@gcc.gnu.org>
+
+ PR fortran/81447
+ * gfortran.dg/class_65.f90: New test.
+ * gfortran.dg/alloc_comp_basics_1.f90: Increase builtin_free
+ count from 18 to 21.
+ * gfortran.dg/allocatable_scalar_9.f90: Increase builtin_free
+ count from 32 to 54.
+ * gfortran.dg/auto_dealloc_1.f90: Increase builtin_free
+ count from 4 to 10.
+ * gfortran.dg/coarray_lib_realloc_1.f90: Increase builtin_free
+ count from 3 to 6. Likewise _gfortran_caf_deregister from 2 to
+ 3, builtin_malloc from 1 to 4 and builtin_memcpy|= MEM from
+ 2 to 5.
+ * gfortran.dg/finalize_28.f90: Increase builtin_free
+ count from 3 to 6.
+ * gfortran.dg/move_alloc_15.f90: Increase builtin_free and
+ builtin_malloc counts from 11 to 14.
+ * gfortran.dg/typebound_proc_27.f03: Increase builtin_free
+ count from 7 to 10. Likewise builtin_malloc from 12 to 15.
+
+2017-11-04 Daniel Santos <daniel.santos@pobox.com>
+
+ gcc.target/i386/pr82002-2a.c: Change from xfail to fail.
+ gcc.target/i386/pr82002-2b.c: Likewise.
+
+2017-11-04 Andre Vehreschild <vehre@gcc.gnu.org>
+
+ * gfortran.dg/coarray/send_char_array_1.f90: New test.
+
+2017-11-04 Thomas Koenig <tkoenig@gcc.gnu.org>
+
+ PR fortran/70330
+ * gfortran.dg/pr70330.f90: New test.
+
+2017-11-04 Thomas Koenig <tkoenig@gcc.gnu.org>
+
+ PR fortran/29600
+ * gfortran.dg/minmaxloc_8.f90: New test.
+
2017-11-04 Paul Thomas <pault@gcc.gnu.org>
PR fortran/81735
diff --git a/gcc/testsuite/c-c++-common/Wimplicit-fallthrough-8.c b/gcc/testsuite/c-c++-common/Wimplicit-fallthrough-8.c
index 0ed7928fd79..d146c788b74 100644
--- a/gcc/testsuite/c-c++-common/Wimplicit-fallthrough-8.c
+++ b/gcc/testsuite/c-c++-common/Wimplicit-fallthrough-8.c
@@ -4,7 +4,7 @@
extern void grace (int);
-int
+void
fn1 (int i)
{
switch (i)
@@ -16,7 +16,7 @@ fn1 (int i)
done:;
}
-int
+void
fn2 (int i)
{
switch (i)
@@ -32,7 +32,7 @@ fn2 (int i)
done:;
}
-int
+void
fn3 (int i)
{
switch (i)
@@ -46,7 +46,7 @@ fn3 (int i)
done:;
}
-int
+void
fn4 (int i)
{
switch (i)
@@ -64,7 +64,7 @@ fn4 (int i)
done:;
}
-int
+void
fn5 (int i)
{
switch (i)
@@ -83,7 +83,7 @@ fn5 (int i)
done:;
}
-int
+void
fn6 (int i)
{
switch (i)
diff --git a/gcc/testsuite/c-c++-common/Wsizeof-pointer-memaccess2.c b/gcc/testsuite/c-c++-common/Wsizeof-pointer-memaccess2.c
index 895a50e2677..f7bfa35913c 100644
--- a/gcc/testsuite/c-c++-common/Wsizeof-pointer-memaccess2.c
+++ b/gcc/testsuite/c-c++-common/Wsizeof-pointer-memaccess2.c
@@ -1,7 +1,7 @@
/* Test -Wsizeof-pointer-memaccess warnings. */
/* { dg-do compile } */
-/* { dg-options "-Wall -O2 -Wno-sizeof-array-argument -ftrack-macro-expansion=0" } */
-/* { dg-options "-Wall -O2 -Wno-sizeof-array-argument -Wno-c++-compat -ftrack-macro-expansion=0" {target c} } */
+/* { dg-options "-Wall -O2 -Wno-sizeof-array-argument -Wno-stringop-truncation -ftrack-macro-expansion=0" } */
+/* { dg-options "-Wall -O2 -Wno-sizeof-array-argument -Wno-stringop-truncation -Wno-c++-compat -ftrack-macro-expansion=0" {target c} } */
/* { dg-require-effective-target alloca } */
#define bos(ptr) __builtin_object_size (ptr, 1)
@@ -473,12 +473,15 @@ f4 (char *x, char **y, int z, char w[64])
strncat (w, s2, sizeof (w)); /* { dg-warning "call is the same expression as the destination; did you mean to provide an explicit length" } */
stpncpy (w, s1, sizeof (w)); /* { dg-warning "call is the same expression as the destination; did you mean to provide an explicit length" } */
- /* These are correct, no warning. */
+ /* These are pointless when the destination is large enough, and
+ cause overflow otherwise. If the copies are guaranteed to be
+ safe the calls might as well be replaced by strcat(), strcpy(),
+ or memcpy(). */
const char s3[] = "foobarbaz";
const char s4[] = "abcde12345678";
- strncpy (x, s3, sizeof (s3));
- strncat (x, s4, sizeof (s4));
- stpncpy (x, s3, sizeof (s3));
+ strncpy (x, s3, sizeof (s3)); /* { dg-warning "call is the same expression as the source; did you mean to use the size of the destination?" } */
+ strncat (x, s4, sizeof (s4)); /* { dg-warning "call is the same expression as the source; did you mean to use the size of the destination?" } */
+ stpncpy (x, s3, sizeof (s3)); /* { dg-warning "call is the same expression as the source; did you mean to use the size of the destination?" } */
}
/* { dg-prune-output "\[\n\r\]*writing\[\n\r\]*" } */
diff --git a/gcc/testsuite/c-c++-common/Wsizeof-pointer-memaccess3.c b/gcc/testsuite/c-c++-common/Wsizeof-pointer-memaccess3.c
new file mode 100644
index 00000000000..97598c42346
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/Wsizeof-pointer-memaccess3.c
@@ -0,0 +1,132 @@
+/* Test -Wsizeof-pointer-memaccess warnings. */
+/* { dg-do compile } */
+/* { dg-options "-Wsizeof-pointer-memaccess -Wno-stringop-overflow -Wno-stringop-truncation -ftrack-macro-expansion=0" } */
+
+#define bos(ptr) __builtin_object_size (ptr, 1)
+#define bos0(ptr) __builtin_object_size (ptr, 0)
+
+#define memset(dst, val, sz) \
+ (FUNC (memset, dst, val, sz, bos (dst)), sink ((dst)))
+
+#define memcpy(dst, src, sz) \
+ (FUNC (memcpy, dst, src, sz, bos (dst)), sink ((dst)))
+
+#define memmove(dst, src, sz) \
+ (FUNC (memmove, dst, src, sz, bos (dst)), sink ((dst)))
+
+#define mempcpy(dst, src, sz) \
+ (FUNC (mempcpy, dst, src, sz, bos (dst)), sink ((dst)))
+
+#define strncpy(dst, src, sz) \
+ (FUNC (strncpy, dst, src, sz, bos (dst)), sink (dst))
+
+#define strncat(dst, src, sz) \
+ (FUNC (strncat, dst, src, sz, bos (dst)), sink (dst))
+
+#define stpncpy(dst, src, sz) \
+ (FUNC (stpncpy, dst, src, sz, bos (dst)), sink (dst))
+
+void sink (void*);
+
+#define S10 "123456789"
+extern char a10[10];
+
+void test_string_literal (char *dst)
+{
+#define FUNC(f, d, s, n, x) __builtin_ ## f (d, s, n)
+
+ /* It's common to call memcpy and other raw memory functions with
+ size drerived from the source argument. Verify that no warning
+ is ussued for such calls. */
+ memcpy (dst, S10, sizeof S10);
+ mempcpy (dst, S10, sizeof S10);
+ memmove (dst, S10, sizeof S10);
+
+ memset (dst, 0, sizeof S10);
+
+ stpncpy (dst, S10, sizeof S10); /* { dg-warning "\\\[-Wsizeof-pointer-memaccess]" } */
+
+ strncpy (dst, S10, sizeof S10); /* { dg-warning "\\\[-Wsizeof-pointer-memaccess]" } */
+
+ strncat (dst, S10, sizeof S10); /* { dg-warning "\\\[-Wsizeof-pointer-memaccess]" } */
+
+ /* Unlike in the cases above, even though the calls below are likely
+ wrong, it's not easy to detect that the expression (sizeof X - 1)
+ involves sizeof of the source, so no warning is issued here, as
+ helpful as one might be. Whether -Wstringop-truncation is issued
+ is tested elsewhere. */
+ stpncpy (dst, S10, sizeof S10 - 1); /* { dg-warning "\\\[-Wsizeof-pointer-memaccess]" "" { xfail *-*-* } } */
+
+ strncpy (dst, S10, sizeof S10 - 1); /* { dg-warning "\\\[-Wsizeof-pointer-memaccess]" "" { xfail *-*-* } } */
+
+ strncat (dst, S10, sizeof S10 - 1); /* { dg-warning "\\\[-Wsizeof-pointer-memaccess]" "" { xfail *-*-* } } */
+}
+
+
+void test_char_array (char *dst)
+{
+ memcpy (dst, a10, sizeof a10);
+ mempcpy (dst, a10, sizeof a10);
+ memmove (dst, a10, sizeof a10);
+
+ memset (dst, 0, sizeof a10);
+
+ stpncpy (dst, a10, sizeof a10); /* { dg-warning "\\\[-Wsizeof-pointer-memaccess]" } */
+
+ strncpy (dst, a10, sizeof a10); /* { dg-warning "\\\[-Wsizeof-pointer-memaccess]" } */
+
+ strncat (dst, a10, sizeof a10); /* { dg-warning "\\\[-Wsizeof-pointer-memaccess]" } */
+
+ stpncpy (dst, a10, sizeof a10 - 1); /* { dg-warning "\\\[-Wsizeof-pointer-memaccess]" "" { xfail *-*-* } } */
+
+ strncpy (dst, a10, sizeof a10 - 1); /* { dg-warning "\\\[-Wsizeof-pointer-memaccess]" "" { xfail *-*-* } } */
+
+ strncat (dst, a10, sizeof a10 - 1); /* { dg-warning "\\\[-Wsizeof-pointer-memaccess]" "" { xfail *-*-* } } */
+}
+
+
+#undef FUNC
+#define FUNC(f, d, s, n, os) __builtin___ ## f ## _chk (d, s, n, os)
+
+void test_char_array_chk (char *dst)
+{
+ memcpy (dst, S10, sizeof S10);
+ mempcpy (dst, S10, sizeof S10);
+ memmove (dst, S10, sizeof S10);
+
+ memset (dst, 0, sizeof S10);
+
+ stpncpy (dst, S10, sizeof S10); /* { dg-warning "\\\[-Wsizeof-pointer-memaccess]" } */
+
+ strncpy (dst, S10, sizeof S10); /* { dg-warning "\\\[-Wsizeof-pointer-memaccess]" } */
+
+ strncat (dst, S10, sizeof S10); /* { dg-warning "\\\[-Wsizeof-pointer-memaccess]" } */
+
+ stpncpy (dst, S10, sizeof S10 - 1); /* { dg-warning "\\\[-Wsizeof-pointer-memaccess]" "" { xfail *-*-* } } */
+
+ strncpy (dst, S10, sizeof S10 - 1); /* { dg-warning "\\\[-Wsizeof-pointer-memaccess]" "" { xfail *-*-* } } */
+
+ strncat (dst, S10, sizeof S10 - 1); /* { dg-warning "\\\[-Wsizeof-pointer-memaccess]" "" { xfail *-*-* } } */
+}
+
+
+void test_string_literal_chk (char *dst)
+{
+ memcpy (dst, a10, sizeof a10);
+ mempcpy (dst, a10, sizeof a10);
+ memmove (dst, a10, sizeof a10);
+
+ memset (dst, 0, sizeof a10);
+
+ stpncpy (dst, a10, sizeof a10); /* { dg-warning "\\\[-Wsizeof-pointer-memaccess]" } */
+
+ strncpy (dst, a10, sizeof a10); /* { dg-warning "\\\[-Wsizeof-pointer-memaccess]" } */
+
+ strncat (dst, a10, sizeof a10); /* { dg-warning "\\\[-Wsizeof-pointer-memaccess]" } */
+
+ stpncpy (dst, a10, sizeof a10 - 1); /* { dg-warning "\\\[-Wsizeof-pointer-memaccess]" "" { xfail *-*-* } } */
+
+ strncpy (dst, a10, sizeof a10 - 1); /* { dg-warning "\\\[-Wsizeof-pointer-memaccess]" "" { xfail *-*-* } } */
+
+ strncat (dst, a10, sizeof a10 - 1); /* { dg-warning "\\\[-Wsizeof-pointer-memaccess]" "" { xfail *-*-* } } */
+}
diff --git a/gcc/testsuite/c-c++-common/Wstringop-overflow.c b/gcc/testsuite/c-c++-common/Wstringop-overflow.c
new file mode 100644
index 00000000000..53f5166f30a
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/Wstringop-overflow.c
@@ -0,0 +1,158 @@
+/* PR middle-end/81117 - Improve buffer overflow checking in strncpy
+ { dg-do compile }
+ { dg-options "-O2 -Wstringop-overflow -Wno-stringop-truncation -ftrack-macro-expansion=0" } */
+
+typedef __SIZE_TYPE__ size_t;
+
+#if __cplusplus
+extern "C" {
+#endif
+
+size_t strlen (const char*);
+char* strncat (char*, const char*, size_t);
+char* strncpy (char*, const char*, size_t);
+#if __cplusplus
+}
+#endif
+
+const char ar[] = "123";
+
+void test_strncat (char **d, const char* s, int i)
+{
+ /* Use a fresh pointer for each test to prevent the optimizer from
+ eliminating redundant writes into the same destination. Avoid
+ calling functions like sink() on the result that would have to
+ be assumed to change the source string by the alias oracle. */
+#define T(d, s, len) strncat (*d++, (s), (len))
+
+ T (d, "", 0);
+ T (d, "", 1);
+ T (d, "", 2);
+ T (d, "", 3);
+ T (d, "123", 0);
+ /* The following two calls truncate the copy and are diagnosed
+ by -Wstringop-truncation but there is evidence of overflow so
+ they're not diagnosed by -Wstringop-overflow. */
+ T (d, "123", 1);
+ T (d, "123", 2);
+
+ T (d, "123", 3); /* { dg-warning ".strncat\[^\n\r\]* specified bound 3 equals source length" } */
+ T (d, "123", 4);
+ T (d, "123", 9);
+
+ T (d, s, strlen (s)); /* { dg-warning ".strncat\[^\n\r\]* specified bound depends on the length of the source argument" } */
+ T (d, s, strlen (s) + 1); /* { dg-warning ".strncat\[^\n\r\]* specified bound depends on the length of the source argument" } */
+ /* The following could also be diagnosed by -Wstringop-truncation
+ (with some effort to distinguish the pattern from others like
+ the one above. */
+ T (d, s, strlen (s) - 1); /* { dg-warning ".strncat\[^\n\r\]* specified bound depends on the length of the source argument" } */
+ T (d, s, strlen (s) - i); /* { dg-warning ".strncat\[^\n\r\]* specified bound depends on the length of the source argument" } */
+
+ /* The following is dubious but not necessarily a smoking gun. */
+ T (d, s, strlen (s) - strlen (s));
+
+ {
+ signed char n = strlen (s); /* { dg-message "length computed here" } */
+ T (d, s, n); /* { dg-warning ".strncat\[^\n\r\]* specified bound depends on the length of the source argument" } */
+ }
+
+ {
+ short n = strlen (s); /* { dg-message "length computed here" } */
+ T (d, s, n); /* { dg-warning ".strncat\[^\n\r\]* specified bound depends on the length of the source argument" } */
+ }
+
+ {
+ int n = strlen (s); /* { dg-message "length computed here" } */
+ T (d, s, n); /* { dg-warning ".strncat\[^\n\r\]* specified bound depends on the length of the source argument" } */
+ }
+
+ {
+ unsigned n = strlen (s); /* { dg-message "length computed here" } */
+ T (d, s, n); /* { dg-warning ".strncat\[^\n\r\]* specified bound depends on the length of the source argument" } */
+ }
+
+ {
+ size_t n;
+ n = strlen (s); /* { dg-message "length computed here" } */
+ T (d, s, n); /* { dg-warning ".strncat\[^\n\r\]* specified bound depends on the length of the source argument" } */
+ }
+
+ {
+ size_t n;
+ n = strlen (s) - 1; /* { dg-message "length computed here" } */
+ T (d, s, n); /* { dg-message ".strncat\[^\n\r\]* specified bound depends on the length of the source argument" } */
+ }
+
+ {
+ /* This doesn't overflow so iit should not be diagnosed. */
+ size_t n = strlen (s) - strlen (s);
+ T (d, s, n);
+ }
+
+ {
+ size_t n = i < strlen (s) ? i : strlen (s); /* { dg-message "length computed here" } */
+ T (d, s, n); /* { dg-message ".strncat\[^\n\r\]* specified bound depends on the length of the source argument" } */
+ }
+}
+
+
+void test_strncpy (char **d, const char* s, int i)
+{
+#undef T
+#define T(d, s, len) strncpy (*d++, (s), (len))
+
+ T (d, "", 0);
+ T (d, "", 1);
+ T (d, "", 2);
+ T (d, "", 3);
+ T (d, "123", 0);
+ T (d, "123", 1);
+ T (d, "123", 2);
+ T (d, "123", 3);
+ T (d, "123", 4);
+ T (d, "123", 9);
+
+ T (d, "123", sizeof "123");
+ T (d, ar, sizeof ar);
+
+ T (d, s, strlen (s)); /* { dg-warning "\\\[-Wstringop-overflow=]" } */
+
+ {
+ int n = strlen (s); /* { dg-message "length computed here" } */
+ T (d, s, n); /* { dg-warning "\\\[-Wstringop-overflow=]" } */
+ }
+
+ {
+ unsigned n = strlen (s); /* { dg-message "length computed here" } */
+ T (d, s, n); /* { dg-warning "\\\[-Wstringop-overflow=]" } */
+ }
+
+ {
+ size_t n;
+ n = strlen (s); /* { dg-message "length computed here" } */
+ T (d, s, n); /* { dg-warning "\\\[-Wstringop-overflow=]" } */
+ }
+
+ {
+ size_t n;
+ n = strlen (s) - 1; /* { dg-message "length computed here" } */
+ T (d, s, n); /* { dg-warning "\\\[-Wstringop-overflow=]" } */
+ }
+
+ {
+ /* This is diagnosed by -Wstringop-truncation. Verify that it isn't
+ also diagnosed by -Wstringop-overflow. */
+ size_t n = strlen (s) - strlen (s);
+ T (d, s, n);
+ }
+
+ {
+ /* This use of strncpy is certainly dubious and it could well be
+ diagnosed by -Wstringop-truncation but it isn't. That it is
+ diagnosed with -Wstringop-overflow is more by accident than
+ by design. -Wstringop-overflow considers any dependency of
+ the bound on strlen(s) a potential bug. */
+ size_t n = i < strlen (s) ? i : strlen (s); /* { dg-message "length computed here" } */
+ T (d, s, n); /* { dg-message ".strncpy\[^\n\r]* specified bound depends on the length of the source argument" } */
+ }
+}
diff --git a/gcc/testsuite/c-c++-common/Wstringop-truncation.c b/gcc/testsuite/c-c++-common/Wstringop-truncation.c
new file mode 100644
index 00000000000..7fc439fb630
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/Wstringop-truncation.c
@@ -0,0 +1,449 @@
+/* PR middle-end/81117 - Improve buffer overflow checking in strncpy
+ { dg-do compile }
+ { dg-options "-O2 -Wstringop-truncation -Wno-stringop-overflow -ftrack-macro-expansion=0" }
+ { dg-require-effective-target alloca } */
+
+
+typedef __SIZE_TYPE__ size_t;
+
+#if __cplusplus
+extern "C" {
+#endif
+
+size_t strlen (const char*);
+char* strncat (char*, const char*, size_t);
+char* strncpy (char*, const char*, size_t);
+
+#if __cplusplus
+}
+#endif
+
+extern size_t unsigned_value (void)
+{
+ extern volatile size_t unsigned_value_source;
+ return unsigned_value_source;
+}
+
+size_t unsigned_range (size_t min, size_t max)
+{
+ size_t val = unsigned_value ();
+ return val < min || max < val ? min : val;
+}
+
+#define UR(min, max) unsigned_range (min, max)
+
+void sink (void*);
+
+#define S4 "123"
+const char a4[] = "123";
+
+#define CHOOSE(a, b) (unsigned_value () & 1 ? a : b)
+
+
+typedef struct Dest
+{
+ char a5[5];
+ char b7[7];
+ char c3ns[3] __attribute__ ((nonstring));
+} Dest;
+
+char dst7[7];
+char dst2_5[2][5];
+
+/* Verify strncat warnings for arrays of known bounds. */
+
+void test_strncat_array (Dest *pd)
+{
+#define CAT(d, s, len) (strncat ((d), (s), (len)), sink (d))
+
+ CAT (dst7, S4, 2); /* { dg-warning "output truncated copying 2 bytes from a string of length 3" } */
+
+ CAT (dst7, a4, 1); /* { dg-warning "output truncated copying 1 byte from a string of length 3" } */
+
+ /* There is no truncation here but possible overflow so these
+ are diagnosed by -Wstringop-overflow:
+ CAT (dst7, S4, 3);
+ CAT (dst7, a4, 3);
+ */
+
+ CAT (pd->a5, S4, 2); /* { dg-warning "output truncated copying 2 bytes from a string of length 3" } */
+ CAT (pd->a5, S4, 1); /* { dg-warning "output truncated copying 1 byte from a string of length 3" } */
+}
+
+/* Verify strncat warnings for arrays of known bounds and a non-const
+ character count in some range. */
+
+void test_strncat_array_range (Dest *pd)
+{
+ CAT (dst7, S4, UR (0, 1)); /* { dg-warning "output truncated copying between 0 and 1 bytes from a string of length 3" } */
+ CAT (dst7, S4, UR (0, 2)); /* { dg-warning "output truncated copying between 0 and 2 bytes from a string of length 3" } */
+ CAT (dst7, S4, UR (1, 3)); /* { dg-warning "output truncated copying between 1 and 3 bytes from a string of length 3" } */
+ CAT (dst7, S4, UR (2, 4)); /* { dg-warning "output may be truncated copying between 2 and 4 bytes from a string of length 3" } */
+
+ CAT (dst7, S4, UR (0, 7));
+ CAT (dst7, S4, UR (1, 7));
+ CAT (dst7, S4, UR (6, 7));
+
+ CAT (dst7, S4, UR (0, 99));
+
+ CAT (dst7, S4, UR (0, 99));
+}
+
+/* Verify strncat warnings for arrays of unknown bounds. */
+
+void test_strncat_vla (char *d, unsigned n)
+{
+ CAT (d, S4, 2); /* { dg-warning "output truncated copying 2 bytes from a string of length 3" } */
+ CAT (d, S4, 4);
+
+ CAT (d, a4, 2); /* { dg-warning "output truncated copying 2 bytes from a string of length 3" } */
+
+ /* There is no truncation here but possible overflow so these
+ are diagnosed by -Wstringop-overflow:
+ CAT (d, S4, 3);
+ CAT (d, a4, 3);
+ */
+ CAT (d, a4, 4);
+
+ char vla[n];
+
+ CAT (vla, S4, 2); /* { dg-warning "output truncated copying 2 bytes from a string of length 3" } */
+
+ CAT (vla, S4, 4);
+ CAT (vla, S4, n);
+
+ CAT (vla, a4, 2); /* { dg-warning "output truncated copying 2 bytes from a string of length 3" } */
+
+ CAT (vla, a4, 4);
+ CAT (vla, a4, n);
+
+ CAT (d, vla, 1);
+ CAT (d, vla, 3);
+ CAT (d, vla, 4);
+ CAT (d, vla, n);
+
+ /* There is no truncation here but possible overflow so these
+ are diagnosed by -Wstringop-overflow:
+ CAT (vla, S4, 3);
+ CAT (vla, a4, 3);
+ */
+}
+
+/* Verify strncpy warnings with at least one pointer to an object
+ or string of unknown size (destination) or length (source). */
+
+void test_strncpy_ptr (char *d, const char* s, const char *t, int i)
+{
+#define CPY(d, s, len) (strncpy ((d), (s), (len)), sink (d))
+
+ /* Strncpy doesn't nul-terminate so the following is diagnosed. */
+ CPY (d, "", 0); /* { dg-warning ".strncpy\[^\n\r\]* destination unchanged after copying no bytes" } */
+ CPY (d, s, 0); /* { dg-warning ".strncpy\[^\n\r\]* destination unchanged after copying no bytes" } */
+
+ /* This is safe. */
+ CPY (d, "", 1);
+ CPY (d, "", 2);
+
+ /* This could be safe. */
+ CPY (d, s, 1);
+ CPY (d, s, 2);
+
+ /* Truncation. */
+ CPY (d, "123", 1); /* { dg-warning ".strncpy\[^\n\r\]* output truncated copying 1 byte from a string of length 3" } */
+ CPY (d, "123", 2); /* { dg-warning ".strncpy\[^\n\r\]* output truncated copying 2 bytes from a string of length 3" } */
+ CPY (d, "123", 3); /* { dg-warning ".strncpy\[^\n\r\]* output truncated before terminating nul copying 3 bytes from a string of the same length" } */
+ CPY (d, "123", 4);
+ CPY (d, "123", 9);
+
+ CPY (d, S4, sizeof S4); /* Covered by -Wsizeof-pointer-memaccess. */
+ CPY (d, S4, sizeof S4 - 1); /* { dg-warning ".strncpy\[^\n\r\]* output truncated before terminating nul copying 3 bytes from a string of the same length" } */
+
+ CPY (d, a4, sizeof a4); /* Covered by -Wsizeof-pointer-memaccess. */
+ CPY (d, a4, sizeof a4 - 1); /* { dg-warning ".strncpy\[^\n\r\]* output truncated before terminating nul copying 3 bytes from a string of the same length" } */
+ CPY (d, a4, sizeof a4 - 3); /* { dg-warning ".strncpy\[^\n\r\]* output truncated copying 1 byte from a string of length 3" } */
+ CPY (d, a4, sizeof a4 - 4); /* { dg-warning ".strncpy\[^\n\r\]* destination unchanged after copying no bytes from a string of length 3" } */
+
+ CPY (d, S4, strlen (S4)); /* { dg-warning ".strncpy\[^\n\r\]* output truncated before terminating nul copying 3 bytes from a string of the same length" } */
+ /* Likely buggy but no truncation. Diagnosed by -Wstringop-overflow. */
+ CPY (d, a4, strlen (a4) + 1);
+ CPY (d, S4, strlen (S4) + i);
+
+ CPY (d, a4, strlen (a4)); /* { dg-warning ".strncpy\[^\n\r\]* output truncated before terminating nul copying 3 bytes from a string of the same length" } */
+ /* As above, buggy but no evidence of truncation. */
+ CPY (d, S4, strlen (S4) + 1);
+
+ CPY (d, CHOOSE ("", "1"), 0); /* { dg-warning ".strncpy\[^\n\r\]* destination unchanged after copying no bytes" } */
+ CPY (d, CHOOSE ("1", "12"), 0); /* { dg-warning ".strncpy\[^\n\r\]* destination unchanged after copying no bytes" } */
+
+ CPY (d, CHOOSE ("", "1"), 1); /* { dg-warning ".strncpy\[^\n\r\]* output may be truncated copying 1 byte from a string of length 1" } */
+ CPY (d, CHOOSE ("1", ""), 1); /* { dg-warning ".strncpy\[^\n\r\]* output may be truncated copying 1 byte from a string of length 1" } */
+ CPY (d, CHOOSE (s, "1"), 1); /* { dg-warning ".strncpy\[^\n\r\]* output may be truncated copying 1 byte from a string of length 1" } */
+ CPY (d, CHOOSE (s, t), 1);
+
+ CPY (d, CHOOSE ("", "1"), 2);
+ CPY (d, CHOOSE ("1", ""), 2);
+ CPY (d, CHOOSE ("1", "2"), 2);
+ CPY (d, CHOOSE ("1", s), 2);
+ CPY (d, CHOOSE (s, "1"), 2);
+ CPY (d, CHOOSE (s, t), 2);
+
+ CPY (d, CHOOSE ("", "123"), 1); /* { dg-warning ".strncpy\[^\n\r\]* output may be truncated copying 1 byte from a string of length 3" } */
+ CPY (d, CHOOSE ("1", "123"), 1); /* { dg-warning ".strncpy\[^\n\r\]* output truncated copying 1 byte from a string of length 1" } */
+ CPY (d, CHOOSE ("12", "123"), 1); /* { dg-warning ".strncpy\[^\n\r\]* output truncated copying 1 byte from a string of length 2" } */
+ CPY (d, CHOOSE ("123", "12"), 1); /* { dg-warning ".strncpy\[^\n\r\]* output truncated copying 1 byte from a string of length 2" } */
+
+ {
+ signed char n = strlen (s); /* { dg-message "length computed here" } */
+ CPY (d, s, n); /* { dg-warning ".strncpy\[^\n\r\]* output truncated before terminating nul copying as many bytes from a string as its length" } */
+ }
+
+ {
+ short n = strlen (s); /* { dg-message "length computed here" } */
+ CPY (d, s, n); /* { dg-warning ".strncpy\[^\n\r\]* output truncated before terminating nul copying as many bytes from a string as its length" } */
+ }
+
+ {
+ int n = strlen (s); /* { dg-message "length computed here" } */
+ CPY (d, s, n); /* { dg-warning ".strncpy\[^\n\r\]* output truncated before terminating nul copying as many bytes from a string as its length" } */
+ }
+
+ {
+ unsigned n = strlen (s); /* { dg-message "length computed here" } */
+ CPY (d, s, n); /* { dg-warning ".strncpy\[^\n\r\]* output truncated before terminating nul copying as many bytes from a string as its length" } */
+ }
+
+ {
+ size_t n;
+ n = strlen (s); /* { dg-message "length computed here" } */
+ CPY (d, s, n); /* { dg-warning ".strncpy\[^\n\r\]* output truncated before terminating nul copying as many bytes from a string as its length" } */
+ }
+
+ {
+ size_t n;
+ char *dp2 = d + 1;
+ n = strlen (s); /* { dg-message "length computed here" } */
+ CPY (dp2, s, n); /* { dg-warning ".strncpy\[^\n\r\]* output truncated before terminating nul copying as many bytes from a string as its length" } */
+ }
+
+ {
+ /* The following is likely buggy but there's no apparent truncation
+ so it's not diagnosed by -Wstringop-truncation. Instead, it is
+ diagnosed by -Wstringop-overflow (tested elsewhere). */
+ int n;
+ n = strlen (s) - 1;
+ CPY (d, s, n);
+ }
+
+ {
+ /* Same as above. */
+ size_t n;
+ n = strlen (s) - 1;
+ CPY (d, s, n);
+ }
+
+ {
+ size_t n = strlen (s) - strlen (s);
+ CPY (d, s, n); /* { dg-warning ".strncpy\[^\n\r\]* destination unchanged after copying no bytes" } */
+ }
+
+ {
+ /* This use of strncpy is dubious but it's probably not worth
+ worrying about (truncation may not actually take place when
+ i is the result). It is diagnosed with -Wstringop-overflow
+ (although more by accident than by design).
+
+ size_t n = i < strlen (s) ? i : strlen (s);
+ CPY (d, s, n);
+ */
+ }
+}
+
+
+/* Verify strncpy warnings for arrays of known bounds. */
+
+void test_strncpy_array (Dest *pd, int i, const char* s)
+{
+#undef CPY
+#define CPY(d, s, len) (strncpy ((d), (s), (len)), sink (d))
+
+ CPY (dst7, s, 7); /* { dg-warning "specified bound 7 equals destination size" } */
+ CPY (dst7, s, sizeof dst7); /* { dg-warning "specified bound 7 equals destination size" } */
+
+ CPY (dst2_5[0], s, sizeof dst2_5[0]); /* { dg-warning "specified bound 5 equals destination size" "bug 77293" { xfail *-*-* } } */
+ CPY (dst2_5[1], s, sizeof dst2_5[1]); /* { dg-warning "specified bound 5 equals destination size" } */
+
+ /* Verify that copies that nul-terminate are not diagnosed. */
+ CPY (dst7, "", sizeof dst7);
+ CPY (dst7 + 6, "", sizeof dst7 - 6);
+ CPY (dst7, "1", sizeof dst7);
+ CPY (dst7 + 1, "1", sizeof dst7 - 1);
+ CPY (dst7, "123456", sizeof dst7);
+ CPY (dst7 + 1, "12345", sizeof dst7 - 1);
+
+ CPY (dst7 + i, s, 6);
+ CPY (dst7 + i, s, 7); /* { dg-warning "specified bound 7 equals destination size" } */
+ /* The following two calls are diagnosed by -Wstringop-overflow. */
+ CPY (dst7 + i, s, 8);
+ CPY (dst7 + i, s, UR (8, 9));
+
+ /* No nul-termination here. */
+ CPY (dst7 + 2, "12345", sizeof dst7 - 2); /* { dg-warning "output truncated before terminating nul copying 5 bytes from a string of the same length" } */
+
+ /* Because strnlen appends as many NULs as necessary to write the specified
+ number of byts the following doesn't (necessarily) truncate but rather
+ overflow, and so is diagnosed by -Wstringop-overflow. */
+ CPY (dst7, s, 8);
+
+ CPY (dst7 + 1, s, 6); /* { dg-warning "specified bound 6 equals destination size" } */
+ CPY (dst7 + 6, s, 1); /* { dg-warning "specified bound 1 equals destination size" } */
+
+ CPY (pd->a5, s, 5); /* { dg-warning "specified bound 5 equals destination size" } */
+ CPY (pd->a5, s, sizeof pd->a5); /* { dg-warning "specified bound 5 equals destination size" } */
+
+ /* The following is not yet handled. */
+ CPY (pd->a5 + i, s, sizeof pd->a5); /* { dg-warning "specified bound 5 equals destination size" "member array" { xfail *-*-* } } */
+
+ /* Verify that a copy that nul-terminates is not diagnosed. */
+ CPY (pd->a5, "1234", sizeof pd->a5);
+
+ /* Same above, diagnosed by -Wstringop-overflow. */
+ CPY (pd->a5, s, 6);
+
+ /* Exercise destination with attribute "nonstring". */
+ CPY (pd->c3ns, "", 3);
+ CPY (pd->c3ns, "", 1);
+ /* Truncation is still diagnosed -- using strncpy in this case is
+ pointless and should be replaced with memcpy. */
+ CPY (pd->c3ns, "12", 1); /* { dg-warning "output truncated copying 1 byte from a string of length 2" } */
+ CPY (pd->c3ns, "12", 2);
+ CPY (pd->c3ns, "12", 3);
+ CPY (pd->c3ns, "123", 3);
+ CPY (pd->c3ns, s, 3);
+ CPY (pd->c3ns, s, sizeof pd->c3ns);
+
+ /* Verify that the idiom of calling strncpy with a bound equal to
+ the size of the destination (and thus potentially without NUL-
+ terminating it) immediately followed by setting the last element
+ of the array to NUL is not diagnosed. */
+ {
+ /* This might be better written using memcpy() but it's safe so
+ it probably shouldn't be diagnosed. It currently triggers
+ a warning because of bug 81704. */
+ strncpy (dst7, "0123456", sizeof dst7); /* { dg-bogus "truncated" "bug 81704" { xfail *-*-* } } */
+ dst7[sizeof dst7 - 1] = '\0';
+ sink (dst7);
+ }
+
+ {
+ const char a[] = "0123456789";
+ strncpy (dst7, a, sizeof dst7);
+ dst7[sizeof dst7 - 1] = '\0';
+ sink (dst7);
+ }
+
+ {
+ strncpy (dst7, s, sizeof dst7);
+ dst7[sizeof dst7 - 1] = '\0';
+ sink (dst7);
+ }
+
+ {
+ strncpy (pd->a5, "01234", sizeof pd->a5); /* { dg-bogus "truncated" "bug 81704" { xfail *-*-* } } */
+ pd->a5[sizeof pd->a5 - 1] = '\0';
+ sink (pd);
+ }
+
+ {
+ strncpy (pd->a5, s, sizeof pd->a5);
+ pd->a5[sizeof pd->a5 - 1] = '\0';
+ sink (pd);
+ }
+
+ {
+ unsigned n = 7;
+ char *p = (char*)__builtin_malloc (n);
+ strncpy (p, s, n);
+ p[n - 1] = '\0';
+ sink (p);
+ }
+
+ {
+ /* This should be diagnosed because the NUL-termination doesn't
+ immediately follow the strncpy call (sink may expect pd->a5
+ to be NUL-terminated). */
+ strncpy (pd->a5, s, sizeof pd->a5); /* { dg-warning "specified bound 5 equals destination size" } */
+ sink (pd);
+ pd->a5[sizeof pd->a5] = '\0';
+ sink (pd);
+ }
+}
+
+typedef struct Flex
+{
+ size_t n;
+ char a0[0];
+ char ax[];
+} Flex;
+
+extern char array[];
+
+/* Verify that no warning is issued for array of unknown bound, flexible
+ array members, or zero-length arrays, except when the source is definitely
+ truncated. */
+
+void test_strncpy_flexarray (Flex *pf, const char* s)
+{
+#undef CPY
+#define CPY(d, s, len) (strncpy ((d), (s), (len)), sink (d))
+
+ CPY (array, "12345", 7);
+ CPY (array, "12345", 123);
+
+ CPY (array, s, 7);
+ CPY (array, s, 123);
+
+ CPY (pf->a0, s, 1);
+ CPY (pf->a0, s, 1234);
+
+ CPY (pf->a0, "", 1);
+ CPY (pf->a0, "12345", 5); /* { dg-warning "output truncated before terminating nul copying 5 bytes from a string of the same length" } */
+ CPY (pf->a0, "12345", 1234);
+
+ CPY (pf->ax, s, 5);
+ CPY (pf->ax, s, 12345);
+
+ CPY (pf->ax, "1234", 5);
+ CPY (pf->ax, "12345", 5); /* { dg-warning "output truncated before terminating nul copying 5 bytes from a string of the same length" } */
+ CPY (pf->ax, "12345", 12345);
+}
+
+/* Verify warnings for dynamically allocated objects. */
+
+void test_strncpy_alloc (const char* s)
+{
+ size_t n = 7;
+ char *d = (char *)__builtin_malloc (n);
+
+ CPY (d, s, n); /* { dg-warning "specified bound 7 equals destination size" "bug 79016" { xfail *-*-* } } */
+
+ Dest *pd = (Dest *)__builtin_malloc (sizeof *pd * n);
+ CPY (pd->a5, s, 5); /* { dg-warning "specified bound 5 equals destination size" } */
+ CPY (pd->a5, s, sizeof pd->a5); /* { dg-warning "specified bound 5 equals destination size" } */
+}
+
+/* Verify warnings for VLAs. */
+
+void test_strncpy_vla (unsigned n, const char* s)
+{
+ char vla[n];
+ CPY (vla, s, 0); /* { dg-warning ".strncpy\[^\n\r\]* destination unchanged after copying no bytes" } */
+
+ CPY (vla, s, 1);
+ CPY (vla, s, 2);
+ CPY (vla, s, n);
+
+ CPY (vla, "", 0); /* { dg-warning ".strncpy\[^\n\r\]* destination unchanged after copying no bytes" } */
+ CPY (vla, "", 1);
+ CPY (vla, S4, 3); /* { dg-warning ".strncpy\[^\n\r\]* output truncated before terminating nul copying 3 bytes from a string of the same length" } */
+ CPY (vla, S4, n);
+}
diff --git a/gcc/testsuite/c-c++-common/asan/pr63638.c b/gcc/testsuite/c-c++-common/asan/pr63638.c
index a8bafc5aad7..619a2b6142a 100644
--- a/gcc/testsuite/c-c++-common/asan/pr63638.c
+++ b/gcc/testsuite/c-c++-common/asan/pr63638.c
@@ -12,7 +12,7 @@ struct S{
struct S s[6];
-int f(struct S *p)
+void f(struct S *p)
{
memcpy(p, &s[2], sizeof(*p));
memcpy(p, &s[1], sizeof(*p));
diff --git a/gcc/testsuite/c-c++-common/attr-nonstring-1.c b/gcc/testsuite/c-c++-common/attr-nonstring-1.c
new file mode 100644
index 00000000000..10a66887fa2
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/attr-nonstring-1.c
@@ -0,0 +1,60 @@
+/* Test to exercise attribute "nonstring" syntax.
+ { dg-do compile }
+ { dg-options "-Wattributes" } */
+
+#define ATTR(list) __attribute__ (list)
+#define NONSTR ATTR ((nonstring))
+
+/* Verify it's accepted on char arrays. */
+extern NONSTR char nsx_1[];
+extern char NONSTR nsx_2[];
+extern char nsx_3[] NONSTR;
+
+extern NONSTR char ns1[1];
+extern char NONSTR ns3[3];
+extern char ns5[5] NONSTR;
+
+/* Verify it's accepted on char pointers. */
+extern NONSTR char* pns_1;
+extern char NONSTR* pns_2;
+extern char* NONSTR pns_3;
+
+struct S
+{
+/* Verify it's accepted on char member pointers. */
+ NONSTR char* mpns_1;
+ char NONSTR* mpns_2;
+ char* NONSTR mpns_3;
+
+/* Verify it's accepted on char member arrays. */
+ NONSTR char mns1[1];
+ char NONSTR mns3[3];
+ char mns5[5] NONSTR;
+
+/* Verify it's accepted on char flexible array members. */
+ char mnsx[] NONSTR;
+};
+
+/* Verify it's rejected on non-array and non-pointer objects. */
+extern NONSTR char c1; /* { dg-warning ".nonstring. attribute ignored on objects of type .char." } */
+
+extern NONSTR int i1; /* { dg-warning ".nonstring. attribute ignored on objects of type .int." } */
+
+extern NONSTR int ia1[]; /* { dg-warning ".nonstring. attribute ignored on objects of type .int *\\\[\\\]." } */
+
+extern NONSTR int* pi1; /* { dg-warning ".nonstring. attribute ignored on objects of type .int *\\*." } */
+
+extern NONSTR
+void f (void); /* { dg-warning ".nonstring. attribute does not apply to functions" } */
+
+struct NONSTR
+NonStrType { int i; }; /* { dg-warning ".nonstring. attribute does not apply to types" } */
+
+typedef char NONSTR nschar_t; /* { dg-warning ".nonstring. attribute does not apply to types" } */
+
+void func (NONSTR char *pns1, char NONSTR *pns2, char* NONSTR pns3)
+{
+ (void)pns1;
+ (void)pns2;
+ (void)pns3;
+}
diff --git a/gcc/testsuite/c-c++-common/attr-nonstring-2.c b/gcc/testsuite/c-c++-common/attr-nonstring-2.c
new file mode 100644
index 00000000000..6e273e785a0
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/attr-nonstring-2.c
@@ -0,0 +1,123 @@
+/* Test to exercise attribute "nonstring".
+ { dg-do compile }
+ { dg-options "-O2 -Wattributes -Wstringop-truncation -ftrack-macro-expansion=0" } */
+
+#define ATTR(list) __attribute__ (list)
+#define NONSTR ATTR ((nonstring))
+#define strncpy(d, s, n) (__builtin_strncpy ((d), (s), (n)), sink (d))
+
+void sink (void*);
+
+/* Global string with an unknown bound. */
+extern char gsx[];
+
+/* Global string with an known bound. */
+extern char gs3[3];
+
+/* Global non-strings with an unknown bound. */
+extern NONSTR char gax_1[];
+extern char NONSTR gax_2[];
+extern char gax_3[] NONSTR;
+
+/* Global non-strings with a known bound. */
+NONSTR char gns3[3];
+char NONSTR gns4[4];
+char gns5[5] NONSTR;
+
+/* Global string pointer. */
+extern char *ps_1;
+
+/* Global non-string pointers. */
+extern NONSTR char *pns_1;
+extern char* NONSTR pns_2;
+extern char *pns_3 NONSTR;
+
+struct MemArrays
+{
+ NONSTR char ma3[3];
+ char NONSTR ma4[4];
+ char ma5[5] NONSTR;
+ char max[] NONSTR;
+};
+
+
+void test_array (const char *s, unsigned n)
+{
+ const char s7[] = "1234567";
+
+ strncpy (gs3, "", 0); /* { dg-warning "destination unchanged after copying no bytes" } */
+ strncpy (gs3, "a", 1); /* { dg-warning "output truncated before terminating nul copying 1 byte from a string of the same length" } */
+ strncpy (gs3, "a", 2);
+ strncpy (gs3, "a", 3);
+ strncpy (gs3, "ab", 3);
+ strncpy (gs3, "abc", 3); /* { dg-warning "output truncated before terminating nul copying 3 bytes from a string of the same length" } */
+
+ /* It might perhaps be helpful to diagnose certain truncation even
+ for non-strings. Then again, since the destination has been
+ explicitly annotated as non-string, it might be viewed as a false
+ positive. A valid use case seen in Glibc goes something like this:
+
+ #if FOO
+ # define S "1234"
+ #else
+ # define S "12345678"
+ #endif
+
+ strncpy (d, S, 8);
+ */
+ strncpy (gax_3, s7, 3);
+
+ strncpy (gax_1, "a", 1);
+ strncpy (gax_2, "ab", 2);
+ strncpy (gax_3, "abc", 3);
+ strncpy (gax_3, s7, 3);
+
+ strncpy (gax_1, s, 1);
+ strncpy (gax_2, s, 1);
+ strncpy (gax_3, s, 1);
+
+ strncpy (gax_1, s, n);
+ strncpy (gax_2, s, n);
+ strncpy (gax_3, s, n);
+}
+
+
+void test_pointer (const char *s, unsigned n)
+{
+ const char s7[] = "1234567";
+
+ strncpy (pns_1, "a", 1);
+ strncpy (pns_2, "ab", 2);
+ strncpy (pns_3, "abc", 3);
+ strncpy (pns_3, s7, 3); /* { dg-warning "output truncated copying 3 bytes from a string of length 7" } */
+
+ strncpy (pns_1, s, 1);
+ strncpy (pns_2, s, 1);
+ strncpy (pns_3, s, 1);
+
+ strncpy (pns_1, s, n);
+ strncpy (pns_2, s, n);
+ strncpy (pns_3, s, n);
+}
+
+
+void test_member_array (struct MemArrays *p, const char *s, unsigned n)
+{
+ const char s7[] = "1234567";
+
+ strncpy (p->ma3, "a", 1);
+ strncpy (p->ma4, "ab", 2);
+ strncpy (p->ma5, "abc", 3);
+ strncpy (p->max, "abcd", 4);
+ strncpy (p->max, s7, 5);
+
+ strncpy (p->ma3, s, 1);
+ strncpy (p->ma4, s, 1);
+ strncpy (p->ma5, s, 1);
+ strncpy (p->max, s, 1);
+
+ strncpy (p->ma3, s7, n);
+ strncpy (p->ma4, s7, n);
+ strncpy (p->ma5, s7, n);
+ strncpy (p->max, s7, n);
+}
diff --git a/gcc/testsuite/c-c++-common/cilk-plus/AN/pr57541-2.c b/gcc/testsuite/c-c++-common/cilk-plus/AN/pr57541-2.c
index 83325a77501..89a3d57ebdd 100644
--- a/gcc/testsuite/c-c++-common/cilk-plus/AN/pr57541-2.c
+++ b/gcc/testsuite/c-c++-common/cilk-plus/AN/pr57541-2.c
@@ -2,13 +2,13 @@
/* { dg-do compile } */
/* { dg-options "-fcilkplus" } */
-int foo1 ()
+void foo1 ()
{
int a;
a = __sec_reduce_add (1); /* { dg-error "Invalid builtin arguments" } */
}
-int foo2 ()
+void foo2 ()
{
int a;
a = __sec_reduce_add (); /* { dg-error "Invalid builtin arguments" } */
diff --git a/gcc/testsuite/c-c++-common/cilk-plus/AN/pr57541.c b/gcc/testsuite/c-c++-common/cilk-plus/AN/pr57541.c
index a956d0e18ab..b47de1e7ebc 100755
--- a/gcc/testsuite/c-c++-common/cilk-plus/AN/pr57541.c
+++ b/gcc/testsuite/c-c++-common/cilk-plus/AN/pr57541.c
@@ -4,8 +4,7 @@
int A[10];
-int foo () {
-
+void foo () {
/* C compiler uses the term "undeclared" whereas C++ compiler uses
"not declared". Thus, grepping for declared seem to be the easiest. */
char c = (char)N; /* { dg-error "declared" } */
@@ -15,12 +14,8 @@ int foo () {
A[l:s:c];
}
-int foo1 (int N) {
-
+void foo1 (int N) {
char c = (char)N;
short s = (short)N;
A[l:s:c]; /* { dg-error "declared" } */
}
-
-
-
diff --git a/gcc/testsuite/c-c++-common/cilk-plus/CK/cilk_for_grain_errors.c b/gcc/testsuite/c-c++-common/cilk-plus/CK/cilk_for_grain_errors.c
index 214c6be1e94..bb722811202 100644
--- a/gcc/testsuite/c-c++-common/cilk-plus/CK/cilk_for_grain_errors.c
+++ b/gcc/testsuite/c-c++-common/cilk-plus/CK/cilk_for_grain_errors.c
@@ -18,7 +18,7 @@ int main(int argc, char **argv)
_Cilk_for (int ii = 0; ii < 10; ii++)
Array1[ii] = 0;
-#pragma cilk grainsiz = 2 /* { dg-warning "ignoring #pragma cilk grainsiz" } */
+#pragma cilk grainsiz = 2 /* { dg-warning "-:ignoring #pragma cilk grainsiz" } */
_Cilk_for (int ii = 0; ii < 10; ii++)
Array1[ii] = 0;
diff --git a/gcc/testsuite/c-c++-common/cilk-plus/CK/errors.c b/gcc/testsuite/c-c++-common/cilk-plus/CK/errors.c
index b1e336e3328..d637924e9e1 100644
--- a/gcc/testsuite/c-c++-common/cilk-plus/CK/errors.c
+++ b/gcc/testsuite/c-c++-common/cilk-plus/CK/errors.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-fcilkplus" } */
+/* { dg-options "-fcilkplus -Wno-return-type" } */
int func_2(void);
diff --git a/gcc/testsuite/c-c++-common/cilk-plus/CK/pr60197.c b/gcc/testsuite/c-c++-common/cilk-plus/CK/pr60197.c
index 2b47d1efb7d..301a6f2e9d1 100644
--- a/gcc/testsuite/c-c++-common/cilk-plus/CK/pr60197.c
+++ b/gcc/testsuite/c-c++-common/cilk-plus/CK/pr60197.c
@@ -1,6 +1,6 @@
/* PR c/60197 */
/* { dg-do compile } */
-/* { dg-options "-fcilkplus" } */
+/* { dg-options "-fcilkplus -Wno-return-type" } */
extern int foo (void);
extern int bar (int);
diff --git a/gcc/testsuite/c-c++-common/cilk-plus/CK/spawn_in_return.c b/gcc/testsuite/c-c++-common/cilk-plus/CK/spawn_in_return.c
index 14b7eef1276..602971e02c9 100644
--- a/gcc/testsuite/c-c++-common/cilk-plus/CK/spawn_in_return.c
+++ b/gcc/testsuite/c-c++-common/cilk-plus/CK/spawn_in_return.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-fcilkplus" } */
+/* { dg-options "-fcilkplus -Wno-return-type" } */
int main (void)
{
diff --git a/gcc/testsuite/c-c++-common/cpp/pr58844-1.c b/gcc/testsuite/c-c++-common/cpp/pr58844-1.c
index 3abf8a76803..2a2e277b0fc 100644
--- a/gcc/testsuite/c-c++-common/cpp/pr58844-1.c
+++ b/gcc/testsuite/c-c++-common/cpp/pr58844-1.c
@@ -4,5 +4,5 @@
#define A x######x
int A = 1;
-#define A x######x /* { dg-message "previous definition" } */
-#define A x##x /* { dg-warning "redefined" } */
+#define A x######x /* { dg-message "-:previous definition" } */
+#define A x##x /* { dg-warning "-:redefined" } */
diff --git a/gcc/testsuite/c-c++-common/cpp/pr58844-2.c b/gcc/testsuite/c-c++-common/cpp/pr58844-2.c
index 1e219152fc5..52993b314be 100644
--- a/gcc/testsuite/c-c++-common/cpp/pr58844-2.c
+++ b/gcc/testsuite/c-c++-common/cpp/pr58844-2.c
@@ -4,5 +4,5 @@
#define A x######x
int A = 1;
-#define A x######x /* { dg-message "previous definition" } */
-#define A x##x /* { dg-warning "redefined" } */
+#define A x######x /* { dg-message "-:previous definition" } */
+#define A x##x /* { dg-warning "-:redefined" } */
diff --git a/gcc/testsuite/c-c++-common/cpp/va-opt-error.c b/gcc/testsuite/c-c++-common/cpp/va-opt-error.c
new file mode 100644
index 00000000000..f32f0551723
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/cpp/va-opt-error.c
@@ -0,0 +1,28 @@
+/* { dg-do preprocess }*/
+/* { dg-options "-std=gnu99" { target c } } */
+/* { dg-options "-std=c++2a" { target c++ } } */
+
+#define ERR1(x) __VA_OPT__ /* { dg-warning "__VA_OPT__ can only appear" } */
+#define ERR2(x) __VA_OPT__( /* { dg-warning "can only appear" } */
+#define ERR3(x) __VA_OPT__() /* { dg-warning "can only appear" } */
+
+#define ERR4(x,...) __VA_OPT__ /* { dg-error "unterminated __VA_OPT__" } */
+#define ERR5(x,...) __VA_OPT__( /* { dg-error "unterminated" } */
+#define ERR6(x,...) __VA_OPT__(() /* { dg-error "unterminated" } */
+
+#define ERR7(x,...) __VA_OPT__(__VA_OPT__) /* { dg-error "may not appear" } */
+#define ERR7(x,...) __VA_OPT__(__VA_OPT__()) /* { dg-error "may not appear" } */
+
+#define ERR8(x, y,...) x __VA_OPT__(##) y /* { dg-error "either end" } */
+#define ERR9(x, y,...) x __VA_OPT__(x ##) y /* { dg-error "either end" } */
+#define ERRA(x, y,...) x x __VA_OPT__(## y) /* { dg-error "either end" } */
+
+#define ERRB __VA_OPT__ /* { dg-warning "can only appear" } */
+#define ERRC(__VA_OPT__) x /* { dg-warning "can only appear" } */
+
+__VA_OPT__ /* { dg-warning "can only appear" } */
+
+#define ERRD(x)
+ERRD(__VA_OPT__) /* { dg-warning "can only appear" } */
+
+#define __VA_OPT__ /* { dg-warning "can only appear" } */
diff --git a/gcc/testsuite/c-c++-common/cpp/va-opt-pedantic.c b/gcc/testsuite/c-c++-common/cpp/va-opt-pedantic.c
new file mode 100644
index 00000000000..5887bf5a484
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/cpp/va-opt-pedantic.c
@@ -0,0 +1,5 @@
+/* { dg-do preprocess }*/
+/* { dg-options "-std=c11 -pedantic-errors" { target c } } */
+/* { dg-options "-std=c++17 -pedantic-errors" { target c++ } } */
+
+#define CALL(F, ...) F (7 __VA_OPT__(,) __VA_ARGS__) /* { dg-error "__VA_OPT__ is not available" } */
diff --git a/gcc/testsuite/c-c++-common/cpp/va-opt.c b/gcc/testsuite/c-c++-common/cpp/va-opt.c
new file mode 100644
index 00000000000..243d33b2cf1
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/cpp/va-opt.c
@@ -0,0 +1,42 @@
+/* { dg-do compile } */
+/* { dg-options "-std=gnu99" { target c } } */
+/* { dg-options "-std=c++2a" { target c++ } } */
+
+extern void f0 (void);
+extern void f1 (int);
+extern void f2 (int, int);
+extern void f3 (int, int, int);
+extern void f4 (int, int, int, int);
+extern int s (const char *);
+
+#define CALL(F, ...) F (7 __VA_OPT__(,) __VA_ARGS__)
+#define CP(F, X, Y, ...) F (__VA_OPT__(X ## Y,) __VA_ARGS__)
+#define CS(F, ...) F(__VA_OPT__(s(# __VA_ARGS__)))
+#define D(F, ...) F(__VA_OPT__(__VA_ARGS__) __VA_OPT__(,) __VA_ARGS__)
+#define CALL0(...) __VA_OPT__(f2)(0 __VA_OPT__(,)__VA_ARGS__)
+
+void t (void)
+{
+ CALL (f1);
+ CALL (f1, );
+ CALL (f2, 1);
+ CALL (f3, 1, 2);
+
+ int one = 1;
+ int two = 2;
+ int onetwo = 23;
+
+ CP (f0, one, two);
+ CP (f0, one, two, );
+ CP (f2, one, two, 3);
+
+ CS (f0);
+ CS (f1, 1, 2, 3, 4);
+
+ D (f0);
+ D (f2, 1);
+ D (f4, 1, 2);
+
+ CALL0 ();
+ CALL0 (23);
+}
diff --git a/gcc/testsuite/c-c++-common/cpp/warning-zero-location.c b/gcc/testsuite/c-c++-common/cpp/warning-zero-location.c
index 2b9c9a95217..57544b6aff1 100644
--- a/gcc/testsuite/c-c++-common/cpp/warning-zero-location.c
+++ b/gcc/testsuite/c-c++-common/cpp/warning-zero-location.c
@@ -3,6 +3,6 @@
{ dg-do compile }
*/
-#define _GNU_SOURCE /* { dg-warning "redefined" } */
+#define _GNU_SOURCE /* { dg-warning "-:redefined" } */
/* { dg-message "" "#define _GNU_SOURCE" {target *-*-* } 0 } */
diff --git a/gcc/testsuite/c-c++-common/dfp/call-by-value.c b/gcc/testsuite/c-c++-common/dfp/call-by-value.c
index 74aec53aefb..e7aea3076cf 100644
--- a/gcc/testsuite/c-c++-common/dfp/call-by-value.c
+++ b/gcc/testsuite/c-c++-common/dfp/call-by-value.c
@@ -5,17 +5,17 @@
#include "dfp-dbg.h"
-int foo32 (_Decimal32 z)
+void foo32 (_Decimal32 z)
{
z = z + 1.0df;
}
-int foo64 (_Decimal64 z)
+void foo64 (_Decimal64 z)
{
z = z + 1.0dd;
}
-int foo128 (_Decimal128 z)
+void foo128 (_Decimal128 z)
{
z = z + 1.0dl;
}
diff --git a/gcc/testsuite/c-c++-common/fold-masked-cmp-1.c b/gcc/testsuite/c-c++-common/fold-masked-cmp-1.c
index c56adc4d662..658cc092995 100644
--- a/gcc/testsuite/c-c++-common/fold-masked-cmp-1.c
+++ b/gcc/testsuite/c-c++-common/fold-masked-cmp-1.c
@@ -29,13 +29,15 @@ int result;
void test_pic (struct bfd_link_info *info)
{
if (bfd_link_pic (info))
- result++;
+ result++;
}
int test_exe (struct bfd_link_info *info)
{
if (bfd_link_executable (info))
result++;
+
+ return 0;
}
/* { dg-final { scan-assembler-times "testn?b" 2 } } */
diff --git a/gcc/testsuite/c-c++-common/fold-masked-cmp-2.c b/gcc/testsuite/c-c++-common/fold-masked-cmp-2.c
index a14bceb4566..c95cc56dfa9 100644
--- a/gcc/testsuite/c-c++-common/fold-masked-cmp-2.c
+++ b/gcc/testsuite/c-c++-common/fold-masked-cmp-2.c
@@ -36,6 +36,8 @@ int test_exe (struct bfd_link_info *info)
{
if (bfd_link_executable (info))
result++;
+
+ return 0;
}
/* { dg-final { scan-assembler-times "testn?b" 2 } } */
diff --git a/gcc/testsuite/c-c++-common/goacc/parallel-1.c b/gcc/testsuite/c-c++-common/goacc/parallel-1.c
index 6c6cc88ecad..0afc53adaa8 100644
--- a/gcc/testsuite/c-c++-common/goacc/parallel-1.c
+++ b/gcc/testsuite/c-c++-common/goacc/parallel-1.c
@@ -35,4 +35,6 @@ parallel_clauses (void)
#pragma acc parallel firstprivate (a, b)
;
+
+ return 0;
}
diff --git a/gcc/testsuite/c-c++-common/gomp/sink-1.c b/gcc/testsuite/c-c++-common/gomp/sink-1.c
index 4872a072315..5ee562bfbf7 100644
--- a/gcc/testsuite/c-c++-common/gomp/sink-1.c
+++ b/gcc/testsuite/c-c++-common/gomp/sink-1.c
@@ -93,4 +93,6 @@ baz ()
bar (i, j, 0);
#pragma omp ordered depend(source)
}
+
+ return 0;
}
diff --git a/gcc/testsuite/c-c++-common/missing-symbol.c b/gcc/testsuite/c-c++-common/missing-symbol.c
index 326b9faad7a..ed319d82e54 100644
--- a/gcc/testsuite/c-c++-common/missing-symbol.c
+++ b/gcc/testsuite/c-c++-common/missing-symbol.c
@@ -1,4 +1,4 @@
-/* { dg-options "-fdiagnostics-show-caret" } */
+/* { dg-options "-fdiagnostics-show-caret -Wno-return-type" } */
extern int foo (void);
extern int bar (void);
diff --git a/gcc/testsuite/c-c++-common/pr36513-2.c b/gcc/testsuite/c-c++-common/pr36513-2.c
index 3c12e1ae89e..d81c81dd4da 100644
--- a/gcc/testsuite/c-c++-common/pr36513-2.c
+++ b/gcc/testsuite/c-c++-common/pr36513-2.c
@@ -10,4 +10,5 @@ int main2 ()
{
char *s, t;
strchr (s, t);
+ return 0;
}
diff --git a/gcc/testsuite/c-c++-common/pr36513.c b/gcc/testsuite/c-c++-common/pr36513.c
index 026325410a1..b8b2d6d8060 100644
--- a/gcc/testsuite/c-c++-common/pr36513.c
+++ b/gcc/testsuite/c-c++-common/pr36513.c
@@ -11,5 +11,7 @@ int main1 ()
&& (t) == '\0'
? (char *) __rawmemchr (s, t)
: __builtin_strchr (s, t)));
+
+ return 0;
}
diff --git a/gcc/testsuite/c-c++-common/pr49706-2.c b/gcc/testsuite/c-c++-common/pr49706-2.c
index 09cc9eb1407..30a46c286e0 100644
--- a/gcc/testsuite/c-c++-common/pr49706-2.c
+++ b/gcc/testsuite/c-c++-common/pr49706-2.c
@@ -10,7 +10,7 @@
bool r;
-int
+void
same (int a, int b)
{
r = !a == !b;
diff --git a/gcc/testsuite/c-c++-common/pr65120.c b/gcc/testsuite/c-c++-common/pr65120.c
index c9c1f5f7e65..c8762e057d5 100644
--- a/gcc/testsuite/c-c++-common/pr65120.c
+++ b/gcc/testsuite/c-c++-common/pr65120.c
@@ -9,7 +9,7 @@
bool r;
-int
+void
f1 (int a)
{
r = !a == 0;
@@ -18,7 +18,7 @@ f1 (int a)
r = !a != 1; /* { dg-warning "logical not is only applied to the left hand side of comparison" } */
}
-int
+void
f2 (int a)
{
r = !a > 0; /* { dg-warning "logical not is only applied to the left hand side of comparison" } */
diff --git a/gcc/testsuite/c-c++-common/tm/volatile-1.c b/gcc/testsuite/c-c++-common/tm/volatile-1.c
index eb3799dd972..40b41803555 100644
--- a/gcc/testsuite/c-c++-common/tm/volatile-1.c
+++ b/gcc/testsuite/c-c++-common/tm/volatile-1.c
@@ -3,7 +3,7 @@
volatile int * p = 0;
__attribute ((transaction_safe))
-int f() {
+void f() {
int x = 0; // ok: not volatile
p = &x; // ok: the pointer is not volatile
int i = *p; // { dg-error "volatile" "read through volatile glvalue" }
diff --git a/gcc/testsuite/c-c++-common/torture/aarch64-vect-lane-2.c b/gcc/testsuite/c-c++-common/torture/aarch64-vect-lane-2.c
index 745c434b060..a2d9e45c21e 100644
--- a/gcc/testsuite/c-c++-common/torture/aarch64-vect-lane-2.c
+++ b/gcc/testsuite/c-c++-common/torture/aarch64-vect-lane-2.c
@@ -1,6 +1,6 @@
// { dg-do compile { target "aarch64*-*-*" } }
// { dg-xfail-if "" { *-*-* } { "-flto -fuse-linker-plugin" } { "" } }
-int
+void
search_line_fast (void)
{
__builtin_aarch64_im_lane_boundsi (4, 0, 0); /* { dg-error "" } */
diff --git a/gcc/testsuite/c-c++-common/vector-1.c b/gcc/testsuite/c-c++-common/vector-1.c
index 288dd1e924c..027d1777943 100644
--- a/gcc/testsuite/c-c++-common/vector-1.c
+++ b/gcc/testsuite/c-c++-common/vector-1.c
@@ -8,7 +8,7 @@
vector float a;
vector int a1;
-int f(void)
+void f(void)
{
a = ~a; /* { dg-error "" } */
a1 = ~a1;
diff --git a/gcc/testsuite/c-c++-common/vector-2.c b/gcc/testsuite/c-c++-common/vector-2.c
index e9f40a35892..9db53a88c5f 100644
--- a/gcc/testsuite/c-c++-common/vector-2.c
+++ b/gcc/testsuite/c-c++-common/vector-2.c
@@ -9,7 +9,7 @@ vector int a1;
vector float b;
vector int b1;
-int f(void)
+void f(void)
{
a = a | b; /* { dg-error "" } */
a = a & b; /* { dg-error "" } */
diff --git a/gcc/testsuite/g++.dg/abi/abi-tag14.C b/gcc/testsuite/g++.dg/abi/abi-tag14.C
index a66e6552cba..3017f492cda 100644
--- a/gcc/testsuite/g++.dg/abi/abi-tag14.C
+++ b/gcc/testsuite/g++.dg/abi/abi-tag14.C
@@ -8,20 +8,20 @@ inline namespace __cxx11 __attribute ((abi_tag ("cxx11"))) {
A a; // { dg-warning "\"cxx11\"" }
// { dg-final { scan-assembler "_Z1fB5cxx11v" } }
-A f() {} // { dg-warning "\"cxx11\"" }
+A f() { return a; } // { dg-warning "\"cxx11\"" }
namespace {
A a2;
- A f2() {}
+ A f2() { return a2; }
struct B: A {};
}
// { dg-final { scan-assembler "_Z1fPN7__cxx111AE" } }
-A f(A*) {}
+A f(A*) { return a; }
// { dg-final { scan-assembler "_Z1gIN7__cxx111AEET_v" } }
template <class T> T g() { }
-template <> A g<A>() { }
+template <> A g<A>() { return a; }
// { dg-final { scan-assembler "_Z1vIN7__cxx111AEE" { target c++14 } } }
#if __cplusplus >= 201402L
diff --git a/gcc/testsuite/g++.dg/abi/abi-tag18.C b/gcc/testsuite/g++.dg/abi/abi-tag18.C
index 89ee737bf57..ad8e16e692b 100644
--- a/gcc/testsuite/g++.dg/abi/abi-tag18.C
+++ b/gcc/testsuite/g++.dg/abi/abi-tag18.C
@@ -11,9 +11,11 @@ inline A1 f() {
struct T {
A2 g() { // { dg-warning "mangled name" }
static X x; // { dg-warning "mangled name" }
+ return A2();
}
};
T().g();
+ return A1();
}
int main() {
f();
diff --git a/gcc/testsuite/g++.dg/abi/abi-tag18a.C b/gcc/testsuite/g++.dg/abi/abi-tag18a.C
index f65f629bd94..6c569502338 100644
--- a/gcc/testsuite/g++.dg/abi/abi-tag18a.C
+++ b/gcc/testsuite/g++.dg/abi/abi-tag18a.C
@@ -11,9 +11,11 @@ inline A1 f() {
struct T {
A2 g() {
static X x;
+ return A2();
}
};
T().g();
+ return A1();
}
int main() {
f();
diff --git a/gcc/testsuite/g++.dg/abi/covariant2.C b/gcc/testsuite/g++.dg/abi/covariant2.C
index 3231cc4c84c..6c55ad6bb67 100644
--- a/gcc/testsuite/g++.dg/abi/covariant2.C
+++ b/gcc/testsuite/g++.dg/abi/covariant2.C
@@ -10,7 +10,7 @@ struct c1 {};
struct c3 : virtual c1
{
- virtual c1* f6() {}
+ virtual c1* f6() { return 0; }
int i;
};
@@ -18,7 +18,7 @@ struct c6 : virtual c3 { };
struct c7 : c3
{
- virtual c3* f6() {}
+ virtual c3* f6() { return 0; }
};
struct c24 : virtual c7
diff --git a/gcc/testsuite/g++.dg/abi/covariant3.C b/gcc/testsuite/g++.dg/abi/covariant3.C
index 178157c58b2..09b9912524d 100644
--- a/gcc/testsuite/g++.dg/abi/covariant3.C
+++ b/gcc/testsuite/g++.dg/abi/covariant3.C
@@ -34,7 +34,7 @@ struct c28 : virtual c0, virtual c11 {
virtual c18* f6();
};
-c0 *c1::f6 () {}
+c0 *c1::f6 () { return 0; }
void c5::foo () {}
void c10::foo () {}
void c18::bar () {}
diff --git a/gcc/testsuite/g++.dg/abi/mangle7.C b/gcc/testsuite/g++.dg/abi/mangle7.C
index af178d3e599..14c65a24da8 100644
--- a/gcc/testsuite/g++.dg/abi/mangle7.C
+++ b/gcc/testsuite/g++.dg/abi/mangle7.C
@@ -1,6 +1,6 @@
/* { dg-do compile } */
typedef void *const t1[2];
-float const f1(t1 (&)[79], ...) {}
+float const f1(t1 (&)[79], ...) { return 0.0f; }
/* { dg-final { scan-assembler _Z2f1RA79_A2_KPvz } } */
diff --git a/gcc/testsuite/g++.dg/asan/pr81340.C b/gcc/testsuite/g++.dg/asan/pr81340.C
index 76ac08a9a56..9db5bb46ce7 100644
--- a/gcc/testsuite/g++.dg/asan/pr81340.C
+++ b/gcc/testsuite/g++.dg/asan/pr81340.C
@@ -10,13 +10,13 @@ public:
a(char *) : c(0, d) {}
};
class e {
- int f(const int &, const int &, const int &, bool, bool, bool, int, bool);
+ void f(const int &, const int &, const int &, bool, bool, bool, int, bool);
};
class g {
public:
static g *h();
void i(a, void *);
};
-int e::f(const int &, const int &, const int &, bool j, bool, bool, int, bool) {
+void e::f(const int &, const int &, const int &, bool j, bool, bool, int, bool) {
g::h()->i("", &j);
}
diff --git a/gcc/testsuite/g++.dg/asan/pr82792.C b/gcc/testsuite/g++.dg/asan/pr82792.C
new file mode 100644
index 00000000000..99f1c35328c
--- /dev/null
+++ b/gcc/testsuite/g++.dg/asan/pr82792.C
@@ -0,0 +1,32 @@
+/* PR sanitizer/82792 */
+/* { dg-do compile } */
+/* { dg-options "-fsanitize=address" } */
+
+extern int
+test (int i, int j)
+{
+ long c;
+ (c) = 1;
+ switch (i)
+ {
+ case 1:
+ if (j)
+ {
+ c = 1;
+ }
+ goto default_case;
+ case 2:
+ {
+ if (j)
+ {
+ c = 0;
+ }
+ }
+ __attribute ((fallthrough));
+ default_case:
+ default:
+ c = 0;
+ break;
+ }
+ return 0;
+}
diff --git a/gcc/testsuite/g++.dg/bprob/g++-bprob-1.C b/gcc/testsuite/g++.dg/bprob/g++-bprob-1.C
index b1a1de77e98..3aafc06d51d 100644
--- a/gcc/testsuite/g++.dg/bprob/g++-bprob-1.C
+++ b/gcc/testsuite/g++.dg/bprob/g++-bprob-1.C
@@ -35,7 +35,7 @@ test_for2 (int m, int n, int o)
return for_temp; /* count(6) */
}
-int
+void
call_for ()
{
for_val1 += test_for1 (0);
diff --git a/gcc/testsuite/g++.dg/cilk-plus/AN/builtin_fn_mutating_tplt.cc b/gcc/testsuite/g++.dg/cilk-plus/AN/builtin_fn_mutating_tplt.cc
index db81912cbe5..111a2a29686 100644
--- a/gcc/testsuite/g++.dg/cilk-plus/AN/builtin_fn_mutating_tplt.cc
+++ b/gcc/testsuite/g++.dg/cilk-plus/AN/builtin_fn_mutating_tplt.cc
@@ -15,6 +15,8 @@ T my_func (T *x, T y)
*x = y;
else
*x = *x;
+
+ return T();
}
template <class T> T my_func (T *x, T y);
diff --git a/gcc/testsuite/g++.dg/cilk-plus/CK/pr68997.cc b/gcc/testsuite/g++.dg/cilk-plus/CK/pr68997.cc
index b442bf9cc24..a9a8a51ce7e 100644
--- a/gcc/testsuite/g++.dg/cilk-plus/CK/pr68997.cc
+++ b/gcc/testsuite/g++.dg/cilk-plus/CK/pr68997.cc
@@ -16,7 +16,7 @@ struct A2 {
};
A2 fa2 () {
- A2 ();
+ return A2 ();
}
struct B1 {
diff --git a/gcc/testsuite/g++.dg/concepts/fn8.C b/gcc/testsuite/g++.dg/concepts/fn8.C
index 5c796c7e3b2..b91f1ae9511 100644
--- a/gcc/testsuite/g++.dg/concepts/fn8.C
+++ b/gcc/testsuite/g++.dg/concepts/fn8.C
@@ -12,7 +12,7 @@ void (*p2)(int) = &f<int>; // { dg-error "no matches" }
void (*p3)(int) = &f; // { dg-error "no matches" }
struct S {
- template<Class T> int f(T) { }
+ template<Class T> int f(T) { return 0; }
};
auto p4 = &S::template f<int>; // { dg-error "no matches" }
diff --git a/gcc/testsuite/g++.dg/concepts/pr65575.C b/gcc/testsuite/g++.dg/concepts/pr65575.C
index e027dccf7d8..6745b843d31 100644
--- a/gcc/testsuite/g++.dg/concepts/pr65575.C
+++ b/gcc/testsuite/g++.dg/concepts/pr65575.C
@@ -14,7 +14,7 @@ int (*p)() requires true; // { dg-error "" }
int (&p)() requires true; // { dg-error "" }
int g(int (*)() requires true); // { dg-error "" }
-int f() { }
+int f() { return 0; }
int
main()
diff --git a/gcc/testsuite/g++.dg/concepts/template-parm11.C b/gcc/testsuite/g++.dg/concepts/template-parm11.C
index 73f38815fb7..352acc2271d 100644
--- a/gcc/testsuite/g++.dg/concepts/template-parm11.C
+++ b/gcc/testsuite/g++.dg/concepts/template-parm11.C
@@ -12,7 +12,7 @@ template<NameProvider... ColSpec>
void getTable(const ColSpec&...)
{}
-int f()
+void f()
{
getTable(7, 'a'); // { dg-error "cannot call" }
};
diff --git a/gcc/testsuite/g++.dg/conversion/op6.C b/gcc/testsuite/g++.dg/conversion/op6.C
index 9aec9f0a808..8a5efc4023a 100644
--- a/gcc/testsuite/g++.dg/conversion/op6.C
+++ b/gcc/testsuite/g++.dg/conversion/op6.C
@@ -3,9 +3,9 @@
template<class T> class smart_pointer {
public:
- operator T* () const { }
- operator bool () const { }
- operator bool () { }
+ operator T* () const { return 0; }
+ operator bool () const { return true; }
+ operator bool () { return true; }
};
class Context { };
typedef smart_pointer<Context> ContextP;
diff --git a/gcc/testsuite/g++.dg/cpp0x/Wunused-variable-1.C b/gcc/testsuite/g++.dg/cpp0x/Wunused-variable-1.C
index 39592b26a58..dd316d1ace5 100644
--- a/gcc/testsuite/g++.dg/cpp0x/Wunused-variable-1.C
+++ b/gcc/testsuite/g++.dg/cpp0x/Wunused-variable-1.C
@@ -15,6 +15,7 @@ int
foo ()
{
C {} (1, 1L, 1LL, 1.0);
+ return 0;
}
template<int N>
diff --git a/gcc/testsuite/g++.dg/cpp0x/access01.C b/gcc/testsuite/g++.dg/cpp0x/access01.C
index 55c951f97d6..3a7cee4156a 100644
--- a/gcc/testsuite/g++.dg/cpp0x/access01.C
+++ b/gcc/testsuite/g++.dg/cpp0x/access01.C
@@ -6,7 +6,7 @@ class A
{
T p;
public:
- template <class U> auto f() -> decltype(+p) { }
+ template <class U> auto f() -> decltype(+p) { return p; }
};
int main()
diff --git a/gcc/testsuite/g++.dg/cpp0x/alignas3.C b/gcc/testsuite/g++.dg/cpp0x/alignas3.C
index aa62e5afb2d..af3f171bb3f 100644
--- a/gcc/testsuite/g++.dg/cpp0x/alignas3.C
+++ b/gcc/testsuite/g++.dg/cpp0x/alignas3.C
@@ -16,5 +16,5 @@ template <class, class Y> typename F<Y>::ret_type cast(Y &);
class CompoundStmt;
class alignas(8) Stmt {
Stmt *Children[1];
- CompoundStmt *getBlock() const { cast<CompoundStmt>(Children[0]); }
+ CompoundStmt *getBlock() const { cast<CompoundStmt>(Children[0]); return 0; }
};
diff --git a/gcc/testsuite/g++.dg/cpp0x/auto2.C b/gcc/testsuite/g++.dg/cpp0x/auto2.C
index cff36d212af..e967b9423a5 100644
--- a/gcc/testsuite/g++.dg/cpp0x/auto2.C
+++ b/gcc/testsuite/g++.dg/cpp0x/auto2.C
@@ -4,12 +4,12 @@
#include <typeinfo>
extern "C" void abort();
-int f() {}
+int f() { return 0; }
struct A
{
int i;
- int f() {}
+ int f() { return 0; }
A operator+(A a) { return a; }
};
@@ -70,4 +70,5 @@ int main()
}
auto j = 42, k = 24;
+ return 0;
}
diff --git a/gcc/testsuite/g++.dg/cpp0x/constexpr-array17.C b/gcc/testsuite/g++.dg/cpp0x/constexpr-array17.C
index c6afa507f02..f722b25ec3d 100644
--- a/gcc/testsuite/g++.dg/cpp0x/constexpr-array17.C
+++ b/gcc/testsuite/g++.dg/cpp0x/constexpr-array17.C
@@ -18,6 +18,7 @@ struct D {
template <typename _ForwardIterator, typename _Size>
static _ForwardIterator __uninit_default_n(_ForwardIterator p1, _Size) {
_Construct(p1);
+ return _ForwardIterator();
}
};
template <typename _ForwardIterator, typename _Size>
diff --git a/gcc/testsuite/g++.dg/cpp0x/constexpr-defarg2.C b/gcc/testsuite/g++.dg/cpp0x/constexpr-defarg2.C
index f1ca05fe9ec..515576e1f62 100644
--- a/gcc/testsuite/g++.dg/cpp0x/constexpr-defarg2.C
+++ b/gcc/testsuite/g++.dg/cpp0x/constexpr-defarg2.C
@@ -25,6 +25,9 @@ struct A : D
A baz (const char *, A = C ());
+C c;
+A a (c);
+
A
B::foo ()
{
@@ -35,10 +38,13 @@ B::foo ()
catch (...)
{
}
+
+ return a;
}
A
B::bar ()
{
baz ("bar");
+ return a;
}
diff --git a/gcc/testsuite/g++.dg/cpp0x/constexpr-memfn1.C b/gcc/testsuite/g++.dg/cpp0x/constexpr-memfn1.C
index d59f465715d..d58e2ec6b15 100644
--- a/gcc/testsuite/g++.dg/cpp0x/constexpr-memfn1.C
+++ b/gcc/testsuite/g++.dg/cpp0x/constexpr-memfn1.C
@@ -13,6 +13,6 @@ constexpr X X::g(X x) { return x; }
struct Y
{
Y() { }
- constexpr Y f(Y y) {} // { dg-error "constexpr" }
- static constexpr Y g(Y y) {} // { dg-error "constexpr" }
+ constexpr Y f(Y y) { return y; } // { dg-error "constexpr" }
+ static constexpr Y g(Y y) { return y; } // { dg-error "constexpr" }
};
diff --git a/gcc/testsuite/g++.dg/cpp0x/constexpr-template11.C b/gcc/testsuite/g++.dg/cpp0x/constexpr-template11.C
new file mode 100644
index 00000000000..0ad49088ad1
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp0x/constexpr-template11.C
@@ -0,0 +1,16 @@
+// PR c++/65579
+// { dg-do link { target c++11 } }
+
+template <typename>
+struct S {
+ int i;
+};
+
+struct T {
+ static constexpr S<int> s = { 1 };
+};
+
+int main()
+{
+ return T::s.i;
+}
diff --git a/gcc/testsuite/g++.dg/cpp0x/dc1.C b/gcc/testsuite/g++.dg/cpp0x/dc1.C
index e7ccb64a3b3..5ce50764b5f 100644
--- a/gcc/testsuite/g++.dg/cpp0x/dc1.C
+++ b/gcc/testsuite/g++.dg/cpp0x/dc1.C
@@ -27,7 +27,7 @@ struct D : public C {
D (int _i) : C(), i(_i) { }
D () : D(-1) { }
virtual ~D() { }
- virtual int f () { }
+ virtual int f () { return 0; }
};
void f_D () { C* c = new D(); }
diff --git a/gcc/testsuite/g++.dg/cpp0x/dc3.C b/gcc/testsuite/g++.dg/cpp0x/dc3.C
index 9c6fd56564c..9c1fd53e4fe 100644
--- a/gcc/testsuite/g++.dg/cpp0x/dc3.C
+++ b/gcc/testsuite/g++.dg/cpp0x/dc3.C
@@ -43,7 +43,7 @@ struct D<X> : public C {
D (int _i) : C(), i(_i) { }
D () : D(-1) { }
virtual ~D() { }
- virtual int f () { }
+ virtual int f () { return 0; }
};
void f_D () { D<X>* d = new D<X>(); }
diff --git a/gcc/testsuite/g++.dg/cpp0x/decltype12.C b/gcc/testsuite/g++.dg/cpp0x/decltype12.C
index eae318db2df..58fd415eea5 100644
--- a/gcc/testsuite/g++.dg/cpp0x/decltype12.C
+++ b/gcc/testsuite/g++.dg/cpp0x/decltype12.C
@@ -1,4 +1,6 @@
// { dg-do compile { target c++11 } }
+// { dg-additional-options "-Wno-return-type" }
+
template<typename T, typename U>
struct is_same
{
diff --git a/gcc/testsuite/g++.dg/cpp0x/decltype17.C b/gcc/testsuite/g++.dg/cpp0x/decltype17.C
index 6e5854db6c9..47b70ecd89c 100644
--- a/gcc/testsuite/g++.dg/cpp0x/decltype17.C
+++ b/gcc/testsuite/g++.dg/cpp0x/decltype17.C
@@ -1,5 +1,6 @@
// PR c++/36628
// { dg-do run { target c++11 } }
+// { dg-additional-options "-Wno-return-type" }
#include <typeinfo>
#include <string.h>
@@ -25,4 +26,6 @@ int main()
return 2;
if (strcmp (typeid(h).name(), "FOivE") != 0)
return 3;
+
+ return 0;
}
diff --git a/gcc/testsuite/g++.dg/cpp0x/decltype3.C b/gcc/testsuite/g++.dg/cpp0x/decltype3.C
index b2e66243cc7..b921dd6d899 100644
--- a/gcc/testsuite/g++.dg/cpp0x/decltype3.C
+++ b/gcc/testsuite/g++.dg/cpp0x/decltype3.C
@@ -49,7 +49,7 @@ public:
int a;
enum B_enum { b };
decltype(a) c;
- decltype(a) foo() { }
+ decltype(a) foo() { return 0; }
decltype(b) enums_are_in_scope() { return b; } // ok
};
diff --git a/gcc/testsuite/g++.dg/cpp0x/decltype41.C b/gcc/testsuite/g++.dg/cpp0x/decltype41.C
index 1439e15c0d4..65f75b1e4fa 100644
--- a/gcc/testsuite/g++.dg/cpp0x/decltype41.C
+++ b/gcc/testsuite/g++.dg/cpp0x/decltype41.C
@@ -23,15 +23,15 @@ class B
template <class T>
struct C
{
- template <class U> decltype (a.i) f() { } // #1
- template <class U> decltype (b.i) f() { } // #2
+ template <class U> decltype (a.i) f() { return 0; } // #1
+ template <class U> decltype (b.i) f() { return 1; } // #2
};
template <class T>
struct D
{
- template <class U> decltype (A::j) f() { } // #1
- template <class U> decltype (B::j) f() { } // #2
+ template <class U> decltype (A::j) f() { return 2; } // #1
+ template <class U> decltype (B::j) f() { return 3; } // #2
};
int main()
diff --git a/gcc/testsuite/g++.dg/cpp0x/defaulted28.C b/gcc/testsuite/g++.dg/cpp0x/defaulted28.C
index 451a1b4198f..0e04dbfb8d7 100644
--- a/gcc/testsuite/g++.dg/cpp0x/defaulted28.C
+++ b/gcc/testsuite/g++.dg/cpp0x/defaulted28.C
@@ -9,7 +9,7 @@ private:
A(A const&) = default; // { dg-message "private" }
};
-int f(...) { }
+int f(...) { return 0; }
int main() {
A a;
f(a); // { dg-error "this context" }
diff --git a/gcc/testsuite/g++.dg/cpp0x/enum_base3.C b/gcc/testsuite/g++.dg/cpp0x/enum_base3.C
index 3cb2d6d8186..5f7e83e1419 100644
--- a/gcc/testsuite/g++.dg/cpp0x/enum_base3.C
+++ b/gcc/testsuite/g++.dg/cpp0x/enum_base3.C
@@ -17,7 +17,7 @@ struct C
};
struct D : C
{
- B foo () const { B a; a.foo (d); }
+ B foo () const { B a; a.foo (d); return B(); }
H d;
};
struct F : C
diff --git a/gcc/testsuite/g++.dg/cpp0x/gen-attrs-4.C b/gcc/testsuite/g++.dg/cpp0x/gen-attrs-4.C
index eb585a89be4..023d8396777 100644
--- a/gcc/testsuite/g++.dg/cpp0x/gen-attrs-4.C
+++ b/gcc/testsuite/g++.dg/cpp0x/gen-attrs-4.C
@@ -22,7 +22,7 @@ void two [[gnu::unused]] (void) {}
int
five(void)
[[noreturn]] // { dg-warning "ignored" }
-{}
+{ return 0; }
[[noreturn]]
void
diff --git a/gcc/testsuite/g++.dg/cpp0x/initlist96.C b/gcc/testsuite/g++.dg/cpp0x/initlist96.C
index 45fd128ba83..94e9c0eb148 100644
--- a/gcc/testsuite/g++.dg/cpp0x/initlist96.C
+++ b/gcc/testsuite/g++.dg/cpp0x/initlist96.C
@@ -1,5 +1,6 @@
// PR c++/66515
// { dg-do compile { target c++11 } }
+// { dg-additional-options "-Wno-return-type" }
#include <initializer_list>
diff --git a/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-58566.C b/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-58566.C
index 3101d0a895c..7bcfe3ae70b 100644
--- a/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-58566.C
+++ b/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-58566.C
@@ -6,5 +6,6 @@ struct A
int foo()
{
[this]{ return foo; }; // { dg-error "invalid use of member function|cannot convert" }
+ return 0;
}
};
diff --git a/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-conv10.C b/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-conv10.C
index 8e806c849ae..c2a60900b35 100644
--- a/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-conv10.C
+++ b/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-conv10.C
@@ -1,5 +1,6 @@
// PR c++/69889
// { dg-do compile { target c++11 } }
+// { dg-additional-options "-Wno-return-type" }
template <typename F> struct Tag {
static void fp() { f()(0); }
diff --git a/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-conv12.C b/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-conv12.C
index 16adee6b9c3..e1bd38a7bf7 100644
--- a/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-conv12.C
+++ b/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-conv12.C
@@ -1,5 +1,6 @@
// PR c++/80767
// { dg-do compile { target c++11 } }
+// { dg-additional-options "-Wno-return-type" }
template <typename T, typename U = T> struct A { using type = U; };
template <typename F, typename... G> struct B : B<F>::type, B<G...>::type {
diff --git a/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-defarg3.C b/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-defarg3.C
index 1c593930133..27aac578c07 100644
--- a/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-defarg3.C
+++ b/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-defarg3.C
@@ -10,7 +10,7 @@ struct function
template<typename T> struct C
{
- static T test(function f = [](int i){return i;}) { }
+ static T test(function f = [](int i){return i;}) { return T(); }
};
int main()
diff --git a/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-ice3.C b/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-ice3.C
index fa8a6e63a93..371d03f941f 100644
--- a/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-ice3.C
+++ b/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-ice3.C
@@ -14,6 +14,8 @@ bool Klass::dostuff()
if (local & 1) { return true; } // { dg-error "not captured|non-static" }
return false;
};
+
+ return true;
}
int main()
diff --git a/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-ice5.C b/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-ice5.C
index 914e0f71e00..50a340dbb22 100644
--- a/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-ice5.C
+++ b/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-ice5.C
@@ -4,6 +4,7 @@
template<int> int foo()
{
[] (void i) { return 0; } (0); // { dg-error "incomplete|invalid|no match" }
+ return 0;
}
void bar()
diff --git a/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-nested2.C b/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-nested2.C
index 9e509513ad9..27954f9408c 100644
--- a/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-nested2.C
+++ b/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-nested2.C
@@ -13,7 +13,7 @@ void f1(int i) {
};
struct s1 {
int f;
- int work(int n) {
+ void work(int n) {
int m = n*n;
int j = 40;
auto m3 = [this,m]{
diff --git a/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-switch.C b/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-switch.C
index ee87defac9f..328410e29aa 100644
--- a/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-switch.C
+++ b/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-switch.C
@@ -16,7 +16,7 @@ main ()
break; // { dg-error "break" }
}
};
- l = []() // { dg-warning "statement will never be executed" }
+ l = []()
{
case 3: // { dg-error "case" }
break; // { dg-error "break" }
@@ -24,3 +24,5 @@ main ()
}
}
}
+
+// { dg-prune-output "\\\[-Wswitch-unreachable]" }
diff --git a/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-template.C b/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-template.C
index 66cc7a4e1df..17c6a6e5b52 100644
--- a/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-template.C
+++ b/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-template.C
@@ -9,7 +9,7 @@ auto apply (T t) -> decltype (t())
}
template <class T>
-T f(T t)
+void f(T t)
{
T t2 = t;
if (t != [=]()->T { return t; }())
diff --git a/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-template12.C b/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-template12.C
index 635af97d763..5dfd6ede19c 100644
--- a/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-template12.C
+++ b/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-template12.C
@@ -14,6 +14,7 @@ class X
[&a]{
typename remove_reference < decltype (a) >::type t;
};
+ return true;
}
};
template class X< int >;
diff --git a/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-template2.C b/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-template2.C
index 29f63afe0df..8fbb821a4d3 100644
--- a/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-template2.C
+++ b/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-template2.C
@@ -10,7 +10,7 @@ struct T
foo (S<N> *p)
{
S<N> u;
- [&u] ()->bool {} ();
+ [&u] ()->bool { return true; } ();
}
};
diff --git a/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-this12.C b/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-this12.C
index ef573b19e02..41e4edd8a0f 100644
--- a/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-this12.C
+++ b/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-this12.C
@@ -3,7 +3,7 @@
struct A
{
- int f() {}
+ int f() { return 0; }
int i;
void foo()
diff --git a/gcc/testsuite/g++.dg/cpp0x/nolinkage1.C b/gcc/testsuite/g++.dg/cpp0x/nolinkage1.C
index d38028c4b43..474727c135c 100644
--- a/gcc/testsuite/g++.dg/cpp0x/nolinkage1.C
+++ b/gcc/testsuite/g++.dg/cpp0x/nolinkage1.C
@@ -18,4 +18,4 @@ static void g()
A<B> a;
}
-int main() { g(); f(0); }
+int main() { g(); f(0); return 0; }
diff --git a/gcc/testsuite/g++.dg/cpp0x/nolinkage1a.cc b/gcc/testsuite/g++.dg/cpp0x/nolinkage1a.cc
index f8528f3e650..6672323fdd4 100644
--- a/gcc/testsuite/g++.dg/cpp0x/nolinkage1a.cc
+++ b/gcc/testsuite/g++.dg/cpp0x/nolinkage1a.cc
@@ -12,4 +12,4 @@ static void g()
A<B> a;
}
-int dummy() { g(); f(0); }
+int dummy() { g(); f(0); return 0; }
diff --git a/gcc/testsuite/g++.dg/cpp0x/nsdmi-template5.C b/gcc/testsuite/g++.dg/cpp0x/nsdmi-template5.C
index fdaf4611ee2..5f23d463003 100644
--- a/gcc/testsuite/g++.dg/cpp0x/nsdmi-template5.C
+++ b/gcc/testsuite/g++.dg/cpp0x/nsdmi-template5.C
@@ -7,7 +7,7 @@ template<> struct A1<0>
{
template<typename, typename...> struct B1
{
- template<typename> int foo1() {}
+ template<typename> int foo1() { return 0; }
int i1 = foo1<int>();
};
@@ -19,7 +19,7 @@ template<> struct A2<0>
{
template<typename, typename> struct B2
{
- template<typename> int foo2() {}
+ template<typename> int foo2() { return 1; }
int i2 = foo2<int>();
};
@@ -31,7 +31,7 @@ template<> struct A3<0>
{
template<typename> struct B3
{
- template<typename> int foo3() {}
+ template<typename> int foo3() { return 2; }
int i3 = foo3<int>();
};
diff --git a/gcc/testsuite/g++.dg/cpp0x/parse1.C b/gcc/testsuite/g++.dg/cpp0x/parse1.C
index 5a11b7337a4..9a2698435b6 100644
--- a/gcc/testsuite/g++.dg/cpp0x/parse1.C
+++ b/gcc/testsuite/g++.dg/cpp0x/parse1.C
@@ -2,4 +2,4 @@
// { dg-do compile { target c++11 } }
typedef int B; // { dg-message "" }
-B::B() {} // { dg-error "" }
+B::B() { return 0; } // { dg-error "" }
diff --git a/gcc/testsuite/g++.dg/cpp0x/pr34054.C b/gcc/testsuite/g++.dg/cpp0x/pr34054.C
index 8043f9660ed..7a8b3249701 100644
--- a/gcc/testsuite/g++.dg/cpp0x/pr34054.C
+++ b/gcc/testsuite/g++.dg/cpp0x/pr34054.C
@@ -1,4 +1,4 @@
// PR c++/34054
// { dg-do compile { target c++11 } }
-template<typename... T> T foo() {} // { dg-error "not expanded|T" }
+template<typename... T> T foo() { return T(); } // { dg-error "not expanded|T" }
diff --git a/gcc/testsuite/g++.dg/cpp0x/pr47416.C b/gcc/testsuite/g++.dg/cpp0x/pr47416.C
index e4eb317e771..cbe2ed09ff8 100644
--- a/gcc/testsuite/g++.dg/cpp0x/pr47416.C
+++ b/gcc/testsuite/g++.dg/cpp0x/pr47416.C
@@ -1,5 +1,6 @@
// PR c++/47416
// { dg-do compile { target c++11 } }
+// { dg-additional-options "-Wno-return-type" }
namespace std
{
diff --git a/gcc/testsuite/g++.dg/cpp0x/pr58781.C b/gcc/testsuite/g++.dg/cpp0x/pr58781.C
index 58c972f90f3..069fcd5f6dc 100644
--- a/gcc/testsuite/g++.dg/cpp0x/pr58781.C
+++ b/gcc/testsuite/g++.dg/cpp0x/pr58781.C
@@ -9,7 +9,7 @@ operator""_s(const char32_t *a, size_t b)
return 0;
}
-int
+void
f()
{
using a = decltype(U"\x1181"_s);
diff --git a/gcc/testsuite/g++.dg/cpp0x/pr70538.C b/gcc/testsuite/g++.dg/cpp0x/pr70538.C
index 0347c856c55..05665d6b935 100644
--- a/gcc/testsuite/g++.dg/cpp0x/pr70538.C
+++ b/gcc/testsuite/g++.dg/cpp0x/pr70538.C
@@ -11,5 +11,5 @@ class B {
template <typename> class C : B {
using base_type = B;
base_type::base_type; // { dg-warning "access declarations" }
- PathComponentPiece m_fn1() {}
+ PathComponentPiece m_fn1() { return PathComponentPiece(); }
};
diff --git a/gcc/testsuite/g++.dg/cpp0x/pr81325.C b/gcc/testsuite/g++.dg/cpp0x/pr81325.C
index 11f0900caa2..47f32134477 100644
--- a/gcc/testsuite/g++.dg/cpp0x/pr81325.C
+++ b/gcc/testsuite/g++.dg/cpp0x/pr81325.C
@@ -35,7 +35,7 @@ struct I {
};
template <typename ResultT, typename ArgT> struct J {
void operator()();
- ResultT operator()(ArgT) {}
+ ResultT operator()(ArgT) { return ResultT(); }
};
struct K {
int AllowBind;
diff --git a/gcc/testsuite/g++.dg/cpp0x/range-for13.C b/gcc/testsuite/g++.dg/cpp0x/range-for13.C
index 100f531f760..9ed0458adcc 100644
--- a/gcc/testsuite/g++.dg/cpp0x/range-for13.C
+++ b/gcc/testsuite/g++.dg/cpp0x/range-for13.C
@@ -7,10 +7,12 @@
template<typename T> int *begin(T &t)
{
T::fail;
+ return 0;
}
template<typename T> int *end(T &t)
{
T::fail;
+ return 0;
}
struct container1
diff --git a/gcc/testsuite/g++.dg/cpp0x/range-for14.C b/gcc/testsuite/g++.dg/cpp0x/range-for14.C
index f43e1abcde7..4e0333cf927 100644
--- a/gcc/testsuite/g++.dg/cpp0x/range-for14.C
+++ b/gcc/testsuite/g++.dg/cpp0x/range-for14.C
@@ -7,10 +7,12 @@
template<typename T> int *begin(T &t)
{
T::fail;
+ return 0;
}
template<typename T> int *end(T &t)
{
T::fail;
+ return 0;
}
//Test for defaults
diff --git a/gcc/testsuite/g++.dg/cpp0x/range-for6.C b/gcc/testsuite/g++.dg/cpp0x/range-for6.C
index 366499a34d8..b4d9dd720ee 100644
--- a/gcc/testsuite/g++.dg/cpp0x/range-for6.C
+++ b/gcc/testsuite/g++.dg/cpp0x/range-for6.C
@@ -14,6 +14,8 @@ template<typename T> T foo()
sum += x;
if (sum != T(10))
abort();
+
+ return sum;
}
int main()
diff --git a/gcc/testsuite/g++.dg/cpp0x/rv-trivial-bug.C b/gcc/testsuite/g++.dg/cpp0x/rv-trivial-bug.C
index b729dc83df1..09732d17cdd 100644
--- a/gcc/testsuite/g++.dg/cpp0x/rv-trivial-bug.C
+++ b/gcc/testsuite/g++.dg/cpp0x/rv-trivial-bug.C
@@ -24,6 +24,7 @@ int test2()
assert(move_assign == 0);
b = static_cast<base2&&>(b2);
assert(move_assign == 1);
+ return 0;
}
int main()
diff --git a/gcc/testsuite/g++.dg/cpp0x/rv2n.C b/gcc/testsuite/g++.dg/cpp0x/rv2n.C
index 663a66b6d90..65eda80fba0 100644
--- a/gcc/testsuite/g++.dg/cpp0x/rv2n.C
+++ b/gcc/testsuite/g++.dg/cpp0x/rv2n.C
@@ -144,6 +144,7 @@ int test2_18()
sink_2_18(ca); // { dg-error "" }
sink_2_18(va); // { dg-error "" }
sink_2_18(cva); // { dg-error "" }
+ return 0;
}
two sink_2_23(const A&);
@@ -250,6 +251,7 @@ int test2_28()
const volatile A cva = a; // { dg-error "deleted" }
sink_2_28(va); // { dg-error "" }
sink_2_28(cva); // { dg-error "" }
+ return 0;
}
three sink_2_35(volatile A&);
@@ -304,7 +306,7 @@ int test2_37()
three sink_2_38(volatile A&);
eight sink_2_38(const volatile A&&);
-int test2_38()
+void test2_38()
{
A a;
const A ca = a; // { dg-error "deleted" }
@@ -396,7 +398,7 @@ int test2_57()
five sink_2_58( A&&);
eight sink_2_58(const volatile A&&);
-int test2_58()
+void test2_58()
{
A a;
const A ca = a; // { dg-error "deleted" }
@@ -439,6 +441,7 @@ int test2_68()
sink_2_68(ca); // { dg-error "" }
sink_2_68(va); // { dg-error "" }
sink_2_68(cva); // { dg-error "" }
+ return 0;
}
seven sink_2_78(volatile A&&);
@@ -454,6 +457,7 @@ int test2_78()
sink_2_78(ca); // { dg-error "" }
sink_2_78(va); // { dg-error "" }
sink_2_78(cva); // { dg-error "" }
+ return 0;
}
int main()
diff --git a/gcc/testsuite/g++.dg/cpp0x/rv3n.C b/gcc/testsuite/g++.dg/cpp0x/rv3n.C
index b7c1d7a2343..4549438f8ef 100644
--- a/gcc/testsuite/g++.dg/cpp0x/rv3n.C
+++ b/gcc/testsuite/g++.dg/cpp0x/rv3n.C
@@ -124,6 +124,7 @@ int test3_128()
sink_3_128(va); // { dg-error "" }
sink_3_128(cva); // { dg-error "" }
+ return 0;
}
one sink_3_134( A&);
diff --git a/gcc/testsuite/g++.dg/cpp0x/static_assert10.C b/gcc/testsuite/g++.dg/cpp0x/static_assert10.C
index e7f728e3f4f..ffbf3c047eb 100644
--- a/gcc/testsuite/g++.dg/cpp0x/static_assert10.C
+++ b/gcc/testsuite/g++.dg/cpp0x/static_assert10.C
@@ -5,4 +5,5 @@ template<typename T> bool foo(T)
{
int i;
static_assert(foo(i), "Error"); // { dg-error "non-constant condition|not usable|non-constexpr" }
+ return true;
}
diff --git a/gcc/testsuite/g++.dg/cpp0x/static_assert11.C b/gcc/testsuite/g++.dg/cpp0x/static_assert11.C
index 8a7362d5f56..36bf458e25f 100644
--- a/gcc/testsuite/g++.dg/cpp0x/static_assert11.C
+++ b/gcc/testsuite/g++.dg/cpp0x/static_assert11.C
@@ -6,5 +6,6 @@ struct A
template<typename T> bool foo(T)
{
static_assert(foo(0), "Error"); // { dg-error "non-constant condition|constant expression" }
+ return true;
}
};
diff --git a/gcc/testsuite/g++.dg/cpp0x/static_assert12.C b/gcc/testsuite/g++.dg/cpp0x/static_assert12.C
index ff6f40d918f..5d59e540910 100644
--- a/gcc/testsuite/g++.dg/cpp0x/static_assert12.C
+++ b/gcc/testsuite/g++.dg/cpp0x/static_assert12.C
@@ -14,7 +14,7 @@ template<>
};
template<typename T>
- T
+ void
float_thing(T __x)
{
static_assert(is_float<T>::value, ""); // { dg-error "static assertion failed" }
diff --git a/gcc/testsuite/g++.dg/cpp0x/static_assert13.C b/gcc/testsuite/g++.dg/cpp0x/static_assert13.C
index 86b0b0360d9..7332ff91882 100644
--- a/gcc/testsuite/g++.dg/cpp0x/static_assert13.C
+++ b/gcc/testsuite/g++.dg/cpp0x/static_assert13.C
@@ -14,7 +14,7 @@ template<>
};
template<typename T>
- T
+ void
float_thing(T __x)
{
static_assert(is_float<T>::value, ""); // { dg-error "static assertion failed" }
diff --git a/gcc/testsuite/g++.dg/cpp0x/trailing1.C b/gcc/testsuite/g++.dg/cpp0x/trailing1.C
index 7d9a906d4f3..ecf092bda0d 100644
--- a/gcc/testsuite/g++.dg/cpp0x/trailing1.C
+++ b/gcc/testsuite/g++.dg/cpp0x/trailing1.C
@@ -40,9 +40,9 @@ decltype(*(T*)0+*(U*)0) add4(T t, U u)
template <class T>
struct A
{
- T f() {}
+ T f() { return T(); }
template <class U>
- T g() {}
+ T g() { return T(); }
template <class V>
struct B
{
diff --git a/gcc/testsuite/g++.dg/cpp0x/trailing5.C b/gcc/testsuite/g++.dg/cpp0x/trailing5.C
index 48f31452e5b..32390d60897 100644
--- a/gcc/testsuite/g++.dg/cpp0x/trailing5.C
+++ b/gcc/testsuite/g++.dg/cpp0x/trailing5.C
@@ -2,9 +2,9 @@
// { dg-do compile { target c++11 } }
struct A {};
-auto foo() -> struct A {}
+auto foo() -> struct A { return A(); }
enum B {};
-auto bar() -> enum B {}
+auto bar() -> enum B { return B(); }
auto baz() -> struct C {} {} // { dg-error "" }
diff --git a/gcc/testsuite/g++.dg/cpp0x/udlit-macros.C b/gcc/testsuite/g++.dg/cpp0x/udlit-macros.C
new file mode 100644
index 00000000000..fb518281811
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp0x/udlit-macros.C
@@ -0,0 +1,31 @@
+// PR c++/80955
+// { dg-do run { target c++11 } }
+
+extern "C" int sprintf (char *s, const char *format, ...);
+extern "C" int strcmp (const char *s1, const char *s2);
+
+#define __PRI64_PREFIX "l"
+#define PRId64 __PRI64_PREFIX "d"
+
+using size_t = decltype(sizeof(0));
+#define _zero
+#define _ID _xx
+int operator""_zero(const char*, size_t) { return 0; }
+int operator""_ID(const char*, size_t) { return 0; }
+
+int main()
+{
+ long i64 = 123;
+ char buf[100];
+ sprintf(buf, "%"PRId64"abc", i64); // { dg-warning "invalid suffix on literal" }
+ return strcmp(buf, "123abc")
+ + ""_zero
+ + "bob"_zero
+ + R"#(raw
+ string)#"_zero
+ + "xx"_ID
+ + ""_ID
+ + R"AA(another
+ raw
+ string)AA"_ID;
+}
diff --git a/gcc/testsuite/g++.dg/cpp0x/udlit-template.C b/gcc/testsuite/g++.dg/cpp0x/udlit-template.C
index de21b66028f..15583199297 100644
--- a/gcc/testsuite/g++.dg/cpp0x/udlit-template.C
+++ b/gcc/testsuite/g++.dg/cpp0x/udlit-template.C
@@ -23,7 +23,7 @@ template<>
operator"" _abc<'6','6','6'>()
{ return 21; }
-int
+void
test1()
{
int i = operator"" _abc<'1','2','3'>();
diff --git a/gcc/testsuite/g++.dg/cpp0x/variadic114.C b/gcc/testsuite/g++.dg/cpp0x/variadic114.C
index 82ffe83c7c5..183cb2b5631 100644
--- a/gcc/testsuite/g++.dg/cpp0x/variadic114.C
+++ b/gcc/testsuite/g++.dg/cpp0x/variadic114.C
@@ -1,5 +1,6 @@
// PR c++/49785
// { dg-do compile { target c++11 } }
+// { dg-additional-options "-Wno-return-type" }
template <typename, typename ...> struct B { };
template <typename> class A;
diff --git a/gcc/testsuite/g++.dg/cpp0x/variadic57.C b/gcc/testsuite/g++.dg/cpp0x/variadic57.C
index a3d2bf1c5df..4bf014b54da 100644
--- a/gcc/testsuite/g++.dg/cpp0x/variadic57.C
+++ b/gcc/testsuite/g++.dg/cpp0x/variadic57.C
@@ -10,7 +10,7 @@ struct array<T, 0> {
};
template<typename T, int... Dims>
-int array<T, Dims...>::foo() { }
+int array<T, Dims...>::foo() { return 0; }
template<typename T>
-int array<T, 0>::bar() { }
+int array<T, 0>::bar() { return 0; }
diff --git a/gcc/testsuite/g++.dg/cpp0x/variadic65.C b/gcc/testsuite/g++.dg/cpp0x/variadic65.C
index 0eef2a5ed25..9223235174f 100644
--- a/gcc/testsuite/g++.dg/cpp0x/variadic65.C
+++ b/gcc/testsuite/g++.dg/cpp0x/variadic65.C
@@ -1,4 +1,6 @@
// { dg-do compile { target c++11 } }
+// { dg-additional-options "-Wno-return-type" }
+
struct unused;
template<typename T1 = unused, typename T2 = unused, typename T3 = unused,
typename T4 = unused, typename T5 = unused, typename T6 = unused>
diff --git a/gcc/testsuite/g++.dg/cpp0x/variadic66.C b/gcc/testsuite/g++.dg/cpp0x/variadic66.C
index ac922215198..bf86deee8fd 100644
--- a/gcc/testsuite/g++.dg/cpp0x/variadic66.C
+++ b/gcc/testsuite/g++.dg/cpp0x/variadic66.C
@@ -1,7 +1,7 @@
// { dg-do compile { target c++11 } }
template<typename Result, typename Functor, typename... ArgTypes>
-Result bind(Functor, ArgTypes...) { }
+Result bind(Functor, ArgTypes...) { return Result(); }
void f()
{
diff --git a/gcc/testsuite/g++.dg/cpp0x/variadic97.C b/gcc/testsuite/g++.dg/cpp0x/variadic97.C
index b251cc3da38..12d05c56237 100644
--- a/gcc/testsuite/g++.dg/cpp0x/variadic97.C
+++ b/gcc/testsuite/g++.dg/cpp0x/variadic97.C
@@ -21,7 +21,7 @@ template<typename _Functor, typename... _Bound_args>
template<typename _Functor, typename _Arg>
_Bind<_Functor(_Arg)>
- bind(_Functor, _Arg) { }
+ bind(_Functor, _Arg) { return _Bind<_Functor(_Arg)>(); }
struct State
{
diff --git a/gcc/testsuite/g++.dg/cpp0x/variadic98.C b/gcc/testsuite/g++.dg/cpp0x/variadic98.C
index c463cac17e7..6f1d9b40676 100644
--- a/gcc/testsuite/g++.dg/cpp0x/variadic98.C
+++ b/gcc/testsuite/g++.dg/cpp0x/variadic98.C
@@ -1,5 +1,6 @@
// PR c++/42358
// { dg-do assemble { target c++11 } }
+// { dg-additional-options "-Wno-return-type" }
typedef __PTRDIFF_TYPE__ ptrdiff_t;
typedef __SIZE_TYPE__ size_t;
@@ -177,6 +178,7 @@ namespace std __attribute__ ((__visibility__ ("default"))) {
};
template<typename _Tp> struct less : public binary_function<_Tp, _Tp, bool> {
bool operator()(const _Tp& __x, const _Tp& __y) const {
+ return true;
}
};
template<typename _Pair> struct _Select1st : public unary_function<_Pair, typename _Pair::first_type> {
diff --git a/gcc/testsuite/g++.dg/cpp1y/auto-fn11.C b/gcc/testsuite/g++.dg/cpp1y/auto-fn11.C
index b6a4b324b7c..9d79fb61e96 100644
--- a/gcc/testsuite/g++.dg/cpp1y/auto-fn11.C
+++ b/gcc/testsuite/g++.dg/cpp1y/auto-fn11.C
@@ -1,4 +1,5 @@
// { dg-do compile { target c++14 } }
+// { dg-additional-options "-Wno-return-type" }
auto f() { return; } // OK, return type is void
auto* g() { return; } // { dg-error "no value" }
diff --git a/gcc/testsuite/g++.dg/cpp1y/auto-fn29.C b/gcc/testsuite/g++.dg/cpp1y/auto-fn29.C
index f9260e0ec30..12f13a08906 100644
--- a/gcc/testsuite/g++.dg/cpp1y/auto-fn29.C
+++ b/gcc/testsuite/g++.dg/cpp1y/auto-fn29.C
@@ -24,6 +24,7 @@ namespace Baboon {
template <typename T>
bool f4(T const& v){
f2(v);
+ return true;
}
}
diff --git a/gcc/testsuite/g++.dg/cpp1y/auto-fn38.C b/gcc/testsuite/g++.dg/cpp1y/auto-fn38.C
index ec3cffb2505..4e541625eec 100644
--- a/gcc/testsuite/g++.dg/cpp1y/auto-fn38.C
+++ b/gcc/testsuite/g++.dg/cpp1y/auto-fn38.C
@@ -1,5 +1,6 @@
// PR c++/80145
// { dg-do compile { target c++14 } }
+// { dg-additional-options "-Wno-return-type" }
auto* foo() { } // { dg-error "no return statements" }
auto* foo();
diff --git a/gcc/testsuite/g++.dg/cpp1y/constexpr-return2.C b/gcc/testsuite/g++.dg/cpp1y/constexpr-return2.C
index ae2628d800c..d330ccfed3e 100644
--- a/gcc/testsuite/g++.dg/cpp1y/constexpr-return2.C
+++ b/gcc/testsuite/g++.dg/cpp1y/constexpr-return2.C
@@ -1,4 +1,5 @@
// { dg-do compile { target c++14 } }
+// { dg-additional-options "-Wno-return-type" }
constexpr int f (int i)
{
diff --git a/gcc/testsuite/g++.dg/cpp1y/lambda-init7.C b/gcc/testsuite/g++.dg/cpp1y/lambda-init7.C
index c943c48dfb0..d0b2dd3f256 100644
--- a/gcc/testsuite/g++.dg/cpp1y/lambda-init7.C
+++ b/gcc/testsuite/g++.dg/cpp1y/lambda-init7.C
@@ -1,6 +1,6 @@
// PR c++/59349
// { dg-do compile { target c++14 } }
-int foo () {
+void foo () {
[bar()]{}; // { dg-error "empty initializer" }
}
diff --git a/gcc/testsuite/g++.dg/cpp1y/pr63996.C b/gcc/testsuite/g++.dg/cpp1y/pr63996.C
index 8f66cdc3893..da1e0764a10 100644
--- a/gcc/testsuite/g++.dg/cpp1y/pr63996.C
+++ b/gcc/testsuite/g++.dg/cpp1y/pr63996.C
@@ -1,4 +1,5 @@
// { dg-do compile { target c++14 } }
+// { dg-additional-options "-Wno-return-type" }
constexpr int
foo (int i)
diff --git a/gcc/testsuite/g++.dg/cpp1y/pr65202.C b/gcc/testsuite/g++.dg/cpp1y/pr65202.C
index 7ce4895a134..43eb01f1fcb 100644
--- a/gcc/testsuite/g++.dg/cpp1y/pr65202.C
+++ b/gcc/testsuite/g++.dg/cpp1y/pr65202.C
@@ -1,5 +1,6 @@
// // PR c++/65202
// { dg-do compile { target c++14 } }
+// { dg-additional-options "-Wno-return-type" }
template <typename T> struct is_move_constructible;
template <typename T> struct is_move_assignable;
diff --git a/gcc/testsuite/g++.dg/cpp1y/pr66443-cxx14.C b/gcc/testsuite/g++.dg/cpp1y/pr66443-cxx14.C
index 58348082952..ae76f7faa31 100644
--- a/gcc/testsuite/g++.dg/cpp1y/pr66443-cxx14.C
+++ b/gcc/testsuite/g++.dg/cpp1y/pr66443-cxx14.C
@@ -37,6 +37,7 @@ C::C ()
bool Ok (C &c)
{
+ return true;
}
int main ()
diff --git a/gcc/testsuite/g++.dg/cpp1y/pr79253.C b/gcc/testsuite/g++.dg/cpp1y/pr79253.C
index b15efe8873f..181702acc5b 100644
--- a/gcc/testsuite/g++.dg/cpp1y/pr79253.C
+++ b/gcc/testsuite/g++.dg/cpp1y/pr79253.C
@@ -28,6 +28,6 @@ struct D
baz () { bar<F, B<>>; }
template <bool, bool, bool> struct F
{
- static B<> baz () { foo<E<0, 0, 0>> (0, 0); }
+ static B<> baz () { foo<E<0, 0, 0>> (0, 0); return B<>(); }
};
};
diff --git a/gcc/testsuite/g++.dg/cpp1y/pr81574.C b/gcc/testsuite/g++.dg/cpp1y/pr81574.C
new file mode 100644
index 00000000000..f9949ab90e9
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp1y/pr81574.C
@@ -0,0 +1,13 @@
+// { dg-do compile { target c++14 } }
+// PR c++/81574 references to functions are captured by reference.
+
+// 8.1.5.2/10
+// For each entity captured by copy, ... an lvalue reference to the
+// referenced function type if the entity is a reference to a function
+
+void f (void (&b)())
+{
+ [=] { b; } ();
+ [=, b(f)] { b; } ();
+ [=, b(b)] { b; } ();
+}
diff --git a/gcc/testsuite/g++.dg/cpp1y/static_assert1.C b/gcc/testsuite/g++.dg/cpp1y/static_assert1.C
index 513e347d7e5..ef655f49097 100644
--- a/gcc/testsuite/g++.dg/cpp1y/static_assert1.C
+++ b/gcc/testsuite/g++.dg/cpp1y/static_assert1.C
@@ -19,6 +19,7 @@ template<typename T>
{
static_assert(is_float<T>::value, ""); // { dg-error "static assertion failed" }
static_assert(is_float<T>::value); // { dg-error "static assertion failed" }
+ return T();
}
int
diff --git a/gcc/testsuite/g++.dg/cpp1y/static_assert2.C b/gcc/testsuite/g++.dg/cpp1y/static_assert2.C
index d862282cda8..d602b1e04ce 100644
--- a/gcc/testsuite/g++.dg/cpp1y/static_assert2.C
+++ b/gcc/testsuite/g++.dg/cpp1y/static_assert2.C
@@ -19,6 +19,7 @@ template<typename T>
{
static_assert(is_float<T>::value, ""); // { dg-error "static assertion failed" }
static_assert(is_float<T>::value); // { dg-error "static assertion failed" }
+ return T();
}
int
diff --git a/gcc/testsuite/g++.dg/cpp1y/var-templ44.C b/gcc/testsuite/g++.dg/cpp1y/var-templ44.C
index 2fc21a50631..2ef01cf7480 100644
--- a/gcc/testsuite/g++.dg/cpp1y/var-templ44.C
+++ b/gcc/testsuite/g++.dg/cpp1y/var-templ44.C
@@ -1,5 +1,6 @@
// PR c++/67161
// { dg-do compile { target c++14 } }
+// { dg-additional-options "-Wno-return-type" }
template <typename _Tp> struct integral_constant {
static constexpr _Tp value = 0;
diff --git a/gcc/testsuite/g++.dg/cpp1z/eval-order3.C b/gcc/testsuite/g++.dg/cpp1z/eval-order3.C
index 966ac0a6523..b53e96a9f8f 100644
--- a/gcc/testsuite/g++.dg/cpp1z/eval-order3.C
+++ b/gcc/testsuite/g++.dg/cpp1z/eval-order3.C
@@ -26,7 +26,7 @@ struct A
int _i;
A(int i): _i(f(i)) { }
A& memfn(int i, int j) { f(j); return *this; }
- int operator<<(int i) { }
+ int operator<<(int i) { return 0; }
A& operator=(const A&) { return *this; }
A& operator+=(int i) { return *this; }
};
@@ -38,7 +38,7 @@ struct B
B(int i): _i(f(i)) { }
};
-int operator>>(A&, int i) { }
+int operator>>(A&, int i) { return 0; }
A a(0);
A* afn(int i)
diff --git a/gcc/testsuite/g++.dg/cpp1z/fold6.C b/gcc/testsuite/g++.dg/cpp1z/fold6.C
index 48394366692..29a20480e77 100644
--- a/gcc/testsuite/g++.dg/cpp1z/fold6.C
+++ b/gcc/testsuite/g++.dg/cpp1z/fold6.C
@@ -6,7 +6,7 @@
int i;
template <int... Is>
-int f()
+void f()
{
(i ? i : Is + ...); // { dg-error "" }
(i + Is + ...); // { dg-error "" }
diff --git a/gcc/testsuite/g++.dg/cpp1z/inline-var2.C b/gcc/testsuite/g++.dg/cpp1z/inline-var2.C
index 1696cb0f240..852531ad258 100644
--- a/gcc/testsuite/g++.dg/cpp1z/inline-var2.C
+++ b/gcc/testsuite/g++.dg/cpp1z/inline-var2.C
@@ -69,6 +69,7 @@ foo (inline int var31) // { dg-error "'var31' declared as an 'inline' paramet
{
inline int var32; // { dg-error "'inline' specifier invalid for variable 'var32' declared at block scope" }
static inline int var33; // { dg-error "'inline' specifier invalid for variable 'var33' declared at block scope" }
+ return 0;
}
template <typename A, typename B, typename C>
struct Y
diff --git a/gcc/testsuite/g++.dg/cpp1z/lambda-this1.C b/gcc/testsuite/g++.dg/cpp1z/lambda-this1.C
index a15438019b9..2c49dd9bf73 100644
--- a/gcc/testsuite/g++.dg/cpp1z/lambda-this1.C
+++ b/gcc/testsuite/g++.dg/cpp1z/lambda-this1.C
@@ -50,7 +50,7 @@ struct B {
double foo () {
return [this]{ return [*this] { return b; }; }()(); // { dg-error "'*this' capture only available with" "" { target c++14_down } }
}
- double bar () {
+ void bar () {
auto c = []{ return [*this] { return b; }; }; // { dg-error "'this' was not captured for this lambda function" }
} // { dg-error "invalid use of non-static data member 'B::b'" "" { target *-*-* } .-1 }
}; // { dg-error "'*this' capture only available with" "" { target c++14_down } .-2 }
diff --git a/gcc/testsuite/g++.dg/cpp1z/static_assert-nomsg.C b/gcc/testsuite/g++.dg/cpp1z/static_assert-nomsg.C
index 4b265b6716e..6f787cde958 100644
--- a/gcc/testsuite/g++.dg/cpp1z/static_assert-nomsg.C
+++ b/gcc/testsuite/g++.dg/cpp1z/static_assert-nomsg.C
@@ -13,7 +13,7 @@ template<>
};
template<typename T>
- T
+ void
float_thing(T __x)
{
static_assert(is_float<T>::value, ""); // { dg-error "static assertion failed" }
diff --git a/gcc/testsuite/g++.dg/cpp2a/ptrmem1a.C b/gcc/testsuite/g++.dg/cpp2a/ptrmem1a.C
new file mode 100644
index 00000000000..074c8fe5e3a
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp2a/ptrmem1a.C
@@ -0,0 +1,24 @@
+// P0704R1
+// { dg-do compile { target c++11 } }
+// { dg-options "" }
+
+struct S {
+ void ref() & {}
+ void cref() const& {}
+ void vref() volatile & {}
+ void cvref() const volatile & {}
+};
+
+void
+foo ()
+{
+ S{}.ref(); // { dg-error "argument discards qualifiers" }
+ S{}.cref();
+ S{}.vref(); // { dg-error "argument discards qualifiers" }
+ S{}.cvref(); // { dg-error "argument discards qualifiers" }
+
+ (S{}.*&S::ref)(); // { dg-error "pointer-to-member-function type 'void \\(S::\\*\\)\\(\\) &' requires an lvalue" }
+ (S{}.*&S::cref)();
+ (S{}.*&S::vref)(); // { dg-error "pointer-to-member-function type 'void \\(S::\\*\\)\\(\\) volatile &' requires an lvalue" }
+ (S{}.*&S::cvref)(); // { dg-error "pointer-to-member-function type 'void \\(S::\\*\\)\\(\\) const volatile &' requires an lvalue" }
+}
diff --git a/gcc/testsuite/g++.dg/debug/dwarf-eh-personality-1.C b/gcc/testsuite/g++.dg/debug/dwarf-eh-personality-1.C
index 5c72588e513..8f62edc1cf0 100644
--- a/gcc/testsuite/g++.dg/debug/dwarf-eh-personality-1.C
+++ b/gcc/testsuite/g++.dg/debug/dwarf-eh-personality-1.C
@@ -11,7 +11,7 @@ int foo (void)
return 0;
}
-int foobar (void)
+void foobar (void)
{
}
diff --git a/gcc/testsuite/g++.dg/debug/dwarf2/dwarf4-typedef.C b/gcc/testsuite/g++.dg/debug/dwarf2/dwarf4-typedef.C
index cd1a0afe868..a514472b397 100644
--- a/gcc/testsuite/g++.dg/debug/dwarf2/dwarf4-typedef.C
+++ b/gcc/testsuite/g++.dg/debug/dwarf2/dwarf4-typedef.C
@@ -11,7 +11,7 @@ struct A {
struct B : public A {
template <typename A>
- bool foo(A x[2]) { }
+ bool foo(A x[2]) { return true; }
};
template <typename T>
diff --git a/gcc/testsuite/g++.dg/debug/dwarf2/icf.C b/gcc/testsuite/g++.dg/debug/dwarf2/icf.C
index ca73ab72479..74a11bf25c5 100644
--- a/gcc/testsuite/g++.dg/debug/dwarf2/icf.C
+++ b/gcc/testsuite/g++.dg/debug/dwarf2/icf.C
@@ -21,13 +21,13 @@ class B
int j;
};
-int
+void
test1(A* a)
{
a->work();
}
-int
+void
test2(A* a)
{
if (a->p())
diff --git a/gcc/testsuite/g++.dg/debug/dwarf2/pr61433.C b/gcc/testsuite/g++.dg/debug/dwarf2/pr61433.C
index a217d960205..ef2479f0cd9 100644
--- a/gcc/testsuite/g++.dg/debug/dwarf2/pr61433.C
+++ b/gcc/testsuite/g++.dg/debug/dwarf2/pr61433.C
@@ -2,6 +2,7 @@
// { dg-do compile { target c++11 } }
// { dg-xfail-if "" { powerpc-ibm-aix* } }
// { dg-options "-O -fcompare-debug -fno-inline -fno-ipa-pure-const -fipa-sra" }
+// { dg-additional-options "-Wno-return-type" }
template <class T>
struct A
@@ -21,4 +22,5 @@ auto k(T t, U u, V v) -> decltype (t.U::template B<V>::MEM)
int main()
{
k( C(), A<int>(), D() );
+ return 0;
}
diff --git a/gcc/testsuite/g++.dg/debug/nullptr01.C b/gcc/testsuite/g++.dg/debug/nullptr01.C
index 63c16ac8f0e..a6514f1ff70 100644
--- a/gcc/testsuite/g++.dg/debug/nullptr01.C
+++ b/gcc/testsuite/g++.dg/debug/nullptr01.C
@@ -10,6 +10,7 @@ template <class T> nullptr_t g(T t);
template <> nullptr_t g(A<nullptr_t>)
{
nullptr_t local;
+ return nullptr;
}
// { dg-final { scan-assembler "_Z1fDn" } }
// { dg-final { scan-assembler "_Z1gI1AIDnEEDnT_" } }
diff --git a/gcc/testsuite/g++.dg/debug/pr16792.C b/gcc/testsuite/g++.dg/debug/pr16792.C
index 39003cbe299..0efcbfa74eb 100644
--- a/gcc/testsuite/g++.dg/debug/pr16792.C
+++ b/gcc/testsuite/g++.dg/debug/pr16792.C
@@ -1,7 +1,7 @@
// { dg-do compile }
struct S { S(); };
-int foo (S b, double j) { };
+int foo (S b, double j) { return 0; };
int main ()
{
diff --git a/gcc/testsuite/g++.dg/debug/pr46241.C b/gcc/testsuite/g++.dg/debug/pr46241.C
index 95c814d4f0a..6ac9116a3d7 100644
--- a/gcc/testsuite/g++.dg/debug/pr46241.C
+++ b/gcc/testsuite/g++.dg/debug/pr46241.C
@@ -1,6 +1,6 @@
class btIDebugDraw;
class btCollisionWorld {
- virtual btIDebugDraw* getDebugDrawer() { };
+ virtual btIDebugDraw* getDebugDrawer() { return 0; };
static void rayTestSingle();
};
class btTriangleCallback {
diff --git a/gcc/testsuite/g++.dg/debug/pr46338.C b/gcc/testsuite/g++.dg/debug/pr46338.C
index caf71170f2c..7e242b9bc3d 100644
--- a/gcc/testsuite/g++.dg/debug/pr46338.C
+++ b/gcc/testsuite/g++.dg/debug/pr46338.C
@@ -9,6 +9,7 @@ struct S
{
int f ()
{
+ return 0;
}
};
diff --git a/gcc/testsuite/g++.dg/debug/pr47106.C b/gcc/testsuite/g++.dg/debug/pr47106.C
index d8d414dd4a5..84c6e3f87be 100644
--- a/gcc/testsuite/g++.dg/debug/pr47106.C
+++ b/gcc/testsuite/g++.dg/debug/pr47106.C
@@ -22,12 +22,14 @@ inline bool
baz (S s1, S)
{
while (f (&s1));
+
+ return true;
}
inline bool
bar (S s1, S s2, S)
{
- baz (s1, s2);
+ return baz (s1, s2);
}
S getS ();
@@ -35,5 +37,5 @@ S getS ();
bool
foo ()
{
- bar (getS (), getS (), getS ());
+ return bar (getS (), getS (), getS ());
}
diff --git a/gcc/testsuite/g++.dg/debug/pr71057.C b/gcc/testsuite/g++.dg/debug/pr71057.C
index 2ed1eed988e..0bd546a74e8 100644
--- a/gcc/testsuite/g++.dg/debug/pr71057.C
+++ b/gcc/testsuite/g++.dg/debug/pr71057.C
@@ -9,4 +9,4 @@ template <typename> struct C {
};
template <typename> struct D {};
C<int> a;
-D<B<int>> fn1() { fn1, a; }
+D<B<int>> fn1() { fn1, a; return D<B<int>>(); }
diff --git a/gcc/testsuite/g++.dg/debug/pr71432.C b/gcc/testsuite/g++.dg/debug/pr71432.C
index 1682f126f12..e9bc88ca863 100644
--- a/gcc/testsuite/g++.dg/debug/pr71432.C
+++ b/gcc/testsuite/g++.dg/debug/pr71432.C
@@ -105,9 +105,11 @@ namespace std
public:
size_type size ()const noexcept
{
+ return 0;
}
const _CharT *data () const noexcept
{
+ return 0;
}
};
}
@@ -127,9 +129,14 @@ class CLIParameterType
{
const std::string & getSwitchOption (unsigned int i) const
{
- } unsigned int getSwitchOptionCount () const
+ static std::string a;
+ return a;
+ }
+ unsigned int getSwitchOptionCount () const
{
- } int checkSwitched (const std::string & value) const;
+ return 0;
+ }
+ int checkSwitched (const std::string & value) const;
};
int
@@ -138,4 +145,6 @@ CLIParameterType::checkSwitched (const std::string & value) const
int contains = false;
for (unsigned int i = 0; !contains && i < getSwitchOptionCount () ;)
contains = getSwitchOption (i) == value;
+
+ return 0;
}
diff --git a/gcc/testsuite/g++.dg/debug/pr80461.C b/gcc/testsuite/g++.dg/debug/pr80461.C
index df7b4229ddf..b472e62c803 100644
--- a/gcc/testsuite/g++.dg/debug/pr80461.C
+++ b/gcc/testsuite/g++.dg/debug/pr80461.C
@@ -6,7 +6,7 @@ template <typename> class A;
struct B
{
template <typename T, typename U>
- static bool foo (U T::*) {}
+ static bool foo (U T::*) { return true; }
};
template <typename, typename> class J;
template <typename T, typename U, typename V, typename... W>
diff --git a/gcc/testsuite/g++.dg/dfp/44473-1.C b/gcc/testsuite/g++.dg/dfp/44473-1.C
index 38689fa165d..940638f4e9b 100644
--- a/gcc/testsuite/g++.dg/dfp/44473-1.C
+++ b/gcc/testsuite/g++.dg/dfp/44473-1.C
@@ -1,4 +1,5 @@
/* { dg-do assemble } */
+/* { dg-additional-options "-Wno-return-type" } */
/* Minimized from the testcase in PR c++/44473; mangling of decimal types
did not include CV qualifiers. */
@@ -119,4 +120,5 @@ bool
bar ()
{
vec.push_back (std::decimal::decimal32 (0));
+ return true;
}
diff --git a/gcc/testsuite/g++.dg/dfp/44473-2.C b/gcc/testsuite/g++.dg/dfp/44473-2.C
index 311f62299b3..60fc1ebca9c 100644
--- a/gcc/testsuite/g++.dg/dfp/44473-2.C
+++ b/gcc/testsuite/g++.dg/dfp/44473-2.C
@@ -14,12 +14,12 @@ namespace std {
}
}
-int bar (const std::decimal::decimal64 & x) { }
+int bar (const std::decimal::decimal64 & x) { return 0; }
int foo ()
{
std::decimal::decimal64 x(0);
- bar (x);
+ return bar (x);
}
// { dg-final { scan-assembler "_Z3barRKDd:" } }
diff --git a/gcc/testsuite/g++.dg/diagnostic/pr77949.C b/gcc/testsuite/g++.dg/diagnostic/pr77949.C
index 0d8b333a2ad..b81d6e2bb46 100644
--- a/gcc/testsuite/g++.dg/diagnostic/pr77949.C
+++ b/gcc/testsuite/g++.dg/diagnostic/pr77949.C
@@ -4,4 +4,4 @@
/* Very long line, where a missing semicolon would be suggested for
insertion at column 4097. */
class test { }
-// { dg-error "0: expected .;. after class definition" "" { target *-*-* } .-1 }
+// { dg-error "-: expected .;. after class definition" "" { target *-*-* } .-1 }
diff --git a/gcc/testsuite/g++.dg/eh/builtin1.C b/gcc/testsuite/g++.dg/eh/builtin1.C
index 4aa70dacd3b..2d04d9c08e8 100644
--- a/gcc/testsuite/g++.dg/eh/builtin1.C
+++ b/gcc/testsuite/g++.dg/eh/builtin1.C
@@ -8,7 +8,7 @@ extern "C" int printf (const char *, ...);
extern void callme (void) throw();
-int
+void
foo (int i)
{
try {
@@ -18,7 +18,7 @@ foo (int i)
}
}
-int
+void
bar (int i)
{
try {
diff --git a/gcc/testsuite/g++.dg/eh/builtin2.C b/gcc/testsuite/g++.dg/eh/builtin2.C
index a92477455e1..0c0bfe6a679 100644
--- a/gcc/testsuite/g++.dg/eh/builtin2.C
+++ b/gcc/testsuite/g++.dg/eh/builtin2.C
@@ -7,7 +7,7 @@ extern "C" int printf (const char *, ...) throw();
extern void callme (void) throw();
-int
+void
foo (int i)
{
try {
@@ -17,7 +17,7 @@ foo (int i)
}
}
-int
+void
bar (int i)
{
try {
diff --git a/gcc/testsuite/g++.dg/eh/builtin3.C b/gcc/testsuite/g++.dg/eh/builtin3.C
index b4a06726305..5b8c62b4b63 100644
--- a/gcc/testsuite/g++.dg/eh/builtin3.C
+++ b/gcc/testsuite/g++.dg/eh/builtin3.C
@@ -5,7 +5,7 @@
extern void callme (void) throw();
-int
+void
bar (int i)
{
try {
diff --git a/gcc/testsuite/g++.dg/eh/pr45569.C b/gcc/testsuite/g++.dg/eh/pr45569.C
index 2c100d2b9bf..4f67770b4a7 100644
--- a/gcc/testsuite/g++.dg/eh/pr45569.C
+++ b/gcc/testsuite/g++.dg/eh/pr45569.C
@@ -7,7 +7,7 @@ _Complex float g ();
void
i (_Complex float);
-float j ()
+void j ()
{
_Complex float x = 0;
try
diff --git a/gcc/testsuite/g++.dg/eh/sighandle.C b/gcc/testsuite/g++.dg/eh/sighandle.C
index 5c4995e7677..c2200024687 100644
--- a/gcc/testsuite/g++.dg/eh/sighandle.C
+++ b/gcc/testsuite/g++.dg/eh/sighandle.C
@@ -12,6 +12,7 @@ void sighandler (int signo, siginfo_t * si, void * uc)
char * dosegv ()
{
* ((volatile int *)0) = 12;
+ return 0;
}
int main ()
diff --git a/gcc/testsuite/g++.dg/eh/unwind2.C b/gcc/testsuite/g++.dg/eh/unwind2.C
index d6181c8c04b..8d98d3fd3d3 100644
--- a/gcc/testsuite/g++.dg/eh/unwind2.C
+++ b/gcc/testsuite/g++.dg/eh/unwind2.C
@@ -46,7 +46,7 @@ namespace
typedef _CharT char_type;
char_type * _M_in_beg;
char_type *eback () { return _M_in_beg; }
- char_type *gptr () {}
+ char_type *gptr () { return 0; }
};
}
namespace std
diff --git a/gcc/testsuite/g++.dg/expr/bitfield11.C b/gcc/testsuite/g++.dg/expr/bitfield11.C
index bab303ef36c..ddf48cb373a 100644
--- a/gcc/testsuite/g++.dg/expr/bitfield11.C
+++ b/gcc/testsuite/g++.dg/expr/bitfield11.C
@@ -6,7 +6,7 @@ struct A
unsigned int a : 1;
};
-bool
+void
foo (A *x, A *y)
{
x->a = y ? y->a : true;
diff --git a/gcc/testsuite/g++.dg/expr/cond12.C b/gcc/testsuite/g++.dg/expr/cond12.C
index 9134f81668f..90d77dbd708 100644
--- a/gcc/testsuite/g++.dg/expr/cond12.C
+++ b/gcc/testsuite/g++.dg/expr/cond12.C
@@ -2,10 +2,16 @@
// { dg-do run }
struct X {
- X& operator=(const X&){}
+ X& operator=(const X&);
X& operator=(X&){__builtin_abort();}
};
+X g;
+X& X::operator=(const X&)
+{
+ return g;
+}
+
int main(int argv,char**) {
X a, b;
((argv > 2) ? a : b) = X();
diff --git a/gcc/testsuite/g++.dg/expr/static_cast7.C b/gcc/testsuite/g++.dg/expr/static_cast7.C
index bced805bcdb..2398bedf1b4 100644
--- a/gcc/testsuite/g++.dg/expr/static_cast7.C
+++ b/gcc/testsuite/g++.dg/expr/static_cast7.C
@@ -1,7 +1,7 @@
// Regression test for bug 39415 (and its duplicate 44916).
struct S {};
struct T : S {};
-int f(const T*) {}
+int f(const T*) { return 0; }
void f(T*);
int main() {
S* s(0);
diff --git a/gcc/testsuite/g++.dg/ext/altivec-14.C b/gcc/testsuite/g++.dg/ext/altivec-14.C
index e5dd81c7553..02e8a23258d 100644
--- a/gcc/testsuite/g++.dg/ext/altivec-14.C
+++ b/gcc/testsuite/g++.dg/ext/altivec-14.C
@@ -5,7 +5,7 @@
void f (__attribute__((altivec (vector__))) signed int * a,
__attribute__((altivec (vector__))) signed int * const b);
-int
+void
foo (void)
{
__attribute__((altivec (vector__))) signed int a[1], b[1];
diff --git a/gcc/testsuite/g++.dg/ext/asm13.C b/gcc/testsuite/g++.dg/ext/asm13.C
index eece05e0fb3..3e1fd4bbe48 100644
--- a/gcc/testsuite/g++.dg/ext/asm13.C
+++ b/gcc/testsuite/g++.dg/ext/asm13.C
@@ -1,6 +1,6 @@
// PR c++/69257
-int fn1() {
+void fn1() {
struct S *x;
__asm ( "": :"" (*x)); // { dg-error "incomplete" }
}
diff --git a/gcc/testsuite/g++.dg/ext/builtin-object-size3.C b/gcc/testsuite/g++.dg/ext/builtin-object-size3.C
index b2a9170fc30..1e158cd6d01 100644
--- a/gcc/testsuite/g++.dg/ext/builtin-object-size3.C
+++ b/gcc/testsuite/g++.dg/ext/builtin-object-size3.C
@@ -5,7 +5,7 @@ void baz (int *, int *);
#define MEMCPY(d,s,l) __builtin___memcpy_chk (d, s, l, __builtin_object_size (d, 0)) // { dg-warning "writing" }
-int
+void
foo ()
{
int *p = new int;
@@ -15,7 +15,7 @@ foo ()
baz (p, q);
}
-int
+void
bar ()
{
int *p = new int;
diff --git a/gcc/testsuite/g++.dg/ext/has_nothrow_assign_odr.C b/gcc/testsuite/g++.dg/ext/has_nothrow_assign_odr.C
index c2e99ef5444..594d3a98672 100644
--- a/gcc/testsuite/g++.dg/ext/has_nothrow_assign_odr.C
+++ b/gcc/testsuite/g++.dg/ext/has_nothrow_assign_odr.C
@@ -1,5 +1,7 @@
// PR c++/36870
// { dg-do run }
+
+
#include <cassert>
struct S { const S& operator= (const S&); };
@@ -9,8 +11,9 @@ bool f ();
int main ()
{
assert (__has_nothrow_assign (S) == f ());
+ return 0;
}
-const S& S::operator= (const S&) { }
+const S& S::operator= (const S&a) { return a; }
bool f () { return __has_nothrow_assign (S); }
diff --git a/gcc/testsuite/g++.dg/ext/label7.C b/gcc/testsuite/g++.dg/ext/label7.C
index e92dccf5df5..d74a66aa224 100644
--- a/gcc/testsuite/g++.dg/ext/label7.C
+++ b/gcc/testsuite/g++.dg/ext/label7.C
@@ -1,7 +1,7 @@
// PR c++/32121
// { dg-do compile }
-int f (void)
+void f (void)
{
a:;
__label__ a; // { dg-error "not at the beginning" }
diff --git a/gcc/testsuite/g++.dg/ext/label8.C b/gcc/testsuite/g++.dg/ext/label8.C
index 1f6175df3a7..edffe35a6c6 100644
--- a/gcc/testsuite/g++.dg/ext/label8.C
+++ b/gcc/testsuite/g++.dg/ext/label8.C
@@ -1,7 +1,7 @@
// PR c++/32121
// { dg-do compile }
-int f (void)
+void f (void)
{
__label__ a, b;
__label__ c;
diff --git a/gcc/testsuite/g++.dg/ext/pr57735.C b/gcc/testsuite/g++.dg/ext/pr57735.C
index a8f7d05712c..d9fc9e4aa5e 100644
--- a/gcc/testsuite/g++.dg/ext/pr57735.C
+++ b/gcc/testsuite/g++.dg/ext/pr57735.C
@@ -2,7 +2,7 @@
/* { dg-require-effective-target arm_arch_v5te_ok } */
/* { dg-require-effective-target arm_arm_ok } */
/* { dg-skip-if "do not override -mfloat-abi" { *-*-* } { "-mfloat-abi=*" } {"-mfloat-abi=soft" } } */
-/* { dg-options "-march=armv5te -marm -mtune=xscale -mfloat-abi=soft -O1" } */
+/* { dg-options "-march=armv5te -marm -mtune=xscale -mfloat-abi=soft -O1 -Wno-return-type" } */
typedef unsigned int size_t;
__extension__
diff --git a/gcc/testsuite/g++.dg/ext/pr81706.C b/gcc/testsuite/g++.dg/ext/pr81706.C
index f0ed8ab6d71..395a81aa8c7 100644
--- a/gcc/testsuite/g++.dg/ext/pr81706.C
+++ b/gcc/testsuite/g++.dg/ext/pr81706.C
@@ -1,8 +1,8 @@
// PR libstdc++/81706
// { dg-do compile { target i?86-*-* x86_64-*-* } }
// { dg-options "-O3 -mavx2 -mno-avx512f" }
-// { dg-final { scan-assembler "call\[^\n\r]_ZGVdN4v_cos" } }
-// { dg-final { scan-assembler "call\[^\n\r]_ZGVdN4v_sin" } }
+// { dg-final { scan-assembler "call\[^\n\r]__?ZGVdN4v_cos" } }
+// { dg-final { scan-assembler "call\[^\n\r]__?ZGVdN4v_sin" } }
#ifdef __cplusplus
extern "C" {
diff --git a/gcc/testsuite/g++.dg/ext/tmplattr7.C b/gcc/testsuite/g++.dg/ext/tmplattr7.C
index ee6c4184768..545e089f1bb 100644
--- a/gcc/testsuite/g++.dg/ext/tmplattr7.C
+++ b/gcc/testsuite/g++.dg/ext/tmplattr7.C
@@ -8,4 +8,4 @@ bool test(const List<int> &);
int i = bar(List<int>());
-bool test(const List<int> &) {}
+bool test(const List<int> &) { return true; }
diff --git a/gcc/testsuite/g++.dg/ext/vector14.C b/gcc/testsuite/g++.dg/ext/vector14.C
index 8e792108fb8..eecff39de47 100644
--- a/gcc/testsuite/g++.dg/ext/vector14.C
+++ b/gcc/testsuite/g++.dg/ext/vector14.C
@@ -8,7 +8,11 @@
#define vector __attribute__((vector_size(16)))
-template<int N> vector signed int foo (vector float value) {}
+template<int N> vector signed int foo (vector float value)
+{
+ vector signed int a;
+ return a;
+}
template<int> void foo (float) {}
@@ -19,4 +23,6 @@ main ()
float f;
foo<1> (v);
foo<1> (f);
+
+ return 0;
}
diff --git a/gcc/testsuite/g++.dg/ext/vector8.C b/gcc/testsuite/g++.dg/ext/vector8.C
index 5f9f9561d7f..49bdc5a5611 100644
--- a/gcc/testsuite/g++.dg/ext/vector8.C
+++ b/gcc/testsuite/g++.dg/ext/vector8.C
@@ -9,7 +9,7 @@ vector int a1;
vector float b;
vector int b1;
-int f(void)
+void f(void)
{
a = a | b; /* { dg-error "" } */
a = a & b; /* { dg-error "" } */
diff --git a/gcc/testsuite/g++.dg/ext/visibility/anon1.C b/gcc/testsuite/g++.dg/ext/visibility/anon1.C
index 0135f931a6f..60a34db6ede 100644
--- a/gcc/testsuite/g++.dg/ext/visibility/anon1.C
+++ b/gcc/testsuite/g++.dg/ext/visibility/anon1.C
@@ -7,5 +7,5 @@
namespace
{
- int f() { }
+ int f() { return 0; }
}
diff --git a/gcc/testsuite/g++.dg/ext/visibility/anon2.C b/gcc/testsuite/g++.dg/ext/visibility/anon2.C
index dcf0e64fa23..7abdd050518 100644
--- a/gcc/testsuite/g++.dg/ext/visibility/anon2.C
+++ b/gcc/testsuite/g++.dg/ext/visibility/anon2.C
@@ -9,4 +9,4 @@ namespace
struct A { };
}
-A f () { }
+A f () { return A(); }
diff --git a/gcc/testsuite/g++.dg/ext/visibility/namespace1.C b/gcc/testsuite/g++.dg/ext/visibility/namespace1.C
index b7773dc9d61..1da1c14d254 100644
--- a/gcc/testsuite/g++.dg/ext/visibility/namespace1.C
+++ b/gcc/testsuite/g++.dg/ext/visibility/namespace1.C
@@ -10,7 +10,7 @@
namespace foo __attribute ((visibility ("hidden")))
{
- int f() { }
+ int f() { return 0; }
void g();
template <typename T> void t() { }
class A
diff --git a/gcc/testsuite/g++.dg/ext/vla16.C b/gcc/testsuite/g++.dg/ext/vla16.C
index c3e6ea1caba..fa51feed8f3 100644
--- a/gcc/testsuite/g++.dg/ext/vla16.C
+++ b/gcc/testsuite/g++.dg/ext/vla16.C
@@ -5,4 +5,6 @@ long fn1() {
const int a = fn1();
int b[a];
int c = *(&b[0] + sizeof(0));
+
+ return 0;
}
diff --git a/gcc/testsuite/g++.dg/gcov/gcov-1.C b/gcc/testsuite/g++.dg/gcov/gcov-1.C
index c279b1452fc..9018b9a3a73 100644
--- a/gcc/testsuite/g++.dg/gcov/gcov-1.C
+++ b/gcc/testsuite/g++.dg/gcov/gcov-1.C
@@ -74,7 +74,7 @@ test_for2 (int m, int n, int o)
return for_temp; /* count(6) */
}
-int
+void
call_for ()
{
for_val1 += test_for1 (0);
diff --git a/gcc/testsuite/g++.dg/gcov/gcov-threads-1.C b/gcc/testsuite/g++.dg/gcov/gcov-threads-1.C
index cc912f9ddf4..b020dd87d4c 100644
--- a/gcc/testsuite/g++.dg/gcov/gcov-threads-1.C
+++ b/gcc/testsuite/g++.dg/gcov/gcov-threads-1.C
@@ -19,6 +19,8 @@ static void *ContentionNoDeadlock_thread(void *start)
for (int32_t i = NR - 1; i >= starti; --i)
pthread_mutex_unlock (&cndMs[i]);
}
+
+ return 0;
}
int main(int argc, char **argv) {
for (unsigned i = 0; i < NR; i++)
diff --git a/gcc/testsuite/g++.dg/goacc/reference.C b/gcc/testsuite/g++.dg/goacc/reference.C
index b000668b1aa..07e6bd42239 100644
--- a/gcc/testsuite/g++.dg/goacc/reference.C
+++ b/gcc/testsuite/g++.dg/goacc/reference.C
@@ -1,4 +1,4 @@
-int
+void
test1 (int &ref)
{
#pragma acc kernels copy (ref)
@@ -7,7 +7,7 @@ test1 (int &ref)
}
}
-int
+void
test2 (int &ref)
{
int b;
diff --git a/gcc/testsuite/g++.dg/gomp/macro-4.C b/gcc/testsuite/g++.dg/gomp/macro-4.C
index 9fc45b143d5..6a69f1a3ca1 100644
--- a/gcc/testsuite/g++.dg/gomp/macro-4.C
+++ b/gcc/testsuite/g++.dg/gomp/macro-4.C
@@ -10,9 +10,9 @@ void bar (void);
void
foo (void)
{
-#pragma omp p // { dg-warning "ignoring #pragma omp _Pragma" }
+#pragma omp p // { dg-warning "-:ignoring #pragma omp _Pragma" }
bar ();
- omp_p // { dg-warning "ignoring #pragma omp _Pragma" }
+ omp_p // { dg-warning "-:ignoring #pragma omp _Pragma" }
bar ();
}
@@ -22,8 +22,8 @@ foo (void)
void
baz (void)
{
-#pragma omp parallel // { dg-warning "ignoring #pragma omp serial" }
+#pragma omp parallel // { dg-warning "-:ignoring #pragma omp serial" }
bar ();
- omp_parallel // { dg-warning "ignoring #pragma omp serial" }
+ omp_parallel // { dg-warning "-:ignoring #pragma omp serial" }
bar ();
}
diff --git a/gcc/testsuite/g++.dg/gomp/pr37189.C b/gcc/testsuite/g++.dg/gomp/pr37189.C
index 31d95f2d2df..a83957ea2a3 100644
--- a/gcc/testsuite/g++.dg/gomp/pr37189.C
+++ b/gcc/testsuite/g++.dg/gomp/pr37189.C
@@ -18,7 +18,7 @@ bar (T &)
{
}
-int
+void
foo ()
{
T t;
diff --git a/gcc/testsuite/g++.dg/gomp/pr39495-1.C b/gcc/testsuite/g++.dg/gomp/pr39495-1.C
index 8563e684fff..dfa7db66d2c 100644
--- a/gcc/testsuite/g++.dg/gomp/pr39495-1.C
+++ b/gcc/testsuite/g++.dg/gomp/pr39495-1.C
@@ -6,7 +6,7 @@
#define INT_MAX __INT_MAX__
#define UINT_MAX (2U * __INT_MAX__ + 1)
-int
+void
foo (void)
{
int i;
@@ -50,7 +50,7 @@ foo (void)
;
}
-int
+void
bar (void)
{
int i;
diff --git a/gcc/testsuite/g++.dg/gomp/pr39495-2.C b/gcc/testsuite/g++.dg/gomp/pr39495-2.C
index c0b4d5dbdee..f8b0f23183c 100644
--- a/gcc/testsuite/g++.dg/gomp/pr39495-2.C
+++ b/gcc/testsuite/g++.dg/gomp/pr39495-2.C
@@ -6,7 +6,7 @@
#define INT_MAX __INT_MAX__
#define UINT_MAX (2U * __INT_MAX__ + 1)
-int
+void
foo (void)
{
int i;
diff --git a/gcc/testsuite/g++.dg/gomp/pr82054.C b/gcc/testsuite/g++.dg/gomp/pr82054.C
index 3c6aa27c7f4..706dd836968 100644
--- a/gcc/testsuite/g++.dg/gomp/pr82054.C
+++ b/gcc/testsuite/g++.dg/gomp/pr82054.C
@@ -10,4 +10,6 @@ a::b ()
{
#pragma omp parallel
;
+
+ return true;
}
diff --git a/gcc/testsuite/g++.dg/graphite/pr41305.C b/gcc/testsuite/g++.dg/graphite/pr41305.C
index 6a30b0e9e39..756b1267288 100644
--- a/gcc/testsuite/g++.dg/graphite/pr41305.C
+++ b/gcc/testsuite/g++.dg/graphite/pr41305.C
@@ -1,5 +1,5 @@
// { dg-do compile }
-// { dg-options "-O3 -floop-interchange -Wno-conversion-null" }
+// { dg-options "-O3 -floop-interchange -Wno-conversion-null -Wno-return-type" }
void __throw_bad_alloc ();
diff --git a/gcc/testsuite/g++.dg/graphite/pr42930.C b/gcc/testsuite/g++.dg/graphite/pr42930.C
index c1150ce9353..e569cea0177 100644
--- a/gcc/testsuite/g++.dg/graphite/pr42930.C
+++ b/gcc/testsuite/g++.dg/graphite/pr42930.C
@@ -1,4 +1,4 @@
-/* { dg-options "-O1 -floop-block" } */
+/* { dg-options "-O1 -floop-block -Wno-return-type" } */
typedef unsigned char byte;
typedef unsigned int uint;
diff --git a/gcc/testsuite/g++.dg/inherit/covariant10.C b/gcc/testsuite/g++.dg/inherit/covariant10.C
index fe5d03c27d3..6c31aaff03d 100644
--- a/gcc/testsuite/g++.dg/inherit/covariant10.C
+++ b/gcc/testsuite/g++.dg/inherit/covariant10.C
@@ -11,12 +11,12 @@ struct c1 {
struct c5 {};
struct c6 : virtual c1 {
- virtual c5* f33() const {}
+ virtual c5* f33() const { return 0; }
};
struct c13 : virtual c5 { };
struct c17 : virtual c6
{
- virtual c13* f33() const {}
+ virtual c13* f33() const { return 0; }
};
diff --git a/gcc/testsuite/g++.dg/inherit/covariant11.C b/gcc/testsuite/g++.dg/inherit/covariant11.C
index acba965f3af..97b8d13cfa5 100644
--- a/gcc/testsuite/g++.dg/inherit/covariant11.C
+++ b/gcc/testsuite/g++.dg/inherit/covariant11.C
@@ -7,19 +7,19 @@
struct c2 { int i; };
struct c1 {
- virtual c2& f8() {}
+ virtual c2& f8() { static c2 a; return a; }
};
struct c3 : c1, c2 {
- virtual c2& f8() {}
+ virtual c2& f8() { static c2 a; return a; }
};
struct c11 : public c1 {
- virtual c3& f8() {}
+ virtual c3& f8() { static c3 a; return a; }
};
struct c15 : virtual c3 {
- virtual c2& f8() {}
+ virtual c2& f8() { static c3 a; return a; }
};
struct c18 : virtual c11 {
diff --git a/gcc/testsuite/g++.dg/inherit/protected1.C b/gcc/testsuite/g++.dg/inherit/protected1.C
index c71be53331f..130b14ad863 100644
--- a/gcc/testsuite/g++.dg/inherit/protected1.C
+++ b/gcc/testsuite/g++.dg/inherit/protected1.C
@@ -9,6 +9,7 @@ template <class X> struct B<A<X> >
{
A<X> a;
a.m_class->m_object;
+ return 0;
}
};
template <class T> class A
@@ -23,6 +24,7 @@ bool
A<T>::operator== (const X &) const
{
B<X>::check;
+ return true;
}
class C
{
diff --git a/gcc/testsuite/g++.dg/init/inline1.C b/gcc/testsuite/g++.dg/init/inline1.C
index f3e323427d3..18d735fd87b 100644
--- a/gcc/testsuite/g++.dg/init/inline1.C
+++ b/gcc/testsuite/g++.dg/init/inline1.C
@@ -3,7 +3,7 @@
struct A {
~A();
- A f(A) { }
+ A f(A) { return A(); }
};
diff --git a/gcc/testsuite/g++.dg/init/new18.C b/gcc/testsuite/g++.dg/init/new18.C
index 45f6e7a095d..5f07aaa40d0 100644
--- a/gcc/testsuite/g++.dg/init/new18.C
+++ b/gcc/testsuite/g++.dg/init/new18.C
@@ -1,5 +1,6 @@
// { dg-do compile }
// { dg-options "-O2 -fstrict-aliasing" }
+// { dg-additional-options "-Wno-return-type" }
// This caused an ICE during placement new.
diff --git a/gcc/testsuite/g++.dg/init/pr35878_1.C b/gcc/testsuite/g++.dg/init/pr35878_1.C
index e2fc4933425..7fb3221a327 100644
--- a/gcc/testsuite/g++.dg/init/pr35878_1.C
+++ b/gcc/testsuite/g++.dg/init/pr35878_1.C
@@ -1,7 +1,7 @@
// PR c++/35878
// { dg-do compile }
// { dg-options "-O2 -std=gnu++11 -fdump-tree-optimized" }
-// { dg-final { scan-tree-dump-times "v_\[0-9]+\\(D\\) \[=!]= 0" 1 "optimized" } }
+// { dg-final { scan-tree-dump-not "v_\[0-9]+\\(D\\) \[=!]= 0" "optimized" } }
#include <new>
#include <utility>
diff --git a/gcc/testsuite/g++.dg/init/pr35878_4.C b/gcc/testsuite/g++.dg/init/pr35878_4.C
new file mode 100644
index 00000000000..bd275655a63
--- /dev/null
+++ b/gcc/testsuite/g++.dg/init/pr35878_4.C
@@ -0,0 +1,23 @@
+// PR c++/35878
+// { dg-do compile }
+// { dg-options "-O2 -std=gnu++11 -fcheck-new -fdump-tree-optimized" }
+// { dg-final { scan-tree-dump-times "v_\[0-9]+\\(D\\) \[=!]= 0" 1 "optimized" } }
+
+#include <new>
+#include <utility>
+
+struct s1{
+ int a;
+ int b;
+ int c;
+};
+
+void f1 (s1 * v, s1&& s)
+{
+ new (v) s1(std::move(s));
+}
+
+void f2 (s1 * v, s1&& s)
+{
+ *v = std::move(s);
+}
diff --git a/gcc/testsuite/g++.dg/init/reference2.C b/gcc/testsuite/g++.dg/init/reference2.C
index 42f53742685..903c06496d5 100644
--- a/gcc/testsuite/g++.dg/init/reference2.C
+++ b/gcc/testsuite/g++.dg/init/reference2.C
@@ -5,7 +5,7 @@
// We should we able to diagnostic this without instantiating the template
template <int a1>
-int f()
+void f()
{
typedef int& T;
T a = T(); // { dg-error "value-initialization of reference" }
diff --git a/gcc/testsuite/g++.dg/init/reference3.C b/gcc/testsuite/g++.dg/init/reference3.C
index 8cc5afd6f4c..52ae935ca79 100644
--- a/gcc/testsuite/g++.dg/init/reference3.C
+++ b/gcc/testsuite/g++.dg/init/reference3.C
@@ -1,4 +1,5 @@
// { dg-do compile }
+// { dg-additional-options "-Wno-return-type" }
// This code used to be accepted but it is invalid as there is no
// value initialization of a reference type.
// PR c++/36695
diff --git a/gcc/testsuite/g++.dg/init/switch1.C b/gcc/testsuite/g++.dg/init/switch1.C
index b0b06b748f8..a964a29df58 100644
--- a/gcc/testsuite/g++.dg/init/switch1.C
+++ b/gcc/testsuite/g++.dg/init/switch1.C
@@ -1,4 +1,4 @@
-int f(int c)
+void f(int c)
{
switch (c)
{
diff --git a/gcc/testsuite/g++.dg/ipa/devirt-10.C b/gcc/testsuite/g++.dg/ipa/devirt-10.C
index faab5121695..84120e791f9 100644
--- a/gcc/testsuite/g++.dg/ipa/devirt-10.C
+++ b/gcc/testsuite/g++.dg/ipa/devirt-10.C
@@ -4,7 +4,7 @@ class wxPaintEvent { };
struct wxDCBase
{
wxDCBase ();
- virtual int GetLayoutDirection() const{}
+ virtual int GetLayoutDirection() const{ return 0; }
virtual void SetLayoutDirection(int){}
};
struct wxWindowDC : public wxDCBase {};
diff --git a/gcc/testsuite/g++.dg/ipa/devirt-13.C b/gcc/testsuite/g++.dg/ipa/devirt-13.C
index fecbbfc93eb..923c98e92eb 100644
--- a/gcc/testsuite/g++.dg/ipa/devirt-13.C
+++ b/gcc/testsuite/g++.dg/ipa/devirt-13.C
@@ -11,7 +11,8 @@ public:
};
}
class A a, *b=&a;
-main()
+
+int main()
{
return b->foo();
}
diff --git a/gcc/testsuite/g++.dg/ipa/devirt-14.C b/gcc/testsuite/g++.dg/ipa/devirt-14.C
index 1437e7e472b..5efbe68bf03 100644
--- a/gcc/testsuite/g++.dg/ipa/devirt-14.C
+++ b/gcc/testsuite/g++.dg/ipa/devirt-14.C
@@ -20,7 +20,8 @@ public:
};
}
class B a, *b=&a;
-main()
+
+int main()
{
if (0)
{
diff --git a/gcc/testsuite/g++.dg/ipa/devirt-15.C b/gcc/testsuite/g++.dg/ipa/devirt-15.C
index bf9f2985a34..4a899d4178a 100644
--- a/gcc/testsuite/g++.dg/ipa/devirt-15.C
+++ b/gcc/testsuite/g++.dg/ipa/devirt-15.C
@@ -30,7 +30,7 @@ m(void)
{
b->foo();
}
-main()
+int main()
{
m();
}
diff --git a/gcc/testsuite/g++.dg/ipa/devirt-16.C b/gcc/testsuite/g++.dg/ipa/devirt-16.C
index 698e3e8ff35..762ecb29fde 100644
--- a/gcc/testsuite/g++.dg/ipa/devirt-16.C
+++ b/gcc/testsuite/g++.dg/ipa/devirt-16.C
@@ -19,7 +19,7 @@ public:
};
}
class B *b;
-main()
+int main()
{
int c;
if (c)
diff --git a/gcc/testsuite/g++.dg/ipa/devirt-17.C b/gcc/testsuite/g++.dg/ipa/devirt-17.C
index bbbd23c0bad..9edc205ce52 100644
--- a/gcc/testsuite/g++.dg/ipa/devirt-17.C
+++ b/gcc/testsuite/g++.dg/ipa/devirt-17.C
@@ -20,7 +20,7 @@ public:
}
class B *b;
void get_me_lost (void *);
-main()
+int main()
{
int c;
if (c)
diff --git a/gcc/testsuite/g++.dg/ipa/devirt-18.C b/gcc/testsuite/g++.dg/ipa/devirt-18.C
index 066b775e00c..769e0bbc888 100644
--- a/gcc/testsuite/g++.dg/ipa/devirt-18.C
+++ b/gcc/testsuite/g++.dg/ipa/devirt-18.C
@@ -19,7 +19,7 @@ public:
};
}
class B *b;
-main()
+int main()
{
if (0)
{
diff --git a/gcc/testsuite/g++.dg/ipa/devirt-19.C b/gcc/testsuite/g++.dg/ipa/devirt-19.C
index fd167b6f373..8277deaabdd 100644
--- a/gcc/testsuite/g++.dg/ipa/devirt-19.C
+++ b/gcc/testsuite/g++.dg/ipa/devirt-19.C
@@ -3,6 +3,8 @@
a type change. */
/* { dg-do compile } */
/* { dg-options "-O2 -fdump-ipa-cp" } */
+/* { dg-additional-options "-Wno-return-type" } */
+
struct A {
void operator==(const A &);
};
diff --git a/gcc/testsuite/g++.dg/ipa/devirt-21.C b/gcc/testsuite/g++.dg/ipa/devirt-21.C
index a33be4eca04..aa0488df92d 100644
--- a/gcc/testsuite/g++.dg/ipa/devirt-21.C
+++ b/gcc/testsuite/g++.dg/ipa/devirt-21.C
@@ -12,7 +12,7 @@ class MultiTermDocs : public virtual B {
protected:
A readerTermDocs;
A subReaders;
- virtual B *m_fn1(int *) {}
+ virtual B *m_fn1(int *) { return NULL; }
virtual inline ~MultiTermDocs();
void wrap(void)
{
@@ -33,7 +33,7 @@ MultiTermDocs::~MultiTermDocs() {
B *C::m_fn1(int *) { abort (); }
-main()
+int main()
{
class C c;
}
diff --git a/gcc/testsuite/g++.dg/ipa/devirt-23.C b/gcc/testsuite/g++.dg/ipa/devirt-23.C
index 665e953cae7..15e65b579f7 100644
--- a/gcc/testsuite/g++.dg/ipa/devirt-23.C
+++ b/gcc/testsuite/g++.dg/ipa/devirt-23.C
@@ -12,7 +12,7 @@ class MultiTermDocs : public virtual B {
protected:
A readerTermDocs;
A subReaders;
- virtual B *m_fn1(int *) {}
+ virtual B *m_fn1(int *) { return NULL; }
virtual inline ~MultiTermDocs();
inline void wrap(void)
{
@@ -41,7 +41,7 @@ MultiTermDocs::~MultiTermDocs() {
B *C::m_fn1(int *) { abort (); }
-main()
+int main()
{
class C c;
}
diff --git a/gcc/testsuite/g++.dg/ipa/devirt-34.C b/gcc/testsuite/g++.dg/ipa/devirt-34.C
index 030e08bba8a..083c305665f 100644
--- a/gcc/testsuite/g++.dg/ipa/devirt-34.C
+++ b/gcc/testsuite/g++.dg/ipa/devirt-34.C
@@ -10,6 +10,8 @@ t(struct B *b)
{
struct A *a=b;
a->t();
+
+ return 0;
}
/* We should guess that the pointer of type B probably points to an instance
diff --git a/gcc/testsuite/g++.dg/ipa/devirt-38.C b/gcc/testsuite/g++.dg/ipa/devirt-38.C
index 40deb039e7a..fdb5c623533 100644
--- a/gcc/testsuite/g++.dg/ipa/devirt-38.C
+++ b/gcc/testsuite/g++.dg/ipa/devirt-38.C
@@ -12,7 +12,7 @@ class A : public SnmpSyntax
{
public:
A (int);
- SnmpSyntax *m_fn1 () const {}
+ SnmpSyntax *m_fn1 () const { return 0; }
SnmpSyntax &operator=(const SnmpSyntax &);
};
int a;
diff --git a/gcc/testsuite/g++.dg/ipa/devirt-40.C b/gcc/testsuite/g++.dg/ipa/devirt-40.C
index 5107c290c63..32e0d22c0e7 100644
--- a/gcc/testsuite/g++.dg/ipa/devirt-40.C
+++ b/gcc/testsuite/g++.dg/ipa/devirt-40.C
@@ -13,10 +13,14 @@ class A
UnicodeString &m_fn1 (UnicodeString &, int &p2, UErrorCode &) const;
};
UnicodeString::UnicodeString () {}
+
+UnicodeString g;
+
UnicodeString &
A::m_fn1 (UnicodeString &, int &p2, UErrorCode &) const
{
UnicodeString a[2];
+ return g;
}
/* { dg-final { scan-tree-dump-not "\\n OBJ_TYPE_REF" "fre3" } } */
diff --git a/gcc/testsuite/g++.dg/ipa/devirt-41.C b/gcc/testsuite/g++.dg/ipa/devirt-41.C
index d8182baae6e..149da911b25 100644
--- a/gcc/testsuite/g++.dg/ipa/devirt-41.C
+++ b/gcc/testsuite/g++.dg/ipa/devirt-41.C
@@ -13,7 +13,7 @@ test (struct A *a)
__builtin_abort ();
}
-main()
+int main()
{
struct B a;
dostuff (&a);
diff --git a/gcc/testsuite/g++.dg/ipa/devirt-42.C b/gcc/testsuite/g++.dg/ipa/devirt-42.C
index 6fecfe73bec..152b9689dc4 100644
--- a/gcc/testsuite/g++.dg/ipa/devirt-42.C
+++ b/gcc/testsuite/g++.dg/ipa/devirt-42.C
@@ -16,7 +16,7 @@ A::barbar()
return static_cast<B*>(this)->barbar();
}
-main()
+int main()
{
struct B b;
struct A *a = &b;
diff --git a/gcc/testsuite/g++.dg/ipa/devirt-44.C b/gcc/testsuite/g++.dg/ipa/devirt-44.C
index f69e1aa08ac..5de761412d0 100644
--- a/gcc/testsuite/g++.dg/ipa/devirt-44.C
+++ b/gcc/testsuite/g++.dg/ipa/devirt-44.C
@@ -2,7 +2,7 @@
/* { dg-options "-O3 -fno-ipa-cp -fdump-ipa-inline-details -fno-early-inlining" } */
struct A {
virtual int foo () {return 1;}
- int wrapfoo () {foo();}
+ void wrapfoo () {foo();}
A() {wrapfoo();}
};
struct B:A {virtual int foo () {return 2;}};
@@ -17,7 +17,7 @@ test (struct A *a)
__builtin_abort ();
}
-main()
+int main()
{
struct B a;
dostuff (&a);
diff --git a/gcc/testsuite/g++.dg/ipa/devirt-45.C b/gcc/testsuite/g++.dg/ipa/devirt-45.C
index 57449b702d3..ce415e7c003 100644
--- a/gcc/testsuite/g++.dg/ipa/devirt-45.C
+++ b/gcc/testsuite/g++.dg/ipa/devirt-45.C
@@ -2,7 +2,7 @@
/* { dg-options "-O3 -fno-ipa-cp -fdump-ipa-inline-details -fno-early-inlining" } */
struct A {
virtual int foo () {return 1;}
- int wrapfoo () {foo();}
+ void wrapfoo () {foo();}
A() {wrapfoo();}
};
inline void* operator new(__SIZE_TYPE__ s, void* buf) throw() {
@@ -29,7 +29,7 @@ test (struct A *a)
test2(a);
}
-main()
+int main()
{
struct B a;
dostuff (&a);
diff --git a/gcc/testsuite/g++.dg/ipa/devirt-48.C b/gcc/testsuite/g++.dg/ipa/devirt-48.C
index eae93ece71c..1aed85f8642 100644
--- a/gcc/testsuite/g++.dg/ipa/devirt-48.C
+++ b/gcc/testsuite/g++.dg/ipa/devirt-48.C
@@ -5,14 +5,14 @@ struct A {
};
struct B:A {
virtual int foo(){return 2;}
- int callfoo(){foo();}
+ void callfoo(){foo();}
};
struct C:A {
virtual int foo(){return 3;}
};
struct D:B {
virtual int foo(){return 4;}
- int callfoo(){foo();}
+ void callfoo(){foo();}
};
static void
test (struct A *a)
diff --git a/gcc/testsuite/g++.dg/ipa/devirt-52.C b/gcc/testsuite/g++.dg/ipa/devirt-52.C
index be0ab4c3621..5c736c4037a 100644
--- a/gcc/testsuite/g++.dg/ipa/devirt-52.C
+++ b/gcc/testsuite/g++.dg/ipa/devirt-52.C
@@ -1,6 +1,7 @@
// PR middle-end/77259
// { dg-do compile { target c++11 } }
// { dg-options "-O2" }
+// { dg-additional-options "-Wno-return-type" }
template <typename, typename = int> class A;
template <typename, typename> struct A
diff --git a/gcc/testsuite/g++.dg/ipa/nothrow-1.C b/gcc/testsuite/g++.dg/ipa/nothrow-1.C
index df2fbae2acc..b30b0215924 100644
--- a/gcc/testsuite/g++.dg/ipa/nothrow-1.C
+++ b/gcc/testsuite/g++.dg/ipa/nothrow-1.C
@@ -11,7 +11,7 @@ int a(void)
{
return *ptr == *ptr;
}
-main()
+int main()
{
int aa;
ptr = &barvar;
diff --git a/gcc/testsuite/g++.dg/ipa/pr43812.C b/gcc/testsuite/g++.dg/ipa/pr43812.C
index cc46eed6501..32d997b927d 100644
--- a/gcc/testsuite/g++.dg/ipa/pr43812.C
+++ b/gcc/testsuite/g++.dg/ipa/pr43812.C
@@ -35,4 +35,4 @@ public:
class LocalSurface : public Surface {
virtual BBox bbox () const;
};
-BBox LocalSurface::bbox () const { }
+BBox LocalSurface::bbox () const { return BBox(); }
diff --git a/gcc/testsuite/g++.dg/ipa/pr44372.C b/gcc/testsuite/g++.dg/ipa/pr44372.C
index 22aa747e435..dbdd865cfb6 100644
--- a/gcc/testsuite/g++.dg/ipa/pr44372.C
+++ b/gcc/testsuite/g++.dg/ipa/pr44372.C
@@ -1,5 +1,6 @@
/* { dg-do compile } */
/* { dg-options "-O -fipa-cp -fipa-cp-clone" } */
+/* { dg-additional-options "-Wno-return-type" } */
template < typename > class S3;
diff --git a/gcc/testsuite/g++.dg/ipa/pr45572-1.C b/gcc/testsuite/g++.dg/ipa/pr45572-1.C
index 82f347052d5..03a299ff422 100644
--- a/gcc/testsuite/g++.dg/ipa/pr45572-1.C
+++ b/gcc/testsuite/g++.dg/ipa/pr45572-1.C
@@ -11,18 +11,22 @@ typedef struct
extern __inline __attribute__ ((__gnu_inline__)) int
fgetc_unlocked (FILE *__fp)
{
+ return 0;
}
extern __inline __attribute__ ((__gnu_inline__)) int
putc_unlocked (int __c, FILE *__stream)
{
+ return 0;
}
extern __inline __attribute__ ((__gnu_inline__)) __ssize_t
getline (char **__lineptr, size_t *__n, FILE *__stream)
{
+ return 0;
}
extern __inline __attribute__ ((__gnu_inline__)) int
ferror_unlocked (FILE *__stream) throw ()
{
+ return 0;
}
}
typedef struct
diff --git a/gcc/testsuite/g++.dg/ipa/pr58371.C b/gcc/testsuite/g++.dg/ipa/pr58371.C
index 00cfbb831fc..cfcf677e3d0 100644
--- a/gcc/testsuite/g++.dg/ipa/pr58371.C
+++ b/gcc/testsuite/g++.dg/ipa/pr58371.C
@@ -1,5 +1,6 @@
/* { dg-do compile } */
/* { dg-options "-O2" } */
+/* { dg-additional-options "-Wno-return-type" } */
typedef int size_t;
diff --git a/gcc/testsuite/g++.dg/ipa/pr59176.C b/gcc/testsuite/g++.dg/ipa/pr59176.C
index d576bc3ba5a..379719708ac 100644
--- a/gcc/testsuite/g++.dg/ipa/pr59176.C
+++ b/gcc/testsuite/g++.dg/ipa/pr59176.C
@@ -33,9 +33,11 @@ class C {
unsigned long C::m_fn1() {
CellHierarchy:
m_fn2().m_fn1();
+ return 0;
}
unsigned long C::m_fn3() {
CellHierarchy:
m_fn2().m_fn1();
+ return 0;
}
diff --git a/gcc/testsuite/g++.dg/ipa/pr60640-1.C b/gcc/testsuite/g++.dg/ipa/pr60640-1.C
index 7a0b91893f8..2e626126d07 100644
--- a/gcc/testsuite/g++.dg/ipa/pr60640-1.C
+++ b/gcc/testsuite/g++.dg/ipa/pr60640-1.C
@@ -25,7 +25,7 @@ class D : ASN1Object, public B
};
class G : public D
{
- unsigned m_fn1 (bool) const {}
+ unsigned m_fn1 (bool) const { return 0; }
};
class F : A
{
diff --git a/gcc/testsuite/g++.dg/ipa/pr61540.C b/gcc/testsuite/g++.dg/ipa/pr61540.C
index e7dee7262c1..e297fe360dc 100644
--- a/gcc/testsuite/g++.dg/ipa/pr61540.C
+++ b/gcc/testsuite/g++.dg/ipa/pr61540.C
@@ -6,7 +6,7 @@ struct data {
};
struct top {
- virtual int topf() {}
+ virtual int topf() { return 0; }
};
struct intermediate: top {
diff --git a/gcc/testsuite/g++.dg/ipa/pr63470.C b/gcc/testsuite/g++.dg/ipa/pr63470.C
index e6fa73bcd4c..ffef0a284de 100644
--- a/gcc/testsuite/g++.dg/ipa/pr63470.C
+++ b/gcc/testsuite/g++.dg/ipa/pr63470.C
@@ -17,6 +17,7 @@ class FTjackSupport : A
const char **
m_fn2 (int)
{
+ return 0;
}
int _inited;
int *_jackClient;
diff --git a/gcc/testsuite/g++.dg/ipa/pr63587-1.C b/gcc/testsuite/g++.dg/ipa/pr63587-1.C
index cbf872e2969..a727b9e9e4c 100644
--- a/gcc/testsuite/g++.dg/ipa/pr63587-1.C
+++ b/gcc/testsuite/g++.dg/ipa/pr63587-1.C
@@ -61,6 +61,7 @@ I<_Key, _Val, _KeyOfValue, _Compare, _Alloc>::_M_get_insert_unique_pos (
const key_type &p1)
{
_M_impl._M_key_compare (p1, 0);
+ return A<int>();
}
template <typename _Key, typename _Val, typename _KeyOfValue,
typename _Compare, typename _Alloc>
@@ -69,6 +70,7 @@ I<_Key, _Val, _KeyOfValue, _Compare, _Alloc>::_M_get_insert_hint_unique_pos (
H &)
{
_M_get_insert_unique_pos (0);
+ return A<int>();
}
template <typename _Key, typename _Val, typename _KeyOfValue,
typename _Compare, typename _Alloc>
@@ -78,6 +80,7 @@ I<_Key, _Val, _KeyOfValue, _Compare, _Alloc>::_M_emplace_hint_unique (
H p1, _Args &&...)
{
_M_get_insert_hint_unique_pos (p1);
+ return 0;
}
namespace {
struct L;
diff --git a/gcc/testsuite/g++.dg/ipa/pr63587-2.C b/gcc/testsuite/g++.dg/ipa/pr63587-2.C
index a15f17e7ca3..7a9b2454507 100644
--- a/gcc/testsuite/g++.dg/ipa/pr63587-2.C
+++ b/gcc/testsuite/g++.dg/ipa/pr63587-2.C
@@ -1,6 +1,7 @@
// PR ipa/63587
// { dg-do compile { target c++11 } }
// { dg-options "-O2" }
+// { dg-additional-options "-Wno-return-type" }
namespace boost {
class basic_cstring
@@ -22,6 +23,7 @@ template <typename FunctionObj> struct function_obj_invoker0
{
FunctionObj f;
f ();
+ return 0;
}
};
template <typename FunctionObj> struct get_function_obj_invoker0
diff --git a/gcc/testsuite/g++.dg/ipa/pr63838.C b/gcc/testsuite/g++.dg/ipa/pr63838.C
index fb68c9f1ac8..d23b3133748 100644
--- a/gcc/testsuite/g++.dg/ipa/pr63838.C
+++ b/gcc/testsuite/g++.dg/ipa/pr63838.C
@@ -27,7 +27,7 @@ bar (int x)
fn ();
}
-__attribute__((noinline, noclone)) int
+__attribute__((noinline, noclone)) void
baz (int x)
{
S s;
diff --git a/gcc/testsuite/g++.dg/ipa/pr63894.C b/gcc/testsuite/g++.dg/ipa/pr63894.C
index 54409752758..fad02c36acc 100644
--- a/gcc/testsuite/g++.dg/ipa/pr63894.C
+++ b/gcc/testsuite/g++.dg/ipa/pr63894.C
@@ -43,4 +43,5 @@ J::m_fn3 (G *p1)
p1->m_fn1 (0, D (0, D::BOX, 0));
K *d = new (0) K (0, m_fn2 (0));
m_fn3 (d);
+ return true;
}
diff --git a/gcc/testsuite/g++.dg/ipa/pr64068.C b/gcc/testsuite/g++.dg/ipa/pr64068.C
index 95288836c36..008fab1e1e6 100644
--- a/gcc/testsuite/g++.dg/ipa/pr64068.C
+++ b/gcc/testsuite/g++.dg/ipa/pr64068.C
@@ -3,12 +3,12 @@
typedef int PROV_ENUMALGS_EX, PCCRYPT_OID_INFO;
class A {
- int m_fn2();
+ void m_fn2();
virtual bool m_fn1(PCCRYPT_OID_INFO);
};
int fn1();
void fn2();
-int A::m_fn2() { m_fn1(0); }
+void A::m_fn2() { m_fn1(0); }
bool fn3() {
for (;;) {
@@ -27,7 +27,7 @@ public:
B() { fn3(); }
};
class C : A {
- bool m_fn1(PCCRYPT_OID_INFO) { m_fn3(); }
+ bool m_fn1(PCCRYPT_OID_INFO) { m_fn3(); return true; }
int isSupportedByProvider_algId;
PROV_ENUMALGS_EX isSupportedByProvider_outEnumAlgs;
PROV_ENUMALGS_EX isSupportedByProvider_enumAlgs;
diff --git a/gcc/testsuite/g++.dg/ipa/pr64896.C b/gcc/testsuite/g++.dg/ipa/pr64896.C
index 0a78220be8a..7064cd05f9d 100644
--- a/gcc/testsuite/g++.dg/ipa/pr64896.C
+++ b/gcc/testsuite/g++.dg/ipa/pr64896.C
@@ -5,9 +5,9 @@
struct A { int a, b; };
struct B { A c; int d; };
struct C { virtual B fn1 () const; };
-struct D { B fn2 () const; int fn3 () const; C *fn4 () const; };
+struct D { B fn2 () const; void fn3 () const; C *fn4 () const; };
-int
+void
D::fn3 () const
{
fn4 ()->fn1 ();
diff --git a/gcc/testsuite/g++.dg/ipa/pr65002.C b/gcc/testsuite/g++.dg/ipa/pr65002.C
index ac7c66bd19a..b505ac82c54 100644
--- a/gcc/testsuite/g++.dg/ipa/pr65002.C
+++ b/gcc/testsuite/g++.dg/ipa/pr65002.C
@@ -10,12 +10,12 @@ using fastmath::floor;
class A {
public:
A(int, int);
- virtual int m_fn1(float) const;
+ virtual void m_fn1(float) const;
};
class B : A {
public:
B(int, int p2) : A(entity, p2) {}
- int m_fn1(float p1) const { long b(floor(p1)); }
+ void m_fn1(float p1) const { long b(floor(p1)); }
int entity;
};
diff --git a/gcc/testsuite/g++.dg/ipa/pr65008.C b/gcc/testsuite/g++.dg/ipa/pr65008.C
index 29b3a2f161f..84f10bb276c 100644
--- a/gcc/testsuite/g++.dg/ipa/pr65008.C
+++ b/gcc/testsuite/g++.dg/ipa/pr65008.C
@@ -8,7 +8,7 @@ struct A
virtual void foo () {}
};
-static inline int __attribute__ ((always_inline)) call_foo (A *a)
+static inline void __attribute__ ((always_inline)) call_foo (A *a)
{
a->foo ();
}
diff --git a/gcc/testsuite/g++.dg/ipa/pr65465.C b/gcc/testsuite/g++.dg/ipa/pr65465.C
index 436d88f743f..9c7d026b14a 100644
--- a/gcc/testsuite/g++.dg/ipa/pr65465.C
+++ b/gcc/testsuite/g++.dg/ipa/pr65465.C
@@ -4,7 +4,7 @@
struct A {};
struct B { virtual A foo () const; };
struct C { A foo () const; };
-struct D : virtual B { A foo () const {} };
+struct D : virtual B { A foo () const { return A(); } };
struct F : D { virtual int bar () const; };
int F::bar () const { return 0; }
A C::foo () const { return A (); }
diff --git a/gcc/testsuite/g++.dg/ipa/pr66896.C b/gcc/testsuite/g++.dg/ipa/pr66896.C
index 236537a5ea0..841515c846f 100644
--- a/gcc/testsuite/g++.dg/ipa/pr66896.C
+++ b/gcc/testsuite/g++.dg/ipa/pr66896.C
@@ -9,7 +9,7 @@ struct A
int *a;
A ();
~A () { a3 (); }
- int a1 (int * p) { if (!p) f3 (); f2 (p); }
+ void a1 (int * p) { if (!p) f3 (); f2 (p); }
void a3 () { if (*a) a1 (a); }
};
diff --git a/gcc/testsuite/g++.dg/ipa/pr68851.C b/gcc/testsuite/g++.dg/ipa/pr68851.C
index 659e4cdcc4a..73096969eb6 100644
--- a/gcc/testsuite/g++.dg/ipa/pr68851.C
+++ b/gcc/testsuite/g++.dg/ipa/pr68851.C
@@ -8,7 +8,7 @@ public:
};
class A {
public:
- virtual bool isFormControlElement() const {}
+ virtual bool isFormControlElement() const { return false; }
};
class C {
struct D {
@@ -20,10 +20,10 @@ class F {
virtual bool isFormControlElement() const;
};
class G : A, F {
- bool isFormControlElement() const {}
+ bool isFormControlElement() const { return true; }
};
bool C::checkPseudoClass(const D &p1, int &) const {
A &a = *p1.element;
a.isFormControlElement();
- a.isFormControlElement() || a.isFormControlElement();
+ return a.isFormControlElement() || a.isFormControlElement();
}
diff --git a/gcc/testsuite/g++.dg/ipa/pr78211.C b/gcc/testsuite/g++.dg/ipa/pr78211.C
index 8207a6bf7fb..510fca573de 100644
--- a/gcc/testsuite/g++.dg/ipa/pr78211.C
+++ b/gcc/testsuite/g++.dg/ipa/pr78211.C
@@ -1,6 +1,7 @@
// PR lto/78211
// { dg-do compile { target { lto && c++11 } } }
// { dg-options "-fcompare-debug -fno-printf-return-value -flto -fno-use-linker-plugin -O3" }
+// { dg-additional-options "-Wno-return-type" }
namespace std {
typedef __SIZE_TYPE__ size_t;
@@ -37,7 +38,7 @@ namespace __gnu_cxx {
reference operator*() const noexcept { }
};
template<typename _IteratorL, typename _IteratorR, typename _Container>
- inline bool operator!=(const __normal_iterator<_IteratorL, _Container>& __lhs, const __normal_iterator<_IteratorR, _Container>& __rhs) noexcept { }
+ inline bool operator!=(const __normal_iterator<_IteratorL, _Container>& __lhs, const __normal_iterator<_IteratorR, _Container>& __rhs) noexcept { return true; }
}
namespace std {
template<typename _CharT> struct char_traits;
diff --git a/gcc/testsuite/g++.dg/ipa/pr79931.C b/gcc/testsuite/g++.dg/ipa/pr79931.C
index 78f6e03c458..c5f6816a70f 100644
--- a/gcc/testsuite/g++.dg/ipa/pr79931.C
+++ b/gcc/testsuite/g++.dg/ipa/pr79931.C
@@ -21,4 +21,5 @@ NodeImpl *AttrImpl::insertBefore(NodeImpl *newChild, NodeImpl *refChild) {
NodeImpl *oldparent = newChild->getParentNode();
oldparent->removeChild(newChild);
this->getOwnerDocument()->getRanges();
+ return 0;
}
diff --git a/gcc/testsuite/g++.dg/ipa/pure-const-1.C b/gcc/testsuite/g++.dg/ipa/pure-const-1.C
index a219c713077..61940c670e7 100644
--- a/gcc/testsuite/g++.dg/ipa/pure-const-1.C
+++ b/gcc/testsuite/g++.dg/ipa/pure-const-1.C
@@ -11,7 +11,7 @@ int a(void)
{
return *ptr == *ptr;
}
-main()
+int main()
{
int aa;
ptr = &barvar;
diff --git a/gcc/testsuite/g++.dg/ipa/pure-const-2.C b/gcc/testsuite/g++.dg/ipa/pure-const-2.C
index 9788b8acdd8..6e739de4ade 100644
--- a/gcc/testsuite/g++.dg/ipa/pure-const-2.C
+++ b/gcc/testsuite/g++.dg/ipa/pure-const-2.C
@@ -15,7 +15,7 @@ static int b(void)
{
return a();
}
-main()
+int main()
{
int aa;
ptr = &barvar;
diff --git a/gcc/testsuite/g++.dg/ipa/pure-const-3.C b/gcc/testsuite/g++.dg/ipa/pure-const-3.C
index ff7fe53f335..4cf9a6a9306 100644
--- a/gcc/testsuite/g++.dg/ipa/pure-const-3.C
+++ b/gcc/testsuite/g++.dg/ipa/pure-const-3.C
@@ -21,7 +21,7 @@ static int b(int p)
return a(p+1);
return 1;
}
-main()
+int main()
{
int aa;
ptr = &barvar;
diff --git a/gcc/testsuite/g++.dg/ipa/remref-1.C b/gcc/testsuite/g++.dg/ipa/remref-1.C
index c25c425e9b7..a2c316aa9a9 100644
--- a/gcc/testsuite/g++.dg/ipa/remref-1.C
+++ b/gcc/testsuite/g++.dg/ipa/remref-1.C
@@ -28,7 +28,7 @@ allocate_a ()
a = new A();
}
-main()
+int main()
{
allocate_a();
for (int i=0; i<10000;i++)
diff --git a/gcc/testsuite/g++.dg/ipa/remref-2.C b/gcc/testsuite/g++.dg/ipa/remref-2.C
index 06bc71a5b00..1cece6541c9 100644
--- a/gcc/testsuite/g++.dg/ipa/remref-2.C
+++ b/gcc/testsuite/g++.dg/ipa/remref-2.C
@@ -29,7 +29,7 @@ allocate_a ()
a = new A();
}
-main()
+int main()
{
allocate_a();
for (int i=0; i<10000;i++)
diff --git a/gcc/testsuite/g++.dg/lookup/builtin2.C b/gcc/testsuite/g++.dg/lookup/builtin2.C
index be0a6f65d6c..4649737c3d0 100644
--- a/gcc/testsuite/g++.dg/lookup/builtin2.C
+++ b/gcc/testsuite/g++.dg/lookup/builtin2.C
@@ -14,6 +14,6 @@ namespace std
using ::toupper;
}
-int f () {
+void f () {
std::toupper((signed int)'a');
}
diff --git a/gcc/testsuite/g++.dg/lookup/crash3.C b/gcc/testsuite/g++.dg/lookup/crash3.C
index 5b58e8ed559..1b78dd607f3 100644
--- a/gcc/testsuite/g++.dg/lookup/crash3.C
+++ b/gcc/testsuite/g++.dg/lookup/crash3.C
@@ -4,12 +4,12 @@
typedef __SIZE_TYPE__ size_t;
-struct A { void *operator new(size_t s){} }; // { dg-message "operator new" }
-struct B { void *operator new(size_t s){} }; // { dg-message "operator new" }
+struct A { void *operator new(size_t s){ return 0; } }; // { dg-message "operator new" }
+struct B { void *operator new(size_t s){ return 0; } }; // { dg-message "operator new" }
struct C : A,B {};
-int crash()
+void crash()
{
C *c=new C(); // { dg-error "ambiguous" }
}
diff --git a/gcc/testsuite/g++.dg/lookup/friend20.C b/gcc/testsuite/g++.dg/lookup/friend20.C
index ecdc763ca06..8ef23a67179 100644
--- a/gcc/testsuite/g++.dg/lookup/friend20.C
+++ b/gcc/testsuite/g++.dg/lookup/friend20.C
@@ -12,5 +12,6 @@ template <int = 3> class a
a<>
d ()
{
+ return a<>();
}
};
diff --git a/gcc/testsuite/g++.dg/lookup/pr80891-5.C b/gcc/testsuite/g++.dg/lookup/pr80891-5.C
index ebf64f8758e..e018922d68b 100644
--- a/gcc/testsuite/g++.dg/lookup/pr80891-5.C
+++ b/gcc/testsuite/g++.dg/lookup/pr80891-5.C
@@ -51,6 +51,7 @@ int vf2_subgraph_iso(GraphSmall, GraphLarge, SubGraphIsoMapCallback p3,
IndexMapSmall, IndexMapLarge, VertexOrderSmall,
EdgeEquivalencePredicate, VertexEquivalencePredicate) {
vf2_subgraph_morphism<subgraph_iso>(0, 0, p3, 0, 0, 0, 0, 0);
+ return 0;
}
}
using namespace boost;
diff --git a/gcc/testsuite/g++.dg/lookup/struct2.C b/gcc/testsuite/g++.dg/lookup/struct2.C
index a66f403c291..a63e03dffdc 100644
--- a/gcc/testsuite/g++.dg/lookup/struct2.C
+++ b/gcc/testsuite/g++.dg/lookup/struct2.C
@@ -4,4 +4,4 @@ struct c {};
namespace A {
int c(struct c*req);
}
-int A::c(struct c*req) {}
+int A::c(struct c*req) { return 0; }
diff --git a/gcc/testsuite/g++.dg/lto/20080709_0.C b/gcc/testsuite/g++.dg/lto/20080709_0.C
index 55ae8c9ec2c..c9e81b44e2c 100644
--- a/gcc/testsuite/g++.dg/lto/20080709_0.C
+++ b/gcc/testsuite/g++.dg/lto/20080709_0.C
@@ -4,6 +4,7 @@ class Init {
};
int f(Init *a) {
+ return 0;
}
int main(void){
diff --git a/gcc/testsuite/g++.dg/lto/20080907_0.C b/gcc/testsuite/g++.dg/lto/20080907_0.C
index 9a4552310d9..a423196e7db 100644
--- a/gcc/testsuite/g++.dg/lto/20080907_0.C
+++ b/gcc/testsuite/g++.dg/lto/20080907_0.C
@@ -1,3 +1,5 @@
// { dg-lto-do assemble }
+// { dg-lto-options "-Wno-return-type" }
+
struct Foo { void func (); }; Foo & bar () { } struct Baz { Baz (Baz &); };
Baz dummy() { bar().func(); }
diff --git a/gcc/testsuite/g++.dg/lto/20080915_0.C b/gcc/testsuite/g++.dg/lto/20080915_0.C
index 3789765a964..40c50422857 100644
--- a/gcc/testsuite/g++.dg/lto/20080915_0.C
+++ b/gcc/testsuite/g++.dg/lto/20080915_0.C
@@ -1,4 +1,6 @@
// { dg-lto-do assemble }
+// { dg-lto-options "-Wno-return-type" }
+
struct Foo {
static const int dummy;
@@ -16,7 +18,7 @@ int func(const Bar& b) {
}
struct Baz {
- Bar& operator*() {}
+ Bar& operator*() { static Bar a; return a; }
};
void func1(Baz baz, int i, Bar bar) {
diff --git a/gcc/testsuite/g++.dg/lto/20080916_0.C b/gcc/testsuite/g++.dg/lto/20080916_0.C
index 3c900cd43b5..77494ef689c 100644
--- a/gcc/testsuite/g++.dg/lto/20080916_0.C
+++ b/gcc/testsuite/g++.dg/lto/20080916_0.C
@@ -9,4 +9,6 @@ class ios_base {
};
ios_base& g() {
+ static ios_base a;
+ return a;
}
diff --git a/gcc/testsuite/g++.dg/lto/20081022_0.C b/gcc/testsuite/g++.dg/lto/20081022_0.C
index 219f92d6a96..92d463b1cde 100644
--- a/gcc/testsuite/g++.dg/lto/20081022_0.C
+++ b/gcc/testsuite/g++.dg/lto/20081022_0.C
@@ -6,6 +6,8 @@ f (foo * a)
return a->bar ();
}
+int
main()
{
+ return 0;
}
diff --git a/gcc/testsuite/g++.dg/lto/20081023_0.C b/gcc/testsuite/g++.dg/lto/20081023_0.C
index ab3fc36f386..d40ac279b77 100644
--- a/gcc/testsuite/g++.dg/lto/20081023_0.C
+++ b/gcc/testsuite/g++.dg/lto/20081023_0.C
@@ -9,6 +9,8 @@ f (void)
func ();
}
+int
main()
{
+ return 0;
}
diff --git a/gcc/testsuite/g++.dg/lto/20081118_0.C b/gcc/testsuite/g++.dg/lto/20081118_0.C
index 7be32b6e2a7..c1f9dfc97a4 100644
--- a/gcc/testsuite/g++.dg/lto/20081118_0.C
+++ b/gcc/testsuite/g++.dg/lto/20081118_0.C
@@ -18,4 +18,5 @@ class foo : public object
int
foo::method(void)
{
+ return 0;
}
diff --git a/gcc/testsuite/g++.dg/lto/20081118_1.C b/gcc/testsuite/g++.dg/lto/20081118_1.C
index a1bf08186df..ee870a2eb75 100644
--- a/gcc/testsuite/g++.dg/lto/20081118_1.C
+++ b/gcc/testsuite/g++.dg/lto/20081118_1.C
@@ -18,4 +18,5 @@ bar*
bar::method (void)
{
quxx::method();
+ return 0;
}
diff --git a/gcc/testsuite/g++.dg/lto/20081120-1_0.C b/gcc/testsuite/g++.dg/lto/20081120-1_0.C
index 6827337787c..03a9740c091 100644
--- a/gcc/testsuite/g++.dg/lto/20081120-1_0.C
+++ b/gcc/testsuite/g++.dg/lto/20081120-1_0.C
@@ -4,6 +4,7 @@ extern "C"
{
extern __inline __attribute__((__gnu_inline__)) int pthread_equal(int, int)
{
+ return 0;
}
}
static __typeof(pthread_equal)
diff --git a/gcc/testsuite/g++.dg/lto/20081120-1_1.C b/gcc/testsuite/g++.dg/lto/20081120-1_1.C
index e7e24a58b76..9c5d9bd6f4c 100644
--- a/gcc/testsuite/g++.dg/lto/20081120-1_1.C
+++ b/gcc/testsuite/g++.dg/lto/20081120-1_1.C
@@ -2,6 +2,7 @@ extern "C"
{
extern __inline __attribute__((__gnu_inline__)) int pthread_equal(int, int)
{
+ return 0;
}
}
static __typeof(pthread_equal)
diff --git a/gcc/testsuite/g++.dg/lto/20081127_1.C b/gcc/testsuite/g++.dg/lto/20081127_1.C
index 6488ac8d797..63ae50c31ec 100644
--- a/gcc/testsuite/g++.dg/lto/20081127_1.C
+++ b/gcc/testsuite/g++.dg/lto/20081127_1.C
@@ -1,3 +1,3 @@
struct Foo { Foo(); };
Foo::Foo() { }
-main() { return 0; }
+int main() { return 0; }
diff --git a/gcc/testsuite/g++.dg/lto/20081217-2_0.C b/gcc/testsuite/g++.dg/lto/20081217-2_0.C
index a47b0b5781b..3bc6ba997a5 100644
--- a/gcc/testsuite/g++.dg/lto/20081217-2_0.C
+++ b/gcc/testsuite/g++.dg/lto/20081217-2_0.C
@@ -1,5 +1,5 @@
struct A {
- virtual int foo() {}
+ virtual int foo() { return 0; }
};
struct B {
virtual int f() {return 1; }
@@ -13,6 +13,7 @@ C::C()
{
}
+int
main()
{
C c;
diff --git a/gcc/testsuite/g++.dg/lto/20090303_0.C b/gcc/testsuite/g++.dg/lto/20090303_0.C
index 88bd6ad9beb..d9ec5a35fed 100644
--- a/gcc/testsuite/g++.dg/lto/20090303_0.C
+++ b/gcc/testsuite/g++.dg/lto/20090303_0.C
@@ -16,6 +16,8 @@ void Test() {
int int_set_;
foobar (&int_set_, &test_ints[j]);
}
+
+int
main()
{
Test();
diff --git a/gcc/testsuite/g++.dg/lto/20090311-1_0.C b/gcc/testsuite/g++.dg/lto/20090311-1_0.C
index 6d403272428..60c002fbfba 100644
--- a/gcc/testsuite/g++.dg/lto/20090311-1_0.C
+++ b/gcc/testsuite/g++.dg/lto/20090311-1_0.C
@@ -26,6 +26,8 @@ struct A {
A a;
extern int foo();
+
+int
main()
{
a.x = 4 + c.x;
diff --git a/gcc/testsuite/g++.dg/lto/20090312_0.C b/gcc/testsuite/g++.dg/lto/20090312_0.C
index b2222c2aa20..579b60e939e 100644
--- a/gcc/testsuite/g++.dg/lto/20090312_0.C
+++ b/gcc/testsuite/g++.dg/lto/20090312_0.C
@@ -5,6 +5,7 @@ extern "C" {
extern JSErrorCallback p;
};
+int
main()
{
if ( x == ONE && p == 0)
diff --git a/gcc/testsuite/g++.dg/lto/20090315_0.C b/gcc/testsuite/g++.dg/lto/20090315_0.C
index 930fb16e5cf..1cae572eeb8 100644
--- a/gcc/testsuite/g++.dg/lto/20090315_0.C
+++ b/gcc/testsuite/g++.dg/lto/20090315_0.C
@@ -6,4 +6,4 @@ struct Foo {
static Foo *foo_;
};
Foo *Foo::foo_;
-main() { return 0; }
+int main() { return 0; }
diff --git a/gcc/testsuite/g++.dg/lto/20091002-1_0.C b/gcc/testsuite/g++.dg/lto/20091002-1_0.C
index c63b079d77b..a59a0cb9505 100644
--- a/gcc/testsuite/g++.dg/lto/20091002-1_0.C
+++ b/gcc/testsuite/g++.dg/lto/20091002-1_0.C
@@ -1,6 +1,6 @@
// { dg-lto-do link }
// { dg-require-effective-target fpic }
-// { dg-lto-options {{-fPIC -flto}} }
+// { dg-lto-options {{-fPIC -flto -Wno-return-type}} }
// { dg-extra-ld-options "-fPIC -r -nostdlib" }
namespace std __attribute__ ((__visibility__ ("default")))
diff --git a/gcc/testsuite/g++.dg/lto/20091002-2_0.C b/gcc/testsuite/g++.dg/lto/20091002-2_0.C
index bf936bf53a3..12a1596b3e2 100644
--- a/gcc/testsuite/g++.dg/lto/20091002-2_0.C
+++ b/gcc/testsuite/g++.dg/lto/20091002-2_0.C
@@ -4,7 +4,7 @@
// { dg-extra-ld-options "-fPIC -r -nostdlib" }
class DataArray {
- int max() const { }
+ int max() const { return 0; }
};
template < class HashItem >
class DataHashTable {
diff --git a/gcc/testsuite/g++.dg/lto/20091002-3_0.C b/gcc/testsuite/g++.dg/lto/20091002-3_0.C
index 4bff78b4937..0d9afc44c28 100644
--- a/gcc/testsuite/g++.dg/lto/20091002-3_0.C
+++ b/gcc/testsuite/g++.dg/lto/20091002-3_0.C
@@ -5,7 +5,7 @@
template < class T >
class DataArray {
- int max() const { }
+ int max() const { return 0; }
};
class Name { };
class DataHashTable {
diff --git a/gcc/testsuite/g++.dg/lto/20091004-1_0.C b/gcc/testsuite/g++.dg/lto/20091004-1_0.C
index 649e35cb80d..536ae46dadf 100644
--- a/gcc/testsuite/g++.dg/lto/20091004-1_0.C
+++ b/gcc/testsuite/g++.dg/lto/20091004-1_0.C
@@ -1,6 +1,6 @@
// { dg-lto-do link }
// { dg-require-effective-target fpic }
-// { dg-lto-options {{-fPIC -O -flto}} }
+// { dg-lto-options {{-fPIC -O -flto -Wno-return-type}} }
typedef double Real;
class Vector {
diff --git a/gcc/testsuite/g++.dg/lto/20091004-2_0.C b/gcc/testsuite/g++.dg/lto/20091004-2_0.C
index cc1204e6f7e..40ef136514b 100644
--- a/gcc/testsuite/g++.dg/lto/20091004-2_0.C
+++ b/gcc/testsuite/g++.dg/lto/20091004-2_0.C
@@ -1,6 +1,6 @@
// { dg-lto-do link }
// { dg-require-effective-target fpic }
-// { dg-lto-options {{-fPIC -O -flto}} }
+// { dg-lto-options {{-fPIC -O -flto -Wno-return-type}} }
typedef double Real;
class Vector {
diff --git a/gcc/testsuite/g++.dg/lto/20091004-3_1.C b/gcc/testsuite/g++.dg/lto/20091004-3_1.C
index 641c7495b4f..f46b8fc6c11 100644
--- a/gcc/testsuite/g++.dg/lto/20091004-3_1.C
+++ b/gcc/testsuite/g++.dg/lto/20091004-3_1.C
@@ -12,5 +12,6 @@ All_Torus_Intersections(void)
VECTOR D;
VLength(len, D);
VLength(len, D);
+ return 0;
}
diff --git a/gcc/testsuite/g++.dg/lto/20100721-1_0.C b/gcc/testsuite/g++.dg/lto/20100721-1_0.C
index 09132e5992b..e39184b76d1 100644
--- a/gcc/testsuite/g++.dg/lto/20100721-1_0.C
+++ b/gcc/testsuite/g++.dg/lto/20100721-1_0.C
@@ -1,6 +1,6 @@
/* { dg-lto-do assemble } */
-static inline int __gthread_active_p (void) { }
+static inline int __gthread_active_p (void) { return 0; }
template <int rank, int dim> class Tensor;
template <int dimension> struct G;
template <int dim> class T {
diff --git a/gcc/testsuite/g++.dg/lto/20101010-1_0.C b/gcc/testsuite/g++.dg/lto/20101010-1_0.C
index 6eb40efc868..8f694c78aa7 100644
--- a/gcc/testsuite/g++.dg/lto/20101010-1_0.C
+++ b/gcc/testsuite/g++.dg/lto/20101010-1_0.C
@@ -1,4 +1,5 @@
// { dg-lto-do link }
+// { dg-lto-options "-Wno-return-type" }
typedef long size_t;
template < class, class > struct pair
diff --git a/gcc/testsuite/g++.dg/lto/20101010-2_0.C b/gcc/testsuite/g++.dg/lto/20101010-2_0.C
index c68bcd63238..a26956f7fc7 100644
--- a/gcc/testsuite/g++.dg/lto/20101010-2_0.C
+++ b/gcc/testsuite/g++.dg/lto/20101010-2_0.C
@@ -1,4 +1,5 @@
// { dg-lto-do link }
+// { dg-lto-options "-Wno-return-type" }
typedef int size_t;
template < size_t _Nw > struct _Base_bitset
diff --git a/gcc/testsuite/g++.dg/lto/pr45679-1_0.C b/gcc/testsuite/g++.dg/lto/pr45679-1_0.C
index 349f5c2c1ef..0dc5fd0dcb5 100644
--- a/gcc/testsuite/g++.dg/lto/pr45679-1_0.C
+++ b/gcc/testsuite/g++.dg/lto/pr45679-1_0.C
@@ -1,5 +1,5 @@
// { dg-lto-do link }
-// { dg-lto-options {{-O3 -Wno-multichar}} }
+// { dg-lto-options {{-O3 -Wno-multichar -Wno-return-type }} }
// { dg-extra-ld-options "-flto -flto-partition=1to1 -r -nostdlib" }
extern "C" {
diff --git a/gcc/testsuite/g++.dg/lto/pr45679-1_1.C b/gcc/testsuite/g++.dg/lto/pr45679-1_1.C
index c5e2db061fb..9aa92fa540e 100644
--- a/gcc/testsuite/g++.dg/lto/pr45679-1_1.C
+++ b/gcc/testsuite/g++.dg/lto/pr45679-1_1.C
@@ -1,4 +1,4 @@
- extern "C" {
+ extern "C" {
typedef struct _IO_FILE FILE;
extern struct _IO_FILE *stderr;
extern int fprintf (FILE *__restrict __stream, __const char *__restrict __format, ...);
diff --git a/gcc/testsuite/g++.dg/lto/pr45679-2_0.C b/gcc/testsuite/g++.dg/lto/pr45679-2_0.C
index 549741902ba..b88ad92f539 100644
--- a/gcc/testsuite/g++.dg/lto/pr45679-2_0.C
+++ b/gcc/testsuite/g++.dg/lto/pr45679-2_0.C
@@ -1,6 +1,7 @@
// { dg-lto-do link }
-// { dg-lto-options {{-O3 -Wno-multichar}} }
+// { dg-lto-options {{-O3 -Wno-multichar -Wno-return-type}} }
// { dg-extra-ld-options "-flto -flto-partition=1to1 -r -nostdlib" }
+
extern "C" {
typedef struct {
union {
diff --git a/gcc/testsuite/g++.dg/lto/pr48042_0.C b/gcc/testsuite/g++.dg/lto/pr48042_0.C
index 00b3428e9f1..4ca1a0cebfe 100644
--- a/gcc/testsuite/g++.dg/lto/pr48042_0.C
+++ b/gcc/testsuite/g++.dg/lto/pr48042_0.C
@@ -10,5 +10,5 @@ class B:public A {
};
int B::x() {
+ return 0;
}
-
diff --git a/gcc/testsuite/g++.dg/lto/pr51650-1_0.C b/gcc/testsuite/g++.dg/lto/pr51650-1_0.C
index 4c9ef3c8c48..1c37e629e1d 100644
--- a/gcc/testsuite/g++.dg/lto/pr51650-1_0.C
+++ b/gcc/testsuite/g++.dg/lto/pr51650-1_0.C
@@ -15,5 +15,6 @@ int
fn ()
{
C::m ();
+ return 0;
}
-int main() {}
+int main() { return 0; }
diff --git a/gcc/testsuite/g++.dg/lto/pr51650-3_0.C b/gcc/testsuite/g++.dg/lto/pr51650-3_0.C
index e84d555a0fb..ff55e4aab4c 100644
--- a/gcc/testsuite/g++.dg/lto/pr51650-3_0.C
+++ b/gcc/testsuite/g++.dg/lto/pr51650-3_0.C
@@ -16,5 +16,6 @@ int
fn ()
{
C::m ();
+ return 0;
}
-int main() {}
+int main() { return 0; }
diff --git a/gcc/testsuite/g++.dg/lto/pr63270_1.C b/gcc/testsuite/g++.dg/lto/pr63270_1.C
index a842e5cfcc6..b5aab3508cf 100644
--- a/gcc/testsuite/g++.dg/lto/pr63270_1.C
+++ b/gcc/testsuite/g++.dg/lto/pr63270_1.C
@@ -48,6 +48,7 @@ namespace v8
};
int PreParser::ParseMemberWithNewPrefixesExpression ( bool * )
{
+ return 0;
}
}
}
diff --git a/gcc/testsuite/g++.dg/lto/pr65193_0.C b/gcc/testsuite/g++.dg/lto/pr65193_0.C
index d778fcabb7f..ceebe51069d 100644
--- a/gcc/testsuite/g++.dg/lto/pr65193_0.C
+++ b/gcc/testsuite/g++.dg/lto/pr65193_0.C
@@ -1,6 +1,6 @@
/* { dg-lto-do link } */
/* { dg-require-effective-target fpic } */
-/* { dg-lto-options {{-fPIC -r -nostdlib -flto -O2 -g}} } */
+/* { dg-lto-options {{-fPIC -r -nostdlib -flto -O2 -g -Wno-return-type}} } */
void frexp (int, int *);
namespace std
diff --git a/gcc/testsuite/g++.dg/lto/pr65302_0.C b/gcc/testsuite/g++.dg/lto/pr65302_0.C
index 2298afd9d96..83e1194fb10 100644
--- a/gcc/testsuite/g++.dg/lto/pr65302_0.C
+++ b/gcc/testsuite/g++.dg/lto/pr65302_0.C
@@ -1,5 +1,5 @@
// { dg-lto-do link }
-// { dg-lto-options { { -flto -O2 } } }
+// { dg-lto-options { { -flto -O2 -Wno-return-type } } }
// { dg-extra-ld-options "-r -nostdlib -O0" }
class CstringStorageReference {
diff --git a/gcc/testsuite/g++.dg/lto/pr65316_0.C b/gcc/testsuite/g++.dg/lto/pr65316_0.C
index cccd0db7d8e..2b9360e29fe 100644
--- a/gcc/testsuite/g++.dg/lto/pr65316_0.C
+++ b/gcc/testsuite/g++.dg/lto/pr65316_0.C
@@ -1,6 +1,7 @@
// { dg-lto-do link }
-// { dg-lto-options { { -flto -std=c++11 -g2 -fno-lto-odr-type-merging -O2 } } }
+// { dg-lto-options { { -flto -std=c++11 -g2 -fno-lto-odr-type-merging -O2 -Wno-return-type } } }
// { dg-extra-ld-options "-r -nostdlib -O2 -fno-lto-odr-type-merging" }
+
namespace std
{
typedef long unsigned int size_t;
diff --git a/gcc/testsuite/g++.dg/lto/pr65475c_0.C b/gcc/testsuite/g++.dg/lto/pr65475c_0.C
index 8e1d8bcc83d..73686918c2c 100644
--- a/gcc/testsuite/g++.dg/lto/pr65475c_0.C
+++ b/gcc/testsuite/g++.dg/lto/pr65475c_0.C
@@ -1,6 +1,7 @@
/* { dg-lto-do link } */
-/* { dg-lto-options "-O2 -w" } */
/* { dg-extra-ld-options { -O2 -Wno-odr -r -nostdlib } } */
+/* { dg-lto-options { "-O2 -w -Wno-return-type" } } */
+
namespace std
{
template < class > struct char_traits;
diff --git a/gcc/testsuite/g++.dg/lto/pr65549_0.C b/gcc/testsuite/g++.dg/lto/pr65549_0.C
index 889cc994a58..d3ed1dca158 100644
--- a/gcc/testsuite/g++.dg/lto/pr65549_0.C
+++ b/gcc/testsuite/g++.dg/lto/pr65549_0.C
@@ -1,5 +1,5 @@
// { dg-lto-do link }
-// { dg-lto-options { { -std=gnu++14 -flto -g } { -std=gnu++14 -flto -g -O2 -fno-inline -flto-partition=max } } }
+// { dg-lto-options { { -std=gnu++14 -flto -g -Wno-return-type } { -std=gnu++14 -flto -g -O2 -fno-inline -flto-partition=max -Wno-return-type } } }
// { dg-extra-ld-options "-r -nostdlib" }
namespace std {
@@ -135,10 +135,12 @@ public:
then(0, [] {});
}
} clients;
-main() {
+int main() {
B app;
app.run(0, 0, [&] {
auto config = app.configuration()[0].as<std::string>();
clients.then([] {});
});
+
+ return 0;
}
diff --git a/gcc/testsuite/g++.dg/lto/pr69077_0.C b/gcc/testsuite/g++.dg/lto/pr69077_0.C
index 6a81d0e3a5c..10cb05917b9 100644
--- a/gcc/testsuite/g++.dg/lto/pr69077_0.C
+++ b/gcc/testsuite/g++.dg/lto/pr69077_0.C
@@ -11,4 +11,4 @@ struct cStdDev
struct cWeightedStdDev : public cStdDev {
virtual int netPack();
};
-int cWeightedStdDev::netPack() { }
+int cWeightedStdDev::netPack() { return 0; }
diff --git a/gcc/testsuite/g++.dg/lto/pr69589_0.C b/gcc/testsuite/g++.dg/lto/pr69589_0.C
index 599d5d44960..c6f72c4090b 100644
--- a/gcc/testsuite/g++.dg/lto/pr69589_0.C
+++ b/gcc/testsuite/g++.dg/lto/pr69589_0.C
@@ -1,5 +1,5 @@
// { dg-lto-do link }
-// { dg-lto-options "-O2 -rdynamic" }
+// { dg-lto-options { "-O2 -rdynamic -Wno-return-type" } }
// { dg-extra-ld-options "-r -nostdlib" }
// { dg-require-effective-target rdynamic }
diff --git a/gcc/testsuite/g++.dg/missing-return.C b/gcc/testsuite/g++.dg/missing-return.C
new file mode 100644
index 00000000000..f7fcfebb2b4
--- /dev/null
+++ b/gcc/testsuite/g++.dg/missing-return.C
@@ -0,0 +1,8 @@
+/* { dg-do compile } */
+/* { dg-options "-Wreturn-type -fdump-tree-optimized" } */
+
+int foo(int a)
+{
+} /* { dg-warning "no return statement" } */
+
+/* { dg-final { scan-tree-dump "__builtin_unreachable" "optimized" } } */
diff --git a/gcc/testsuite/g++.dg/opt/20050511-1.C b/gcc/testsuite/g++.dg/opt/20050511-1.C
index a8929030af4..a840fb0db8e 100644
--- a/gcc/testsuite/g++.dg/opt/20050511-1.C
+++ b/gcc/testsuite/g++.dg/opt/20050511-1.C
@@ -53,6 +53,8 @@ UINT32 bar (const C * sPtr)
if (a.xy[0] != ((SINT16) 0xffff << 2))
abort ();
+
+ return 0;
}
int main()
diff --git a/gcc/testsuite/g++.dg/opt/combine.C b/gcc/testsuite/g++.dg/opt/combine.C
index d01ae78a4bc..a1325a0dca9 100644
--- a/gcc/testsuite/g++.dg/opt/combine.C
+++ b/gcc/testsuite/g++.dg/opt/combine.C
@@ -36,6 +36,7 @@ class QVariant { };
template<typename T> inline T qvariant_cast (const QVariant &v)
{
const int vid = qMetaTypeId<T> ((0)) ;
+ return T();
};
class QScriptContext
{
@@ -70,4 +71,5 @@ QScriptValue QScriptDebuggerBackendPrivate::trace (QScriptContext *context)
{
QScriptValue data = context->callee () ;
QScriptDebuggerBackendPrivate *self = qscriptvalue_cast<QScriptDebuggerBackendPrivate*> (data) ;
+ return QScriptValue();
}
diff --git a/gcc/testsuite/g++.dg/opt/complex3.C b/gcc/testsuite/g++.dg/opt/complex3.C
index 9a3fdf3f0d2..ee1b616e403 100644
--- a/gcc/testsuite/g++.dg/opt/complex3.C
+++ b/gcc/testsuite/g++.dg/opt/complex3.C
@@ -7,7 +7,7 @@ _Complex float g();
_Complex float h()throw();
void i(float)throw();
-float j(void)
+void j(void)
{
_Complex float x = h();
try
diff --git a/gcc/testsuite/g++.dg/opt/const3.C b/gcc/testsuite/g++.dg/opt/const3.C
index c7c0a1645de..969df91e6e1 100644
--- a/gcc/testsuite/g++.dg/opt/const3.C
+++ b/gcc/testsuite/g++.dg/opt/const3.C
@@ -30,8 +30,8 @@ struct A
int i;
-int A::foo1(void *ios, const char *str) { }
-int A::foo2(int v, const Type t) { i=0; }
+int A::foo1(void *ios, const char *str) { return 0; }
+int A::foo2(int v, const Type t) { i=0; return 0; }
int main()
{
diff --git a/gcc/testsuite/g++.dg/opt/covariant1.C b/gcc/testsuite/g++.dg/opt/covariant1.C
index e57cf4c6be0..b8a93177946 100644
--- a/gcc/testsuite/g++.dg/opt/covariant1.C
+++ b/gcc/testsuite/g++.dg/opt/covariant1.C
@@ -9,7 +9,7 @@ bar (int x)
}
struct S { S () {}; virtual ~S () {}; };
-struct T { virtual T *foo (int) {}; };
+struct T { virtual T *foo (int) { return 0; }; };
struct V : virtual S, virtual T {};
struct V v;
struct U : public S, public T
diff --git a/gcc/testsuite/g++.dg/opt/declone3.C b/gcc/testsuite/g++.dg/opt/declone3.C
index 26f3b5214c9..3dd939a961c 100644
--- a/gcc/testsuite/g++.dg/opt/declone3.C
+++ b/gcc/testsuite/g++.dg/opt/declone3.C
@@ -14,4 +14,4 @@ struct Item : Object, virtual A {
}
bool m_fn1();
};
-bool Item::m_fn1() {}
+bool Item::m_fn1() { return true; }
diff --git a/gcc/testsuite/g++.dg/opt/dump1.C b/gcc/testsuite/g++.dg/opt/dump1.C
index f74d0247832..f813044456c 100644
--- a/gcc/testsuite/g++.dg/opt/dump1.C
+++ b/gcc/testsuite/g++.dg/opt/dump1.C
@@ -2,6 +2,7 @@
// { dg-options "-O2 -fno-inline -fdump-final-insns" }
// { dg-do compile { target c++11 } }
// { dg-final cleanup-final-insns-dump }
+// { dg-additional-options "-Wno-return-type" }
namespace std
{
@@ -348,7 +349,7 @@ namespace std __attribute__ ((__visibility__ ("default")))
template<typename _Tp>
inline reference_wrapper<_Tp>
ref(_Tp& __t) noexcept
- {}
+ { return reference_wrapper<_Tp>(); }
template<typename _Tp>
struct _Maybe_wrap_member_pointer
{
diff --git a/gcc/testsuite/g++.dg/opt/inline15.C b/gcc/testsuite/g++.dg/opt/inline15.C
index 5da3a610519..9aa1b73c863 100644
--- a/gcc/testsuite/g++.dg/opt/inline15.C
+++ b/gcc/testsuite/g++.dg/opt/inline15.C
@@ -16,7 +16,7 @@ struct A
};
struct C : public A
{
- virtual int bar () { }
+ virtual int bar () { return 0; }
};
struct D : public C
{
@@ -33,6 +33,7 @@ struct E
static unsigned *
fn2 ()
{
+ return 0;
}
void
diff --git a/gcc/testsuite/g++.dg/opt/local1.C b/gcc/testsuite/g++.dg/opt/local1.C
index 9cecaee6f2a..a63afc24f16 100644
--- a/gcc/testsuite/g++.dg/opt/local1.C
+++ b/gcc/testsuite/g++.dg/opt/local1.C
@@ -8,7 +8,7 @@ struct Outer {
inline void h(const Outer &o)
{
struct Local : public Outer::Inner {
- virtual bool f() {};
+ virtual bool f() { return true; };
};
Local l;
o.g(l);
diff --git a/gcc/testsuite/g++.dg/opt/memcpy1.C b/gcc/testsuite/g++.dg/opt/memcpy1.C
index e2b1dd2cdf0..66411cdda39 100644
--- a/gcc/testsuite/g++.dg/opt/memcpy1.C
+++ b/gcc/testsuite/g++.dg/opt/memcpy1.C
@@ -63,6 +63,7 @@ namespace CS
// the multiplication below to produce a very large number
// in excess of the maximum possible object size (SIZE_MAX/2).
__builtin_memcpy (this->OutP, InP, OutV * sizeof (csVector2)); // { dg-warning "specified size \[0-9\]+ exceeds maximum object size" }
+ return 0;
}
};
}
@@ -79,4 +80,5 @@ csBoxClipper::Clip (csVector2 * InPolygon, size_t InCount,
InPolygon, InCount,
OutPolygon);
uint8 Clipped = boxClip.Clip ();
+ return 0;
}
diff --git a/gcc/testsuite/g++.dg/opt/new1.C b/gcc/testsuite/g++.dg/opt/new1.C
index dbcc0f8517a..d0f7d95b655 100644
--- a/gcc/testsuite/g++.dg/opt/new1.C
+++ b/gcc/testsuite/g++.dg/opt/new1.C
@@ -15,8 +15,8 @@ namespace QScript {
namespace Ecma {
class Core {
public:
- inline QScriptEnginePrivate *engine() const { }
- inline QScriptClassInfo *classInfo() const { }
+ inline QScriptEnginePrivate *engine() const { return 0; }
+ inline QScriptClassInfo *classInfo() const { return 0; }
QScriptValueImpl publicPrototype;
};
class Boolean: public Core {
diff --git a/gcc/testsuite/g++.dg/opt/nrv8.C b/gcc/testsuite/g++.dg/opt/nrv8.C
index 19999a18824..ca39e7e0f32 100644
--- a/gcc/testsuite/g++.dg/opt/nrv8.C
+++ b/gcc/testsuite/g++.dg/opt/nrv8.C
@@ -20,7 +20,7 @@ A bar()
return l;
}
-main()
+int main()
{
A a = bar ();
diff --git a/gcc/testsuite/g++.dg/opt/pr23299.C b/gcc/testsuite/g++.dg/opt/pr23299.C
index 94a414aa5f7..f14750eb306 100644
--- a/gcc/testsuite/g++.dg/opt/pr23299.C
+++ b/gcc/testsuite/g++.dg/opt/pr23299.C
@@ -6,15 +6,15 @@ extern "C" void abort ();
struct A
{
- virtual int a () {}
+ virtual int a () { return 0; }
};
struct B : public A
{
- virtual int b () {}
+ virtual int b () { return 1; }
};
struct C : public A
{
- virtual int c () {}
+ virtual int c () { return 2; }
};
struct D
{
@@ -50,6 +50,8 @@ E::c ()
if (x > 10)
throw 1;
x |= 2;
+
+ return x;
}
int
diff --git a/gcc/testsuite/g++.dg/opt/pr27826.C b/gcc/testsuite/g++.dg/opt/pr27826.C
index 5e40f1746f9..7de08e08133 100644
--- a/gcc/testsuite/g++.dg/opt/pr27826.C
+++ b/gcc/testsuite/g++.dg/opt/pr27826.C
@@ -5,7 +5,7 @@ struct Geometry
{
int type:16;
};
-struct Geometry get() {};
+struct Geometry get() { return Geometry(); };
int f()
{
struct Geometry test;
diff --git a/gcc/testsuite/g++.dg/opt/pr44919.C b/gcc/testsuite/g++.dg/opt/pr44919.C
index a4aeec822bc..1916a2b359d 100644
--- a/gcc/testsuite/g++.dg/opt/pr44919.C
+++ b/gcc/testsuite/g++.dg/opt/pr44919.C
@@ -1,5 +1,6 @@
// { dg-do compile { target powerpc*-*-* ia64-*-* i?86-*-* x86_64-*-* } }
// { dg-options "-O3 -fselective-scheduling2" }
+// { dg-additional-options "-Wno-return-type" }
namespace std {
@@ -9,7 +10,7 @@ template<typename _Tp> class new_allocator { public: typedef size_t size_type; t
template<typename _Tp> class allocator: public new_allocator<_Tp> { public: typedef size_t size_type; template<typename _Tp1> struct rebind { typedef allocator<_Tp1> other; }; };
class back_insert_iterator { };
-template<typename _Container> back_insert_iterator back_inserter(_Container& __x) { };
+template<typename _Container> back_insert_iterator back_inserter(_Container& __x) { return back_insert_iterator(); };
class vector { };
@@ -99,6 +100,7 @@ namespace internal {
};
template <class K> Object intersection( const typename K::Segment_2 &seg, const typename K::Iso_rectangle_2 &iso, const K&) {
typedef Segment_2_Iso_rectangle_2_pair<K> is_t; is_t ispair(&seg, &iso); switch (ispair.intersection_type()) { }
+ return Object();
}
template <class K> typename Segment_2_Iso_rectangle_2_pair<K>::Intersection_results Segment_2_Iso_rectangle_2_pair<K>::intersection_type() const {
typedef typename K::RT RT;
diff --git a/gcc/testsuite/g++.dg/opt/pr46640.C b/gcc/testsuite/g++.dg/opt/pr46640.C
index 1c25778e8da..b55afd4937e 100644
--- a/gcc/testsuite/g++.dg/opt/pr46640.C
+++ b/gcc/testsuite/g++.dg/opt/pr46640.C
@@ -7,6 +7,7 @@ struct QBasicAtomicInt
bool deref ()
{
asm volatile ("":"=m" (i), "=qm" (j));
+ return true;
}
};
@@ -41,4 +42,6 @@ bool makeDir (unsigned len)
return false;
i = pos;
}
+
+ return true;
}
diff --git a/gcc/testsuite/g++.dg/opt/pr47615.C b/gcc/testsuite/g++.dg/opt/pr47615.C
index f8dbcf7e824..bc467b0c883 100644
--- a/gcc/testsuite/g++.dg/opt/pr47615.C
+++ b/gcc/testsuite/g++.dg/opt/pr47615.C
@@ -1,5 +1,6 @@
// { dg-do compile }
// { dg-options "-O -fstrict-aliasing -ftree-pre -fno-tree-fre -fno-tree-sra" }
+// { dg-additional-options "-Wno-return-type" }
typedef __SIZE_TYPE__ size_t;
namespace std
@@ -434,7 +435,7 @@ template < typename Const_Node_Iterator, typename Node_Iterator, typename, typen
return ((m_p_nd->m_p_right));
}
bool operator == (bin_search_tree_const_node_it_)
- {}
+ { return true; }
node_pointer m_p_nd;
};
template
@@ -704,7 +705,7 @@ typedef
int,
null_mapped_type,
less < int >, rb_tree_tag, tree_order_statistics_node_update > set_t;
-main ()
+int main ()
{
set_t s;
s.insert (12);
diff --git a/gcc/testsuite/g++.dg/opt/pr55329.C b/gcc/testsuite/g++.dg/opt/pr55329.C
index 3646785d12e..28a8a4d72fa 100644
--- a/gcc/testsuite/g++.dg/opt/pr55329.C
+++ b/gcc/testsuite/g++.dg/opt/pr55329.C
@@ -19,10 +19,10 @@ struct A
int *a;
A ();
~A () { a3 (); }
- int a1 (int * p) { if (!p) f3 (); f2 (p); }
+ int a1 (int * p) { if (!p) f3 (); f2 (p); return 0; }
int *a2 ();
void a3 () { if (*a) a1 (a); }
- int a4 (int x) { if (*a) f4 (); *a2 () += x; }
+ int a4 (int x) { if (*a) f4 (); *a2 () += x; return 0; }
};
struct B : A
diff --git a/gcc/testsuite/g++.dg/opt/pr61456.C b/gcc/testsuite/g++.dg/opt/pr61456.C
index 14a118b5720..51a1e618cb3 100644
--- a/gcc/testsuite/g++.dg/opt/pr61456.C
+++ b/gcc/testsuite/g++.dg/opt/pr61456.C
@@ -14,6 +14,7 @@ typedef decltype (&Funcs::f1) pfunc;
static int Set (Funcs * f, const pfunc & fp)
{
(f->*fp) ();
+ return 0;
}
void
diff --git a/gcc/testsuite/g++.dg/opt/pr65003.C b/gcc/testsuite/g++.dg/opt/pr65003.C
index 5d131afaedb..333cdbcc477 100644
--- a/gcc/testsuite/g++.dg/opt/pr65003.C
+++ b/gcc/testsuite/g++.dg/opt/pr65003.C
@@ -26,6 +26,6 @@ struct F : D
F (int &, const int &, const A &);
bool foo (int &, bool) const;
};
-bool D::foo (int &, bool) const {}
+bool D::foo (int &, bool) const { return true; }
F::F (int &, const int &, const A &) {}
-bool F::foo (int &, bool) const {}
+bool F::foo (int &, bool) const { return false; }
diff --git a/gcc/testsuite/g++.dg/opt/pr65554.C b/gcc/testsuite/g++.dg/opt/pr65554.C
index 1519964d1fe..cae2beafe02 100644
--- a/gcc/testsuite/g++.dg/opt/pr65554.C
+++ b/gcc/testsuite/g++.dg/opt/pr65554.C
@@ -24,6 +24,7 @@ namespace std
{
int _Num = p2 - p1;
__builtin_memmove (0, p1, sizeof (_Tp) * _Num);
+ return 0;
}
};
template <int, typename _II, typename _OI> void __copy_move_a (_II p1, _II p2, _OI p3)
@@ -68,6 +69,7 @@ namespace std
template <typename _InputIterator, typename _ForwardIterator> static _ForwardIterator __uninit_copy (_InputIterator p1, _InputIterator p2, _ForwardIterator p3)
{
copy (p1, p2, p3);
+ return _ForwardIterator();
}
};
template <typename _InputIterator, typename _ForwardIterator> void
diff --git a/gcc/testsuite/g++.dg/opt/pr69432.C b/gcc/testsuite/g++.dg/opt/pr69432.C
index 1f23f2cedd6..d024ece4cba 100644
--- a/gcc/testsuite/g++.dg/opt/pr69432.C
+++ b/gcc/testsuite/g++.dg/opt/pr69432.C
@@ -26,7 +26,7 @@ void f4 (S, U);
struct C
{
template <typename S, typename T, typename U>
- static S f5 (S x, T y, U z) { f2 (x, y, z); }
+ static S f5 (S x, T y, U z) { f2 (x, y, z); return S(); }
};
template <typename S, typename T, typename U>
diff --git a/gcc/testsuite/g++.dg/opt/pr78373.C b/gcc/testsuite/g++.dg/opt/pr78373.C
index 9ceef1cc732..b332691ec0b 100644
--- a/gcc/testsuite/g++.dg/opt/pr78373.C
+++ b/gcc/testsuite/g++.dg/opt/pr78373.C
@@ -12,11 +12,13 @@ struct D : B {
static int m_fn3(int, int, int, A) {
D &self = singleton;
self.m_fn2();
+ return 0;
}
static D singleton;
};
template <typename, typename> struct C { bool m_fn4() const; };
template <typename Base, typename Traits> bool C<Base, Traits>::m_fn4() const {
Traits::m_fn3(0, 0, 0, Base::singleton);
+ return true;
}
template struct C<A, D>;
diff --git a/gcc/testsuite/g++.dg/opt/pr79267.C b/gcc/testsuite/g++.dg/opt/pr79267.C
index 177eee6c6f7..69dc6cb0251 100644
--- a/gcc/testsuite/g++.dg/opt/pr79267.C
+++ b/gcc/testsuite/g++.dg/opt/pr79267.C
@@ -45,12 +45,12 @@ D bar ();
template <typename T> struct L
{
struct K { K (int); void operator() (int *) { bar ().q (); } };
- static J<T> bp () { bq (0); }
+ static J<T> bp () { bq (0); return J<T>(); }
template <typename br> static void bq (br) { J<T> (0, K (0)); }
};
struct F
{
- virtual J<int> x (int) { foo (0, 0, 0); J<bs<> > (L<bs<> >::bp ()); }
+ virtual J<int> x (int) { foo (0, 0, 0); J<bs<> > (L<bs<> >::bp ()); return J<int>(); }
};
void
diff --git a/gcc/testsuite/g++.dg/opt/pr82159-2.C b/gcc/testsuite/g++.dg/opt/pr82159-2.C
index f153c29ddac..c9797105562 100644
--- a/gcc/testsuite/g++.dg/opt/pr82159-2.C
+++ b/gcc/testsuite/g++.dg/opt/pr82159-2.C
@@ -1,6 +1,7 @@
// PR c++/82159
// { dg-do compile }
// { dg-options "" }
+// { dg-additional-options "-Wno-return-type" }
template <typename T> struct D { T e; };
struct F : D<int[0]> {
diff --git a/gcc/testsuite/g++.dg/opt/pr82929.C b/gcc/testsuite/g++.dg/opt/pr82929.C
new file mode 100644
index 00000000000..572f4914815
--- /dev/null
+++ b/gcc/testsuite/g++.dg/opt/pr82929.C
@@ -0,0 +1,30 @@
+// PR tree-optimization/82929
+// { dg-do compile }
+// { dg-options "-O2" }
+
+template <int _Nw> struct A {
+ long _M_w[_Nw];
+ void m_fn1(A p1) {
+ for (int a = 0;; a++)
+ _M_w[a] &= p1._M_w[a];
+ }
+ void m_fn2() {
+ for (int b = 0; b < _Nw; b++)
+ _M_w[b] = ~_M_w[b];
+ }
+};
+template <int _Nb> struct C : A<_Nb / (8 * 8)> {
+ void operator&=(C p1) { this->m_fn1(p1); }
+ C m_fn3() {
+ this->m_fn2();
+ return *this;
+ }
+ C operator~() { return C(*this).m_fn3(); }
+};
+struct B {
+ C<192> Value;
+};
+void fn1(C<192> &p1) {
+ B c;
+ p1 &= ~c.Value;
+}
diff --git a/gcc/testsuite/g++.dg/other/array3.C b/gcc/testsuite/g++.dg/other/array3.C
index ce3641e8ccd..3e6f7d1ad07 100644
--- a/gcc/testsuite/g++.dg/other/array3.C
+++ b/gcc/testsuite/g++.dg/other/array3.C
@@ -10,5 +10,6 @@ extern unsigned char xvalue_store[];
bool reserve (int want)
{
new unsigned char[want];
+ return true;
}
unsigned char xvalue_store[257];
diff --git a/gcc/testsuite/g++.dg/other/copy2.C b/gcc/testsuite/g++.dg/other/copy2.C
index 335cab8d19f..df480f06ecc 100644
--- a/gcc/testsuite/g++.dg/other/copy2.C
+++ b/gcc/testsuite/g++.dg/other/copy2.C
@@ -10,11 +10,15 @@ class A
{
public:
int i;
- A &operator =(const A &i)
- {
+ A &operator =(const A &i);
+};
+
+A a;
+
+A& A::operator=(const A &i) {
status = 0;
+ return a;
}
-};
class B
{
diff --git a/gcc/testsuite/g++.dg/other/crash-5.C b/gcc/testsuite/g++.dg/other/crash-5.C
index 25a70b7df36..81e5bac8a25 100644
--- a/gcc/testsuite/g++.dg/other/crash-5.C
+++ b/gcc/testsuite/g++.dg/other/crash-5.C
@@ -9,7 +9,7 @@ struct int_less_than {};
void assert_fail (const char*);
-int f(const set<int, int_less_than>&)
+void f(const set<int, int_less_than>&)
{
assert_fail (__PRETTY_FUNCTION__);
diff --git a/gcc/testsuite/g++.dg/other/crash-8.C b/gcc/testsuite/g++.dg/other/crash-8.C
index dcae7da76c7..b7f56fc9bd7 100644
--- a/gcc/testsuite/g++.dg/other/crash-8.C
+++ b/gcc/testsuite/g++.dg/other/crash-8.C
@@ -97,6 +97,8 @@ public:
mapped_type& operator[](const key_type& __k) {
insert(value_type(__k, mapped_type()));
+ static mapped_type a;
+ return a;
}
};
diff --git a/gcc/testsuite/g++.dg/other/error34.C b/gcc/testsuite/g++.dg/other/error34.C
index f5ced1adfdc..f6b44888f48 100644
--- a/gcc/testsuite/g++.dg/other/error34.C
+++ b/gcc/testsuite/g++.dg/other/error34.C
@@ -2,5 +2,5 @@
// { dg-do compile }
// { dg-options "" }
-S () : str(__PRETTY_FUNCTION__) {} // { dg-error "forbids declaration" "decl" }
+S () : str(__PRETTY_FUNCTION__) { return S(); } // { dg-error "forbids declaration" "decl" }
// { dg-error "only constructors" "constructor" { target *-*-* } .-1 }
diff --git a/gcc/testsuite/g++.dg/other/i386-8.C b/gcc/testsuite/g++.dg/other/i386-8.C
index cf833a524de..a9465ef12bc 100644
--- a/gcc/testsuite/g++.dg/other/i386-8.C
+++ b/gcc/testsuite/g++.dg/other/i386-8.C
@@ -19,4 +19,6 @@ foo (float *x, short *y)
__m64 c = _mm_cvtps_pi16 (b);
__builtin_memcpy (y, &c, sizeof (short) * 4);
y[0] = bar (y[0]);
+
+ return 0;
}
diff --git a/gcc/testsuite/g++.dg/other/pr22003.C b/gcc/testsuite/g++.dg/other/pr22003.C
index 222ccac7beb..521e3f9b305 100644
--- a/gcc/testsuite/g++.dg/other/pr22003.C
+++ b/gcc/testsuite/g++.dg/other/pr22003.C
@@ -21,5 +21,6 @@ struct c3 : c1, c2
c4* c3::func()
{
+ return 0;
}
diff --git a/gcc/testsuite/g++.dg/other/pr24623.C b/gcc/testsuite/g++.dg/other/pr24623.C
index 480bb39b63a..2a337756c2a 100644
--- a/gcc/testsuite/g++.dg/other/pr24623.C
+++ b/gcc/testsuite/g++.dg/other/pr24623.C
@@ -34,6 +34,8 @@ RefCountPointer (T * p = 0):_p (p)
if (_p != 0)
_p->decrementRefCount ();
}
+ static RefCountPointer a;
+ return a;
}
~RefCountPointer ()
{
diff --git a/gcc/testsuite/g++.dg/other/pr29610.C b/gcc/testsuite/g++.dg/other/pr29610.C
index 6566fb9a204..d68f4a15a07 100644
--- a/gcc/testsuite/g++.dg/other/pr29610.C
+++ b/gcc/testsuite/g++.dg/other/pr29610.C
@@ -6,7 +6,7 @@ struct __normal_iterator
typedef int*const *_Iterator;
int*const * _M_current;
__normal_iterator(const _Iterator& __i) : _M_current(__i){}
- const _Iterator& base() const {}
+ const _Iterator& base() const { static _Iterator a; return a; }
};
struct string { ~string(){} };
struct vector
@@ -26,9 +26,10 @@ inline int Painter::for_each(vector &layout, SliceWindowFunc func)
{
for (unsigned int window = 0; window < layout.size();++window)
(this->*func)();
+ return 0;
}
int t;
-int Painter::redraw_window(void) {t = 1;}
+int Painter::redraw_window(void) {t = 1; return 0; }
string t2(int);
vector *g(const string&);
void Painter::tcl_command(void)
diff --git a/gcc/testsuite/g++.dg/other/pr42645-1.C b/gcc/testsuite/g++.dg/other/pr42645-1.C
index 5dc76f9efd9..df93645721e 100644
--- a/gcc/testsuite/g++.dg/other/pr42645-1.C
+++ b/gcc/testsuite/g++.dg/other/pr42645-1.C
@@ -13,7 +13,7 @@ struct S
T *t3;
} t;
int m1 () const { return t.t3[0].t1; }
- char *m2 () { foo (); }
+ char *m2 () { foo (); return 0; }
void m3 (int x) { char *m = m2 (); if (m1 () > 0 && x > 0); }
void m4 () { if (m1 () > 0) for (int i = 0; i < 4; i++) t.t2[i] = 0; }
};
diff --git a/gcc/testsuite/g++.dg/other/pr42645-2.C b/gcc/testsuite/g++.dg/other/pr42645-2.C
index 67632e51d91..20cce80f217 100644
--- a/gcc/testsuite/g++.dg/other/pr42645-2.C
+++ b/gcc/testsuite/g++.dg/other/pr42645-2.C
@@ -8,7 +8,7 @@ struct C
C ();
};
-static inline C *foo () {}
+static inline C *foo () { return 0; }
extern void f4 ();
@@ -16,6 +16,7 @@ static inline int
f3 ()
{
f4 ();
+ return 0;
}
static inline void
diff --git a/gcc/testsuite/g++.dg/other/pr52048.C b/gcc/testsuite/g++.dg/other/pr52048.C
index 6bf51f1e288..cf388a5a045 100644
--- a/gcc/testsuite/g++.dg/other/pr52048.C
+++ b/gcc/testsuite/g++.dg/other/pr52048.C
@@ -1,6 +1,7 @@
// PR debug/52048
// { dg-do compile }
// { dg-options "-fcompare-debug -fnon-call-exceptions -fno-tree-dominator-opts -O2" }
+// { dg-additional-options "-Wno-return-type" }
template <typename T> struct A;
template <typename T>
diff --git a/gcc/testsuite/g++.dg/other/typedef3.C b/gcc/testsuite/g++.dg/other/typedef3.C
index 8ead5b84520..ed9aca2cf53 100644
--- a/gcc/testsuite/g++.dg/other/typedef3.C
+++ b/gcc/testsuite/g++.dg/other/typedef3.C
@@ -8,5 +8,5 @@ struct XalanCProcessor
ParseOptionType getParseOption(void);
};
typedef XalanCProcessor::ParseOptionType ParseOptionType;
-ParseOptionType XalanCProcessor::getParseOption(void) {}
+ParseOptionType XalanCProcessor::getParseOption(void) { return ParseOptionType(); }
diff --git a/gcc/testsuite/g++.dg/overload/addr1.C b/gcc/testsuite/g++.dg/overload/addr1.C
index 25856a20fc6..4eb9e2f31ca 100644
--- a/gcc/testsuite/g++.dg/overload/addr1.C
+++ b/gcc/testsuite/g++.dg/overload/addr1.C
@@ -11,7 +11,7 @@ static int flag = 0;
template <typename> struct A
{
- A &active () { flag++;}
+ A &active () { flag++; static A a; return a; }
static void foo() {}
diff --git a/gcc/testsuite/g++.dg/overload/defarg4.C b/gcc/testsuite/g++.dg/overload/defarg4.C
index 3fa0751b7c0..65ad5821aa3 100644
--- a/gcc/testsuite/g++.dg/overload/defarg4.C
+++ b/gcc/testsuite/g++.dg/overload/defarg4.C
@@ -6,11 +6,11 @@ class foo
{
template<typename U>
static bool func(const U& x)
- {}
+ { return true; }
public:
template<typename U>
unsigned int Find(const U& x, bool (*pFunc) (const U&) = func) const
- {}
+ { return 0; }
};
class bar {
@@ -22,5 +22,6 @@ protected:
bool bar::Initialize()
{
b.Find(b);
+ return false;
}
diff --git a/gcc/testsuite/g++.dg/overload/operator5.C b/gcc/testsuite/g++.dg/overload/operator5.C
index 329775aa872..b90b0fd912e 100644
--- a/gcc/testsuite/g++.dg/overload/operator5.C
+++ b/gcc/testsuite/g++.dg/overload/operator5.C
@@ -8,7 +8,7 @@ class String {
operator UnspecifiedBoolTypeA() const;
operator UnspecifiedBoolTypeB() const;
};
-inline bool equalIgnoringCase(const String& a, const String& b) { }
+inline bool equalIgnoringCase(const String& a, const String& b) { return true; }
inline bool equalPossiblyIgnoringCase(const String& a,
const String& b,
bool ignoreCase) {
diff --git a/gcc/testsuite/g++.dg/overload/ref-conv1.C b/gcc/testsuite/g++.dg/overload/ref-conv1.C
index 7e141141264..1c525fbc782 100644
--- a/gcc/testsuite/g++.dg/overload/ref-conv1.C
+++ b/gcc/testsuite/g++.dg/overload/ref-conv1.C
@@ -1,4 +1,5 @@
// PR c++/50442
+// { dg-additional-options "-Wno-return-type" }
template <typename T> struct MoveRef { operator T& () {} };
template <typename T> MoveRef <T> Move(T&) {}
diff --git a/gcc/testsuite/g++.dg/overload/template5.C b/gcc/testsuite/g++.dg/overload/template5.C
index 8ff1b3d657c..902684059a8 100644
--- a/gcc/testsuite/g++.dg/overload/template5.C
+++ b/gcc/testsuite/g++.dg/overload/template5.C
@@ -6,7 +6,7 @@ int low(T a, T b, T c) { return a + b + c; } // { dg-message "template" }
template<typename T>
int high(T a, T b, T c) { return a + b + c; } // { dg-message "template" }
-int test (void)
+void test (void)
{
low (5, 6); // { dg-error "no matching function" }
// { dg-message "(candidate|3 arguments, 2 provided)" "" { target *-*-* } .-1 }
diff --git a/gcc/testsuite/g++.dg/parse/crash40.C b/gcc/testsuite/g++.dg/parse/crash40.C
index 537cdb78ef1..67322f3eb11 100644
--- a/gcc/testsuite/g++.dg/parse/crash40.C
+++ b/gcc/testsuite/g++.dg/parse/crash40.C
@@ -15,12 +15,12 @@ class AA
struct BB : AA {};
class AAA {
- int get() const {}
+ int get() const { return 0; }
};
struct BBB {
static BBB *foo();
private:
- int get() const {} /* { dg-message "private" } */
+ int get() const { return 1; } /* { dg-message "private" } */
};
template<bool> struct S {
S(unsigned int = BBB::foo()->AAA::get()); /* { dg-error "is not a base of" } */
diff --git a/gcc/testsuite/g++.dg/parse/crash61.C b/gcc/testsuite/g++.dg/parse/crash61.C
index 790df0e4bc4..b2004b85173 100644
--- a/gcc/testsuite/g++.dg/parse/crash61.C
+++ b/gcc/testsuite/g++.dg/parse/crash61.C
@@ -1,5 +1,6 @@
// PR c++/56241
// { dg-do compile }
+// { dg-additional-options "-Wno-return-type" }
struct pair { constexpr pair (const) : }; // { dg-error "" }
template <0> make_pair () {} // { dg-error "" }
diff --git a/gcc/testsuite/g++.dg/parse/crash67.C b/gcc/testsuite/g++.dg/parse/crash67.C
index 51773ccef53..0befc9e2457 100644
--- a/gcc/testsuite/g++.dg/parse/crash67.C
+++ b/gcc/testsuite/g++.dg/parse/crash67.C
@@ -1,4 +1,5 @@
// PR c++/79414
+// { dg-additional-options "-Wno-return-type" }
class x0;
template <x1> x2() { // { dg-error "declared|type" }
diff --git a/gcc/testsuite/g++.dg/parse/ctor5.C b/gcc/testsuite/g++.dg/parse/ctor5.C
index f980b4a184f..917bfb06c34 100644
--- a/gcc/testsuite/g++.dg/parse/ctor5.C
+++ b/gcc/testsuite/g++.dg/parse/ctor5.C
@@ -1,4 +1,5 @@
// PR c++/27309
+// { dg-additional-options "-Wno-return-type" }
struct A
{
diff --git a/gcc/testsuite/g++.dg/parse/defarg4.C b/gcc/testsuite/g++.dg/parse/defarg4.C
index bafdadb9429..151f6c5f668 100644
--- a/gcc/testsuite/g++.dg/parse/defarg4.C
+++ b/gcc/testsuite/g++.dg/parse/defarg4.C
@@ -19,6 +19,7 @@ struct R
int Foo ()
{
R s (1);
+ return 0;
}
template <typename T> struct Q
@@ -29,4 +30,5 @@ template <typename T> struct Q
int Foo (Q<int> *s)
{
s->Foo (1);
+ return 1;
}
diff --git a/gcc/testsuite/g++.dg/parse/defarg6.C b/gcc/testsuite/g++.dg/parse/defarg6.C
index 827b605b77e..f73c2746cec 100644
--- a/gcc/testsuite/g++.dg/parse/defarg6.C
+++ b/gcc/testsuite/g++.dg/parse/defarg6.C
@@ -6,6 +6,6 @@
namespace sc_dt {
class sc_length_param {
- friend int compare_unsigned(int if_v_signed = 0) {}
+ friend int compare_unsigned(int if_v_signed = 0) { return 0; }
};
}
diff --git a/gcc/testsuite/g++.dg/parse/error5.C b/gcc/testsuite/g++.dg/parse/error5.C
index d14a47664b8..576ae9fd09b 100644
--- a/gcc/testsuite/g++.dg/parse/error5.C
+++ b/gcc/testsuite/g++.dg/parse/error5.C
@@ -1,19 +1,19 @@
// PR c++/13269
// { dg-options "-fshow-column" }
+// { dg-additional-options "-Wno-return-type" }
class Foo { int foo() return 0; } };
-// { dg-error "30:expected identifier before numeric constant" "identifier" { target *-*-* } 4 }
+// { dg-error "30:expected identifier before numeric constant" "identifier" { target *-*-* } 5 }
-// { dg-error "23:named return values are no longer supported" "named return" { target *-*-* } 4 }
+// { dg-error "23:named return values are no longer supported" "named return" { target *-*-* } 5 }
// the column number info of this error output is still wrong because the error
// message has been generated by cp_parser_error() which does not
// necessarily allow accurate column number display. At some point, we will
// need make cp_parser_error() report more accurate column numbers.
-// { dg-error "30:expected '\{' at end of input" "brace" { target *-*-* } 4 }
+// { dg-error "30:expected '\{' at end of input" "brace" { target *-*-* } 5 }
-// { dg-error "34:expected ';' after class definition" "semicolon" {target *-*-* } 4 }
-
-// { dg-error "35:expected declaration before '\}' token" "declaration" {target *-*-* } 4 }
+// { dg-error "34:expected ';' after class definition" "semicolon" {target *-*-* } 5 }
+// { dg-error "35:expected declaration before '\}' token" "declaration" {target *-*-* } 5 }
diff --git a/gcc/testsuite/g++.dg/parse/expr2.C b/gcc/testsuite/g++.dg/parse/expr2.C
index 32800e4f9cd..fb572e9408a 100644
--- a/gcc/testsuite/g++.dg/parse/expr2.C
+++ b/gcc/testsuite/g++.dg/parse/expr2.C
@@ -2,7 +2,7 @@ struct X {
X(double *data, double d0, double d1);
};
-int foo(double d0) {
+void foo(double d0) {
double * data;
X(data,d0,d0);
}
diff --git a/gcc/testsuite/g++.dg/parse/friend7.C b/gcc/testsuite/g++.dg/parse/friend7.C
index 72ab430ef1b..7fc480f3ea0 100644
--- a/gcc/testsuite/g++.dg/parse/friend7.C
+++ b/gcc/testsuite/g++.dg/parse/friend7.C
@@ -1,5 +1,6 @@
// PR c++/34488
// { dg-do compile }
+// { dg-additional-options "-Wno-return-type" }
struct A
{
diff --git a/gcc/testsuite/g++.dg/parse/namespace1.C b/gcc/testsuite/g++.dg/parse/namespace1.C
index 7740bce2dc4..56697419253 100644
--- a/gcc/testsuite/g++.dg/parse/namespace1.C
+++ b/gcc/testsuite/g++.dg/parse/namespace1.C
@@ -2,6 +2,6 @@ namespace foo {
void baz(int);
}
-int bar(int foo) {
+void bar(int foo) {
foo::baz (3);
}
diff --git a/gcc/testsuite/g++.dg/parse/namespace9.C b/gcc/testsuite/g++.dg/parse/namespace9.C
index 7ff0267bff3..d5da5446a65 100644
--- a/gcc/testsuite/g++.dg/parse/namespace9.C
+++ b/gcc/testsuite/g++.dg/parse/namespace9.C
@@ -1,7 +1,7 @@
namespace A {
void f();
}
-int g()
+void g()
{
struct f { };
using A::f;
diff --git a/gcc/testsuite/g++.dg/parse/ret-type2.C b/gcc/testsuite/g++.dg/parse/ret-type2.C
index 4b7a0457edd..913c8fe0002 100644
--- a/gcc/testsuite/g++.dg/parse/ret-type2.C
+++ b/gcc/testsuite/g++.dg/parse/ret-type2.C
@@ -1,3 +1,5 @@
+// { dg-additional-options "-Wno-return-type" }
+
struct S {} f(); // { dg-error "return" "err" }
// { dg-message "note" "note" { target *-*-* } .-1 }
struct T {} *g(); // { dg-error "return" }
diff --git a/gcc/testsuite/g++.dg/parse/typedef8.C b/gcc/testsuite/g++.dg/parse/typedef8.C
index 4c1823e87b9..60b8f39ee5e 100644
--- a/gcc/testsuite/g++.dg/parse/typedef8.C
+++ b/gcc/testsuite/g++.dg/parse/typedef8.C
@@ -5,7 +5,7 @@ typedef register int b; // { dg-error "conflicting" }
typedef extern int c; // { dg-error "conflicting" }
static typedef int a; // { dg-error "conflicting" }
-int foo()
+void foo()
{
typedef auto int bar; // { dg-error "conflicting|two or more data types" }
}
diff --git a/gcc/testsuite/g++.dg/pch/static-1.C b/gcc/testsuite/g++.dg/pch/static-1.C
index 21e77898298..cd6d4111cc5 100644
--- a/gcc/testsuite/g++.dg/pch/static-1.C
+++ b/gcc/testsuite/g++.dg/pch/static-1.C
@@ -2,9 +2,11 @@
int LocalStaticTest()
{
static A sa;
+ return 0;
}
int main(int argc, char **argv)
{
A::StaticTest();
+ return 0;
}
diff --git a/gcc/testsuite/g++.dg/plugin/diagnostic-test-expressions-1.C b/gcc/testsuite/g++.dg/plugin/diagnostic-test-expressions-1.C
index a145dfea28c..8b6afeb052a 100644
--- a/gcc/testsuite/g++.dg/plugin/diagnostic-test-expressions-1.C
+++ b/gcc/testsuite/g++.dg/plugin/diagnostic-test-expressions-1.C
@@ -66,7 +66,7 @@ struct test_struct
int field;
};
-int test_structure_references (struct test_struct *ptr)
+void test_structure_references (struct test_struct *ptr)
{
struct test_struct local;
local.field = 42;
@@ -84,7 +84,7 @@ int test_structure_references (struct test_struct *ptr)
{ dg-end-multiline-output "" } */
}
-int test_postfix_incdec (int i)
+void test_postfix_incdec (int i)
{
__emit_expression_range (0, i++ ); /* { dg-warning "range" } */
/* { dg-begin-multiline-output "" }
@@ -101,7 +101,7 @@ int test_postfix_incdec (int i)
/* Unary operators. ****************************************************/
-int test_sizeof (int i)
+void test_sizeof (int i)
{
__emit_expression_range (0, sizeof(int) + i); /* { dg-warning "range" } */
/* { dg-begin-multiline-output "" }
@@ -128,7 +128,7 @@ int test_sizeof (int i)
{ dg-end-multiline-output "" } */
}
-int test_alignof (int i)
+void test_alignof (int i)
{
__emit_expression_range (0, alignof(int) + i); /* { dg-warning "range" } */
/* { dg-begin-multiline-output "" }
@@ -167,7 +167,7 @@ int test_alignof (int i)
{ dg-end-multiline-output "" } */
}
-int test_prefix_incdec (int i)
+void test_prefix_incdec (int i)
{
__emit_expression_range (0, ++i ); /* { dg-warning "range" } */
/* { dg-begin-multiline-output "" }
diff --git a/gcc/testsuite/g++.dg/plugin/dumb-plugin-test-1.C b/gcc/testsuite/g++.dg/plugin/dumb-plugin-test-1.C
index 70101c86826..404cd2124c0 100644
--- a/gcc/testsuite/g++.dg/plugin/dumb-plugin-test-1.C
+++ b/gcc/testsuite/g++.dg/plugin/dumb-plugin-test-1.C
@@ -26,7 +26,7 @@ struct Bar {
int g = g;
Foo foo = foo;
-int func()
+void func()
{
Bar *bar1, bar2;
Foo local_foo;
diff --git a/gcc/testsuite/g++.dg/plugin/self-assign-test-1.C b/gcc/testsuite/g++.dg/plugin/self-assign-test-1.C
index 95d39dd0ce6..08bee09f0d5 100644
--- a/gcc/testsuite/g++.dg/plugin/self-assign-test-1.C
+++ b/gcc/testsuite/g++.dg/plugin/self-assign-test-1.C
@@ -26,7 +26,7 @@ struct Bar {
int g = g; // { dg-warning "assigned to itself" }
Foo foo = foo; // { dg-warning "assigned to itself" }
-int func()
+void func()
{
Bar *bar1, bar2;
Foo local_foo;
diff --git a/gcc/testsuite/g++.dg/plugin/self-assign-test-2.C b/gcc/testsuite/g++.dg/plugin/self-assign-test-2.C
index da963c42db2..2c9d8cb7d0e 100644
--- a/gcc/testsuite/g++.dg/plugin/self-assign-test-2.C
+++ b/gcc/testsuite/g++.dg/plugin/self-assign-test-2.C
@@ -26,7 +26,7 @@ struct Bar {
int g = g; // { dg-warning "assigned to itself" }
Foo foo = foo; // { dg-warning "assigned to itself" }
-int func()
+void func()
{
Bar *bar1, bar2;
Foo local_foo;
diff --git a/gcc/testsuite/g++.dg/plugin/self-assign-test-3.C b/gcc/testsuite/g++.dg/plugin/self-assign-test-3.C
index e5b354baff2..48a1aa4139c 100644
--- a/gcc/testsuite/g++.dg/plugin/self-assign-test-3.C
+++ b/gcc/testsuite/g++.dg/plugin/self-assign-test-3.C
@@ -26,7 +26,7 @@ struct Bar {
int g = g; // { dg-bogus "assigned to itself" }
Foo foo = foo; // { dg-bogus "assigned to itself" }
-int func()
+void func()
{
Bar *bar1, bar2;
Foo local_foo;
diff --git a/gcc/testsuite/g++.dg/pr45788.C b/gcc/testsuite/g++.dg/pr45788.C
index 0f4db20c769..9148585a7f6 100644
--- a/gcc/testsuite/g++.dg/pr45788.C
+++ b/gcc/testsuite/g++.dg/pr45788.C
@@ -1,5 +1,5 @@
// { dg-do compile { target i?86-*-* x86_64-*-* } }
-// { dg-options "-O3 -fwhole-program -msse2" }
+// { dg-options "-O3 -fwhole-program -msse2 -Wno-return-type" }
typedef long unsigned int __darwin_size_t;
typedef __darwin_size_t size_t;
diff --git a/gcc/testsuite/g++.dg/pr48484.C b/gcc/testsuite/g++.dg/pr48484.C
index 1380c452fa4..19e9b9a8d0a 100644
--- a/gcc/testsuite/g++.dg/pr48484.C
+++ b/gcc/testsuite/g++.dg/pr48484.C
@@ -65,6 +65,8 @@ bool
jump = 0;
}
}
+
+ return false;
}
bool
@@ -102,4 +104,5 @@ bool
}
}
+ return true;
}
diff --git a/gcc/testsuite/g++.dg/pr50763-3.C b/gcc/testsuite/g++.dg/pr50763-3.C
index b66be87b1b8..33aba30406e 100644
--- a/gcc/testsuite/g++.dg/pr50763-3.C
+++ b/gcc/testsuite/g++.dg/pr50763-3.C
@@ -53,5 +53,5 @@ long sExt::evalPoint(const v2d & crUV, v3d & rPnt) const {
if (!_Dom.cop2d(crUV)) {
sUV = _Dom.clp2d(crUV);
}
- eval();
+ return eval();
}
diff --git a/gcc/testsuite/g++.dg/pr55513.C b/gcc/testsuite/g++.dg/pr55513.C
index 35c728d30a3..1c119e40ebd 100644
--- a/gcc/testsuite/g++.dg/pr55513.C
+++ b/gcc/testsuite/g++.dg/pr55513.C
@@ -1,11 +1,13 @@
// { dg-do compile }
// { dg-options "-O0 -fdump-tree-gimple" }
+int
main ()
{
char s[10];
const int t = (__builtin_memcpy (s, "Hello", 6), 777);
__builtin_printf ("%d %s\n", t, s);
+ return 0;
}
// { dg-final { scan-tree-dump-times "memcpy" 1 "gimple" } }
diff --git a/gcc/testsuite/g++.dg/pr55604.C b/gcc/testsuite/g++.dg/pr55604.C
index f6fa3c0f42c..f67a1b4ef14 100644
--- a/gcc/testsuite/g++.dg/pr55604.C
+++ b/gcc/testsuite/g++.dg/pr55604.C
@@ -1,10 +1,12 @@
/* { dg-do compile } */
/* { dg-options "-O -fdump-rtl-reload" } */
+int
main ()
{
char s[10];
const int t = (__builtin_memcpy (s, "Hello", 6), 5);
__builtin_printf ("%d %s\n", t, s);
+ return 0;
}
diff --git a/gcc/testsuite/g++.dg/pr57662.C b/gcc/testsuite/g++.dg/pr57662.C
index bd5793dcaea..1592433d0d7 100644
--- a/gcc/testsuite/g++.dg/pr57662.C
+++ b/gcc/testsuite/g++.dg/pr57662.C
@@ -1,5 +1,6 @@
/* { dg-do compile { target powerpc*-*-* ia64-*-* i?86-*-* x86_64-*-* } } */
/* { dg-options "-O2 -fselective-scheduling2 -fsel-sched-pipelining" } */
+/* { dg-additional-options "-Wno-return-type" } */
extern "C" {
typedef struct _IO_FILE FILE;
diff --git a/gcc/testsuite/g++.dg/pr57878.C b/gcc/testsuite/g++.dg/pr57878.C
index 231f9e346b9..5df2b7c9ef4 100644
--- a/gcc/testsuite/g++.dg/pr57878.C
+++ b/gcc/testsuite/g++.dg/pr57878.C
@@ -95,6 +95,8 @@ namespace __gnu_cxx __attribute__ ((__visibility__ ("default"))) {
}
__sso_string_base(const __sso_string_base& __rcs);
const _CharT_alloc_type& _M_get_allocator() const {
+ static _CharT_alloc_type c;
+ return c;
}
};
template<typename _CharT, typename _Traits, typename _Alloc>
diff --git a/gcc/testsuite/g++.dg/pr58389.C b/gcc/testsuite/g++.dg/pr58389.C
index 648c145459b..8c98b67076b 100644
--- a/gcc/testsuite/g++.dg/pr58389.C
+++ b/gcc/testsuite/g++.dg/pr58389.C
@@ -51,4 +51,4 @@ inline void C::m_fn1() {
sort(c, b, qt_notclosestLeaf);
}
}
-A F::m_fn1() const { const_cast<F *>(this)->d_ptr->m_fn1(); }
+A F::m_fn1() const { const_cast<F *>(this)->d_ptr->m_fn1(); return A(); }
diff --git a/gcc/testsuite/g++.dg/pr59510.C b/gcc/testsuite/g++.dg/pr59510.C
index dcdf860dcf7..4ac5becfd26 100644
--- a/gcc/testsuite/g++.dg/pr59510.C
+++ b/gcc/testsuite/g++.dg/pr59510.C
@@ -1,6 +1,7 @@
// PR debug/59510
// { dg-do compile }
// { dg-options "-O2 -g --param=large-stack-frame-growth=1" }
+// { dg-additional-options "-Wno-return-type" }
template <typename _Iterator>
struct _Iter_base
diff --git a/gcc/testsuite/g++.dg/pr64688.C b/gcc/testsuite/g++.dg/pr64688.C
index 3525e49b735..bf85f1abb45 100644
--- a/gcc/testsuite/g++.dg/pr64688.C
+++ b/gcc/testsuite/g++.dg/pr64688.C
@@ -24,7 +24,10 @@ template <typename> struct F;
template <typename> struct G;
template <typename, typename, int> struct H;
template <typename Element, typename Layout> struct H<Element, Layout, 3> {};
-template <int, typename E, typename L, int N> unsigned char at_c(H<E, L, N>) {}
+template <int, typename E, typename L, int N> unsigned char at_c(H<E, L, N>)
+{
+ return 0;
+}
template <typename> class I;
template <typename> class J;
template <typename> class K;
diff --git a/gcc/testsuite/g++.dg/pr65032.C b/gcc/testsuite/g++.dg/pr65032.C
index a62f50b49bf..d6b6768d25a 100644
--- a/gcc/testsuite/g++.dg/pr65032.C
+++ b/gcc/testsuite/g++.dg/pr65032.C
@@ -84,4 +84,6 @@ G::DecodeVorbis (int *p1)
mDecodedAudioFrames -= b;
fn2 (b);
}
+
+ return nsresult();
}
diff --git a/gcc/testsuite/g++.dg/pr67989.C b/gcc/testsuite/g++.dg/pr67989.C
index c3023557d31..26748997fdd 100644
--- a/gcc/testsuite/g++.dg/pr67989.C
+++ b/gcc/testsuite/g++.dg/pr67989.C
@@ -1,5 +1,6 @@
/* { dg-do compile } */
/* { dg-options "-std=c++11 -O2" } */
+/* { dg-additional-options "-Wno-return-type" } */
__extension__ typedef unsigned long long int uint64_t;
namespace std __attribute__ ((__visibility__ ("default")))
diff --git a/gcc/testsuite/g++.dg/pr70590-2.C b/gcc/testsuite/g++.dg/pr70590-2.C
index 409c86eccd1..fd79dbfc883 100644
--- a/gcc/testsuite/g++.dg/pr70590-2.C
+++ b/gcc/testsuite/g++.dg/pr70590-2.C
@@ -8,13 +8,13 @@ constexpr int *foo = &a;
void blah (int *);
-int
+void
bar ()
{
blah (foo);
}
-int
+void
baz ()
{
blah (foo);
diff --git a/gcc/testsuite/g++.dg/pr70590.C b/gcc/testsuite/g++.dg/pr70590.C
index 488620065ee..28c455ab530 100644
--- a/gcc/testsuite/g++.dg/pr70590.C
+++ b/gcc/testsuite/g++.dg/pr70590.C
@@ -12,13 +12,13 @@ foo ()
void blah (int *);
-int
+void
bar ()
{
blah (foo ());
}
-int
+void
baz ()
{
blah (foo ());
diff --git a/gcc/testsuite/g++.dg/pr70965.C b/gcc/testsuite/g++.dg/pr70965.C
index d8a2c35e532..4b45789845f 100644
--- a/gcc/testsuite/g++.dg/pr70965.C
+++ b/gcc/testsuite/g++.dg/pr70965.C
@@ -14,7 +14,7 @@ template <typename> struct D
extern template class D<char>;
enum L { M };
struct F { virtual char *foo (); };
-template <class> struct I : B { static int foo (int) {} };
+template <class> struct I : B { static int foo (int) { return 0; } };
struct G { typedef I<int> t; };
void foo (int) { G::t::foo (0); }
void bar (const D<char> &, const D<int> &, int, L);
diff --git a/gcc/testsuite/g++.dg/pr71633.C b/gcc/testsuite/g++.dg/pr71633.C
index 48e9c9833ec..8852695b910 100644
--- a/gcc/testsuite/g++.dg/pr71633.C
+++ b/gcc/testsuite/g++.dg/pr71633.C
@@ -20,6 +20,7 @@ class c3 : c1, c2
int *c3::fn2 () const
{
+ return 0;
}
int *c3::fn3 (int p) const
diff --git a/gcc/testsuite/g++.dg/pr77550.C b/gcc/testsuite/g++.dg/pr77550.C
index a1064737a67..9b31defd09c 100644
--- a/gcc/testsuite/g++.dg/pr77550.C
+++ b/gcc/testsuite/g++.dg/pr77550.C
@@ -235,7 +235,7 @@ template <typename, typename, typename> struct basic_string {
basic_string() : _M_dataplus(0) {}
basic_string(const basic_string &) : _M_dataplus(0) {}
size_type size() { return _M_string_length; }
- char *data() const {}
+ char *data() const { return 0; }
};
//template<> basic_string<char, std::char_traits<char>, std::allocator<char>>::
//_Alloc_hider::_Alloc_hider(char*, std::allocator<char>&&) {}
diff --git a/gcc/testsuite/g++.dg/pr80287.C b/gcc/testsuite/g++.dg/pr80287.C
index da8d3fab150..d01e73371de 100644
--- a/gcc/testsuite/g++.dg/pr80287.C
+++ b/gcc/testsuite/g++.dg/pr80287.C
@@ -3,7 +3,7 @@
// { dg-options "-g" }
struct A {
- operator long() {}
+ operator long() { return 0; }
} __attribute__((__may_alias__));
struct {
diff --git a/gcc/testsuite/g++.dg/pr80707.C b/gcc/testsuite/g++.dg/pr80707.C
index 4fe89335e31..b3848b0eb5f 100644
--- a/gcc/testsuite/g++.dg/pr80707.C
+++ b/gcc/testsuite/g++.dg/pr80707.C
@@ -26,4 +26,6 @@ int A::m_fn1(int &p1) const {
p1 = 0;
b--;
}
+
+ return 0;
}
diff --git a/gcc/testsuite/g++.dg/pr81194.C b/gcc/testsuite/g++.dg/pr81194.C
index 249fcf3b7c7..5f949389ec6 100644
--- a/gcc/testsuite/g++.dg/pr81194.C
+++ b/gcc/testsuite/g++.dg/pr81194.C
@@ -1,5 +1,6 @@
// { dg-do compile }
// { dg-options "-O2 -std=c++17 -fno-exceptions" }
+// { dg-additional-options "-Wno-return-type" }
template <class a> struct b { typedef a *c; };
class e {};
diff --git a/gcc/testsuite/g++.dg/spellcheck-identifiers.C b/gcc/testsuite/g++.dg/spellcheck-identifiers.C
index 08434399e6f..e4a606e2052 100644
--- a/gcc/testsuite/g++.dg/spellcheck-identifiers.C
+++ b/gcc/testsuite/g++.dg/spellcheck-identifiers.C
@@ -136,7 +136,7 @@ enum foo {
FOO_SECOND
};
-int
+void
test_6 (enum foo f)
{
switch (f)
diff --git a/gcc/testsuite/g++.dg/stackprotectexplicit2.C b/gcc/testsuite/g++.dg/stackprotectexplicit2.C
index 9cf9ab909c9..35d9e886ccf 100644
--- a/gcc/testsuite/g++.dg/stackprotectexplicit2.C
+++ b/gcc/testsuite/g++.dg/stackprotectexplicit2.C
@@ -7,6 +7,7 @@ int A()
{
int A[23];
char b[22];
+ return 0;
}
int __attribute__((stack_protect)) B()
@@ -24,4 +25,4 @@ int __attribute__((stack_protect)) c()
}
-/* { dg-final { scan-assembler-times "stack_chk_fail" 2 } } */ \ No newline at end of file
+/* { dg-final { scan-assembler-times "stack_chk_fail" 2 } } */
diff --git a/gcc/testsuite/g++.dg/tc1/dr152.C b/gcc/testsuite/g++.dg/tc1/dr152.C
index e787f8bcbfb..7d84d6afc1a 100644
--- a/gcc/testsuite/g++.dg/tc1/dr152.C
+++ b/gcc/testsuite/g++.dg/tc1/dr152.C
@@ -12,6 +12,7 @@ namespace N1 {
{
X x;
f(x); // { dg-error "matching" "matching" }
+ return 0;
}
}
@@ -30,6 +31,7 @@ namespace N2 {
{
X<T> x;
N2::f(x); // { dg-error "matching" "matching" }
+ return 0;
}
template int foo<float>(); // { dg-message "required from here" }
diff --git a/gcc/testsuite/g++.dg/template/aggr-init1.C b/gcc/testsuite/g++.dg/template/aggr-init1.C
index a09c7a79264..a7fe7c0441b 100644
--- a/gcc/testsuite/g++.dg/template/aggr-init1.C
+++ b/gcc/testsuite/g++.dg/template/aggr-init1.C
@@ -4,5 +4,5 @@ struct A {};
struct B {
void *(*a)();
};
-template <typename T> void *CreateA() {}
+template <typename T> void *CreateA() { return 0; }
B b = {CreateA<A>};
diff --git a/gcc/testsuite/g++.dg/template/anon1.C b/gcc/testsuite/g++.dg/template/anon1.C
index ef73df6b39e..ff5c9ea1553 100644
--- a/gcc/testsuite/g++.dg/template/anon1.C
+++ b/gcc/testsuite/g++.dg/template/anon1.C
@@ -1,10 +1,10 @@
struct x {
- int foo () {}
+ int foo () { return 0; }
};
template <class T>
struct vector {
- T& bar () {}
+ T& bar () { static T a; return a; }
};
template <class T>
diff --git a/gcc/testsuite/g++.dg/template/array29.C b/gcc/testsuite/g++.dg/template/array29.C
index e43cb9d965a..f3eb95dc0aa 100644
--- a/gcc/testsuite/g++.dg/template/array29.C
+++ b/gcc/testsuite/g++.dg/template/array29.C
@@ -30,6 +30,7 @@ template <typename BaseT> struct D
typename BaseT::callback_type p3)
{
p3.on_extended_iso_date ();
+ return char_type();
}
};
struct F
diff --git a/gcc/testsuite/g++.dg/template/array7.C b/gcc/testsuite/g++.dg/template/array7.C
index 1fb130f9961..59364be33cf 100644
--- a/gcc/testsuite/g++.dg/template/array7.C
+++ b/gcc/testsuite/g++.dg/template/array7.C
@@ -6,6 +6,7 @@ template <unsigned N, unsigned M>
int bar( const char(&val)[M] )
{
foo (N,M);
+ return 0;
}
int i = bar<10>("1234");
diff --git a/gcc/testsuite/g++.dg/template/canon-type-8.C b/gcc/testsuite/g++.dg/template/canon-type-8.C
index fd1fe3ce362..b72bd0fb770 100644
--- a/gcc/testsuite/g++.dg/template/canon-type-8.C
+++ b/gcc/testsuite/g++.dg/template/canon-type-8.C
@@ -2,6 +2,7 @@
// We were getting different canonical types for matching types because
// TYPE_ALIGN wasn't propagated to all the variants fast enough.
// { dg-options "" }
+// { dg-additional-options "-Wno-return-type" }
typedef __SIZE_TYPE__ size_t;
enum { chunk_size = 16 };
diff --git a/gcc/testsuite/g++.dg/template/cast5.C b/gcc/testsuite/g++.dg/template/cast5.C
new file mode 100644
index 00000000000..4e48d1d3c5c
--- /dev/null
+++ b/gcc/testsuite/g++.dg/template/cast5.C
@@ -0,0 +1,8 @@
+// PR c++/82360
+// { dg-do compile { target c++11 } }
+
+class a {};
+template <class> class b {
+ b(b &&c) : d(static_cast<a &&>(c.d)) {}
+ a d;
+};
diff --git a/gcc/testsuite/g++.dg/template/conv1.C b/gcc/testsuite/g++.dg/template/conv1.C
index e0c7492034c..80c7becd665 100644
--- a/gcc/testsuite/g++.dg/template/conv1.C
+++ b/gcc/testsuite/g++.dg/template/conv1.C
@@ -16,8 +16,8 @@ template<class T> struct First
};
template <class T> int First<T>::Foo ()
-{} // This is here to make sure we didn't smash Foo's decl in the
- // method vector
+{ return 0; } // This is here to make sure we didn't smash Foo's decl in the
+ // method vector
struct B { };
struct D { };
diff --git a/gcc/testsuite/g++.dg/template/crash107.C b/gcc/testsuite/g++.dg/template/crash107.C
index d92ee3394e2..cecf9013244 100644
--- a/gcc/testsuite/g++.dg/template/crash107.C
+++ b/gcc/testsuite/g++.dg/template/crash107.C
@@ -1,6 +1,7 @@
// PR c++/44625
// { dg-do compile }
// { dg-options "" }
+// { dg-additional-options "-Wno-return-type" }
template<typename FP_> struct Vec { // { dg-message "note" }
Vec& operator^=(Vec& rhs) {
diff --git a/gcc/testsuite/g++.dg/template/crash23.C b/gcc/testsuite/g++.dg/template/crash23.C
index 0c3eac1acbf..b8dea99b4b1 100644
--- a/gcc/testsuite/g++.dg/template/crash23.C
+++ b/gcc/testsuite/g++.dg/template/crash23.C
@@ -1,7 +1,7 @@
// PR c++/17642
template<int dim>
-int f(const int* const lsh, const int* const bbox, const int* const nghostzones, int d)
+void f(const int* const lsh, const int* const bbox, const int* const nghostzones, int d)
{
for (int d=0; d<dim; ++d)
lsh[d] - (bbox[2*d+1] ? 0 : nghostzones[d]);
diff --git a/gcc/testsuite/g++.dg/template/crash8.C b/gcc/testsuite/g++.dg/template/crash8.C
index a6f26b30679..b68cfcb4440 100644
--- a/gcc/testsuite/g++.dg/template/crash8.C
+++ b/gcc/testsuite/g++.dg/template/crash8.C
@@ -13,7 +13,7 @@ struct bar
};
template <typename U>
- int wom(U c)
+ void wom(U c)
{
struct foo b;
}
diff --git a/gcc/testsuite/g++.dg/template/defarg4.C b/gcc/testsuite/g++.dg/template/defarg4.C
index 293538adbd1..30711c22c0b 100644
--- a/gcc/testsuite/g++.dg/template/defarg4.C
+++ b/gcc/testsuite/g++.dg/template/defarg4.C
@@ -1,7 +1,7 @@
// PR c++/14763
struct A {
- int get() const {}
+ int get() const { return 0; }
static A *foo();
};
diff --git a/gcc/testsuite/g++.dg/template/dependent-expr9.C b/gcc/testsuite/g++.dg/template/dependent-expr9.C
index 7da060d4bb6..e5abc2525d3 100644
--- a/gcc/testsuite/g++.dg/template/dependent-expr9.C
+++ b/gcc/testsuite/g++.dg/template/dependent-expr9.C
@@ -6,5 +6,5 @@ public:
};
A *fn1(int *);
template <typename> class B : A {
- static int *m_fn2() { fn1(m_fn2())->m_fn1<A>(); }
+ static int *m_fn2() { fn1(m_fn2())->m_fn1<A>(); return 0; }
};
diff --git a/gcc/testsuite/g++.dg/template/error10.C b/gcc/testsuite/g++.dg/template/error10.C
index 02ea64b7912..a61d22f1916 100644
--- a/gcc/testsuite/g++.dg/template/error10.C
+++ b/gcc/testsuite/g++.dg/template/error10.C
@@ -24,6 +24,7 @@ void B(void) {}
int Btest()
{
B<256 >> 4>();
+ return 0;
}
template <int N = 123>>4>
diff --git a/gcc/testsuite/g++.dg/template/friend32.C b/gcc/testsuite/g++.dg/template/friend32.C
index 94bff37776f..278b4960602 100644
--- a/gcc/testsuite/g++.dg/template/friend32.C
+++ b/gcc/testsuite/g++.dg/template/friend32.C
@@ -18,4 +18,5 @@ template<class T> class B
int f ()
{
B<int> b; // { dg-message "required" }
+ return 0;
}
diff --git a/gcc/testsuite/g++.dg/template/init6.C b/gcc/testsuite/g++.dg/template/init6.C
index 143746642fc..d5467023b5b 100644
--- a/gcc/testsuite/g++.dg/template/init6.C
+++ b/gcc/testsuite/g++.dg/template/init6.C
@@ -24,8 +24,10 @@ template<class T>
Iter<typename Graph<T>::Node*> *Graph<T>::Inner::get() {
SubIter<typename Graph<T>::Node*> *iter;
iter->insert(0);
+ return 0;
}
int main() {
Iter<Graph<int>::Node*> *n2_iter = new SubIter<Graph<int>::Node*>();
+ return 0;
}
diff --git a/gcc/testsuite/g++.dg/template/memfriend7.C b/gcc/testsuite/g++.dg/template/memfriend7.C
index 2659a1a2191..e6bf12f9654 100644
--- a/gcc/testsuite/g++.dg/template/memfriend7.C
+++ b/gcc/testsuite/g++.dg/template/memfriend7.C
@@ -49,6 +49,7 @@ template <class T> int A<T*>::h()
{
C c;
c.ii = 0; // { dg-error "context" }
+ return 0;
}
template <class T> void A<T*>::i(char)
@@ -93,6 +94,7 @@ int A<char>::h()
{
C c;
c.ii = 0; // { dg-error "context" }
+ return 0;
}
void A<char>::i(char)
diff --git a/gcc/testsuite/g++.dg/template/new10.C b/gcc/testsuite/g++.dg/template/new10.C
index 98293ba5bd4..1a0c044c005 100644
--- a/gcc/testsuite/g++.dg/template/new10.C
+++ b/gcc/testsuite/g++.dg/template/new10.C
@@ -20,4 +20,5 @@ bool Analyzer::ReadDictionary( READER &reader )
= ( number_of_composite_sequences + SequenceMapIndex( 1 ) )
* ( number_of_composite_sequences + 1 );
map_from_2_hints_to_composite_sequence.reset(new SequenceIndex[ntt]());
+ return true;
}
diff --git a/gcc/testsuite/g++.dg/template/nontype12.C b/gcc/testsuite/g++.dg/template/nontype12.C
index b4bb41d109b..4ec22ef94e3 100644
--- a/gcc/testsuite/g++.dg/template/nontype12.C
+++ b/gcc/testsuite/g++.dg/template/nontype12.C
@@ -27,7 +27,7 @@ template<typename T> struct C
template<T> int foo(); // { dg-error "double" }
};
-template<typename T> int baz(T) { C<T> c; } // { dg-message "required" }
+template<typename T> int baz(T) { C<T> c; return 0;} // { dg-message "required" }
void foobar()
{
diff --git a/gcc/testsuite/g++.dg/template/overload12.C b/gcc/testsuite/g++.dg/template/overload12.C
index 3251474bd6a..43898efebde 100644
--- a/gcc/testsuite/g++.dg/template/overload12.C
+++ b/gcc/testsuite/g++.dg/template/overload12.C
@@ -8,7 +8,7 @@ int foo(T a, T2& b, T2 c) {return a + b;} // { dg-message "template" }
int foo(char*, S&); // { dg-message "foo" }
// { dg-message "candidate expects 2 arguments, 3 provided" "arity" { target *-*-* } .-1 }
-int foo2(int x)
+void foo2(int x)
{
S s={1,2};
char c;
diff --git a/gcc/testsuite/g++.dg/template/overload5.C b/gcc/testsuite/g++.dg/template/overload5.C
index 8e520e92907..06075eca78f 100644
--- a/gcc/testsuite/g++.dg/template/overload5.C
+++ b/gcc/testsuite/g++.dg/template/overload5.C
@@ -24,5 +24,6 @@ int foo::f()
f_obj<&foo::g> c; // OK
f_obj<foo::g> d; // OK
+ return 0;
}
diff --git a/gcc/testsuite/g++.dg/template/overload8.C b/gcc/testsuite/g++.dg/template/overload8.C
index cc6a05b7041..4775db18d87 100644
--- a/gcc/testsuite/g++.dg/template/overload8.C
+++ b/gcc/testsuite/g++.dg/template/overload8.C
@@ -3,5 +3,5 @@
struct A
{
template<int> void foo() {}
- template<int> int foo() {}
+ template<int> int foo() { return 0; }
};
diff --git a/gcc/testsuite/g++.dg/template/partial10.C b/gcc/testsuite/g++.dg/template/partial10.C
index 53a48fbac82..673baae2023 100644
--- a/gcc/testsuite/g++.dg/template/partial10.C
+++ b/gcc/testsuite/g++.dg/template/partial10.C
@@ -10,9 +10,10 @@
template <class Fn> void def(Fn fn) {}
template <class T1, class T2> T2 fn(T1, T2);
-template <class T1> int fn(T1) { }
+template <class T1> int fn(T1) { return 0; }
int main()
{
def(fn<int>);
+ return 0;
}
diff --git a/gcc/testsuite/g++.dg/template/partial9.C b/gcc/testsuite/g++.dg/template/partial9.C
index 4c340fc935d..8307c70883e 100644
--- a/gcc/testsuite/g++.dg/template/partial9.C
+++ b/gcc/testsuite/g++.dg/template/partial9.C
@@ -1,6 +1,6 @@
// PR c++/36435
template <class T> T f();
-template <class T> T* f() { }
+template <class T> T* f() { return 0; }
template int* f();
diff --git a/gcc/testsuite/g++.dg/template/qual1.C b/gcc/testsuite/g++.dg/template/qual1.C
index 8fa79b3d2e7..7b20265eb9a 100644
--- a/gcc/testsuite/g++.dg/template/qual1.C
+++ b/gcc/testsuite/g++.dg/template/qual1.C
@@ -7,7 +7,7 @@ public:
void sort (int (*compare) (T *const&,T *const&));
};
-int shift_compare (int *const &, int *const &) {}
+int shift_compare (int *const &, int *const &) { return 0; }
template<class T> void
Link_array<T>::sort (int (*compare) (T *const&,T *const&))
diff --git a/gcc/testsuite/g++.dg/template/show-template-tree-3.C b/gcc/testsuite/g++.dg/template/show-template-tree-3.C
index 0eda40bcf77..7bb93ba4f60 100644
--- a/gcc/testsuite/g++.dg/template/show-template-tree-3.C
+++ b/gcc/testsuite/g++.dg/template/show-template-tree-3.C
@@ -9,6 +9,7 @@
for that in this case). */
// { dg-options "-fdiagnostics-show-template-tree" }
+// { dg-additional-options "-Wno-return-type" }
#include <map>
#include <vector>
diff --git a/gcc/testsuite/g++.dg/template/sizeof8.C b/gcc/testsuite/g++.dg/template/sizeof8.C
index 861febc161c..bed68f9d16e 100644
--- a/gcc/testsuite/g++.dg/template/sizeof8.C
+++ b/gcc/testsuite/g++.dg/template/sizeof8.C
@@ -4,6 +4,6 @@
template <int> struct S{};
-template <int N> S<sizeof(new double[N])> f() {}
+template <int N> S<sizeof(new double[N])> f() { return S<sizeof(new double[N])>(); }
template S<sizeof(void*)> f<2>();
diff --git a/gcc/testsuite/g++.dg/template/sizeof9.C b/gcc/testsuite/g++.dg/template/sizeof9.C
index 8d9ec95ae1f..84400d3a364 100644
--- a/gcc/testsuite/g++.dg/template/sizeof9.C
+++ b/gcc/testsuite/g++.dg/template/sizeof9.C
@@ -2,5 +2,5 @@
template<int N> struct X { char x[N]; };
template<typename T> X<1 + sizeof(T) - sizeof(T)> F(T const &);
-template<int N> struct S { int d() { F(1); } };
+template<int N> struct S { int d() { F(1); return S(); } };
diff --git a/gcc/testsuite/g++.dg/template/spec6.C b/gcc/testsuite/g++.dg/template/spec6.C
index 915b8331252..fd41d7417c2 100644
--- a/gcc/testsuite/g++.dg/template/spec6.C
+++ b/gcc/testsuite/g++.dg/template/spec6.C
@@ -1,3 +1,5 @@
+// { dg-additional-options "-Wno-return-type" }
+
template <bool, int> struct X {};
template <bool C> struct X<C,1> {
diff --git a/gcc/testsuite/g++.dg/template/spec7.C b/gcc/testsuite/g++.dg/template/spec7.C
index 18d3c90c45e..caf6b91d151 100644
--- a/gcc/testsuite/g++.dg/template/spec7.C
+++ b/gcc/testsuite/g++.dg/template/spec7.C
@@ -20,7 +20,7 @@ template<> template<> template <class V> void A<int>::B<char>::g(V)
A<int>::B<char> b;
-int h()
+void h()
{
b.f();
b.g(0);
diff --git a/gcc/testsuite/g++.dg/template/typedef8.C b/gcc/testsuite/g++.dg/template/typedef8.C
index f132606889b..b3178842172 100644
--- a/gcc/testsuite/g++.dg/template/typedef8.C
+++ b/gcc/testsuite/g++.dg/template/typedef8.C
@@ -1,4 +1,5 @@
// PR c++/34206
+// { dg-additional-options "-Wno-return-type" }
template<class _T1, class _T2> struct pair { };
template <class T0, class T1> struct tuple {
diff --git a/gcc/testsuite/g++.dg/template/using20.C b/gcc/testsuite/g++.dg/template/using20.C
index 1df9549cd6c..cf180d21fe9 100644
--- a/gcc/testsuite/g++.dg/template/using20.C
+++ b/gcc/testsuite/g++.dg/template/using20.C
@@ -15,4 +15,5 @@ int f(void)
{
G<int> a;
a.f();
+ return 0;
}
diff --git a/gcc/testsuite/g++.dg/template/vla1.C b/gcc/testsuite/g++.dg/template/vla1.C
index fe93440f1f2..d873f2b0b06 100644
--- a/gcc/testsuite/g++.dg/template/vla1.C
+++ b/gcc/testsuite/g++.dg/template/vla1.C
@@ -5,5 +5,6 @@ template <bool>
static int label (int w)
{
sizeof(int[w]);
+ return 0;
}
int a = label<false>(1);
diff --git a/gcc/testsuite/g++.dg/tls/thread_local3.C b/gcc/testsuite/g++.dg/tls/thread_local3.C
index e05a0b95306..d29bda1a000 100644
--- a/gcc/testsuite/g++.dg/tls/thread_local3.C
+++ b/gcc/testsuite/g++.dg/tls/thread_local3.C
@@ -21,6 +21,7 @@ void f()
void *thread_main(void *)
{
f(); f(); f();
+ return 0;
}
#include <pthread.h>
diff --git a/gcc/testsuite/g++.dg/tls/thread_local3g.C b/gcc/testsuite/g++.dg/tls/thread_local3g.C
index a3c9ebcc2c5..2f9fdee1a73 100644
--- a/gcc/testsuite/g++.dg/tls/thread_local3g.C
+++ b/gcc/testsuite/g++.dg/tls/thread_local3g.C
@@ -19,6 +19,7 @@ thread_local A a;
void *thread_main(void *)
{
A* ap = &a;
+ return 0;
}
#include <pthread.h>
diff --git a/gcc/testsuite/g++.dg/tls/thread_local5.C b/gcc/testsuite/g++.dg/tls/thread_local5.C
index c4d5ff01bf5..61fea722dc2 100644
--- a/gcc/testsuite/g++.dg/tls/thread_local5.C
+++ b/gcc/testsuite/g++.dg/tls/thread_local5.C
@@ -30,6 +30,7 @@ void f()
void *thread_main(void *)
{
f(); f(); f();
+ return 0;
}
int main()
diff --git a/gcc/testsuite/g++.dg/tls/thread_local5g.C b/gcc/testsuite/g++.dg/tls/thread_local5g.C
index 5ced551fb57..596bbbe542f 100644
--- a/gcc/testsuite/g++.dg/tls/thread_local5g.C
+++ b/gcc/testsuite/g++.dg/tls/thread_local5g.C
@@ -28,6 +28,7 @@ thread_local A a;
void *thread_main(void *)
{
A* ap = &a;
+ return 0;
}
int main()
diff --git a/gcc/testsuite/g++.dg/tls/thread_local6.C b/gcc/testsuite/g++.dg/tls/thread_local6.C
index 2810efaea93..0009726626b 100644
--- a/gcc/testsuite/g++.dg/tls/thread_local6.C
+++ b/gcc/testsuite/g++.dg/tls/thread_local6.C
@@ -22,6 +22,7 @@ void f()
void *thread_main(void *)
{
f(); f(); f();
+ return 0;
}
int main()
diff --git a/gcc/testsuite/g++.dg/tls/thread_local6g.C b/gcc/testsuite/g++.dg/tls/thread_local6g.C
index b8f9cdf1dea..d357ca4fa1f 100644
--- a/gcc/testsuite/g++.dg/tls/thread_local6g.C
+++ b/gcc/testsuite/g++.dg/tls/thread_local6g.C
@@ -20,6 +20,7 @@ thread_local A a;
void *thread_main(void *)
{
A* ap = &a;
+ return 0;
}
int main()
diff --git a/gcc/testsuite/g++.dg/tm/cgraph_edge.C b/gcc/testsuite/g++.dg/tm/cgraph_edge.C
index d4c8f77fe89..b2649e8f521 100644
--- a/gcc/testsuite/g++.dg/tm/cgraph_edge.C
+++ b/gcc/testsuite/g++.dg/tm/cgraph_edge.C
@@ -1,5 +1,6 @@
// { dg-do compile }
// { dg-options "-fgnu-tm -O3" }
+// { dg-additional-options "-Wno-return-type" }
template<typename _InputIterator, typename _Distance> inline void advance(_InputIterator& __i, _Distance __n)
{
diff --git a/gcc/testsuite/g++.dg/tm/pr46646.C b/gcc/testsuite/g++.dg/tm/pr46646.C
index 9431615b0fb..be378b0bd5c 100644
--- a/gcc/testsuite/g++.dg/tm/pr46646.C
+++ b/gcc/testsuite/g++.dg/tm/pr46646.C
@@ -1,5 +1,6 @@
// { dg-do compile }
// { dg-options "-fgnu-tm -O0"}
+// { dg-additional-options "-Wno-return-type" }
namespace std __attribute__ ((__visibility__ ("default"))) {
template<class _T1, class _T2>
diff --git a/gcc/testsuite/g++.dg/tm/pr47554.C b/gcc/testsuite/g++.dg/tm/pr47554.C
index 28841bb15ad..41222dcc47f 100644
--- a/gcc/testsuite/g++.dg/tm/pr47554.C
+++ b/gcc/testsuite/g++.dg/tm/pr47554.C
@@ -11,6 +11,8 @@ class list
}
const list& _M_get_Node_allocator() const
{
+ static list l;
+ return l;
}
list _M_get_Tp_allocator() const
{
diff --git a/gcc/testsuite/g++.dg/tm/pr47573.C b/gcc/testsuite/g++.dg/tm/pr47573.C
index 1fd26896c5d..7410533a5f5 100644
--- a/gcc/testsuite/g++.dg/tm/pr47573.C
+++ b/gcc/testsuite/g++.dg/tm/pr47573.C
@@ -25,4 +25,5 @@ template<typename _Alloc = allocator<char> > class basic_string
int getStringHeight()
{
basic_string<> tmp;
+ return 0;
}
diff --git a/gcc/testsuite/g++.dg/tm/unsafe1.C b/gcc/testsuite/g++.dg/tm/unsafe1.C
index 91dd7b110ec..49dd564894a 100644
--- a/gcc/testsuite/g++.dg/tm/unsafe1.C
+++ b/gcc/testsuite/g++.dg/tm/unsafe1.C
@@ -4,7 +4,7 @@
struct S {
virtual ~S();
};
-int f() transaction_safe {
+void f() transaction_safe {
S s; // { dg-error "unsafe" "invocation of unsafe destructor" }
}
diff --git a/gcc/testsuite/g++.dg/tm/unsafe2.C b/gcc/testsuite/g++.dg/tm/unsafe2.C
index 1b81b310057..b0425ac6cee 100644
--- a/gcc/testsuite/g++.dg/tm/unsafe2.C
+++ b/gcc/testsuite/g++.dg/tm/unsafe2.C
@@ -6,7 +6,7 @@ void f(T) transaction_safe;
template<>
void f(bool); // not transaction-safe
-int g() transaction_safe
+void g() transaction_safe
{
f(42); // OK
f(true); // { dg-error "unsafe" }
diff --git a/gcc/testsuite/g++.dg/torture/20070621-1.C b/gcc/testsuite/g++.dg/torture/20070621-1.C
index 9bd8cc38c44..d8a6a76b6b0 100644
--- a/gcc/testsuite/g++.dg/torture/20070621-1.C
+++ b/gcc/testsuite/g++.dg/torture/20070621-1.C
@@ -1,4 +1,5 @@
// { dg-do compile }
+// { dg-additional-options "-Wno-return-type" }
/* Reduced from libstdc++-v3/testsuite/25_algorithms/equal/1.cc
1.2.ii: In function 'void test1()':
diff --git a/gcc/testsuite/g++.dg/torture/20090329-1.C b/gcc/testsuite/g++.dg/torture/20090329-1.C
index 0274a1944e5..6839931a247 100644
--- a/gcc/testsuite/g++.dg/torture/20090329-1.C
+++ b/gcc/testsuite/g++.dg/torture/20090329-1.C
@@ -1,4 +1,5 @@
/* { dg-do compile } */
+/* { dg-additional-options "-Wno-return-type" } */
struct input_iterator_tag { };
template<typename _Category, typename _Tp, typename _Distance = long, typename _Pointer = _Tp*, typename _Reference = _Tp&>
diff --git a/gcc/testsuite/g++.dg/torture/20141013.C b/gcc/testsuite/g++.dg/torture/20141013.C
index 82aacd6317e..e6c83ffab21 100644
--- a/gcc/testsuite/g++.dg/torture/20141013.C
+++ b/gcc/testsuite/g++.dg/torture/20141013.C
@@ -1,4 +1,5 @@
/* { dg-options "-fno-short-enums" } */
+/* { dg-additional-options "-Wno-return-type" } */
enum
{
_sch_isdigit = 0x0004,
diff --git a/gcc/testsuite/g++.dg/torture/Wsizeof-pointer-memaccess1.C b/gcc/testsuite/g++.dg/torture/Wsizeof-pointer-memaccess1.C
index c72532be4f4..5bc5c4ca859 100644
--- a/gcc/testsuite/g++.dg/torture/Wsizeof-pointer-memaccess1.C
+++ b/gcc/testsuite/g++.dg/torture/Wsizeof-pointer-memaccess1.C
@@ -1,6 +1,6 @@
// Test -Wsizeof-pointer-memaccess warnings.
// { dg-do compile }
-// { dg-options "-Wall -Wno-sizeof-array-argument -Wno-stringop-overflow" }
+// { dg-options "-Wall -Wno-sizeof-array-argument -Wno-stringop-overflow -Wno-stringop-truncation" }
// Test just twice, once with -O0 non-fortified, once with -O2 fortified.
// { dg-skip-if "" { *-*-* } { "*" } { "-O0" "-O2" } }
// { dg-skip-if "" { *-*-* } { "-flto" } { "" } }
@@ -698,12 +698,17 @@ f4 (char *x, char **y, int z, char w[64])
strncat (w, s2, sizeof (w)); // { dg-warning "call is the same expression as the destination; did you mean to provide an explicit length" }
stpncpy (w, s1, sizeof (w)); // { dg-warning "call is the same expression as the destination; did you mean to provide an explicit length" }
- // These are correct, no warning.
const char s3[] = "foobarbaz";
const char s4[] = "abcde12345678";
- strncpy (x, s3, sizeof (s3));
- strncat (x, s4, sizeof (s4));
- stpncpy (x, s3, sizeof (s3));
+
+ // These are pointless when the destination is large enough, and
+ // cause overflow otherwise. They might as well be replaced by
+ // strcpy() or memcpy().
+ strncpy (x, s3, sizeof (s3)); // { dg-warning "call is the same expression as the source; did you mean to use the size of the destination?" }
+ strncat (x, s4, sizeof (s4)); // { dg-warning "call is the same expression as the source; did you mean to use the size of the destination?" }
+ stpncpy (x, s3, sizeof (s3)); // { dg-warning "call is the same expression as the source; did you mean to use the size of the destination?" }
+
+ // These are safe, no warning.
y[1] = strndup (s3, sizeof (s3));
z += strncmp (s3, s4, sizeof (s3));
z += strncmp (s3, s4, sizeof (s4));
diff --git a/gcc/testsuite/g++.dg/torture/Wsizeof-pointer-memaccess2.C b/gcc/testsuite/g++.dg/torture/Wsizeof-pointer-memaccess2.C
index a216f470333..f2c864b0b24 100644
--- a/gcc/testsuite/g++.dg/torture/Wsizeof-pointer-memaccess2.C
+++ b/gcc/testsuite/g++.dg/torture/Wsizeof-pointer-memaccess2.C
@@ -1,6 +1,6 @@
// Test -Wsizeof-pointer-memaccess warnings.
// { dg-do compile }
-// { dg-options "-Wall -Wno-sizeof-array-argument -Wno-stringop-overflow" }
+// { dg-options "-Wall -Wno-sizeof-array-argument -Wno-stringop-overflow -Wno-stringop-truncation" }
// Test just twice, once with -O0 non-fortified, once with -O2 fortified,
// suppressing buffer overflow warnings.
// { dg-skip-if "" { *-*-* } { "*" } { "-O0" "-O2" } }
@@ -703,12 +703,13 @@ f4 (char *x, char **y, int z, char w[64])
strncat (w, s2, sizeof (w)); // { dg-warning "call is the same expression as the destination; did you mean to provide an explicit length" }
stpncpy (w, s1, sizeof (w)); // { dg-warning "call is the same expression as the destination; did you mean to provide an explicit length" }
- // These are correct, no warning.
const char s3[] = "foobarbaz";
const char s4[] = "abcde12345678";
- strncpy (x, s3, sizeof (s3));
- strncat (x, s4, sizeof (s4));
- stpncpy (x, s3, sizeof (s3));
+ strncpy (x, s3, sizeof (s3)); // { dg-warning "call is the same expression as the source; did you mean to use the size of the destination" }
+ strncat (x, s4, sizeof (s4)); // { dg-warning "call is the same expression as the source; did you mean to use the size of the destination" }
+ stpncpy (x, s3, sizeof (s3)); // { dg-warning "call is the same expression as the source; did you mean to use the size of the destination" }
+
+ // These are safe, no warning.
y[1] = strndup (s3, sizeof (s3));
z += strncmp (s3, s4, sizeof (s3));
z += strncmp (s3, s4, sizeof (s4));
diff --git a/gcc/testsuite/g++.dg/torture/pr33134.C b/gcc/testsuite/g++.dg/torture/pr33134.C
index 43482c7fc04..64e678c6a98 100644
--- a/gcc/testsuite/g++.dg/torture/pr33134.C
+++ b/gcc/testsuite/g++.dg/torture/pr33134.C
@@ -18,4 +18,5 @@ bool fxsaveGIF (FXStream &store)
c1 = 0x80;
c1 |= (bitsperpixel - 1) << 4;
store << c1;
+ return true;
}
diff --git a/gcc/testsuite/g++.dg/torture/pr33340.C b/gcc/testsuite/g++.dg/torture/pr33340.C
index bac882156b5..44d3c8e0d0f 100644
--- a/gcc/testsuite/g++.dg/torture/pr33340.C
+++ b/gcc/testsuite/g++.dg/torture/pr33340.C
@@ -1,4 +1,6 @@
-void* operator new(__SIZE_TYPE__, void* __p) { }
+// { dg-additional-options "-Wno-return-type" }
+
+void* operator new(__SIZE_TYPE__, void* __p) {}
struct auto_ptr {
int* p;
diff --git a/gcc/testsuite/g++.dg/torture/pr33627.C b/gcc/testsuite/g++.dg/torture/pr33627.C
index a14e345517f..9265bd95f05 100644
--- a/gcc/testsuite/g++.dg/torture/pr33627.C
+++ b/gcc/testsuite/g++.dg/torture/pr33627.C
@@ -10,9 +10,9 @@ class PX_ChangeRecord;
class pf_Frag {
public:
typedef enum _PFType { PFT_Object } PFType;
- inline PFType getType(void) const { }
- inline pf_Frag * getNext(void) const { }
- PT_DocPosition getPos(void) const { }
+ inline PFType getType(void) const { return PFType(); }
+ inline pf_Frag * getNext(void) const { return 0; }
+ PT_DocPosition getPos(void) const { return PT_DocPosition(); }
};
class pf_Fragments {
public:
diff --git a/gcc/testsuite/g++.dg/torture/pr34222.C b/gcc/testsuite/g++.dg/torture/pr34222.C
index 130896dc9da..dfcb5d7a952 100644
--- a/gcc/testsuite/g++.dg/torture/pr34222.C
+++ b/gcc/testsuite/g++.dg/torture/pr34222.C
@@ -47,6 +47,7 @@ using namespace std;
static float readFloat(ifstream& in) {
float f;
in.read((char*) &f, sizeof(float));
+ return f;
}
Mat4f readMeshMatrix(ifstream& in, int nBytes) {
float m00 = readFloat(in);
diff --git a/gcc/testsuite/g++.dg/torture/pr34241.C b/gcc/testsuite/g++.dg/torture/pr34241.C
index 70f186c5b9f..0ea9cfc1a0c 100644
--- a/gcc/testsuite/g++.dg/torture/pr34241.C
+++ b/gcc/testsuite/g++.dg/torture/pr34241.C
@@ -15,4 +15,5 @@ struct A
A test ()
{
const A a (42, true);
+ return a;
}
diff --git a/gcc/testsuite/g++.dg/torture/pr34641.C b/gcc/testsuite/g++.dg/torture/pr34641.C
index 0cf50776281..72a98ad8f46 100644
--- a/gcc/testsuite/g++.dg/torture/pr34641.C
+++ b/gcc/testsuite/g++.dg/torture/pr34641.C
@@ -2,6 +2,7 @@
// { dg-require-effective-target fpic }
// { dg-require-visibility "" }
// { dg-options "-fPIC" }
+/* { dg-additional-options "-Wno-return-type" } */
typedef __SIZE_TYPE__ size_t;
diff --git a/gcc/testsuite/g++.dg/torture/pr34850.C b/gcc/testsuite/g++.dg/torture/pr34850.C
index c33dbfb5c93..e41620b739d 100644
--- a/gcc/testsuite/g++.dg/torture/pr34850.C
+++ b/gcc/testsuite/g++.dg/torture/pr34850.C
@@ -1,6 +1,7 @@
/* { dg-do compile } */
/* { dg-skip-if "" { *-*-* } { "-O0" } { "" } } */
/* { dg-options "-ffat-lto-objects" } */
+/* { dg-additional-options "-Wno-return-type" } */
typedef unsigned char uint8_t;
typedef unsigned int uint32_t;
@@ -12,7 +13,7 @@ extern "C" {
extern __inline __attribute__ ((__always_inline__)) __attribute__ ((__gnu_inline__, __artificial__))
void * memset (void *__dest, int __ch, size_t __len) throw () {
if (__builtin_constant_p (__len) && __len == 0)
- __warn_memset_zero_len (); /* { dg-warning "declared with attribute warning" } */
+ __warn_memset_zero_len ();
}
}
inline void clear_mem(void* ptr, u32bit n) {
diff --git a/gcc/testsuite/g++.dg/torture/pr35164-1.C b/gcc/testsuite/g++.dg/torture/pr35164-1.C
index 1704c222656..faa829f5f6f 100644
--- a/gcc/testsuite/g++.dg/torture/pr35164-1.C
+++ b/gcc/testsuite/g++.dg/torture/pr35164-1.C
@@ -1,3 +1,5 @@
+/* { dg-additional-options "-Wno-return-type" } */
+
typedef __SIZE_TYPE__ size_t;
template<typename _Iterator, typename _Container> class __normal_iterator {
public:
diff --git a/gcc/testsuite/g++.dg/torture/pr36745.C b/gcc/testsuite/g++.dg/torture/pr36745.C
index 53845aaa78e..56fa9d8028d 100644
--- a/gcc/testsuite/g++.dg/torture/pr36745.C
+++ b/gcc/testsuite/g++.dg/torture/pr36745.C
@@ -1,6 +1,7 @@
/* PR target/36745 */
/* { dg-do compile } */
/* { dg-options "-O2 -fPIC" } */
+/* { dg-additional-options "-Wno-return-type" } */
/* { dg-require-effective-target fpic } */
typedef __SIZE_TYPE__ size_t;
diff --git a/gcc/testsuite/g++.dg/torture/pr38705.C b/gcc/testsuite/g++.dg/torture/pr38705.C
index 8058d3a3979..07dec5594ca 100644
--- a/gcc/testsuite/g++.dg/torture/pr38705.C
+++ b/gcc/testsuite/g++.dg/torture/pr38705.C
@@ -24,4 +24,5 @@ S::bar () const
{
foo (u);
foo (t);
+ return 0;
}
diff --git a/gcc/testsuite/g++.dg/torture/pr38811.C b/gcc/testsuite/g++.dg/torture/pr38811.C
index e9b304da6e5..5ced0781497 100644
--- a/gcc/testsuite/g++.dg/torture/pr38811.C
+++ b/gcc/testsuite/g++.dg/torture/pr38811.C
@@ -17,7 +17,7 @@ public:
AbcExtent2d(const AbcA2d & rMin, const AbcA2d & rMax);
AbcA2d ClampPoint2d(const AbcA2d & rPoint) const;
AbcA2d GetMax() const { return m_vMax; }
- AbcA2d GetMin() const { }
+ AbcA2d GetMin() const { return AbcA2d(); }
AbcA2d Evaluate(double dNormalizedX, double dNormalizedY) const;
};
inline AbcExtent2d::AbcExtent2d(const AbcA2d & rMin, const AbcA2d & rMax)
@@ -69,5 +69,7 @@ long AbcAbcdTracer::TestIsoAbcde(AbcZyParamType eZyParam, double dParam,
if (!DoesPointLieOnAbcde(sUV,0))
;
}
+
+ return 0;
}
diff --git a/gcc/testsuite/g++.dg/torture/pr39362.C b/gcc/testsuite/g++.dg/torture/pr39362.C
index 554f9d06bac..6599fdd70c0 100644
--- a/gcc/testsuite/g++.dg/torture/pr39362.C
+++ b/gcc/testsuite/g++.dg/torture/pr39362.C
@@ -1,4 +1,5 @@
/* { dg-do compile } */
+/* { dg-additional-options "-Wno-return-type" } */
void *fastMalloc (int n);
void fastFree (void *p);
diff --git a/gcc/testsuite/g++.dg/torture/pr39732.C b/gcc/testsuite/g++.dg/torture/pr39732.C
index 4b3975b8db9..44edbf5ba90 100644
--- a/gcc/testsuite/g++.dg/torture/pr39732.C
+++ b/gcc/testsuite/g++.dg/torture/pr39732.C
@@ -27,4 +27,5 @@ int f(void)
{
basic_ostream<char, char_traits<char> > os;
copy(ostream_iterator<const int>(os, ","));
+ return 0;
}
diff --git a/gcc/testsuite/g++.dg/torture/pr40991.C b/gcc/testsuite/g++.dg/torture/pr40991.C
index c08124024a7..bce5f7e6893 100644
--- a/gcc/testsuite/g++.dg/torture/pr40991.C
+++ b/gcc/testsuite/g++.dg/torture/pr40991.C
@@ -1,4 +1,5 @@
/* { dg-options "-std=gnu++0x" } */
+/* { dg-additional-options "-Wno-return-type" } */
typedef __SIZE_TYPE__ size_t;
namespace std __attribute__ ((__visibility__ ("default"))) {
diff --git a/gcc/testsuite/g++.dg/torture/pr41775.C b/gcc/testsuite/g++.dg/torture/pr41775.C
index 3d8548e3fb0..ca24abd0229 100644
--- a/gcc/testsuite/g++.dg/torture/pr41775.C
+++ b/gcc/testsuite/g++.dg/torture/pr41775.C
@@ -1,4 +1,5 @@
/* { dg-do compile } */
+/* { dg-additional-options "-Wno-return-type" } */
/* { dg-require-visibility "" } */
typedef unsigned int size_t;
diff --git a/gcc/testsuite/g++.dg/torture/pr42183.C b/gcc/testsuite/g++.dg/torture/pr42183.C
index 375b37f0c66..2ae415f0b5a 100644
--- a/gcc/testsuite/g++.dg/torture/pr42183.C
+++ b/gcc/testsuite/g++.dg/torture/pr42183.C
@@ -1,4 +1,5 @@
// { dg-do compile }
+// { dg-additional-options "-Wno-return-type" }
class IntSize {
public:
diff --git a/gcc/testsuite/g++.dg/torture/pr42450.C b/gcc/testsuite/g++.dg/torture/pr42450.C
index f630fa2b7e2..5813acb5cb4 100644
--- a/gcc/testsuite/g++.dg/torture/pr42450.C
+++ b/gcc/testsuite/g++.dg/torture/pr42450.C
@@ -1,4 +1,5 @@
/* { dg-do compile } */
+// { dg-additional-options "-Wno-return-type" }
template < typename > class basic_stringstream;
diff --git a/gcc/testsuite/g++.dg/torture/pr42704.C b/gcc/testsuite/g++.dg/torture/pr42704.C
index 735b1e7bdea..d1cbc8225cd 100644
--- a/gcc/testsuite/g++.dg/torture/pr42704.C
+++ b/gcc/testsuite/g++.dg/torture/pr42704.C
@@ -1,4 +1,5 @@
/* { dg-do compile } */
+/* { dg-additional-options "-Wno-return-type" } */
typedef int PRInt32;
class nsTreeRows {
diff --git a/gcc/testsuite/g++.dg/torture/pr42760.C b/gcc/testsuite/g++.dg/torture/pr42760.C
index be85f7fc408..084fcfdfa68 100644
--- a/gcc/testsuite/g++.dg/torture/pr42760.C
+++ b/gcc/testsuite/g++.dg/torture/pr42760.C
@@ -16,6 +16,7 @@ baz (T x, T y, U z)
*z = *x;
++z;
}
+ return U();
};
template <typename T, typename U>
@@ -23,6 +24,7 @@ U
bar (T x, T y, U z)
{
baz (A <T>::b (x), A <T>::b (y), A <U>::b (z));
+ return U();
}
struct C
diff --git a/gcc/testsuite/g++.dg/torture/pr42773.C b/gcc/testsuite/g++.dg/torture/pr42773.C
index 478ad278aa6..5d887cefed1 100644
--- a/gcc/testsuite/g++.dg/torture/pr42773.C
+++ b/gcc/testsuite/g++.dg/torture/pr42773.C
@@ -51,4 +51,5 @@ class Cell {
};
QValueList<Cell*> Cell::obscuringCells() const {
QValueList<Cell*> empty;
+ return QValueList<Cell*>();
}
diff --git a/gcc/testsuite/g++.dg/torture/pr42883.C b/gcc/testsuite/g++.dg/torture/pr42883.C
index f164c3781f5..ad2ae251637 100644
--- a/gcc/testsuite/g++.dg/torture/pr42883.C
+++ b/gcc/testsuite/g++.dg/torture/pr42883.C
@@ -1,4 +1,5 @@
// { dg-do compile }
+// { dg-additional-options "-Wno-return-type" }
typedef __SIZE_TYPE__ size_t;
namespace __gnu_cxx __attribute__ ((__visibility__ ("default"))) {
diff --git a/gcc/testsuite/g++.dg/torture/pr43905.C b/gcc/testsuite/g++.dg/torture/pr43905.C
index 0e49a32a1dd..259ea785041 100644
--- a/gcc/testsuite/g++.dg/torture/pr43905.C
+++ b/gcc/testsuite/g++.dg/torture/pr43905.C
@@ -2,9 +2,11 @@ extern void sf ( __const char *);
struct Matrix{
int operator[](int n){
sf ( __PRETTY_FUNCTION__);
+ return 0;
}
int operator[](int n)const{
sf ( __PRETTY_FUNCTION__);
+ return 0;
}
};
void calcmy(Matrix const &b, Matrix &c, int k){
diff --git a/gcc/testsuite/g++.dg/torture/pr44148.C b/gcc/testsuite/g++.dg/torture/pr44148.C
index a60ba9aa3a6..cebfccba5d6 100644
--- a/gcc/testsuite/g++.dg/torture/pr44148.C
+++ b/gcc/testsuite/g++.dg/torture/pr44148.C
@@ -2,6 +2,7 @@
// { dg-do compile }
// { dg-options "" }
// { dg-options "-fpic" { target fpic } }
+// { dg-additional-options "-Wno-return-type" }
template <typename T> struct S2
{
diff --git a/gcc/testsuite/g++.dg/torture/pr44295.C b/gcc/testsuite/g++.dg/torture/pr44295.C
index 8169bb0a873..7525dc41a78 100644
--- a/gcc/testsuite/g++.dg/torture/pr44295.C
+++ b/gcc/testsuite/g++.dg/torture/pr44295.C
@@ -1,4 +1,6 @@
/* { dg-do compile } */
+/* { dg-additional-options "-Wno-return-type" } */
+
extern "C" {
typedef __SIZE_TYPE__ size_t;
typedef struct {
diff --git a/gcc/testsuite/g++.dg/torture/pr44357.C b/gcc/testsuite/g++.dg/torture/pr44357.C
index 3380350e81b..2ffb6d07f7e 100644
--- a/gcc/testsuite/g++.dg/torture/pr44357.C
+++ b/gcc/testsuite/g++.dg/torture/pr44357.C
@@ -1,4 +1,6 @@
/* { dg-do compile } */
+/* { dg-additional-options "-Wno-return-type" } */
+
extern "C"
{
typedef long unsigned int size_t;
diff --git a/gcc/testsuite/g++.dg/torture/pr44813.C b/gcc/testsuite/g++.dg/torture/pr44813.C
index 1dc01b06a17..6ec1b60adc4 100644
--- a/gcc/testsuite/g++.dg/torture/pr44813.C
+++ b/gcc/testsuite/g++.dg/torture/pr44813.C
@@ -1,3 +1,5 @@
+/* { dg-additional-options "-Wno-return-type" } */
+
typedef unsigned int PRUint32;
typedef int PRInt32;
typedef unsigned long PRUint64;
diff --git a/gcc/testsuite/g++.dg/torture/pr45580.C b/gcc/testsuite/g++.dg/torture/pr45580.C
index c3af4910aeb..5728c0382bb 100644
--- a/gcc/testsuite/g++.dg/torture/pr45580.C
+++ b/gcc/testsuite/g++.dg/torture/pr45580.C
@@ -1,4 +1,5 @@
// { dg-do compile }
+// { dg-additional-options "-Wno-return-type" }
namespace std {
typedef __SIZE_TYPE__ size_t;
diff --git a/gcc/testsuite/g++.dg/torture/pr45874.C b/gcc/testsuite/g++.dg/torture/pr45874.C
index 70965ff574e..37c2ccc8a3a 100644
--- a/gcc/testsuite/g++.dg/torture/pr45874.C
+++ b/gcc/testsuite/g++.dg/torture/pr45874.C
@@ -48,6 +48,7 @@ Status Mpeg2FrameConstructor::ParsePictureHeader(Ipp8u *buf, Ipp32s iLen, Mpeg2T
bs.SkipBits(10);
}
}
+ return Status();
}
void BitstreamReader::SkipBits(Ipp32s iNum) {
if (iNum <= m_iReadyBits) {
diff --git a/gcc/testsuite/g++.dg/torture/pr45877.C b/gcc/testsuite/g++.dg/torture/pr45877.C
index 9af6ae99985..1754fb5cb5c 100644
--- a/gcc/testsuite/g++.dg/torture/pr45877.C
+++ b/gcc/testsuite/g++.dg/torture/pr45877.C
@@ -1,4 +1,5 @@
// { dg-do compile }
+// { dg-additional-options "-Wno-return-type" }
namespace std __attribute__ ((__visibility__ ("default")))
{
diff --git a/gcc/testsuite/g++.dg/torture/pr46383.C b/gcc/testsuite/g++.dg/torture/pr46383.C
index e4810c5ada8..ee00f358ea2 100644
--- a/gcc/testsuite/g++.dg/torture/pr46383.C
+++ b/gcc/testsuite/g++.dg/torture/pr46383.C
@@ -1,4 +1,5 @@
// { dg-do compile }
+// { dg-additional-options "-Wno-return-type" }
namespace std {
template<class,class>struct pair{};
diff --git a/gcc/testsuite/g++.dg/torture/pr46469.C b/gcc/testsuite/g++.dg/torture/pr46469.C
index 8212ea4f9e2..c16a0f277a7 100644
--- a/gcc/testsuite/g++.dg/torture/pr46469.C
+++ b/gcc/testsuite/g++.dg/torture/pr46469.C
@@ -1,5 +1,6 @@
extern "C" __inline __attribute__ ((__gnu_inline__)) int pthread_equal ()
{
+ return 0;
}
static
@@ -10,4 +11,5 @@ static
int identifierByPthreadHandle ()
{
pthread_equal ();
+ return 0;
}
diff --git a/gcc/testsuite/g++.dg/torture/pr47313.C b/gcc/testsuite/g++.dg/torture/pr47313.C
index c10f558a3eb..787445ed595 100644
--- a/gcc/testsuite/g++.dg/torture/pr47313.C
+++ b/gcc/testsuite/g++.dg/torture/pr47313.C
@@ -1,4 +1,5 @@
// { dg-do compile }
+// { dg-additional-options "-Wno-return-type" }
namespace internal {
template < class DSC, bool Const > struct CC_iterator {
diff --git a/gcc/testsuite/g++.dg/torture/pr48271.C b/gcc/testsuite/g++.dg/torture/pr48271.C
index 5b60ccd768c..2e3eb5bfd05 100644
--- a/gcc/testsuite/g++.dg/torture/pr48271.C
+++ b/gcc/testsuite/g++.dg/torture/pr48271.C
@@ -1,5 +1,6 @@
// { dg-do compile }
// { dg-options "-ftree-vrp -fno-guess-branch-probability -fnon-call-exceptions" }
+// { dg-additional-options "-Wno-return-type" }
void *xalloc ();
void xfree (void *);
diff --git a/gcc/testsuite/g++.dg/torture/pr48695.C b/gcc/testsuite/g++.dg/torture/pr48695.C
index 44e6c771dba..2f2953d9999 100644
--- a/gcc/testsuite/g++.dg/torture/pr48695.C
+++ b/gcc/testsuite/g++.dg/torture/pr48695.C
@@ -1,4 +1,5 @@
// { dg-do run }
+/* { dg-options "-fcheck-new" } */
typedef __SIZE_TYPE__ size_t;
diff --git a/gcc/testsuite/g++.dg/torture/pr49615.C b/gcc/testsuite/g++.dg/torture/pr49615.C
index 98a2f95b8b3..bc5182b2716 100644
--- a/gcc/testsuite/g++.dg/torture/pr49615.C
+++ b/gcc/testsuite/g++.dg/torture/pr49615.C
@@ -5,6 +5,7 @@ template <class T>
static inline bool Dispatch (T* obj, void (T::*func) ())
{
(obj->*func) ();
+ return true;
}
class C
{
@@ -21,6 +22,8 @@ bool C::f (int n)
case 1:
b = Dispatch (this, &C::g);
}
+
+ return true;
}
void C::g ()
{
diff --git a/gcc/testsuite/g++.dg/torture/pr49770.C b/gcc/testsuite/g++.dg/torture/pr49770.C
index 7eac9e0d9bb..794bf3dba19 100644
--- a/gcc/testsuite/g++.dg/torture/pr49770.C
+++ b/gcc/testsuite/g++.dg/torture/pr49770.C
@@ -79,8 +79,10 @@ test01 ()
__builtin_abort ();
}
+int
main ()
{
test01 ();
+ return 0;
}
diff --git a/gcc/testsuite/g++.dg/torture/pr49938.C b/gcc/testsuite/g++.dg/torture/pr49938.C
index 91804f4b0d5..d2c73e73871 100644
--- a/gcc/testsuite/g++.dg/torture/pr49938.C
+++ b/gcc/testsuite/g++.dg/torture/pr49938.C
@@ -43,6 +43,8 @@ namespace net {
}
scoped_array<unsigned char> signed_data(new unsigned
char[signed_data_len]);
+
+ return true;
}
}
diff --git a/gcc/testsuite/g++.dg/torture/pr51436.C b/gcc/testsuite/g++.dg/torture/pr51436.C
index 43d6c730032..b01a33ca243 100644
--- a/gcc/testsuite/g++.dg/torture/pr51436.C
+++ b/gcc/testsuite/g++.dg/torture/pr51436.C
@@ -1,4 +1,5 @@
/* { dg-do compile } */
+/* { dg-additional-options "-Wno-return-type" } */
typedef __SIZE_TYPE__ size_t;
extern "C" void *memcpy (void *, __const void *, size_t);
diff --git a/gcc/testsuite/g++.dg/torture/pr51482.C b/gcc/testsuite/g++.dg/torture/pr51482.C
index 28435919f46..4032703f720 100644
--- a/gcc/testsuite/g++.dg/torture/pr51482.C
+++ b/gcc/testsuite/g++.dg/torture/pr51482.C
@@ -27,4 +27,5 @@ WVECT * anim_track_bez_wvect::tangent(int kn, BEZIER_KEY_CLASS key_class, WVECT
p_tn->y = (g1.y + g3.y*bp1)*tn1;
p_tn->z = (g1.z + g3.z*bp1)*tn1;
p_tn->w = (g1.w + g3.w*bp1)*tn1;
+ return 0;
}
diff --git a/gcc/testsuite/g++.dg/torture/pr51737.C b/gcc/testsuite/g++.dg/torture/pr51737.C
index ff77edbf3d3..90edafffc88 100644
--- a/gcc/testsuite/g++.dg/torture/pr51737.C
+++ b/gcc/testsuite/g++.dg/torture/pr51737.C
@@ -20,6 +20,7 @@ struct id_state {
};
void * id_state::start_file(void) {
intrusive_ptr<file_info> parent;
+ return 0;
}
struct id_generation_data : intrusive_base<id_generation_data> {
void child_length() const {}
diff --git a/gcc/testsuite/g++.dg/torture/pr51959.C b/gcc/testsuite/g++.dg/torture/pr51959.C
index da0be71ebd8..1c98f4f6cc8 100644
--- a/gcc/testsuite/g++.dg/torture/pr51959.C
+++ b/gcc/testsuite/g++.dg/torture/pr51959.C
@@ -1,4 +1,5 @@
// { dg-do compile }
+// { dg-additional-options "-Wno-return-type" }
namespace std {
typedef __SIZE_TYPE__ size_t;
diff --git a/gcc/testsuite/g++.dg/torture/pr52772.C b/gcc/testsuite/g++.dg/torture/pr52772.C
index 810e6579fba..0109f137676 100644
--- a/gcc/testsuite/g++.dg/torture/pr52772.C
+++ b/gcc/testsuite/g++.dg/torture/pr52772.C
@@ -46,7 +46,7 @@ class c5 : public c2 {
};
class c6 {
- public: int get() const {};
+ public: int get() const { return 0; };
};
class c7 {
diff --git a/gcc/testsuite/g++.dg/torture/pr52918-2.C b/gcc/testsuite/g++.dg/torture/pr52918-2.C
index ba31295e41e..185444bdd0e 100644
--- a/gcc/testsuite/g++.dg/torture/pr52918-2.C
+++ b/gcc/testsuite/g++.dg/torture/pr52918-2.C
@@ -37,4 +37,5 @@ void * __cxa_allocate_exception(size_t thrown_size) throw()
void *ret;
if (! ret)
__scoped_lock sentry(emergency_mutex);
+ return 0;
}
diff --git a/gcc/testsuite/g++.dg/torture/pr53011.C b/gcc/testsuite/g++.dg/torture/pr53011.C
index 2cd8a60332d..4882f23b19e 100644
--- a/gcc/testsuite/g++.dg/torture/pr53011.C
+++ b/gcc/testsuite/g++.dg/torture/pr53011.C
@@ -1,4 +1,5 @@
// { dg-do compile }
+// { dg-additional-options "-Wno-return-type" }
extern "C" class WvFastString;
typedef WvFastString& WvStringParm;
diff --git a/gcc/testsuite/g++.dg/torture/pr53602.C b/gcc/testsuite/g++.dg/torture/pr53602.C
index 1bb9cf4faaf..67d9ed848f0 100644
--- a/gcc/testsuite/g++.dg/torture/pr53602.C
+++ b/gcc/testsuite/g++.dg/torture/pr53602.C
@@ -1,5 +1,6 @@
// { dg-do compile }
// { dg-options "-std=c++11" }
+// { dg-additional-options "-Wno-return-type" }
namespace std
{
diff --git a/gcc/testsuite/g++.dg/torture/pr53752.C b/gcc/testsuite/g++.dg/torture/pr53752.C
index fd6c687667c..61febdeb6b8 100644
--- a/gcc/testsuite/g++.dg/torture/pr53752.C
+++ b/gcc/testsuite/g++.dg/torture/pr53752.C
@@ -1,5 +1,6 @@
// { dg-do compile }
// { dg-options "-g" }
+// { dg-additional-options "-Wno-return-type" }
typedef unsigned int uint32_t;
typedef unsigned long int uint64_t;
diff --git a/gcc/testsuite/g++.dg/torture/pr54838.C b/gcc/testsuite/g++.dg/torture/pr54838.C
index 6d34d57a486..c3308210ebe 100644
--- a/gcc/testsuite/g++.dg/torture/pr54838.C
+++ b/gcc/testsuite/g++.dg/torture/pr54838.C
@@ -1,5 +1,6 @@
// { dg-do compile }
// { dg-options "-ftracer -fno-tree-dce -fno-tree-sra" }
+// { dg-additional-options "-Wno-return-type" }
struct bidirectional_iterator_tag
{};
diff --git a/gcc/testsuite/g++.dg/torture/pr54902.C b/gcc/testsuite/g++.dg/torture/pr54902.C
index 790ffe5fcb5..84707bd7136 100644
--- a/gcc/testsuite/g++.dg/torture/pr54902.C
+++ b/gcc/testsuite/g++.dg/torture/pr54902.C
@@ -1,4 +1,5 @@
// { dg-do compile }
+// { dg-additional-options "-Wno-return-type" }
namespace std __attribute__ ((__visibility__ ("default"))) {
template<typename _Iterator> struct iterator_traits {
diff --git a/gcc/testsuite/g++.dg/torture/pr56029.C b/gcc/testsuite/g++.dg/torture/pr56029.C
index ca4a82a0060..72ad59b4354 100644
--- a/gcc/testsuite/g++.dg/torture/pr56029.C
+++ b/gcc/testsuite/g++.dg/torture/pr56029.C
@@ -1,5 +1,6 @@
// PR tree-optimization/56029
// { dg-do compile }
+// { dg-additional-options "-Wno-return-type" }
template <class T>
struct DefaultDeleter
diff --git a/gcc/testsuite/g++.dg/torture/pr56768.C b/gcc/testsuite/g++.dg/torture/pr56768.C
index db504c500e0..b2be74f28e4 100644
--- a/gcc/testsuite/g++.dg/torture/pr56768.C
+++ b/gcc/testsuite/g++.dg/torture/pr56768.C
@@ -7,7 +7,7 @@ struct Iter
void operator++ ();
};
-bool operator!= (Iter &, Iter &) { }
+bool operator!= (Iter &, Iter &) { return true; }
struct Container
{
diff --git a/gcc/testsuite/g++.dg/torture/pr57107.C b/gcc/testsuite/g++.dg/torture/pr57107.C
index 516dec16fc5..4dbd32bd298 100644
--- a/gcc/testsuite/g++.dg/torture/pr57107.C
+++ b/gcc/testsuite/g++.dg/torture/pr57107.C
@@ -1,4 +1,5 @@
// { dg-do compile }
+// { dg-additional-options "-Wno-return-type" }
typedef long unsigned int size_t;
namespace std {
diff --git a/gcc/testsuite/g++.dg/torture/pr57140.C b/gcc/testsuite/g++.dg/torture/pr57140.C
index 2ea2f9c4d1b..654653d53aa 100644
--- a/gcc/testsuite/g++.dg/torture/pr57140.C
+++ b/gcc/testsuite/g++.dg/torture/pr57140.C
@@ -1,4 +1,5 @@
// { dg-do compile }
+// { dg-additional-options "-Wno-return-type" }
namespace std {
typedef long unsigned int size_t;
diff --git a/gcc/testsuite/g++.dg/torture/pr57235.C b/gcc/testsuite/g++.dg/torture/pr57235.C
index fd5663348e9..8a9e522a36a 100644
--- a/gcc/testsuite/g++.dg/torture/pr57235.C
+++ b/gcc/testsuite/g++.dg/torture/pr57235.C
@@ -1,4 +1,5 @@
// { dg-do compile }
+// { dg-additional-options "-Wno-return-type" }
namespace std
{
diff --git a/gcc/testsuite/g++.dg/torture/pr58252.C b/gcc/testsuite/g++.dg/torture/pr58252.C
index d38a7a7ea4b..db3270d9afb 100644
--- a/gcc/testsuite/g++.dg/torture/pr58252.C
+++ b/gcc/testsuite/g++.dg/torture/pr58252.C
@@ -1,5 +1,6 @@
// { dg-do compile }
// { dg-options "-fpermissive" }
+// { dg-additional-options "-Wno-return-type" }
typedef long unsigned int size_t;
typedef bool _CORBA_Boolean;
typedef unsigned int _CORBA_ULong;
diff --git a/gcc/testsuite/g++.dg/torture/pr58555.C b/gcc/testsuite/g++.dg/torture/pr58555.C
index ac5009a7b26..454d33d8740 100644
--- a/gcc/testsuite/g++.dg/torture/pr58555.C
+++ b/gcc/testsuite/g++.dg/torture/pr58555.C
@@ -1,4 +1,6 @@
/* { dg-do compile } */
+/* { dg-additional-options "-Wno-return-type" } */
+
template <typename _Tp> _Tp *__addressof(_Tp &) {}
template <typename _Tp> class A {
public:
diff --git a/gcc/testsuite/g++.dg/torture/pr59208.C b/gcc/testsuite/g++.dg/torture/pr59208.C
index 3dc110c55e3..2b2ad6deea5 100644
--- a/gcc/testsuite/g++.dg/torture/pr59208.C
+++ b/gcc/testsuite/g++.dg/torture/pr59208.C
@@ -14,7 +14,7 @@ enum DebuggerType {};
C a;
DebuggerType b;
void operator==(A &, const A &);
-static A get_dbx_doc(A &p1) { p1 == 0; }
+static A get_dbx_doc(A &p1) { p1 == 0; return A(); }
void add_button() {
A c;
diff --git a/gcc/testsuite/g++.dg/torture/pr60438-1.C b/gcc/testsuite/g++.dg/torture/pr60438-1.C
index 748295aabe0..3f79dd770eb 100644
--- a/gcc/testsuite/g++.dg/torture/pr60438-1.C
+++ b/gcc/testsuite/g++.dg/torture/pr60438-1.C
@@ -23,4 +23,6 @@ foo (struct C *y, float x)
d.a = (int) (b * x);
}
baz (&d);
+
+ return 0;
}
diff --git a/gcc/testsuite/g++.dg/torture/pr60746.C b/gcc/testsuite/g++.dg/torture/pr60746.C
index 7ce6ebe6bc0..941e42ca783 100644
--- a/gcc/testsuite/g++.dg/torture/pr60746.C
+++ b/gcc/testsuite/g++.dg/torture/pr60746.C
@@ -20,4 +20,6 @@ Two::run ()
|| list_arry[4][orig].getSize () > 0)
{
}
+
+ return 0;
}
diff --git a/gcc/testsuite/g++.dg/torture/pr61554.C b/gcc/testsuite/g++.dg/torture/pr61554.C
index 6f609c8be09..90f8e85e303 100644
--- a/gcc/testsuite/g++.dg/torture/pr61554.C
+++ b/gcc/testsuite/g++.dg/torture/pr61554.C
@@ -1,4 +1,5 @@
// { dg-do compile }
+// { dg-additional-options "-Wno-return-type" }
struct A
{
diff --git a/gcc/testsuite/g++.dg/torture/pr63419.C b/gcc/testsuite/g++.dg/torture/pr63419.C
index 6d4c0745d41..88b117883b7 100644
--- a/gcc/testsuite/g++.dg/torture/pr63419.C
+++ b/gcc/testsuite/g++.dg/torture/pr63419.C
@@ -1,5 +1,5 @@
// { dg-do compile }
-// { dg-additional-options "-Wno-psabi" }
+// { dg-additional-options "-Wno-psabi -Wno-return-type" }
// Ignore warning on some powerpc-linux configurations.
// { dg-prune-output "non-standard ABI extension" }
diff --git a/gcc/testsuite/g++.dg/torture/pr63476.C b/gcc/testsuite/g++.dg/torture/pr63476.C
index 75ecc374065..c0c8ae44e85 100644
--- a/gcc/testsuite/g++.dg/torture/pr63476.C
+++ b/gcc/testsuite/g++.dg/torture/pr63476.C
@@ -1,5 +1,5 @@
// { dg-do compile }
-// { dg-additional-options "-std=gnu++11" }
+// { dg-additional-options "-std=gnu++11 -Wno-return-type" }
enum class nsresult;
class A;
diff --git a/gcc/testsuite/g++.dg/torture/pr63512.C b/gcc/testsuite/g++.dg/torture/pr63512.C
index 1c1899dc6d8..d08e6174168 100644
--- a/gcc/testsuite/g++.dg/torture/pr63512.C
+++ b/gcc/testsuite/g++.dg/torture/pr63512.C
@@ -43,4 +43,5 @@ C::m_fn3 (const int &, int &, int **)
if (a)
c.Range.m_fn1 ();
m_fn2 (semi, 0, b ? "" : a ? "alias declaration" : "using declaration");
+ return 0;
}
diff --git a/gcc/testsuite/g++.dg/torture/pr64282.C b/gcc/testsuite/g++.dg/torture/pr64282.C
index 48429884e9f..9696effc38d 100644
--- a/gcc/testsuite/g++.dg/torture/pr64282.C
+++ b/gcc/testsuite/g++.dg/torture/pr64282.C
@@ -53,7 +53,7 @@ public:
class H
{
void m_fn7 (const F &, bool &);
- bool m_fn8 (const D<F> &, const F &, F &);
+ void m_fn8 (const D<F> &, const F &, F &);
};
typedef A<int> CandPair;
class I
@@ -91,7 +91,7 @@ H::m_fn7 (const F &, bool &)
F h;
m_fn8 (g, f.first, h);
}
-bool
+void
H::m_fn8 (const D<F> &p1, const F &, F &)
{
F i;
diff --git a/gcc/testsuite/g++.dg/torture/pr64378.C b/gcc/testsuite/g++.dg/torture/pr64378.C
index 6770601eaf2..4d48cdc0946 100644
--- a/gcc/testsuite/g++.dg/torture/pr64378.C
+++ b/gcc/testsuite/g++.dg/torture/pr64378.C
@@ -5,7 +5,7 @@ struct data {
};
struct top {
- virtual int topf() {}
+ virtual int topf() { return 0; }
};
struct child1: top {
diff --git a/gcc/testsuite/g++.dg/torture/pr64565.C b/gcc/testsuite/g++.dg/torture/pr64565.C
index 42b0239c525..ea5ca14e23a 100644
--- a/gcc/testsuite/g++.dg/torture/pr64565.C
+++ b/gcc/testsuite/g++.dg/torture/pr64565.C
@@ -1,4 +1,6 @@
/* { dg-do compile } */
+/* { dg-additional-options "-Wno-return-type" } */
+
typedef enum
{
NS_OK
diff --git a/gcc/testsuite/g++.dg/torture/pr64568-2.C b/gcc/testsuite/g++.dg/torture/pr64568-2.C
index 05782179008..5881b854d6a 100644
--- a/gcc/testsuite/g++.dg/torture/pr64568-2.C
+++ b/gcc/testsuite/g++.dg/torture/pr64568-2.C
@@ -1,4 +1,5 @@
// { dg-do compile }
+// { dg-additional-options "-Wno-return-type" }
namespace std
{
diff --git a/gcc/testsuite/g++.dg/torture/pr64669.C b/gcc/testsuite/g++.dg/torture/pr64669.C
index b207739e6d3..a4e7d3a8a28 100644
--- a/gcc/testsuite/g++.dg/torture/pr64669.C
+++ b/gcc/testsuite/g++.dg/torture/pr64669.C
@@ -60,4 +60,6 @@ Lex::advance_one_char (const char *p, bool is_single_quote,
&& (*value == '\'' || *value == '\n') && !issued_error)
error_at (this->location (), "invalid character literal");
}
+
+ return 0;
}
diff --git a/gcc/testsuite/g++.dg/torture/pr64686.C b/gcc/testsuite/g++.dg/torture/pr64686.C
index 714aa4100c5..9eb54160eba 100644
--- a/gcc/testsuite/g++.dg/torture/pr64686.C
+++ b/gcc/testsuite/g++.dg/torture/pr64686.C
@@ -15,5 +15,6 @@ A *
B::m_fn1 (int *) const
{
new B (m_fn2 (0)->m_fn1 (0), 0, m_fn2 (0)->m_fn1 (0));
+ return 0;
}
diff --git a/gcc/testsuite/g++.dg/torture/pr64978.C b/gcc/testsuite/g++.dg/torture/pr64978.C
index a9ac49f8e12..3e45be6f09c 100644
--- a/gcc/testsuite/g++.dg/torture/pr64978.C
+++ b/gcc/testsuite/g++.dg/torture/pr64978.C
@@ -27,4 +27,4 @@ void C::m_fn3(A, unsigned, const int *, int &) {
1 ? VTables.m_fn2() : 0;
}
void B::m_fn1(bool, const int *, int &) { C(); }
-unsigned B::m_fn2() { m_fn1(0, 0, a); }
+unsigned B::m_fn2() { m_fn1(0, 0, a); return 0; }
diff --git a/gcc/testsuite/g++.dg/torture/pr64995.C b/gcc/testsuite/g++.dg/torture/pr64995.C
index f5b97cf371b..f2e1b1b3748 100644
--- a/gcc/testsuite/g++.dg/torture/pr64995.C
+++ b/gcc/testsuite/g++.dg/torture/pr64995.C
@@ -20,6 +20,7 @@ inline int *A::m_fn2() {
a = acos(c);
double d = m_fn1(b);
acos(d);
+ return 0;
}
void passTime() {
diff --git a/gcc/testsuite/g++.dg/torture/pr65655.C b/gcc/testsuite/g++.dg/torture/pr65655.C
index 8dfc13b7d7d..ea07d2e976b 100644
--- a/gcc/testsuite/g++.dg/torture/pr65655.C
+++ b/gcc/testsuite/g++.dg/torture/pr65655.C
@@ -1,6 +1,6 @@
/* { dg-do compile } */
// { dg-timeout-factor 2.0 }
-// { dg-additional-options "-std=c++11 -fsanitize=undefined -O2" }
+// { dg-additional-options "-std=c++11 -fsanitize=undefined -O2 -Wno-return-type" }
class ECoordinate { };
class EPoint {
public:
diff --git a/gcc/testsuite/g++.dg/torture/pr65851.C b/gcc/testsuite/g++.dg/torture/pr65851.C
index 6efe8861a19..81f3184296c 100644
--- a/gcc/testsuite/g++.dg/torture/pr65851.C
+++ b/gcc/testsuite/g++.dg/torture/pr65851.C
@@ -16,6 +16,7 @@ class C : A {
public:
unsigned long write(const char *p1, unsigned long p2) {
m_string.push_range(p1 + p2);
+ return 0;
}
};
char *write_signed_decimal_backward(bool) {
diff --git a/gcc/testsuite/g++.dg/torture/pr67055.C b/gcc/testsuite/g++.dg/torture/pr67055.C
index 7cbbca275c2..6621e58136f 100644
--- a/gcc/testsuite/g++.dg/torture/pr67055.C
+++ b/gcc/testsuite/g++.dg/torture/pr67055.C
@@ -15,6 +15,7 @@ namespace vespamalloc {
unsigned long A<StackRep>::fillStack(unsigned long p1) {
void *retAddr[p1];
fn1(retAddr);
+ return 0;
}
class B {
protected:
@@ -33,6 +34,7 @@ namespace vespamalloc {
void *C<MemBlockPtrT, ThreadListT>::malloc(unsigned long) {
MemBlockPtrT mem;
mem.alloc(0);
+ return 0;
}
C<D<16>, int> *_GmemP;
}
diff --git a/gcc/testsuite/g++.dg/torture/pr67191.C b/gcc/testsuite/g++.dg/torture/pr67191.C
index 79ee988ce7a..997c661d3ec 100644
--- a/gcc/testsuite/g++.dg/torture/pr67191.C
+++ b/gcc/testsuite/g++.dg/torture/pr67191.C
@@ -1,4 +1,5 @@
// { dg-do compile }
+// { dg-additional-options "-Wno-return-type" }
template <typename> class A;
template <typename _Tp> using __allocator_base = _Tp;
diff --git a/gcc/testsuite/g++.dg/torture/pr68852.C b/gcc/testsuite/g++.dg/torture/pr68852.C
index 41727517760..c2c6a585d3c 100644
--- a/gcc/testsuite/g++.dg/torture/pr68852.C
+++ b/gcc/testsuite/g++.dg/torture/pr68852.C
@@ -34,6 +34,7 @@ public:
c[2] = m_fn2(b);
c[3] = m_fn2(a);
c[ProjectRectBounds_next].m_fn1();
+ return 0;
}
D operator*(D p1) {
D d;
diff --git a/gcc/testsuite/g++.dg/torture/pr69264.C b/gcc/testsuite/g++.dg/torture/pr69264.C
index 43753806758..a531bfc9b8c 100644
--- a/gcc/testsuite/g++.dg/torture/pr69264.C
+++ b/gcc/testsuite/g++.dg/torture/pr69264.C
@@ -1,4 +1,5 @@
// { dg-do compile }
+// { dg-additional-options "-Wno-return-type" }
// { dg-additional-options "-mcpu=970 -maltivec" { target powerpc*-*-* } }
typedef union {
diff --git a/gcc/testsuite/g++.dg/torture/pr70971.C b/gcc/testsuite/g++.dg/torture/pr70971.C
index 23f33aafaba..b4ce235110f 100644
--- a/gcc/testsuite/g++.dg/torture/pr70971.C
+++ b/gcc/testsuite/g++.dg/torture/pr70971.C
@@ -1,4 +1,4 @@
-// { dg-additional-options "-std=c++14" }
+// { dg-additional-options "-std=c++14 -Wno-return-type" }
template<typename Signature>
class function;
diff --git a/gcc/testsuite/g++.dg/torture/pr77674.C b/gcc/testsuite/g++.dg/torture/pr77674.C
index f933174317e..a24ce58dafc 100644
--- a/gcc/testsuite/g++.dg/torture/pr77674.C
+++ b/gcc/testsuite/g++.dg/torture/pr77674.C
@@ -3,6 +3,7 @@ typedef struct { } __fsid_t;
typedef unsigned long int pthread_t;
extern "C" {
extern __inline __attribute__ ((__gnu_inline__)) int pthread_equal (pthread_t __thread1, pthread_t __thread2) throw () {
+ return 0;
}
}
typedef pthread_t __gthread_t;
diff --git a/gcc/testsuite/g++.dg/torture/pr77947.C b/gcc/testsuite/g++.dg/torture/pr77947.C
index 3c8a24a16c6..14f8b13a864 100644
--- a/gcc/testsuite/g++.dg/torture/pr77947.C
+++ b/gcc/testsuite/g++.dg/torture/pr77947.C
@@ -18,9 +18,12 @@ B::m_fn2 () const
bool
m_fn1 () const
{
+ return true;
}
C () {}
};
+
+ return 0;
}
void
fn1 (A &p1)
diff --git a/gcc/testsuite/g++.dg/torture/pr78268.C b/gcc/testsuite/g++.dg/torture/pr78268.C
index ef4547c1159..8458c20d417 100644
--- a/gcc/testsuite/g++.dg/torture/pr78268.C
+++ b/gcc/testsuite/g++.dg/torture/pr78268.C
@@ -1,4 +1,6 @@
// { dg-do compile }
+// { dg-additional-options "-Wno-return-type" }
+
typedef enum {} nsresult;
struct A {
diff --git a/gcc/testsuite/g++.dg/torture/pr78507.C b/gcc/testsuite/g++.dg/torture/pr78507.C
index 9691cf9bd7a..6d6bd31c571 100644
--- a/gcc/testsuite/g++.dg/torture/pr78507.C
+++ b/gcc/testsuite/g++.dg/torture/pr78507.C
@@ -1,5 +1,7 @@
// PR middle-end/78507
// { dg-do compile }
+// { dg-additional-options "-Wno-return-type" }
+
struct A {
template <typename _Iterator1, typename _Iterator2>
int operator()(_Iterator1, _Iterator2);
diff --git a/gcc/testsuite/g++.dg/torture/pr78692.C b/gcc/testsuite/g++.dg/torture/pr78692.C
index 57a0d2fcb0a..331cc81c210 100644
--- a/gcc/testsuite/g++.dg/torture/pr78692.C
+++ b/gcc/testsuite/g++.dg/torture/pr78692.C
@@ -22,5 +22,6 @@ int
F::g ()
{
a = i (h, b, 0);
+ return 0;
}
}
diff --git a/gcc/testsuite/g++.dg/torture/pr80171.C b/gcc/testsuite/g++.dg/torture/pr80171.C
index 81f272583c5..c1f6e622515 100644
--- a/gcc/testsuite/g++.dg/torture/pr80171.C
+++ b/gcc/testsuite/g++.dg/torture/pr80171.C
@@ -1,4 +1,5 @@
// { dg-do compile }
+// { dg-additional-options "-Wno-return-type" }
template <typename> struct remove_reference;
template <typename _Tp> struct remove_reference<_Tp &> { typedef _Tp type; };
diff --git a/gcc/testsuite/g++.dg/torture/pr82154.C b/gcc/testsuite/g++.dg/torture/pr82154.C
index f4e1c3ea139..e229c3e640e 100644
--- a/gcc/testsuite/g++.dg/torture/pr82154.C
+++ b/gcc/testsuite/g++.dg/torture/pr82154.C
@@ -25,6 +25,7 @@ class f
i ()
{
static d j;
+ return d();
}
int *k () throw (a::c);
};
@@ -47,4 +48,5 @@ int *f::k () throw (a::c)
}
}
i ().e ();
+ return 0;
}
diff --git a/gcc/testsuite/g++.dg/torture/pr82902.C b/gcc/testsuite/g++.dg/torture/pr82902.C
new file mode 100644
index 00000000000..cc2ce271d4c
--- /dev/null
+++ b/gcc/testsuite/g++.dg/torture/pr82902.C
@@ -0,0 +1,21 @@
+// { dg-do compile }
+
+typedef struct el_t {
+ el_t *next;
+ int elem[];
+} EL;
+el_t a, c;
+void *b;
+void *fn1() {
+ if (b)
+ return a.elem;
+ return c.elem;
+}
+typedef struct {
+ int x;
+} EV_T;
+EV_T *d;
+void fn2() {
+ EV_T *e = (EV_T *)fn1();
+ d[0] = *e;
+}
diff --git a/gcc/testsuite/g++.dg/torture/pr82985.C b/gcc/testsuite/g++.dg/torture/pr82985.C
new file mode 100644
index 00000000000..5c371835cf6
--- /dev/null
+++ b/gcc/testsuite/g++.dg/torture/pr82985.C
@@ -0,0 +1,458 @@
+/* { dg-do compile } */
+/* { dg-additional-options "-w" } */
+/* { dg-additional-options "-mavx2" { target { x86_64-*-* i?86-*-* } } } */
+
+namespace std {
+template < typename _Default > struct __detector { using type = _Default; };
+template < typename _Default, template < typename > class >
+using __detected_or = __detector< _Default >;
+template < typename _Default, template < typename > class _Op >
+using __detected_or_t = typename __detected_or< _Default, _Op >::type;
+template < typename > struct iterator_traits;
+template < typename _Tp > struct iterator_traits< _Tp * > {
+ typedef _Tp reference;
+};
+} // std
+using std::iterator_traits;
+template < typename _Iterator, typename > struct __normal_iterator {
+ typename iterator_traits< _Iterator >::reference operator*();
+ void operator++();
+};
+template < typename _IteratorL, typename _IteratorR, typename _Container >
+int operator!=(__normal_iterator< _IteratorL, _Container >,
+ __normal_iterator< _IteratorR, _Container >);
+namespace std {
+template < typename _Tp > struct allocator { typedef _Tp value_type; };
+struct __allocator_traits_base {
+ template < typename _Tp > using __pointer = typename _Tp::pointer;
+};
+template < typename _Alloc > struct allocator_traits : __allocator_traits_base {
+ using pointer = __detected_or_t< typename _Alloc::value_type *, __pointer >;
+};
+} // std
+typedef double __m128d __attribute__((__vector_size__(16)));
+typedef double __m256d __attribute__((__vector_size__(32)));
+enum { InnerVectorizedTraversal, LinearVectorizedTraversal };
+enum { ReadOnlyAccessors };
+template < int, typename Then, typename > struct conditional {
+ typedef Then type;
+};
+template < typename Then, typename Else > struct conditional< 0, Then, Else > {
+ typedef Else type;
+};
+template < typename, typename > struct is_same {
+ enum { value };
+};
+template < typename T > struct is_same< T, T > {
+ enum { value = 1 };
+};
+template < typename > struct traits;
+struct accessors_level {
+ enum { has_direct_access, has_write_access, value };
+};
+template < typename > struct EigenBase;
+template < typename > struct PlainObjectBase;
+template < typename, int = accessors_level::value > struct DenseCoeffsBase;
+template < typename, int, int, int = 0, int = 0, int = 0 > struct Matrix;
+template < typename > struct MatrixBase;
+template < typename, int, int, bool = 0 > struct Block;
+struct VectorBlock;
+template < typename, typename > struct CwiseNullaryOp;
+template < typename, typename, typename > struct CwiseBinaryOp;
+template < typename, int = accessors_level::has_write_access > struct MapBase;
+template < typename > struct packet_traits;
+template < typename > struct unpacket_traits;
+template < int Size, typename PacketType,
+ int = Size == is_same< PacketType, typename unpacket_traits<
+ PacketType >::half >::value >
+struct find_best_packet_helper;
+template < int Size, typename PacketType >
+struct find_best_packet_helper< Size, PacketType, 1 > {
+ typedef PacketType type;
+};
+template < int Size, typename PacketType >
+struct find_best_packet_helper< Size, PacketType, 0 > {
+ typedef typename find_best_packet_helper<
+ 1, typename unpacket_traits< PacketType >::half >::type type;
+};
+template < typename T, int Size > struct find_best_packet {
+ typedef typename find_best_packet_helper<
+ Size, typename packet_traits< T >::type >::type type;
+};
+struct compute_matrix_flags {
+ enum { ret = 1 };
+};
+struct ref_selector {
+ typedef Matrix< double, 10, 1 > &type;
+};
+template < typename Derived > struct dense_xpr_base {
+ typedef MatrixBase< Derived > type;
+};
+template < typename ExpressionType > struct is_lvalue {
+ enum { value = traits< ExpressionType >::Flags };
+};
+template < typename Packet > void pmul(Packet);
+template < typename Packet >
+Packet pload(const typename unpacket_traits< Packet >::type *);
+template < typename Packet >
+Packet pset1(const typename unpacket_traits< Packet >::type &);
+template < typename Scalar, typename Packet > void pstoreu(Scalar, Packet &);
+template < typename Packet, int >
+Packet ploadt(const typename unpacket_traits< Packet >::type *from) {
+ return pload< Packet >(from);
+}
+template < typename Scalar, typename Packet, int >
+void pstoret(Scalar *to, const Packet from) {
+ pstoreu(to, from);
+}
+typedef __m128d Packet2d;
+template <> struct unpacket_traits< Packet2d > {
+ typedef double type;
+ typedef Packet2d half;
+};
+template <> Packet2d pload(const double *from) { return *(__m128d *)from; }
+typedef __m256d Packet4d;
+template <> struct packet_traits< double > { typedef Packet4d type; };
+template <> struct unpacket_traits< Packet4d > {
+ typedef double type;
+ typedef Packet2d half;
+};
+__m256d pset1___trans_tmp_1;
+template <> Packet4d pset1(const double &) {
+ int __A;
+ pset1___trans_tmp_1 = __m256d{__A};
+ return pset1___trans_tmp_1;
+}
+template <> void pstoreu(double *to, const Packet4d &from) {
+ *(__attribute__((__vector_size__(4 * sizeof(double)))) double *)to = from;
+}
+struct scalar_product_op {
+ template < typename Packet > void packetOp(Packet a, Packet) { pmul(a); }
+};
+struct scalar_constant_op {
+ template < typename PacketType > PacketType packetOp() {
+ return pset1< PacketType >(0);
+ }
+};
+struct assign_op {
+ template < int, typename Packet > void assignPacket(double *a, Packet b) {
+ pstoret< double, Packet, 0 >(a, b);
+ }
+};
+template < typename Derived >
+struct DenseCoeffsBase< Derived, 0 > : EigenBase< Derived > {};
+template < typename Derived >
+struct DenseCoeffsBase< Derived > : DenseCoeffsBase< Derived, 0 > {};
+template < typename Derived > struct DenseBase : DenseCoeffsBase< Derived > {
+ using DenseCoeffsBase< Derived >::derived;
+ enum { SizeAtCompileTime, MaxSizeAtCompileTime };
+ static CwiseNullaryOp< scalar_constant_op, Derived > Constant();
+ Derived &setConstant();
+ struct FixedSegmentReturnType {
+ typedef VectorBlock Type;
+ };
+ template < int > typename FixedSegmentReturnType::Type segment() {
+ return typename FixedSegmentReturnType::Type(derived(), 0, 0);
+ }
+};
+template < typename Derived > struct MatrixBase : DenseBase< Derived > {
+ using DenseBase< Derived >::derived;
+ template < typename OtherDerived >
+ CwiseBinaryOp< scalar_product_op, const Derived, const OtherDerived >
+ cwiseProduct(OtherDerived) {
+ return CwiseBinaryOp< scalar_product_op, const Derived,
+ const OtherDerived >(derived(), derived());
+ }
+ template < typename OtherDerived >
+ Derived &operator=(const DenseBase< OtherDerived > &);
+};
+template < typename Derived > struct EigenBase {
+ Derived &derived() { return *static_cast< Derived * >(this); }
+ Derived derived() const;
+};
+template < typename > struct binary_evaluator;
+template < typename Derived > struct evaluator {
+ typedef Derived PlainObjectType;
+ typedef typename PlainObjectType::Scalar Scalar;
+ enum { IsVectorAtCompileTime, Flags };
+ evaluator(PlainObjectType m) : m_data(m.data()) {}
+ Scalar &coeffRef(int, int);
+ template < int, typename PacketType > PacketType packet(int, int) {
+ return ploadt< PacketType, 0 >(m_data);
+ }
+ const Scalar *m_data;
+};
+template < typename Scalar, int Rows, int Cols, int Options, int MaxRows,
+ int MaxCols >
+struct evaluator< Matrix< Scalar, Rows, Cols, Options, MaxRows, MaxCols > >
+ : evaluator< PlainObjectBase< Matrix< Scalar, Rows, Cols > > > {
+ typedef Matrix< Scalar, Rows, Cols > XprType;
+ evaluator(XprType m) : evaluator< PlainObjectBase< XprType > >(m) {}
+};
+struct nullary_wrapper {
+ template < typename T, typename IndexType >
+ T packetOp(scalar_constant_op op, IndexType, IndexType) {
+ return op.packetOp< T >();
+ }
+};
+template < typename NullaryOp, typename PlainObjectType >
+struct evaluator< CwiseNullaryOp< NullaryOp, PlainObjectType > > {
+ evaluator(CwiseNullaryOp< NullaryOp, PlainObjectType >);
+ template < int, typename PacketType, typename IndexType >
+ PacketType packet(IndexType row, IndexType col) {
+ return m_wrapper.packetOp< PacketType >(m_functor, row, col);
+ }
+ NullaryOp m_functor;
+ nullary_wrapper m_wrapper;
+};
+template < typename BinaryOp, typename Lhs, typename Rhs >
+struct evaluator< CwiseBinaryOp< BinaryOp, Lhs, Rhs > >
+ : binary_evaluator< CwiseBinaryOp< BinaryOp, Lhs, Rhs > > {
+ evaluator(CwiseBinaryOp< BinaryOp, Lhs, Rhs > xpr)
+ : binary_evaluator< CwiseBinaryOp< BinaryOp, Lhs, Rhs > >(xpr) {}
+};
+template < typename BinaryOp, typename Lhs, typename Rhs >
+struct binary_evaluator< CwiseBinaryOp< BinaryOp, Lhs, Rhs > > {
+ binary_evaluator(CwiseBinaryOp< BinaryOp, Lhs, Rhs > xpr)
+ : m_lhsImpl(xpr.lhs()), m_rhsImpl(xpr.rhs()) {}
+ template < int, typename PacketType > PacketType packet(int, int) {
+ PacketType __trans_tmp_1 = m_lhsImpl.template packet< 0, PacketType >(0, 0);
+ PacketType __trans_tmp_2;
+ m_functor.packetOp(__trans_tmp_1, __trans_tmp_2);
+ }
+ BinaryOp m_functor;
+ evaluator< Lhs > m_lhsImpl;
+ evaluator< Rhs > m_rhsImpl;
+};
+template < typename Derived > struct mapbase_evaluator {
+ typedef Derived XprType;
+ mapbase_evaluator(XprType map) : m_data(map.data()) {}
+ typename XprType::Scalar &coeffRef(int, int) { return m_data[0]; }
+ typename XprType::PointerType m_data;
+};
+template < int > struct block_evaluator;
+template < typename ArgType, int BlockRows, int BlockCols, bool InnerPanel >
+struct evaluator< Block< ArgType, BlockRows, BlockCols, InnerPanel > >
+ : block_evaluator< BlockCols > {
+ enum { Flags };
+ evaluator(Block< ArgType, 1, 1 > block) : block_evaluator< 1 >(block) {}
+};
+template < int BlockCols >
+struct block_evaluator
+ : mapbase_evaluator< Block< Matrix< double, 10, 1 >, 1, BlockCols > > {
+ typedef Block< Matrix< double, 10, 1 >, 1, BlockCols > XprType;
+ block_evaluator(XprType block) : mapbase_evaluator< XprType >(block) {}
+};
+template < typename DstEvaluator > struct copy_using_evaluator_traits {
+ typedef typename DstEvaluator::XprType Dst;
+ typedef typename Dst::Scalar DstScalar;
+ enum { DstFlags = DstEvaluator::Flags };
+ enum { InnerSize = DstFlags };
+ typedef typename conditional<
+ int() == LinearVectorizedTraversal,
+ typename find_best_packet< DstScalar, Dst::SizeAtCompileTime >::type,
+ typename find_best_packet< DstScalar, InnerSize >::type >::type
+ PacketType;
+};
+template < typename Kernel >
+struct copy_using_evaluator_innervec_CompleteUnrolling {
+ enum { outer, inner, SrcAlignment, DstAlignment };
+ static void run(Kernel kernel) {
+ kernel.template assignPacketByOuterInner< DstAlignment, SrcAlignment,
+ typename Kernel::PacketType >(
+ outer, inner);
+ }
+};
+template < typename Kernel > struct dense_assignment_loop {
+ static void run(Kernel kernel) {
+ copy_using_evaluator_innervec_CompleteUnrolling< Kernel >::run(kernel);
+ }
+};
+template < typename DstEvaluatorTypeT, typename SrcEvaluatorTypeT,
+ typename Functor >
+struct generic_dense_assignment_kernel {
+ typedef DstEvaluatorTypeT DstXprType;
+ typedef DstEvaluatorTypeT DstEvaluatorType;
+ typedef SrcEvaluatorTypeT SrcEvaluatorType;
+ typedef typename copy_using_evaluator_traits< DstEvaluatorTypeT >::PacketType
+ PacketType;
+ generic_dense_assignment_kernel(DstEvaluatorType dst, SrcEvaluatorType src,
+ Functor, DstXprType dstExpr)
+ : m_dst(dst), m_src(src), m_dstExpr(dstExpr) {}
+ template < int StoreMode, int LoadMode, typename >
+ void assignPacketByOuterInner(long, long) {
+ long row;
+ long col;
+ m_functor.template assignPacket< StoreMode >(
+ &m_dst.coeffRef(row, col),
+ m_src.template packet< LoadMode, PacketType >(row, col));
+ }
+ DstEvaluatorType &m_dst;
+ SrcEvaluatorType m_src;
+ Functor m_functor;
+ DstXprType m_dstExpr;
+};
+template < typename DstXprType, typename SrcXprType, typename Functor >
+void call_dense_assignment_loop(DstXprType dst, SrcXprType src, Functor func) {
+ typedef evaluator< DstXprType > DstEvaluatorType;
+ typedef evaluator< SrcXprType > SrcEvaluatorType;
+ SrcEvaluatorType srcEvaluator(src);
+ DstEvaluatorType dstEvaluator(dst);
+ typedef generic_dense_assignment_kernel< DstEvaluatorType, SrcEvaluatorType,
+ Functor >
+ Kernel;
+ Kernel kernel(dstEvaluator, srcEvaluator, func, dst);
+ dense_assignment_loop< Kernel >::run(kernel);
+}
+template < typename, typename, typename > struct Assignment;
+template < typename Dst, typename Src > void call_assignment(Dst dst, Src src) {
+ call_assignment(dst, src, assign_op());
+}
+template < typename Dst, typename Src, typename Func >
+void call_assignment(Dst dst, Src src, Func func) {
+ call_assignment_no_alias(dst, src, func);
+}
+template < typename Dst, typename Src, typename Func >
+void call_assignment_no_alias(Dst dst, Src src, Func func) {
+ enum { NeedToTranspose };
+ Assignment< typename conditional< NeedToTranspose, int, Dst >::type, Src,
+ Func >::run(dst, src, func);
+}
+template < typename DstXprType, typename SrcXprType, typename Functor >
+struct Assignment {
+ static void run(DstXprType dst, SrcXprType src, Functor func) {
+ call_dense_assignment_loop(dst, src, func);
+ }
+};
+template < typename Derived >
+template < typename OtherDerived >
+Derived &MatrixBase< Derived >::
+operator=(const DenseBase< OtherDerived > &other) {
+ call_assignment(derived(), other.derived());
+}
+template < int Size > struct plain_array { double array[Size]; };
+template < int Size > class DenseStorage {
+ plain_array< Size > m_data;
+
+public:
+ const double *data() const { return m_data.array; }
+ double *data() { return m_data.array; }
+};
+template < typename Derived >
+struct PlainObjectBase : dense_xpr_base< Derived >::type {
+ typedef typename dense_xpr_base< Derived >::type Base;
+ typedef typename traits< Derived >::Scalar Scalar;
+ DenseStorage< Base::MaxSizeAtCompileTime > m_storage;
+ const Scalar *data() const { return m_storage.data(); }
+ Scalar *data() { return m_storage.data(); }
+ PlainObjectBase() {}
+ template < typename OtherDerived > PlainObjectBase(OtherDerived other) {
+ call_assignment_no_alias(this->derived(), other, assign_op());
+ }
+};
+template < typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows,
+ int _MaxCols >
+struct traits< Matrix< _Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols > > {
+ typedef _Scalar Scalar;
+ enum { Flags = compute_matrix_flags::ret };
+};
+template < typename, int _Rows, int _Cols, int, int, int >
+struct Matrix : PlainObjectBase< Matrix< double, _Rows, _Cols > > {
+ PlainObjectBase< Matrix > Base;
+ Matrix() {}
+ template < typename OtherDerived > Matrix(OtherDerived other) : Base(other) {}
+};
+template < typename, typename, typename > struct CwiseBinaryOp {
+ typedef ref_selector::type LhsNested;
+ CwiseBinaryOp(Matrix< double, 10, 1 > &aLhs, Matrix< double, 0, 0 >)
+ : m_lhs(aLhs) {}
+ LhsNested lhs() { return m_lhs; }
+ Matrix< double, 8, 1 > rhs() {}
+ LhsNested m_lhs;
+};
+template < typename NullaryOp, typename >
+struct CwiseNullaryOp
+ : dense_xpr_base< CwiseNullaryOp< NullaryOp, int > >::type {};
+template < typename Derived > Derived &DenseBase< Derived >::setConstant() {
+ derived() = Constant();
+}
+template < typename Derived >
+struct MapBase< Derived, ReadOnlyAccessors > : dense_xpr_base< Derived >::type {
+ typedef typename dense_xpr_base< Derived >::type Base;
+ typedef typename traits< Derived >::Scalar Scalar;
+ typedef typename conditional< is_lvalue< Derived >::value, Scalar *,
+ Scalar >::type PointerType;
+ Scalar *data() { return m_data; }
+ MapBase(PointerType dataPtr, long, long) : m_data(dataPtr) {}
+ PointerType m_data;
+};
+template < typename Derived >
+struct MapBase< Derived > : MapBase< Derived, ReadOnlyAccessors > {
+ typedef MapBase< Derived, ReadOnlyAccessors > Base;
+ MapBase(typename Base::PointerType dataPtr, long rows, long cols)
+ : Base(dataPtr, rows, cols) {}
+ using MapBase< Derived, ReadOnlyAccessors >::Base::operator=;
+};
+template < typename XprType, int BlockRows, int BlockCols, bool InnerPanel >
+struct traits< Block< XprType, BlockRows, BlockCols, InnerPanel > >
+ : traits< XprType > {};
+template < int, int > struct BlockImpl_dense;
+template < typename, int, int, typename > class BlockImpl;
+template < typename, int BlockRows, int BlockCols, bool >
+struct Block : BlockImpl< Matrix< double, 10, 1 >, BlockRows, BlockCols, int > {
+ typedef BlockImpl< Matrix< double, 10, 1 >, BlockRows, BlockCols, int > Impl;
+ using Impl::operator=;
+ Block(Matrix< double, 10, 1 > &xpr, long startRow, long startCol,
+ long blockRows, long blockCols)
+ : Impl(xpr, startRow, startCol, blockRows, blockCols) {}
+};
+template < typename XprType, int BlockRows, int BlockCols >
+struct BlockImpl< XprType, BlockRows, BlockCols, int >
+ : BlockImpl_dense< BlockRows, BlockCols > {
+ typedef BlockImpl_dense< BlockRows, BlockCols > Impl;
+ typedef Impl Base;
+ using Base::operator=;
+ BlockImpl(XprType &xpr, long startRow, long startCol, long blockRows,
+ long blockCols)
+ : Impl(xpr, startRow, startCol, blockRows, blockCols) {}
+};
+template < int BlockRows, int BlockCols >
+struct BlockImpl_dense
+ : MapBase< Block< Matrix< double, 10, 1 >, BlockRows, BlockCols > > {
+ typedef MapBase< Block< Matrix< double, 10, 1 >, BlockRows, BlockCols > >
+ Base;
+ using Base::operator=;
+ BlockImpl_dense(Matrix< double, 10, 1 > &xpr, long, long, long blockRows,
+ long blockCols)
+ : Base(xpr.data(), blockRows, blockCols) {}
+};
+struct VectorBlock : Block< int, traits< Matrix< double, 0, 1 > >::Flags, 1 > {
+ VectorBlock(Matrix< double, 10, 1 > &vector, long start, long size)
+ : Block(vector, 0, start, 1, size) {}
+};
+namespace std {
+template < typename _Alloc > struct _Vector_base {
+ typedef typename allocator_traits< _Alloc >::pointer pointer;
+};
+template < typename _Tp, typename _Alloc = allocator< _Tp > > class vector {
+public:
+ typedef __normal_iterator< typename _Vector_base< _Alloc >::pointer, int >
+ iterator;
+ iterator begin();
+ iterator end();
+};
+struct FrameHessian {
+ Matrix< double, 0, 1 > step;
+ void setState(Matrix< double, 0, 1 >);
+};
+struct FullSystem {
+ bool doStepFromBackup();
+ vector< FrameHessian * > frameHessians;
+};
+bool FullSystem::doStepFromBackup() {
+ Matrix< double, 10, 1 > pstepfac;
+ pstepfac.segment< 4 >().setConstant();
+ for (FrameHessian *fh : frameHessians)
+ fh->setState(pstepfac.cwiseProduct(fh->step));
+}
+} // namespace std
diff --git a/gcc/testsuite/g++.dg/tree-prof/pr79259.C b/gcc/testsuite/g++.dg/tree-prof/pr79259.C
index a55172b62d2..6125a179bdc 100644
--- a/gcc/testsuite/g++.dg/tree-prof/pr79259.C
+++ b/gcc/testsuite/g++.dg/tree-prof/pr79259.C
@@ -11,6 +11,8 @@ fn2 ()
{
if (a (c == 0))
return 0;
+
+ return 0;
}
int main()
diff --git a/gcc/testsuite/g++.dg/tree-ssa/copyprop.C b/gcc/testsuite/g++.dg/tree-ssa/copyprop.C
index b30e5ddc5ba..8c05895e45e 100644
--- a/gcc/testsuite/g++.dg/tree-ssa/copyprop.C
+++ b/gcc/testsuite/g++.dg/tree-ssa/copyprop.C
@@ -2,7 +2,7 @@
//
// { dg-do compile { target { lp64 } } }
// { dg-options "-Wno-error -fno-exceptions -fno-tree-vrp -O2 -fprofile-generate -finline-limit=500 -std=c++98" }
-//
+// { dg-additional-options "-Wno-return-type" }
#include <map>
#include <vector>
diff --git a/gcc/testsuite/g++.dg/tree-ssa/pr22444.C b/gcc/testsuite/g++.dg/tree-ssa/pr22444.C
index 7df4b9cce15..04bf2009cbe 100644
--- a/gcc/testsuite/g++.dg/tree-ssa/pr22444.C
+++ b/gcc/testsuite/g++.dg/tree-ssa/pr22444.C
@@ -4,6 +4,7 @@
// with the subvars leading to the subvars not being renamed when they should
// { dg-do compile }
// { dg-options "-O2" }
+// { dg-additional-options "-Wno-return-type" }
__extension__ typedef __PTRDIFF_TYPE__ ptrdiff_t;
__extension__ typedef __SIZE_TYPE__ size_t;
namespace std
diff --git a/gcc/testsuite/g++.dg/tree-ssa/pr23948.C b/gcc/testsuite/g++.dg/tree-ssa/pr23948.C
index def01d95157..ed99217a7b9 100644
--- a/gcc/testsuite/g++.dg/tree-ssa/pr23948.C
+++ b/gcc/testsuite/g++.dg/tree-ssa/pr23948.C
@@ -6,7 +6,7 @@ struct MIOFILE {
};
double potentially_runnable_resource_share();
void f1(double);
-int make_scheduler_request(double a, double b)
+void make_scheduler_request(double a, double b)
{
MIOFILE mf;
double prrs = potentially_runnable_resource_share();
diff --git a/gcc/testsuite/g++.dg/tree-ssa/pr24172.C b/gcc/testsuite/g++.dg/tree-ssa/pr24172.C
index 245186a3c50..4d92bd592c0 100644
--- a/gcc/testsuite/g++.dg/tree-ssa/pr24172.C
+++ b/gcc/testsuite/g++.dg/tree-ssa/pr24172.C
@@ -1,6 +1,6 @@
// { dg-options "-O2" }
void IOException( char);
-inline int* dummy( const char* const mode )
+inline void dummy( const char* const mode )
{
IOException(*mode+*mode);
}
diff --git a/gcc/testsuite/g++.dg/tree-ssa/pr24351-3.C b/gcc/testsuite/g++.dg/tree-ssa/pr24351-3.C
index 09a3f9462f3..f3aaee85286 100644
--- a/gcc/testsuite/g++.dg/tree-ssa/pr24351-3.C
+++ b/gcc/testsuite/g++.dg/tree-ssa/pr24351-3.C
@@ -1,5 +1,6 @@
/* { dg-do compile } */
/* { dg-options "-O2" } */
+// { dg-additional-options "-Wno-return-type" }
namespace sigc {
template <class T_type> struct type_trait {
typedef T_type& pass;
diff --git a/gcc/testsuite/g++.dg/tree-ssa/pr27283.C b/gcc/testsuite/g++.dg/tree-ssa/pr27283.C
index 224ea6a9bf9..1623d289a20 100644
--- a/gcc/testsuite/g++.dg/tree-ssa/pr27283.C
+++ b/gcc/testsuite/g++.dg/tree-ssa/pr27283.C
@@ -1,5 +1,6 @@
/* { dg-do compile } */
/* { dg-options "-O2" } */
+/* { dg-additional-options "-Wno-return-type" } */
namespace Gambit
{
diff --git a/gcc/testsuite/g++.dg/tree-ssa/pr27291.C b/gcc/testsuite/g++.dg/tree-ssa/pr27291.C
index b8b5e136a46..24f440dde06 100644
--- a/gcc/testsuite/g++.dg/tree-ssa/pr27291.C
+++ b/gcc/testsuite/g++.dg/tree-ssa/pr27291.C
@@ -1,5 +1,6 @@
/* { dg-do compile } */
/* { dg-options "-O2" } */
+/* { dg-additional-options "-Wno-return-type" } */
namespace std
{
diff --git a/gcc/testsuite/g++.dg/tree-ssa/pr27548.C b/gcc/testsuite/g++.dg/tree-ssa/pr27548.C
index d23b959a599..cbe7929aefe 100644
--- a/gcc/testsuite/g++.dg/tree-ssa/pr27548.C
+++ b/gcc/testsuite/g++.dg/tree-ssa/pr27548.C
@@ -1,6 +1,7 @@
// PR tree-optimization/27548
// { dg-do compile }
// { dg-options "-O1" }
+// { dg-additional-options "-Wno-return-type" }
namespace Gambit
{
diff --git a/gcc/testsuite/g++.dg/tree-ssa/pr31146-2.C b/gcc/testsuite/g++.dg/tree-ssa/pr31146-2.C
index 500d8b639ee..9fb5dc1b60c 100644
--- a/gcc/testsuite/g++.dg/tree-ssa/pr31146-2.C
+++ b/gcc/testsuite/g++.dg/tree-ssa/pr31146-2.C
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O -fno-tree-vrp -fdump-tree-forwprop1" } */
+/* { dg-options "-O -fcheck-new -fno-tree-vrp -fdump-tree-forwprop1" } */
#include <new>
@@ -20,6 +20,5 @@ double foo (void)
return v.a[2];
}
-/* -std=c++17 and above doesn't emit operator new () != NULL, so there is
- nothing to fold anymore. */
-/* { dg-final { scan-tree-dump "Replaced .* != 0B. with .1" "forwprop1" { target c++14_down } } } */
+/* GCC 8 emits operator new () != NULL with -fcheck-new. */
+/* { dg-final { scan-tree-dump "Replaced .* != 0B. with .1" "forwprop1" } } */
diff --git a/gcc/testsuite/g++.dg/tree-ssa/pr33604.C b/gcc/testsuite/g++.dg/tree-ssa/pr33604.C
index 1c0f550bfdf..668db49d146 100644
--- a/gcc/testsuite/g++.dg/tree-ssa/pr33604.C
+++ b/gcc/testsuite/g++.dg/tree-ssa/pr33604.C
@@ -6,7 +6,7 @@ struct Value
double value;
Value(double value_) : value (value_) {}
operator double() const { return value; }
- Value& operator=(double other) { value = other; }
+ Value& operator=(double other) { value = other; return *this; }
};
struct Ref
diff --git a/gcc/testsuite/g++.dg/tree-ssa/pr34355.C b/gcc/testsuite/g++.dg/tree-ssa/pr34355.C
index 978ed75df68..e7b4537697a 100644
--- a/gcc/testsuite/g++.dg/tree-ssa/pr34355.C
+++ b/gcc/testsuite/g++.dg/tree-ssa/pr34355.C
@@ -24,4 +24,5 @@ double Parse_Float ()
EXPRESS Express = {1.0, 2.0, 3.0, 4.0, 5.0};
Parse_Rel_Factor (Express, &Terms);
+ return 0.0;
}
diff --git a/gcc/testsuite/g++.dg/tree-ssa/pr41428.C b/gcc/testsuite/g++.dg/tree-ssa/pr41428.C
index c0a5eb627be..7aff519e746 100644
--- a/gcc/testsuite/g++.dg/tree-ssa/pr41428.C
+++ b/gcc/testsuite/g++.dg/tree-ssa/pr41428.C
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O -fdump-tree-ccp1-details" } */
+/* { dg-options "-O -fcheck-new -fdump-tree-ccp1-details" } */
extern "C" void abort (void);
inline void *operator new (__SIZE_TYPE__, void *__p) throw () { return __p; }
@@ -11,6 +11,5 @@ int foo(void)
return *(int *)&f;
}
-/* -std=c++17 and above doesn't emit operator new () != NULL, so there is
- nothing to fold anymore. */
-/* { dg-final { scan-tree-dump "Folded into: if \\\(1 != 0\\\)" "ccp1" { target c++14_down } } } */
+/* GCC 8 emits operator new () != NULL with -fcheck-new. */
+/* { dg-final { scan-tree-dump "Folded into: if \\\(1 != 0\\\)" "ccp1" } } */
diff --git a/gcc/testsuite/g++.dg/tree-ssa/pr42337.C b/gcc/testsuite/g++.dg/tree-ssa/pr42337.C
index 8abd4b2d161..61beb737018 100644
--- a/gcc/testsuite/g++.dg/tree-ssa/pr42337.C
+++ b/gcc/testsuite/g++.dg/tree-ssa/pr42337.C
@@ -1,6 +1,7 @@
// PR tree-optimize/42337
// { dg-do compile }
// { dg-options "-O2" }
+// { dg-additional-options "-Wno-return-type" }
template<class _T1, class _T2> struct pair {
_T2 second;
diff --git a/gcc/testsuite/g++.dg/tree-ssa/pr81408.C b/gcc/testsuite/g++.dg/tree-ssa/pr81408.C
index f94544b9e2d..60e6e5277d7 100644
--- a/gcc/testsuite/g++.dg/tree-ssa/pr81408.C
+++ b/gcc/testsuite/g++.dg/tree-ssa/pr81408.C
@@ -1,5 +1,9 @@
/* { dg-do compile } */
/* { dg-options "-O2 -std=gnu++11 -fopt-info-loop-missed -Wunsafe-loop-optimizations" } */
+struct p
+{
+ char *ay;
+};
namespace a {
void b () __attribute__ ((__noreturn__));
@@ -19,7 +23,7 @@ template <typename j, typename> class k
j l;
public:
- typename d<j>::f operator* () {}
+ typename d<j>::f operator* () { return p(); }
void operator++ () { ++l; }
j
aa ()
@@ -33,7 +37,6 @@ operator!= (k<m, ab> o, k<n, ab> p2)
{
return o.aa () != p2.aa ();
}
-struct p;
namespace a {
struct F
{
@@ -75,13 +78,10 @@ at
av (au o)
{
o.aq ('\n');
+ return at();
}
u ax;
}
-struct p
-{
- char *ay;
-};
a::H t;
void
ShowHelpListCommands ()
diff --git a/gcc/testsuite/g++.dg/tree-ssa/pred-1.C b/gcc/testsuite/g++.dg/tree-ssa/pred-1.C
index 01b065ee966..19a066ab48d 100644
--- a/gcc/testsuite/g++.dg/tree-ssa/pred-1.C
+++ b/gcc/testsuite/g++.dg/tree-ssa/pred-1.C
@@ -2,6 +2,8 @@
/* { dg-options "-O2 -fdump-tree-profile_estimate" } */
int a[100];
void foo(int);
+
+int
main()
{
int i;
@@ -11,5 +13,7 @@ main()
continue;
foo(i);
}
+
+ return 0;
}
// { dg-final { scan-tree-dump "continue heuristics" "profile_estimate" } }
diff --git a/gcc/testsuite/g++.dg/ubsan/pr65019.C b/gcc/testsuite/g++.dg/ubsan/pr65019.C
index a7f21a7d241..380c10b65d1 100644
--- a/gcc/testsuite/g++.dg/ubsan/pr65019.C
+++ b/gcc/testsuite/g++.dg/ubsan/pr65019.C
@@ -17,6 +17,8 @@ C::foo (const A &x, int y)
C *d = new C (x, y);
if (d->c == nullptr)
delete d;
+
+ return 0;
}
C::~C ()
diff --git a/gcc/testsuite/g++.dg/ubsan/pr65583.C b/gcc/testsuite/g++.dg/ubsan/pr65583.C
index 4e1149e9cb6..02acb361a22 100644
--- a/gcc/testsuite/g++.dg/ubsan/pr65583.C
+++ b/gcc/testsuite/g++.dg/ubsan/pr65583.C
@@ -1,6 +1,7 @@
// PR sanitizer/65583
// { dg-do compile }
// { dg-options "-std=c++11 -fsanitize=undefined" }
+// { dg-additional-options "-Wno-return-type" }
namespace std
{
diff --git a/gcc/testsuite/g++.dg/ubsan/vptr-12.C b/gcc/testsuite/g++.dg/ubsan/vptr-12.C
new file mode 100644
index 00000000000..f23bbc3fd10
--- /dev/null
+++ b/gcc/testsuite/g++.dg/ubsan/vptr-12.C
@@ -0,0 +1,22 @@
+// { dg-do run }
+// { dg-shouldfail "ubsan" }
+// { dg-options "-fsanitize=vptr -fno-sanitize-recover=vptr" }
+
+struct MyClass
+{
+ virtual ~MyClass () {}
+ virtual void Doit () {}
+};
+
+int
+main ()
+{
+ MyClass *c = new MyClass;
+ c->~MyClass ();
+ c->Doit ();
+
+ return 0;
+}
+
+// { dg-output "\[^\n\r]*vptr-12.C:16:\[0-9]*: runtime error: member call on address 0x\[0-9a-fA-F]* which does not point to an object of type 'MyClass'(\n|\r\n|\r)" }
+// { dg-output "0x\[0-9a-fA-F]*: note: object has invalid vptr" }
diff --git a/gcc/testsuite/g++.dg/vect/pr60836.cc b/gcc/testsuite/g++.dg/vect/pr60836.cc
index 425106dd44d..b2d66ec5b15 100644
--- a/gcc/testsuite/g++.dg/vect/pr60836.cc
+++ b/gcc/testsuite/g++.dg/vect/pr60836.cc
@@ -27,6 +27,8 @@ norm_ (const int &)
b = e (b, d);
b = e (b, c);
}
+
+ return 0.0;
}
void
diff --git a/gcc/testsuite/g++.dg/vect/pr68145.cc b/gcc/testsuite/g++.dg/vect/pr68145.cc
index 51e663ae636..8a1e10ee783 100644
--- a/gcc/testsuite/g++.dg/vect/pr68145.cc
+++ b/gcc/testsuite/g++.dg/vect/pr68145.cc
@@ -1,4 +1,5 @@
/* { dg-do compile } */
+/* { dg-additional-options "-Wno-return-type" } */
struct A {
bool operator()(int p1, int p2) { return p1 && p2; }
diff --git a/gcc/testsuite/g++.dg/vect/pr70729-nest.cc b/gcc/testsuite/g++.dg/vect/pr70729-nest.cc
index 96171e5ec94..931895b0a9c 100644
--- a/gcc/testsuite/g++.dg/vect/pr70729-nest.cc
+++ b/gcc/testsuite/g++.dg/vect/pr70729-nest.cc
@@ -2,7 +2,7 @@
// { dg-additional-options "-ffast-math -fopenmp-simd" }
// { dg-additional-options "-msse2" { target x86_64-*-* i?86-*-* } }
-inline void* my_alloc (__SIZE_TYPE__ bytes) {void *ptr; __builtin_posix_memalign (&ptr, bytes, 128);}
+inline void* my_alloc (__SIZE_TYPE__ bytes) {void *ptr; __builtin_posix_memalign (&ptr, bytes, 128); return 0; }
inline void my_free (void* memory) {__builtin_free (memory);}
float W[100];
diff --git a/gcc/testsuite/g++.dg/vect/pr70729.cc b/gcc/testsuite/g++.dg/vect/pr70729.cc
index ff868f7a41b..eac4b4bd75c 100644
--- a/gcc/testsuite/g++.dg/vect/pr70729.cc
+++ b/gcc/testsuite/g++.dg/vect/pr70729.cc
@@ -2,7 +2,7 @@
// { dg-additional-options "-ffast-math -fopenmp-simd" }
// { dg-additional-options "-msse2" { target x86_64-*-* i?86-*-* } }
-inline void* my_alloc (__SIZE_TYPE__ bytes) {void *ptr; __builtin_posix_memalign (&ptr, bytes, 128);}
+inline void* my_alloc (__SIZE_TYPE__ bytes) {void *ptr; __builtin_posix_memalign (&ptr, bytes, 128); return 0; }
inline void my_free (void* memory) {__builtin_free (memory);}
template <typename T>
diff --git a/gcc/testsuite/g++.dg/warn/Waddress-3.C b/gcc/testsuite/g++.dg/warn/Waddress-3.C
index 13d7cd2c001..a97c7814ce5 100644
--- a/gcc/testsuite/g++.dg/warn/Waddress-3.C
+++ b/gcc/testsuite/g++.dg/warn/Waddress-3.C
@@ -1,6 +1,6 @@
// PR c++/65168
// { dg-do compile { target c++11 } }
-// { dg-options -Waddress }
+// { dg-options "-Waddress -Wno-return-type" }
// We shouldn't warn in unevaluated context about the address of a reference
// always being true.
diff --git a/gcc/testsuite/g++.dg/warn/Wconversion-null-2.C b/gcc/testsuite/g++.dg/warn/Wconversion-null-2.C
index a71551fdf90..98f5c405165 100644
--- a/gcc/testsuite/g++.dg/warn/Wconversion-null-2.C
+++ b/gcc/testsuite/g++.dg/warn/Wconversion-null-2.C
@@ -48,7 +48,7 @@ void warn_for_NULL()
NULL && NULL; // No warning: converting NULL to bool is OK
}
-int warn_for___null()
+void warn_for___null()
{
int i = __null; // { dg-warning "" } converting __null to non-pointer type
float z = __null; // { dg-warning "" } converting __null to non-pointer type
diff --git a/gcc/testsuite/g++.dg/warn/Wnull-conversion-2.C b/gcc/testsuite/g++.dg/warn/Wnull-conversion-2.C
index 92a87d1e76c..d5501fface5 100644
--- a/gcc/testsuite/g++.dg/warn/Wnull-conversion-2.C
+++ b/gcc/testsuite/g++.dg/warn/Wnull-conversion-2.C
@@ -34,6 +34,7 @@ void func1(long int a) {
int x = 1;
+int
main()
{
int *p = &x;
@@ -42,4 +43,6 @@ main()
Foo::Compare<long int, int>(NULL, p); // { dg-warning "passing NULL to" }
Foo::Compare(NULL, p);
func1(NULL); // { dg-warning "passing NULL to" }
+
+ return 0;
}
diff --git a/gcc/testsuite/g++.dg/warn/Wparentheses-10.C b/gcc/testsuite/g++.dg/warn/Wparentheses-10.C
index c30df090f5e..557db091ad0 100644
--- a/gcc/testsuite/g++.dg/warn/Wparentheses-10.C
+++ b/gcc/testsuite/g++.dg/warn/Wparentheses-10.C
@@ -5,7 +5,7 @@
int foo (int);
-int
+void
bar (int a, int b, int c)
{
foo (a & b ^ c); // { dg-warning "parentheses" "correct warning" }
diff --git a/gcc/testsuite/g++.dg/warn/Wparentheses-11.C b/gcc/testsuite/g++.dg/warn/Wparentheses-11.C
index 912c3b7ae76..6f0ecbe8bd8 100644
--- a/gcc/testsuite/g++.dg/warn/Wparentheses-11.C
+++ b/gcc/testsuite/g++.dg/warn/Wparentheses-11.C
@@ -5,7 +5,7 @@
int foo (int);
-int
+void
bar (int a, int b, int c)
{
foo (a + b & c); // { dg-warning "parentheses" "correct warning" }
diff --git a/gcc/testsuite/g++.dg/warn/Wparentheses-12.C b/gcc/testsuite/g++.dg/warn/Wparentheses-12.C
index b04529827d5..ca6407aea74 100644
--- a/gcc/testsuite/g++.dg/warn/Wparentheses-12.C
+++ b/gcc/testsuite/g++.dg/warn/Wparentheses-12.C
@@ -7,7 +7,7 @@ int foo (int);
int a, b, c;
-int
+void
bar (void)
{
if (a)
diff --git a/gcc/testsuite/g++.dg/warn/Wparentheses-25.C b/gcc/testsuite/g++.dg/warn/Wparentheses-25.C
index d9951a4f46b..d3afa4ce784 100644
--- a/gcc/testsuite/g++.dg/warn/Wparentheses-25.C
+++ b/gcc/testsuite/g++.dg/warn/Wparentheses-25.C
@@ -5,7 +5,7 @@
// C++ version of Wparentheses-11.c
int foo (int);
-int
+void
bar (int a, int b, int c)
{
foo (!a & b); /* { dg-warning "parentheses" "correct warning" } */
@@ -156,7 +156,7 @@ bar (int a, int b, int c)
}
-int
+void
baz (int a, int b, int c)
{
foo (!a & (b << c));/* { dg-warning "parentheses" "correct warning" } */
diff --git a/gcc/testsuite/g++.dg/warn/Wparentheses-6.C b/gcc/testsuite/g++.dg/warn/Wparentheses-6.C
index 9963d822e05..d985d7e45d6 100644
--- a/gcc/testsuite/g++.dg/warn/Wparentheses-6.C
+++ b/gcc/testsuite/g++.dg/warn/Wparentheses-6.C
@@ -5,7 +5,7 @@
int foo (int);
-int
+void
bar (int a, int b, int c)
{
foo (a <= b <= c); // { dg-warning "comparison" "correct warning" }
diff --git a/gcc/testsuite/g++.dg/warn/Wparentheses-7.C b/gcc/testsuite/g++.dg/warn/Wparentheses-7.C
index 7d549c38c31..69d555b3dfb 100644
--- a/gcc/testsuite/g++.dg/warn/Wparentheses-7.C
+++ b/gcc/testsuite/g++.dg/warn/Wparentheses-7.C
@@ -5,7 +5,7 @@
int foo (int);
-int
+void
bar (int a, int b, int c)
{
foo (a + b << c); // { dg-warning "parentheses" "correct warning" }
diff --git a/gcc/testsuite/g++.dg/warn/Wparentheses-8.C b/gcc/testsuite/g++.dg/warn/Wparentheses-8.C
index ddb5e64b4ad..2089dce968c 100644
--- a/gcc/testsuite/g++.dg/warn/Wparentheses-8.C
+++ b/gcc/testsuite/g++.dg/warn/Wparentheses-8.C
@@ -5,7 +5,7 @@
int foo (int);
-int
+void
bar (int a, int b, int c)
{
foo (a && b || c); // { dg-warning "parentheses" "correct warning" }
diff --git a/gcc/testsuite/g++.dg/warn/Wparentheses-9.C b/gcc/testsuite/g++.dg/warn/Wparentheses-9.C
index bad6fb1c570..7c8f01d327b 100644
--- a/gcc/testsuite/g++.dg/warn/Wparentheses-9.C
+++ b/gcc/testsuite/g++.dg/warn/Wparentheses-9.C
@@ -5,7 +5,7 @@
int foo (int);
-int
+void
bar (int a, int b, int c)
{
foo (a & b | c); // { dg-warning "parentheses" "correct warning" }
diff --git a/gcc/testsuite/g++.dg/warn/Wshadow-5.C b/gcc/testsuite/g++.dg/warn/Wshadow-5.C
index 7a90ec9c997..feb2bffcdc0 100644
--- a/gcc/testsuite/g++.dg/warn/Wshadow-5.C
+++ b/gcc/testsuite/g++.dg/warn/Wshadow-5.C
@@ -1,7 +1,7 @@
// Wshadows was giving warnings for nested function parameters in nested class
// or structure that we didn't want.
// { dg-do compile }
-// { dg-options "-Wshadow" }
+// { dg-options "-Wshadow -Wno-return-type" }
// PR c++/41825
int f (int n)
diff --git a/gcc/testsuite/g++.dg/warn/Wtype-limits-Wextra.C b/gcc/testsuite/g++.dg/warn/Wtype-limits-Wextra.C
index 91b5c1b46d8..0cee96a8a98 100644
--- a/gcc/testsuite/g++.dg/warn/Wtype-limits-Wextra.C
+++ b/gcc/testsuite/g++.dg/warn/Wtype-limits-Wextra.C
@@ -69,7 +69,7 @@ void f(Int x) {
assert(0 <= x and x <= D);
}
-int ff(void) {
+void ff(void) {
f<unsigned char, 2>(5);
f<signed char, 2>(5);
}
@@ -78,7 +78,7 @@ template <typename Int, Int D>
void g(void) {
assert(0 <= D);
}
-int gg(void) {
+void gg(void) {
g<unsigned char, 2>();
}
diff --git a/gcc/testsuite/g++.dg/warn/Wtype-limits-no.C b/gcc/testsuite/g++.dg/warn/Wtype-limits-no.C
index 5040e2657ba..ad248d71808 100644
--- a/gcc/testsuite/g++.dg/warn/Wtype-limits-no.C
+++ b/gcc/testsuite/g++.dg/warn/Wtype-limits-no.C
@@ -69,7 +69,7 @@ void f(Int x) {
assert(0 <= x and x <= D); // { dg-bogus "comparison is always true due to limited range of data type" }
}
-int ff(void) {
+void ff(void) {
f<unsigned char, 2>(5);
f<signed char, 2>(5);
}
@@ -78,7 +78,7 @@ template <typename Int, Int D>
void g(void) {
assert(0 <= D);
}
-int gg(void) {
+void gg(void) {
g<unsigned char, 2>();
}
diff --git a/gcc/testsuite/g++.dg/warn/Wtype-limits.C b/gcc/testsuite/g++.dg/warn/Wtype-limits.C
index c345eff0b44..7919a42c834 100644
--- a/gcc/testsuite/g++.dg/warn/Wtype-limits.C
+++ b/gcc/testsuite/g++.dg/warn/Wtype-limits.C
@@ -69,7 +69,7 @@ void f(Int x) {
assert(0 <= x and x <= D);
}
-int ff(void) {
+void ff(void) {
f<unsigned char, 2>(5);
f<signed char, 2>(5);
}
@@ -78,7 +78,7 @@ template <typename Int, Int D>
void g(void) {
assert(0 <= D);
}
-int gg(void) {
+void gg(void) {
g<unsigned char, 2>();
}
diff --git a/gcc/testsuite/g++.dg/warn/Wunused-local-typedefs.C b/gcc/testsuite/g++.dg/warn/Wunused-local-typedefs.C
index 4fc8640ed7b..73f7ec79ce1 100644
--- a/gcc/testsuite/g++.dg/warn/Wunused-local-typedefs.C
+++ b/gcc/testsuite/g++.dg/warn/Wunused-local-typedefs.C
@@ -31,7 +31,7 @@ test0_tmpl(void)
foo(2);
}
-int
+void
test0(void)
{
test0_tmpl<int>();
diff --git a/gcc/testsuite/g++.dg/warn/Wzero-as-null-pointer-constant-5.C b/gcc/testsuite/g++.dg/warn/Wzero-as-null-pointer-constant-5.C
index 185d2b5c4ee..4269beda28a 100644
--- a/gcc/testsuite/g++.dg/warn/Wzero-as-null-pointer-constant-5.C
+++ b/gcc/testsuite/g++.dg/warn/Wzero-as-null-pointer-constant-5.C
@@ -1,5 +1,5 @@
// PR c++/52718
-// { dg-options "-Wzero-as-null-pointer-constant" }
+// { dg-options "-Wzero-as-null-pointer-constant -Wno-return-type" }
struct foo
{
diff --git a/gcc/testsuite/g++.dg/warn/pmf1.C b/gcc/testsuite/g++.dg/warn/pmf1.C
index 013c21b6db9..a63a00c33df 100644
--- a/gcc/testsuite/g++.dg/warn/pmf1.C
+++ b/gcc/testsuite/g++.dg/warn/pmf1.C
@@ -15,4 +15,5 @@ int
a::f() const
{
int (a::* b)() const = &f; // { dg-error "&a::f" }
+ return 0;
}
diff --git a/gcc/testsuite/g++.dg/warn/string1.C b/gcc/testsuite/g++.dg/warn/string1.C
index 2670f63d931..8f24a78f709 100644
--- a/gcc/testsuite/g++.dg/warn/string1.C
+++ b/gcc/testsuite/g++.dg/warn/string1.C
@@ -15,4 +15,6 @@ int test() {
//
std::string s = "";
s += 'x' + "y"; // { dg-warning "bounds of constant string" }
+
+ return 0;
}
diff --git a/gcc/testsuite/g++.old-deja/g++.benjamin/p13417.C b/gcc/testsuite/g++.old-deja/g++.benjamin/p13417.C
index 132b13df277..e705f0dffb5 100644
--- a/gcc/testsuite/g++.old-deja/g++.benjamin/p13417.C
+++ b/gcc/testsuite/g++.old-deja/g++.benjamin/p13417.C
@@ -1,5 +1,5 @@
// { dg-do assemble }
-// { dg-options "-Wno-deprecated" }
+// { dg-options "-Wno-deprecated -Wno-return-type" }
// prms-id: 13417
class Foo {
diff --git a/gcc/testsuite/g++.old-deja/g++.brendan/asm-extn1.C b/gcc/testsuite/g++.old-deja/g++.brendan/asm-extn1.C
index 3c39972ba40..75fa5f84d51 100644
--- a/gcc/testsuite/g++.old-deja/g++.brendan/asm-extn1.C
+++ b/gcc/testsuite/g++.old-deja/g++.brendan/asm-extn1.C
@@ -1,4 +1,4 @@
-// { dg-do assemble { target sparc-sun-* } }
+// { dg-do assemble { target sparc*-*-* } }
// { dg-options "-S" }
// GROUPS passed asm-extension
// This used to crash because c_expand_asm_keyword didn't know what to
@@ -7,7 +7,7 @@
extern void traptable(void);
-main()
+int main()
{
asm("wr %0,%%tbr" : : "r" (traptable));
}
diff --git a/gcc/testsuite/g++.old-deja/g++.brendan/crash24.C b/gcc/testsuite/g++.old-deja/g++.brendan/crash24.C
index 42d0fabc238..e1e9d32c94f 100644
--- a/gcc/testsuite/g++.old-deja/g++.brendan/crash24.C
+++ b/gcc/testsuite/g++.old-deja/g++.brendan/crash24.C
@@ -11,9 +11,12 @@
// array bounds, and then force the array to be allocated on the stack instead
// of a register.
+int
main()
{
char i[1];
i[1] = 0;
+
+ return 0;
}
diff --git a/gcc/testsuite/g++.old-deja/g++.ext/constructor.C b/gcc/testsuite/g++.old-deja/g++.ext/constructor.C
index a7995bbfe79..3dc15c52acd 100644
--- a/gcc/testsuite/g++.old-deja/g++.ext/constructor.C
+++ b/gcc/testsuite/g++.old-deja/g++.ext/constructor.C
@@ -9,6 +9,7 @@ struct Any {
int i, j;
+int
main () {
struct Any *ap = (struct Any *)
__builtin_alloca (sizeof(struct Any));
diff --git a/gcc/testsuite/g++.old-deja/g++.ext/namedret1.C b/gcc/testsuite/g++.old-deja/g++.ext/namedret1.C
index 29955fb525c..ae0391d1559 100644
--- a/gcc/testsuite/g++.old-deja/g++.ext/namedret1.C
+++ b/gcc/testsuite/g++.old-deja/g++.ext/namedret1.C
@@ -1,7 +1,7 @@
// { dg-do assemble }
// { dg-options "-Wno-deprecated" }
-int f(int x) return y(x) { } // { dg-error "" }
+int f(int x) return y(x) { return 0; } // { dg-error "" }
extern "C" void abort ();
diff --git a/gcc/testsuite/g++.old-deja/g++.ext/namedret3.C b/gcc/testsuite/g++.old-deja/g++.ext/namedret3.C
index 7a0e0d5943f..0caf6a31378 100644
--- a/gcc/testsuite/g++.old-deja/g++.ext/namedret3.C
+++ b/gcc/testsuite/g++.old-deja/g++.ext/namedret3.C
@@ -1,5 +1,5 @@
// { dg-do assemble }
-// { dg-options "-Wno-deprecated" }
+// { dg-options "-Wno-deprecated -Wno-return-type" }
extern "C" void abort();
diff --git a/gcc/testsuite/g++.old-deja/g++.ext/return1.C b/gcc/testsuite/g++.old-deja/g++.ext/return1.C
index f6bbc4da211..60d7c8abcf8 100644
--- a/gcc/testsuite/g++.old-deja/g++.ext/return1.C
+++ b/gcc/testsuite/g++.old-deja/g++.ext/return1.C
@@ -1,5 +1,5 @@
// { dg-do assemble }
-// { dg-options "-Wno-deprecated" }
+// { dg-options "-Wno-deprecated -Wno-return-type" }
// Test that the named return value extension works when passed as a reference.
// Origin: Jason Merrill <jason@redhat.com>
diff --git a/gcc/testsuite/g++.old-deja/g++.jason/anon4.C b/gcc/testsuite/g++.old-deja/g++.jason/anon4.C
index 588bf263c8d..47b87790119 100644
--- a/gcc/testsuite/g++.old-deja/g++.jason/anon4.C
+++ b/gcc/testsuite/g++.old-deja/g++.jason/anon4.C
@@ -3,6 +3,7 @@
// PRMS Id: 5371
// Bug: g++ screws up the alignment of buff and dies.
+int
main()
{
union {
@@ -11,4 +12,5 @@ main()
};
void *p = buff;
+ return 0;
}
diff --git a/gcc/testsuite/g++.old-deja/g++.jason/enum6.C b/gcc/testsuite/g++.old-deja/g++.jason/enum6.C
index c5bcec9ad7a..4894b55cdb3 100644
--- a/gcc/testsuite/g++.old-deja/g++.jason/enum6.C
+++ b/gcc/testsuite/g++.old-deja/g++.jason/enum6.C
@@ -18,6 +18,7 @@ enum C { c1 = -1, c2 = 0x80000000 };
enum D { d1 = CHAR_MIN, d2 = CHAR_MAX };
enum E { e1 = CHAR_MIN, e2 = CHAR_MIN };
+int
main()
{
return (sizeof (A) != 4 || sizeof (B) != 4 || sizeof (C) != 8
diff --git a/gcc/testsuite/g++.old-deja/g++.jason/lineno2.C b/gcc/testsuite/g++.old-deja/g++.jason/lineno2.C
index 7f33176493c..a4f524acb70 100644
--- a/gcc/testsuite/g++.old-deja/g++.jason/lineno2.C
+++ b/gcc/testsuite/g++.old-deja/g++.jason/lineno2.C
@@ -8,7 +8,9 @@ public:
# 200 "lineno2.C"
};
+int
main()
{
- undef1(); // { dg-error "" "" { target *-*-* } 204 }
+ undef1(); // { dg-error "" "" { target *-*-* } 205 }
+ return 0;
}
diff --git a/gcc/testsuite/g++.old-deja/g++.jason/lineno3.C b/gcc/testsuite/g++.old-deja/g++.jason/lineno3.C
index 997267e7244..8e305606b64 100644
--- a/gcc/testsuite/g++.old-deja/g++.jason/lineno3.C
+++ b/gcc/testsuite/g++.old-deja/g++.jason/lineno3.C
@@ -7,7 +7,7 @@ template <class T> class A
{
public:
# 200 "lineno3.C"
- int foo () { undef1(); } // { dg-error "" "" { target *-*-* } 200 }
+ void foo () { undef1(); } // { dg-error "" "" { target *-*-* } 200 }
// { dg-message "note" "note" { target *-*-* } 200 }
};
diff --git a/gcc/testsuite/g++.old-deja/g++.jason/lineno4.C b/gcc/testsuite/g++.old-deja/g++.jason/lineno4.C
index caa5bc216cd..703c897d12f 100644
--- a/gcc/testsuite/g++.old-deja/g++.jason/lineno4.C
+++ b/gcc/testsuite/g++.old-deja/g++.jason/lineno4.C
@@ -7,7 +7,7 @@ template <class T> class A
public:
# 200 "lineno4.C"
- int foo () { undef1(); } // { dg-error "" "" { target *-*-* } 200 }
+ void foo () { undef1(); } // { dg-error "" "" { target *-*-* } 200 }
// { dg-message "note" "note" { target *-*-* } 200 }
};
diff --git a/gcc/testsuite/g++.old-deja/g++.jason/new2.C b/gcc/testsuite/g++.old-deja/g++.jason/new2.C
index 75d353da124..a2e000e19bf 100644
--- a/gcc/testsuite/g++.old-deja/g++.jason/new2.C
+++ b/gcc/testsuite/g++.old-deja/g++.jason/new2.C
@@ -7,7 +7,9 @@ struct A {
A() { i = 2; }
};
+int
main()
{
A *p = new A ();
+ return 0;
}
diff --git a/gcc/testsuite/g++.old-deja/g++.jason/new4.C b/gcc/testsuite/g++.old-deja/g++.jason/new4.C
index 770a2a26f92..d09ca1453cc 100644
--- a/gcc/testsuite/g++.old-deja/g++.jason/new4.C
+++ b/gcc/testsuite/g++.old-deja/g++.jason/new4.C
@@ -10,6 +10,7 @@ struct A {
A* ap = new A (1);
A* ap2 = new A[3];
+int
main ()
{
if (ap->i != 1 || ap2[0].i != 42 || ap2[1].i != 42 || ap2[2].i != 42)
@@ -20,4 +21,6 @@ main ()
if (ap->i != 1 || ap2[0].i != 42 || ap2[1].i != 42 || ap2[2].i != 42)
return 1;
+
+ return 0;
}
diff --git a/gcc/testsuite/g++.old-deja/g++.jason/shadow1.C b/gcc/testsuite/g++.old-deja/g++.jason/shadow1.C
index 941f80cfc00..fe3ec951b78 100644
--- a/gcc/testsuite/g++.old-deja/g++.jason/shadow1.C
+++ b/gcc/testsuite/g++.old-deja/g++.jason/shadow1.C
@@ -10,7 +10,9 @@ private:
void x::fun() { }
+int
main ()
{
float foo;
+ return 0;
}
diff --git a/gcc/testsuite/g++.old-deja/g++.jason/tempcons.C b/gcc/testsuite/g++.old-deja/g++.jason/tempcons.C
index a9974dbf779..d56eab51978 100644
--- a/gcc/testsuite/g++.old-deja/g++.jason/tempcons.C
+++ b/gcc/testsuite/g++.old-deja/g++.jason/tempcons.C
@@ -4,5 +4,5 @@
template <class T>
struct A {
int i;
- Blarg () : i(0) { } // { dg-error "" }
+ Blarg () : i(0) { return 0; } // { dg-error "" }
};
diff --git a/gcc/testsuite/g++.old-deja/g++.jason/thunk2.C b/gcc/testsuite/g++.old-deja/g++.jason/thunk2.C
index 427ef7f921d..8c02fc8f1d9 100644
--- a/gcc/testsuite/g++.old-deja/g++.jason/thunk2.C
+++ b/gcc/testsuite/g++.old-deja/g++.jason/thunk2.C
@@ -40,10 +40,13 @@ void* test(MMixin& anExample)
return anExample.MixinFunc(1,A(0)).p;
}
+int
main ()
{
CExample c;
if (test(c) != &c)
return 1;
+
+ return 0;
}
diff --git a/gcc/testsuite/g++.old-deja/g++.law/builtin1.C b/gcc/testsuite/g++.old-deja/g++.law/builtin1.C
index 67b71a83685..de228642250 100644
--- a/gcc/testsuite/g++.old-deja/g++.law/builtin1.C
+++ b/gcc/testsuite/g++.old-deja/g++.law/builtin1.C
@@ -14,4 +14,6 @@ extern "C" int printf (const char *, ...);
void* junk() {
return __builtin_alloca(10);
}
-main() { printf ("PASS\n");}
+
+int
+main() { printf ("PASS\n"); return 0; }
diff --git a/gcc/testsuite/g++.old-deja/g++.law/enum9.C b/gcc/testsuite/g++.old-deja/g++.law/enum9.C
index a1a551d6e55..4fdb7aaeff5 100644
--- a/gcc/testsuite/g++.old-deja/g++.law/enum9.C
+++ b/gcc/testsuite/g++.old-deja/g++.law/enum9.C
@@ -15,6 +15,7 @@
enum E { A = 0x80000000, B = 0 };
+ int
main()
{
if (sizeof (E) != 4)
diff --git a/gcc/testsuite/g++.old-deja/g++.law/except3.C b/gcc/testsuite/g++.old-deja/g++.law/except3.C
index ec8bb100ee7..dc416eb3682 100644
--- a/gcc/testsuite/g++.old-deja/g++.law/except3.C
+++ b/gcc/testsuite/g++.old-deja/g++.law/except3.C
@@ -34,6 +34,7 @@ void do_something(Vector& v)
int i = v[v.size()+10];
}
+int
main()
{
Vector v(10);
diff --git a/gcc/testsuite/g++.old-deja/g++.law/init6.C b/gcc/testsuite/g++.old-deja/g++.law/init6.C
index 861b9252c95..65a07803070 100644
--- a/gcc/testsuite/g++.old-deja/g++.law/init6.C
+++ b/gcc/testsuite/g++.old-deja/g++.law/init6.C
@@ -1,4 +1,5 @@
// { dg-do assemble }
+// { dg-additional-options "-Wno-return-type" }
// GROUPS passed initialization
class Vector {
double *v;
diff --git a/gcc/testsuite/g++.old-deja/g++.law/profile1.C b/gcc/testsuite/g++.old-deja/g++.law/profile1.C
index ecd3b834c76..66ef7766d04 100644
--- a/gcc/testsuite/g++.old-deja/g++.law/profile1.C
+++ b/gcc/testsuite/g++.old-deja/g++.law/profile1.C
@@ -4,9 +4,12 @@
// { dg-options "-pg -static" { target hppa*-*-hpux* } }
// GROUPS passed profiling
#include <stdio.h>
+
+int
main()
{
printf ("PASS\n");
+ return 0;
}
/* { dg-final { cleanup-profile-file } } */
diff --git a/gcc/testsuite/g++.old-deja/g++.law/shadow2.C b/gcc/testsuite/g++.old-deja/g++.law/shadow2.C
index 46f9dfcc32f..64d967a01d5 100644
--- a/gcc/testsuite/g++.old-deja/g++.law/shadow2.C
+++ b/gcc/testsuite/g++.old-deja/g++.law/shadow2.C
@@ -22,7 +22,9 @@ public:
int f (int count) { return (count); }
};
+int
main ()
{
Y<char> y;
+ return 0;
}
diff --git a/gcc/testsuite/g++.old-deja/g++.law/temps4.C b/gcc/testsuite/g++.old-deja/g++.law/temps4.C
index 3f8c9ed2c7d..df930537e2e 100644
--- a/gcc/testsuite/g++.old-deja/g++.law/temps4.C
+++ b/gcc/testsuite/g++.old-deja/g++.law/temps4.C
@@ -31,10 +31,13 @@ X foo() {
return x;
}
+int
main() {
X x = foo();
if (did_it)
abort ();
else
printf ("PASS\n");
+
+ return 0;
}
diff --git a/gcc/testsuite/g++.old-deja/g++.law/weak.C b/gcc/testsuite/g++.old-deja/g++.law/weak.C
index 49132adbeb3..52444e77946 100644
--- a/gcc/testsuite/g++.old-deja/g++.law/weak.C
+++ b/gcc/testsuite/g++.old-deja/g++.law/weak.C
@@ -11,10 +11,12 @@
std::istream x (0);
+int
main () {
x.get();
std::putc(0, 0);
std::fgets(0, 0, 0);
x.get((char*) 0, 0);
+ return 0;
}
diff --git a/gcc/testsuite/g++.old-deja/g++.mike/bool2.C b/gcc/testsuite/g++.old-deja/g++.mike/bool2.C
index 3d8bc3c03eb..32b3d8c0f78 100644
--- a/gcc/testsuite/g++.old-deja/g++.mike/bool2.C
+++ b/gcc/testsuite/g++.old-deja/g++.mike/bool2.C
@@ -46,6 +46,8 @@ int i = true;
bool b = true;
bool c = (bool)(void (A::*)())0;
bool d = 256;
+
+int
main() {
if (!d) return 1;
if (!a) return 1;
@@ -55,4 +57,6 @@ main() {
if (!a2) return 1;
if (!a3) return 1;
if (!a5) return 1;
+
+ return 0;
}
diff --git a/gcc/testsuite/g++.old-deja/g++.mike/eh1.C b/gcc/testsuite/g++.old-deja/g++.mike/eh1.C
index 8105107f363..98191a20bc5 100644
--- a/gcc/testsuite/g++.old-deja/g++.mike/eh1.C
+++ b/gcc/testsuite/g++.old-deja/g++.mike/eh1.C
@@ -24,6 +24,7 @@ struct Exception
}
}
+int
main (int argc, const char *argv[])
{
if (argc != 2)
diff --git a/gcc/testsuite/g++.old-deja/g++.mike/eh10.C b/gcc/testsuite/g++.old-deja/g++.mike/eh10.C
index a38e2ebc770..32587d4c158 100644
--- a/gcc/testsuite/g++.old-deja/g++.mike/eh10.C
+++ b/gcc/testsuite/g++.old-deja/g++.mike/eh10.C
@@ -16,6 +16,7 @@ void bar() {
void ee(int *) { }
+int
main() {
try {
foo();
diff --git a/gcc/testsuite/g++.old-deja/g++.mike/eh13.C b/gcc/testsuite/g++.old-deja/g++.mike/eh13.C
index bac56865576..e2c7435c5ca 100644
--- a/gcc/testsuite/g++.old-deja/g++.mike/eh13.C
+++ b/gcc/testsuite/g++.old-deja/g++.mike/eh13.C
@@ -3,4 +3,4 @@
#include <string>
-main() { }
+int main() { return 0; }
diff --git a/gcc/testsuite/g++.old-deja/g++.mike/eh16.C b/gcc/testsuite/g++.old-deja/g++.mike/eh16.C
index 73beac0a5bd..7a985908f3b 100644
--- a/gcc/testsuite/g++.old-deja/g++.mike/eh16.C
+++ b/gcc/testsuite/g++.old-deja/g++.mike/eh16.C
@@ -16,6 +16,7 @@ struct B {
}
};
+int
main() {
try {
B b;
diff --git a/gcc/testsuite/g++.old-deja/g++.mike/eh17.C b/gcc/testsuite/g++.old-deja/g++.mike/eh17.C
index 5b066c9c36b..2443d568024 100644
--- a/gcc/testsuite/g++.old-deja/g++.mike/eh17.C
+++ b/gcc/testsuite/g++.old-deja/g++.mike/eh17.C
@@ -15,6 +15,7 @@ struct B : public A {
}
};
+int
main() {
try {
B b;
diff --git a/gcc/testsuite/g++.old-deja/g++.mike/eh2.C b/gcc/testsuite/g++.old-deja/g++.mike/eh2.C
index 0a08790fe03..862bd6e7598 100644
--- a/gcc/testsuite/g++.old-deja/g++.mike/eh2.C
+++ b/gcc/testsuite/g++.old-deja/g++.mike/eh2.C
@@ -63,12 +63,10 @@ f(Vector& v) {
}
}
+int
main() {
Vector v(10);
f( v );
return 1;
}
-
-
-
diff --git a/gcc/testsuite/g++.old-deja/g++.mike/eh23.C b/gcc/testsuite/g++.old-deja/g++.mike/eh23.C
index da2ac0ef39d..64733371b51 100644
--- a/gcc/testsuite/g++.old-deja/g++.mike/eh23.C
+++ b/gcc/testsuite/g++.old-deja/g++.mike/eh23.C
@@ -36,6 +36,7 @@ void my_terminate() {
exit (0); // double faults should call terminate
}
+int
main() {
std::set_terminate (my_terminate);
try {
diff --git a/gcc/testsuite/g++.old-deja/g++.mike/eh24.C b/gcc/testsuite/g++.old-deja/g++.mike/eh24.C
index 829819b396d..89b27c88fd7 100644
--- a/gcc/testsuite/g++.old-deja/g++.mike/eh24.C
+++ b/gcc/testsuite/g++.old-deja/g++.mike/eh24.C
@@ -15,6 +15,7 @@ struct A {
}
};
+int
main() {
try {
try {
diff --git a/gcc/testsuite/g++.old-deja/g++.mike/eh25.C b/gcc/testsuite/g++.old-deja/g++.mike/eh25.C
index 0ac61981b97..6516494dd44 100644
--- a/gcc/testsuite/g++.old-deja/g++.mike/eh25.C
+++ b/gcc/testsuite/g++.old-deja/g++.mike/eh25.C
@@ -22,6 +22,7 @@ struct A {
}
};
+int
main() {
try {
try {
diff --git a/gcc/testsuite/g++.old-deja/g++.mike/eh26.C b/gcc/testsuite/g++.old-deja/g++.mike/eh26.C
index d0d65da311a..6d82a4e73a6 100644
--- a/gcc/testsuite/g++.old-deja/g++.mike/eh26.C
+++ b/gcc/testsuite/g++.old-deja/g++.mike/eh26.C
@@ -3,6 +3,7 @@
class MyExceptionHandler { };
+int
main() {
try {
throw MyExceptionHandler();
diff --git a/gcc/testsuite/g++.old-deja/g++.mike/eh27.C b/gcc/testsuite/g++.old-deja/g++.mike/eh27.C
index 8be08da8c26..dcfb5e7a093 100644
--- a/gcc/testsuite/g++.old-deja/g++.mike/eh27.C
+++ b/gcc/testsuite/g++.old-deja/g++.mike/eh27.C
@@ -5,6 +5,7 @@
class MyExceptionHandler { };
+int
main() {
try {
throw MyExceptionHandler();
diff --git a/gcc/testsuite/g++.old-deja/g++.mike/eh28.C b/gcc/testsuite/g++.old-deja/g++.mike/eh28.C
index 57cab1178a4..47cd3861ca1 100644
--- a/gcc/testsuite/g++.old-deja/g++.mike/eh28.C
+++ b/gcc/testsuite/g++.old-deja/g++.mike/eh28.C
@@ -8,6 +8,7 @@ int fail = 1;
class X { public: virtual void p() { } };
class Y : public X { public: virtual void p() { fail = 0; } };
+int
main()
{
try { Y y; throw y; }
diff --git a/gcc/testsuite/g++.old-deja/g++.mike/eh29.C b/gcc/testsuite/g++.old-deja/g++.mike/eh29.C
index 892e959647e..bcb4cea2874 100644
--- a/gcc/testsuite/g++.old-deja/g++.mike/eh29.C
+++ b/gcc/testsuite/g++.old-deja/g++.mike/eh29.C
@@ -17,6 +17,7 @@ public:
}
};
+int
main() {
try {
A a[5];
diff --git a/gcc/testsuite/g++.old-deja/g++.mike/eh30.C b/gcc/testsuite/g++.old-deja/g++.mike/eh30.C
index 8da682c5341..848809245c1 100644
--- a/gcc/testsuite/g++.old-deja/g++.mike/eh30.C
+++ b/gcc/testsuite/g++.old-deja/g++.mike/eh30.C
@@ -1,4 +1,5 @@
// { dg-do assemble { target native } }
// { dg-options "-fexceptions -fPIC -S" }
+int
main() { throw 1; }
diff --git a/gcc/testsuite/g++.old-deja/g++.mike/eh31.C b/gcc/testsuite/g++.old-deja/g++.mike/eh31.C
index 150d66b98a6..d7e84dd71e0 100644
--- a/gcc/testsuite/g++.old-deja/g++.mike/eh31.C
+++ b/gcc/testsuite/g++.old-deja/g++.mike/eh31.C
@@ -11,6 +11,7 @@ public:
};
+int
main() {
try {
throw Foo();
diff --git a/gcc/testsuite/g++.old-deja/g++.mike/eh35.C b/gcc/testsuite/g++.old-deja/g++.mike/eh35.C
index adf852a64a3..9ea5a662d56 100644
--- a/gcc/testsuite/g++.old-deja/g++.mike/eh35.C
+++ b/gcc/testsuite/g++.old-deja/g++.mike/eh35.C
@@ -1,6 +1,7 @@
// { dg-do run { xfail sparc64-*-elf arm-*-pe } }
// { dg-options "-fexceptions" }
+int
main() {
try {
throw 'a';
diff --git a/gcc/testsuite/g++.old-deja/g++.mike/eh36.C b/gcc/testsuite/g++.old-deja/g++.mike/eh36.C
index d6b4788cbfa..ba9e814ca3d 100644
--- a/gcc/testsuite/g++.old-deja/g++.mike/eh36.C
+++ b/gcc/testsuite/g++.old-deja/g++.mike/eh36.C
@@ -18,6 +18,7 @@ public:
}
} d(42);
+int
main() {
try {
throw &d;
diff --git a/gcc/testsuite/g++.old-deja/g++.mike/eh37.C b/gcc/testsuite/g++.old-deja/g++.mike/eh37.C
index a98b5df997f..63774119dd5 100644
--- a/gcc/testsuite/g++.old-deja/g++.mike/eh37.C
+++ b/gcc/testsuite/g++.old-deja/g++.mike/eh37.C
@@ -10,6 +10,7 @@ public:
}
} b(42);
+int
main() {
try {
throw &b;
diff --git a/gcc/testsuite/g++.old-deja/g++.mike/eh38.C b/gcc/testsuite/g++.old-deja/g++.mike/eh38.C
index 5a568798259..6482b900818 100644
--- a/gcc/testsuite/g++.old-deja/g++.mike/eh38.C
+++ b/gcc/testsuite/g++.old-deja/g++.mike/eh38.C
@@ -10,6 +10,7 @@ public:
}
} b(42);
+int
main() {
try {
throw &b;
diff --git a/gcc/testsuite/g++.old-deja/g++.mike/eh39.C b/gcc/testsuite/g++.old-deja/g++.mike/eh39.C
index e4bfff8d17a..8f8a84481e6 100644
--- a/gcc/testsuite/g++.old-deja/g++.mike/eh39.C
+++ b/gcc/testsuite/g++.old-deja/g++.mike/eh39.C
@@ -18,6 +18,7 @@ D::D() try : B() {
throw;
}
+int
main() {
try {
D d;
diff --git a/gcc/testsuite/g++.old-deja/g++.mike/eh40.C b/gcc/testsuite/g++.old-deja/g++.mike/eh40.C
index e42b419747a..f08836064c4 100644
--- a/gcc/testsuite/g++.old-deja/g++.mike/eh40.C
+++ b/gcc/testsuite/g++.old-deja/g++.mike/eh40.C
@@ -19,6 +19,7 @@ public:
}
};
+int
main() {
try {
D d;
diff --git a/gcc/testsuite/g++.old-deja/g++.mike/eh47.C b/gcc/testsuite/g++.old-deja/g++.mike/eh47.C
index 10eb8a8970f..36d3b9db329 100644
--- a/gcc/testsuite/g++.old-deja/g++.mike/eh47.C
+++ b/gcc/testsuite/g++.old-deja/g++.mike/eh47.C
@@ -8,6 +8,7 @@ void myterm() {
exit (0);
}
+int
main() {
try {
throw "";
diff --git a/gcc/testsuite/g++.old-deja/g++.mike/eh50.C b/gcc/testsuite/g++.old-deja/g++.mike/eh50.C
index 0ebaab41fa6..028a2de0c23 100644
--- a/gcc/testsuite/g++.old-deja/g++.mike/eh50.C
+++ b/gcc/testsuite/g++.old-deja/g++.mike/eh50.C
@@ -10,6 +10,7 @@ void my_unexpected() {
template <class T> void foo(T) throw (int) { throw "Hi"; } // { dg-warning "deprecated" "" { target c++11 } }
+int
main() {
std::set_unexpected (my_unexpected);
try {
diff --git a/gcc/testsuite/g++.old-deja/g++.mike/eh51.C b/gcc/testsuite/g++.old-deja/g++.mike/eh51.C
index 7d3cd413ee7..428635b175d 100644
--- a/gcc/testsuite/g++.old-deja/g++.mike/eh51.C
+++ b/gcc/testsuite/g++.old-deja/g++.mike/eh51.C
@@ -10,6 +10,7 @@ void my_unexpected() {
template <class T> void foo(T) throw (T) { throw "Hi"; } // { dg-warning "deprecated" "" { target c++11 } }
+int
main() {
std::set_unexpected (my_unexpected);
try {
diff --git a/gcc/testsuite/g++.old-deja/g++.mike/eh7.C b/gcc/testsuite/g++.old-deja/g++.mike/eh7.C
index f431fb96d86..6a0502c484e 100644
--- a/gcc/testsuite/g++.old-deja/g++.mike/eh7.C
+++ b/gcc/testsuite/g++.old-deja/g++.mike/eh7.C
@@ -1,6 +1,7 @@
// { dg-do run { xfail sparc64-*-elf arm-*-pe } }
// { dg-options "-fexceptions" }
+int
main() {
if (0)
throw 1 | 2;
diff --git a/gcc/testsuite/g++.old-deja/g++.mike/eh8.C b/gcc/testsuite/g++.old-deja/g++.mike/eh8.C
index 8e01da0b51b..be69abcf855 100644
--- a/gcc/testsuite/g++.old-deja/g++.mike/eh8.C
+++ b/gcc/testsuite/g++.old-deja/g++.mike/eh8.C
@@ -5,6 +5,7 @@ extern "C" int printf(const char *, ...);
int i;
+int
main() {
try {
try {
diff --git a/gcc/testsuite/g++.old-deja/g++.mike/eh9.C b/gcc/testsuite/g++.old-deja/g++.mike/eh9.C
index 633642faa95..a84bc27df49 100644
--- a/gcc/testsuite/g++.old-deja/g++.mike/eh9.C
+++ b/gcc/testsuite/g++.old-deja/g++.mike/eh9.C
@@ -1,4 +1,5 @@
// { dg-do run { xfail sparc64-*-elf arm-*-pe } }
// { dg-options "-fexceptions" }
+int
main() throw () { }
diff --git a/gcc/testsuite/g++.old-deja/g++.mike/mangle1.C b/gcc/testsuite/g++.old-deja/g++.mike/mangle1.C
index de5e96588b3..3053af9dd58 100644
--- a/gcc/testsuite/g++.old-deja/g++.mike/mangle1.C
+++ b/gcc/testsuite/g++.old-deja/g++.mike/mangle1.C
@@ -22,6 +22,7 @@ void f3() {
}
#endif
+int
main() {
f.bar(foo::red);
}
diff --git a/gcc/testsuite/g++.old-deja/g++.mike/p5958.C b/gcc/testsuite/g++.old-deja/g++.mike/p5958.C
index 666a4494509..ffcb13401da 100644
--- a/gcc/testsuite/g++.old-deja/g++.mike/p5958.C
+++ b/gcc/testsuite/g++.old-deja/g++.mike/p5958.C
@@ -4,6 +4,7 @@
class A { };
+int
main() {
int i = 1;
if (1 not_eq 1)
diff --git a/gcc/testsuite/g++.old-deja/g++.mike/p6004.C b/gcc/testsuite/g++.old-deja/g++.mike/p6004.C
index 2b262bdb047..d1db7e006fe 100644
--- a/gcc/testsuite/g++.old-deja/g++.mike/p6004.C
+++ b/gcc/testsuite/g++.old-deja/g++.mike/p6004.C
@@ -16,6 +16,7 @@ int bar3() { return 43; }
int A::foo() { return 42; }
+int
main() {
return A::foo() - 42;
}
diff --git a/gcc/testsuite/g++.old-deja/g++.mike/p700.C b/gcc/testsuite/g++.old-deja/g++.mike/p700.C
index 62247791775..e4537c72868 100644
--- a/gcc/testsuite/g++.old-deja/g++.mike/p700.C
+++ b/gcc/testsuite/g++.old-deja/g++.mike/p700.C
@@ -1,5 +1,5 @@
// { dg-do assemble }
-// { dg-options "-Wno-deprecated -Wno-register -Wno-builtin-declaration-mismatch" }
+// { dg-options "-Wno-deprecated -Wno-register -Wno-builtin-declaration-mismatch -Wno-return-type" }
// { dg-error "limited range of data type" "16-bit target" { target xstormy16-*-* } 0 }
// prms-id: 700
@@ -2111,6 +2111,7 @@ char mystrcmp(String30 s, String30 t)
+int
main()
{
Proc0();
diff --git a/gcc/testsuite/g++.old-deja/g++.mike/p7912.C b/gcc/testsuite/g++.old-deja/g++.mike/p7912.C
index e5584ec4cd9..fe381e2d0e6 100644
--- a/gcc/testsuite/g++.old-deja/g++.mike/p7912.C
+++ b/gcc/testsuite/g++.old-deja/g++.mike/p7912.C
@@ -12,6 +12,7 @@ public:
};
+int
main()
{
try {
diff --git a/gcc/testsuite/g++.old-deja/g++.mike/p811.C b/gcc/testsuite/g++.old-deja/g++.mike/p811.C
index 2ca04abdcba..bebfe6b7e20 100644
--- a/gcc/testsuite/g++.old-deja/g++.mike/p811.C
+++ b/gcc/testsuite/g++.old-deja/g++.mike/p811.C
@@ -535,6 +535,7 @@ X::stringify2() // { dg-error "no declaration matches" }
return "stringify2";
}
+int
main()
{
X x;
@@ -547,4 +548,6 @@ main()
cout << "y\n";
cout << y.stringify() << '\n';
cout << y.stringify2() << '\n';
+
+ return 0;
}
diff --git a/gcc/testsuite/g++.old-deja/g++.mike/virt4.C b/gcc/testsuite/g++.old-deja/g++.mike/virt4.C
index 2d8b042632a..b6e4411ff65 100644
--- a/gcc/testsuite/g++.old-deja/g++.mike/virt4.C
+++ b/gcc/testsuite/g++.old-deja/g++.mike/virt4.C
@@ -22,7 +22,9 @@ public:
void foo() { D1::foo(); D2::foo(); }
};
+int
main() {
D1_2 h;
h.foo();
+ return 0;
}
diff --git a/gcc/testsuite/g++.old-deja/g++.oliva/nameret1.C b/gcc/testsuite/g++.old-deja/g++.oliva/nameret1.C
index b32deedd4ee..8e5eccc293a 100644
--- a/gcc/testsuite/g++.old-deja/g++.oliva/nameret1.C
+++ b/gcc/testsuite/g++.old-deja/g++.oliva/nameret1.C
@@ -1,5 +1,5 @@
// { dg-do assemble }
-// { dg-options "-Wno-deprecated" }
+// { dg-options "-Wno-deprecated -Wno-return-type" }
// Copyright (C) 1999, 2000, 2002 Free Software Foundation
// by Alexandre Oliva <oliva@lsd.ic.unicamp.br>
diff --git a/gcc/testsuite/g++.old-deja/g++.oliva/nameret2.C b/gcc/testsuite/g++.old-deja/g++.oliva/nameret2.C
index e0dfb7e129c..5f86b1c8417 100644
--- a/gcc/testsuite/g++.old-deja/g++.oliva/nameret2.C
+++ b/gcc/testsuite/g++.old-deja/g++.oliva/nameret2.C
@@ -1,5 +1,5 @@
// { dg-do assemble }
-// { dg-options "-O1 -Wno-deprecated" }
+// { dg-options "-O1 -Wno-deprecated -Wno-return-type" }
// Copyright (C) 1999, 2000, 2002 Free Software Foundation
// by Alexandre Oliva <oliva@lsd.ic.unicamp.br>
diff --git a/gcc/testsuite/g++.old-deja/g++.other/decl1.C b/gcc/testsuite/g++.old-deja/g++.other/decl1.C
index 07bcc914521..6262bf307df 100644
--- a/gcc/testsuite/g++.old-deja/g++.other/decl1.C
+++ b/gcc/testsuite/g++.old-deja/g++.other/decl1.C
@@ -6,4 +6,5 @@ int foo(int);
int bar() {
int baz(int(foo(0)));
int foo = baz;
+ return 0;
}
diff --git a/gcc/testsuite/g++.old-deja/g++.other/expr1.C b/gcc/testsuite/g++.old-deja/g++.other/expr1.C
index 831876d01cb..415f0f19ef2 100644
--- a/gcc/testsuite/g++.old-deja/g++.other/expr1.C
+++ b/gcc/testsuite/g++.old-deja/g++.other/expr1.C
@@ -3,9 +3,10 @@
// Simplified from bug report by Trevor Taylor <ttaylor@powerup.com.au>
struct T {
- int operator()(int) { } // { dg-message "operator|candidate expects" }
+ int operator()(int) { return 0; } // { dg-message "operator|candidate expects" }
};
int main() {
T()(); // { dg-error "match" } no such operator
+ return 0;
}
diff --git a/gcc/testsuite/g++.old-deja/g++.other/inline8.C b/gcc/testsuite/g++.old-deja/g++.other/inline8.C
index a46dc382a7d..c12a8f74a90 100644
--- a/gcc/testsuite/g++.old-deja/g++.other/inline8.C
+++ b/gcc/testsuite/g++.old-deja/g++.other/inline8.C
@@ -62,8 +62,10 @@ bool operator<(const NAMES_ITEM& n1, const NAMES_ITEM& n2)
lookup_t lookup;
NAMES_ITEM item ("one");
+
+int
main()
{
lookup.insert(pair<NAMES_ITEM,size_t>(item,0));
+ return 0;
}
-
diff --git a/gcc/testsuite/g++.old-deja/g++.other/loop1.C b/gcc/testsuite/g++.old-deja/g++.other/loop1.C
index 168734c545b..45b2acc3ef6 100644
--- a/gcc/testsuite/g++.old-deja/g++.other/loop1.C
+++ b/gcc/testsuite/g++.old-deja/g++.other/loop1.C
@@ -24,6 +24,7 @@ bool test ()
return true;
}
+int
main ()
{
f (test);
diff --git a/gcc/testsuite/g++.old-deja/g++.other/syntax1.C b/gcc/testsuite/g++.old-deja/g++.other/syntax1.C
index f1d3a86549c..d219048bb68 100644
--- a/gcc/testsuite/g++.old-deja/g++.other/syntax1.C
+++ b/gcc/testsuite/g++.old-deja/g++.other/syntax1.C
@@ -11,7 +11,9 @@ void AAA::fff() {}
AAA aaa;
+int
main ()
{
aaa.fff();
+ return 0;
}
diff --git a/gcc/testsuite/g++.old-deja/g++.pt/repo3.C b/gcc/testsuite/g++.old-deja/g++.pt/repo3.C
index 53baf29f349..2f62139660e 100644
--- a/gcc/testsuite/g++.old-deja/g++.pt/repo3.C
+++ b/gcc/testsuite/g++.old-deja/g++.pt/repo3.C
@@ -29,9 +29,11 @@ struct D : public B<T>, public C<T>
{
};
+int
main ()
{
D<int> x;
+ return 0;
}
// { dg-final { cleanup-repo-files } }
diff --git a/gcc/testsuite/g++.old-deja/g++.robertl/eb27.C b/gcc/testsuite/g++.old-deja/g++.robertl/eb27.C
index 204a143d619..2fe151cea7d 100644
--- a/gcc/testsuite/g++.old-deja/g++.robertl/eb27.C
+++ b/gcc/testsuite/g++.old-deja/g++.robertl/eb27.C
@@ -1,5 +1,6 @@
// { dg-do assemble }
// { dg-options "-Wno-deprecated" }
+// { dg-additional-options "-Wno-return-type" }
/* bug.cc */
/* simple program to demonstrate the bug with named return values in gcc
*/
@@ -39,4 +40,5 @@ int main()
std::cout << x << std::endl;
y = x + test<int>(2);
std::cout << y << std::endl;
+ return 0;
}
diff --git a/gcc/testsuite/g++.old-deja/g++.robertl/eb83.C b/gcc/testsuite/g++.old-deja/g++.robertl/eb83.C
index ecdb6bed788..47cf5b88f04 100644
--- a/gcc/testsuite/g++.old-deja/g++.robertl/eb83.C
+++ b/gcc/testsuite/g++.old-deja/g++.robertl/eb83.C
@@ -8,11 +8,13 @@ test_swap(int& x, int& y) throw()
y = tmp;
}
+int
main()
{
int i = 5;
int j = 7;
test_swap(i, j);
+ return 0;
}
/* { dg-final { cleanup-coverage-files } } */
diff --git a/gcc/testsuite/gcc.c-torture/compile/pr82838.c b/gcc/testsuite/gcc.c-torture/compile/pr82838.c
new file mode 100644
index 00000000000..a6ca163f82f
--- /dev/null
+++ b/gcc/testsuite/gcc.c-torture/compile/pr82838.c
@@ -0,0 +1,12 @@
+/* PR tree-optimization/82838 */
+
+struct S { unsigned short a, b, c; };
+struct S f[10];
+
+void
+foo (int e)
+{
+ struct S *x;
+ f[e].b = x[e].a;
+ f[e].c = x[e].b;
+}
diff --git a/gcc/testsuite/gcc.c-torture/compile/pr82879.c b/gcc/testsuite/gcc.c-torture/compile/pr82879.c
new file mode 100644
index 00000000000..fad3fed8de9
--- /dev/null
+++ b/gcc/testsuite/gcc.c-torture/compile/pr82879.c
@@ -0,0 +1,11 @@
+int a, b;
+static __attribute__((cold)) void fn1() {
+ for (;;)
+ for (; a;)
+ ;
+}
+void fn2() {
+ if (b)
+ fn1();
+}
+
diff --git a/gcc/testsuite/gcc.c-torture/compile/pr82913.c b/gcc/testsuite/gcc.c-torture/compile/pr82913.c
new file mode 100644
index 00000000000..5cf55573204
--- /dev/null
+++ b/gcc/testsuite/gcc.c-torture/compile/pr82913.c
@@ -0,0 +1,23 @@
+/* PR rtl-optimization/82913 */
+
+unsigned int a;
+unsigned long int b;
+
+int
+foo (void)
+{
+ ++a;
+ b = 0;
+}
+
+unsigned long int
+bar (int x)
+{
+ if (!foo () || !a)
+ {
+ int c = a != b;
+ if (c != x)
+ return a;
+ }
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.c-torture/execute/pr23135.c b/gcc/testsuite/gcc.c-torture/execute/pr23135.c
index 8dd6358e9b4..e740ff52874 100644
--- a/gcc/testsuite/gcc.c-torture/execute/pr23135.c
+++ b/gcc/testsuite/gcc.c-torture/execute/pr23135.c
@@ -1,9 +1,8 @@
-/* { dg-add-options stack_size } */
-
/* Based on execute/simd-1.c, modified by joern.rennecke@st.com to
trigger a reload bug. Verified for gcc mainline from 20050722 13:00 UTC
for sh-elf -m4 -O2. */
/* { dg-options "-Wno-psabi" } */
+/* { dg-add-options stack_size } */
#ifndef STACK_SIZE
#define STACK_SIZE (256*1024)
diff --git a/gcc/testsuite/gcc.c-torture/execute/pr82954.c b/gcc/testsuite/gcc.c-torture/execute/pr82954.c
new file mode 100644
index 00000000000..5ced28544e5
--- /dev/null
+++ b/gcc/testsuite/gcc.c-torture/execute/pr82954.c
@@ -0,0 +1,22 @@
+/* PR tree-optimization/82954 */
+
+__attribute__((noipa)) void
+foo (int *__restrict p, int *__restrict q)
+{
+ p[0] = p[0] ^ 1;
+ p[1] = p[1] ^ 2;
+ p[2] = p[2] ^ q[2];
+ p[3] = p[3] ^ q[3];
+}
+
+int
+main ()
+{
+ int p[4] = { 16, 32, 64, 128 };
+ int q[4] = { 8, 4, 2, 1 };
+ asm volatile ("" : : "g" (p), "g" (q) : "memory");
+ foo (p, q);
+ if (p[0] != 17 || p[1] != 34 || p[2] != 66 || p[3] != 129)
+ __builtin_abort ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/Walloca-1.c b/gcc/testsuite/gcc.dg/Walloca-1.c
index ad39373fb9f..85e9160e845 100644
--- a/gcc/testsuite/gcc.dg/Walloca-1.c
+++ b/gcc/testsuite/gcc.dg/Walloca-1.c
@@ -1,6 +1,6 @@
/* { dg-do compile } */
/* { dg-require-effective-target alloca } */
-/* { dg-options "-Walloca-larger-than=2000 -O2" } */
+/* { dg-options "-Walloca-larger-than=2000 -O2 -ftrack-macro-expansion=0" } */
#define alloca __builtin_alloca
diff --git a/gcc/testsuite/gcc.dg/Wunknownprag.c b/gcc/testsuite/gcc.dg/Wunknownprag.c
index c5ba58f767d..3514825a97e 100644
--- a/gcc/testsuite/gcc.dg/Wunknownprag.c
+++ b/gcc/testsuite/gcc.dg/Wunknownprag.c
@@ -5,7 +5,7 @@
/* We used to get "unspellable token: CPP_EOF" warnings. */
-#pragma /* { dg-warning "ignoring #pragma" } */
-#pragma ~ /* { dg-warning "ignoring #pragma" } */
-#pragma baz /* { dg-warning "ignoring #pragma" } */
-#pragma baz baz /* { dg-warning "ignoring #pragma" } */
+#pragma /* { dg-warning "-:ignoring #pragma" } */
+#pragma ~ /* { dg-warning "-:ignoring #pragma" } */
+#pragma baz /* { dg-warning "-:ignoring #pragma" } */
+#pragma baz baz /* { dg-warning "-:ignoring #pragma" } */
diff --git a/gcc/testsuite/gcc.dg/builtin-redefine.c b/gcc/testsuite/gcc.dg/builtin-redefine.c
index 8090015f693..882b2210992 100644
--- a/gcc/testsuite/gcc.dg/builtin-redefine.c
+++ b/gcc/testsuite/gcc.dg/builtin-redefine.c
@@ -27,8 +27,8 @@
#define __TIME__ "X" /* Define while undefined. */
#define __TIME__ "X" /* Re-define while defined. */ /* { dg-line time_prev } */
-#define __TIME__ "Y" /* { dg-warning "\"__TIME__\" redefined" } */
-/* { dg-message "previous definition" "" { target *-*-* } time_prev } */
+#define __TIME__ "Y" /* { dg-warning "-:\"__TIME__\" redefined" } */
+/* { dg-message "-:previous definition" "" { target *-*-* } time_prev } */
#undef __TIME__ /* Undefine while defined. */
@@ -38,8 +38,8 @@
#define __DATE__ "X" /* Define while undefined. */
#define __DATE__ "X" /* Re-define while defined. */ /* { dg-line date_prev } */
-#define __DATE__ "Y" /* { dg-warning "\"__DATE__\" redefined" } */
-/* { dg-message "previous definition" "" { target *-*-* } date_prev } */
+#define __DATE__ "Y" /* { dg-warning "-:\"__DATE__\" redefined" } */
+/* { dg-message "-:previous definition" "" { target *-*-* } date_prev } */
#undef __DATE__ /* Undefine while defined. */
@@ -47,8 +47,8 @@
#define __TIMESTAMP__ "X" /* Define while already defined. */
#define __TIMESTAMP__ "X" /* Re-define while defined. */ /* { dg-line timestamp_prev } */
-#define __TIMESTAMP__ "Y" /* { dg-warning "\"__TIMESTAMP__\" redefined" } */
-/* { dg-message "previous definition" "" { target *-*-* } timestamp_prev } */
+#define __TIMESTAMP__ "Y" /* { dg-warning "-:\"__TIMESTAMP__\" redefined" } */
+/* { dg-message "-:previous definition" "" { target *-*-* } timestamp_prev } */
#undef __TIMESTAMP__ /* Undefine while defined. */
@@ -71,9 +71,9 @@
/* { dg-bogus "Expected built-in is not defined" "" { target *-*-* } .-1 } */
#endif
-#define __LINE__ 0 /* { dg-warning "\"__LINE__\" redef" } */
-#define __INCLUDE_LEVEL__ 0 /* { dg-warning "\"__INCLUDE_LEVEL__\" redef" } */
-#define __COUNTER__ 0 /* { dg-warning "\"__COUNTER__\" redef" } */
+#define __LINE__ 0 /* { dg-warning "-:\"__LINE__\" redef" } */
+#define __INCLUDE_LEVEL__ 0 /* { dg-warning "-:\"__INCLUDE_LEVEL__\" redef" } */
+#define __COUNTER__ 0 /* { dg-warning "-:\"__COUNTER__\" redef" } */
int unused; /* Silence `ISO C forbids an empty translation unit' warning. */
diff --git a/gcc/testsuite/gcc.dg/builtin-stpncpy.c b/gcc/testsuite/gcc.dg/builtin-stpncpy.c
index e4290d5635c..920079892dd 100644
--- a/gcc/testsuite/gcc.dg/builtin-stpncpy.c
+++ b/gcc/testsuite/gcc.dg/builtin-stpncpy.c
@@ -1,6 +1,6 @@
/* PR tree-optimization/80669 - Bad -Wstringop-overflow warnings for stpncpy
{ dg-do compile }
- { dg-options "-O2 -Wall" } */
+ { dg-options "-O2 -Wall -Wno-stringop-truncation" } */
#define SIZE_MAX __SIZE_MAX__
@@ -18,7 +18,9 @@ size_t range (size_t min, size_t max)
return val < min || max < val ? min : val;
}
-/* Verify that no warning is issued for stpncpy with constant size. */
+/* Verify that no -Wstringop-overflow warning is issued for stpncpy
+ with constant size. (Some tests cause -Wstringop-truncation and
+ that's expected). */
void test_cst (char *d)
{
__builtin_stpncpy (d, "123", 0);
@@ -37,7 +39,8 @@ void test_cst (char *d)
}
-/* Verify that no warning is issued for stpncpy with size in some range. */
+/* Verify that no -Wstringop-overflow warning is issued for stpncpy
+ with size in some range. */
void test_rng (char *d)
{
#define R(min, max) range (min, max)
diff --git a/gcc/testsuite/gcc.dg/builtin-tgmath-1.c b/gcc/testsuite/gcc.dg/builtin-tgmath-1.c
new file mode 100644
index 00000000000..ff87ace42fd
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/builtin-tgmath-1.c
@@ -0,0 +1,322 @@
+/* Test __builtin_tgmath: valid uses, standard floating-point types. */
+/* { dg-do run } */
+/* { dg-options "" } */
+
+extern void abort (void);
+extern void exit (int);
+
+#define CHECK_CALL(C, E, V) \
+ do \
+ { \
+ if ((C) != (E)) \
+ abort (); \
+ extern __typeof (C) V; \
+ } \
+ while (0)
+
+extern float var_f;
+extern double var_d;
+extern long double var_ld;
+extern _Complex float var_cf;
+extern _Complex double var_cd;
+extern _Complex long double var_cld;
+extern int var_i;
+
+typedef float float_type;
+typedef double double_type;
+
+/* Test simple case, real arguments and return type. */
+
+float_type t1f (float x) { return x + 1; }
+double t1d (double_type x) { return x + 2; }
+long double t1l (volatile long double x) { return x + 3; }
+
+#define t1v(x) __builtin_tgmath (t1f, t1d, t1l, x)
+#define t1vr(x) __builtin_tgmath (t1l, t1d, t1f, x)
+
+static void
+test_1 (void)
+{
+ float_type f = 1;
+ volatile float vf = 2;
+ double d = 3;
+ long double ld = 4;
+ int i = 5;
+ long long ll = 6;
+ CHECK_CALL (t1v (f), 2, var_f);
+ CHECK_CALL (t1v (vf), 3, var_f);
+ CHECK_CALL (t1v (d), 5, var_d);
+ CHECK_CALL (t1v (ld), 7, var_ld);
+ CHECK_CALL (t1v (i), 7, var_d);
+ CHECK_CALL (t1v (ll), 8, var_d);
+ CHECK_CALL (t1vr (f), 2, var_f);
+ CHECK_CALL (t1vr (vf), 3, var_f);
+ CHECK_CALL (t1vr (d), 5, var_d);
+ CHECK_CALL (t1vr (ld), 7, var_ld);
+ CHECK_CALL (t1vr (i), 7, var_d);
+ CHECK_CALL (t1vr (ll), 8, var_d);
+}
+
+/* Test first argument not type-generic. */
+
+float t2f (int a, float x) { return a * x + 1; }
+double t2d (int a, double x) { return a * x + 2; }
+long double t2l (int a, long double x) { return a * x + 3; }
+
+#define t2v(a, x) __builtin_tgmath (t2f, t2d, t2l, a, x)
+
+static void
+test_2 (void)
+{
+ float f = 1;
+ double d = 2;
+ long double ld = 3;
+ int i = 4;
+ unsigned long long ll = 5;
+ CHECK_CALL (t2v (1, f), 2, var_f);
+ CHECK_CALL (t2v (2, d), 6, var_d);
+ CHECK_CALL (t2v (3, ld), 12, var_ld);
+ CHECK_CALL (t2v (4, i), 18, var_d);
+ CHECK_CALL (t2v (5, ll), 27, var_d);
+}
+
+/* Test return type not type-generic. */
+
+int t3f (float x) { return x + 1; }
+int t3d (double x) { return x + 2; }
+int t3l (long double x) { return x + 3; }
+
+#define t3v(x) __builtin_tgmath (t3f, t3d, t3l, x)
+
+static void
+test_3 (void)
+{
+ float f = 1;
+ double d = 2;
+ long double ld = 3;
+ short s = 4;
+ CHECK_CALL (t3v (f), 2, var_i);
+ CHECK_CALL (t3v (d), 4, var_i);
+ CHECK_CALL (t3v (ld), 6, var_i);
+ CHECK_CALL (t3v (s), 6, var_i);
+}
+
+/* Test multiple type-generic arguments. */
+
+float t4f (float x, float y) { return 10 * x + y; }
+double t4d (double x, double y) { return 100 * x + y; }
+long double t4l (long double x, long double y) { return 1000 * x + y; }
+
+#define t4v(x, y) __builtin_tgmath (t4f, t4d, t4l, x, y)
+
+static void
+test_4 (void)
+{
+ float f1 = 1;
+ float f2 = 2;
+ double d1 = 3;
+ double d2 = 4;
+ long double ld = 5;
+ long int l = 6;
+ CHECK_CALL (t4v (f1, f2), 12, var_f);
+ CHECK_CALL (t4v (f2, f1), 21, var_f);
+ CHECK_CALL (t4v (f1, d1), 103, var_d);
+ CHECK_CALL (t4v (d2, f2), 402, var_d);
+ CHECK_CALL (t4v (f1, l), 106, var_d);
+ CHECK_CALL (t4v (ld, f1), 5001, var_ld);
+ CHECK_CALL (t4v (l, l), 606, var_d);
+ CHECK_CALL (t4v (l, ld), 6005, var_ld);
+}
+
+/* Test complex argument, real return type. */
+
+float t5f (_Complex float x) { return 1 + __real__ x + 3 * __imag__ x; }
+double t5d (_Complex double x) { return 2 + __real__ x + 4 * __imag__ x; }
+long double t5l (_Complex long double x) { return 3 + __real__ x + 5 * __imag__ x; }
+
+#define t5v(x) __builtin_tgmath (t5f, t5d, t5l, x)
+
+static void
+test_5 (void)
+{
+ float f = 1;
+ _Complex float cf = 2 + 3i;
+ double d = 4;
+ _Complex double cd = 5 + 6i;
+ long double ld = 7;
+ _Complex long double cld = 8 + 9i;
+ int i = 10;
+ _Complex int ci = 11 + 12i;
+ CHECK_CALL (t5v (f), 2, var_f);
+ CHECK_CALL (t5v (cf), 12, var_f);
+ CHECK_CALL (t5v (d), 6, var_d);
+ CHECK_CALL (t5v (cd), 31, var_d);
+ CHECK_CALL (t5v (ld), 10, var_ld);
+ CHECK_CALL (t5v (cld), 56, var_ld);
+ CHECK_CALL (t5v (i), 12, var_d);
+ CHECK_CALL (t5v (ci), 61, var_d);
+}
+
+/* Test complex argument, complex return type. */
+
+_Complex float t6f (_Complex float x) { return 1 + x; }
+_Complex double t6d (_Complex double x) { return 2 + x; }
+_Complex long double t6l (_Complex long double x) { return 3 + x; }
+
+#define t6v(x) __builtin_tgmath (t6f, t6d, t6l, x)
+
+static void
+test_6 (void)
+{
+ float f = 1;
+ _Complex float cf = 2 + 3i;
+ double d = 4;
+ _Complex double cd = 5 + 6i;
+ long double ld = 7;
+ _Complex long double cld = 8 + 9i;
+ int i = 10;
+ _Complex int ci = 11 + 12i;
+ CHECK_CALL (t6v (f), 2, var_cf);
+ CHECK_CALL (t6v (cf), 3 + 3i, var_cf);
+ CHECK_CALL (t6v (d), 6, var_cd);
+ CHECK_CALL (t6v (cd), 7 + 6i, var_cd);
+ CHECK_CALL (t6v (ld), 10, var_cld);
+ CHECK_CALL (t6v (cld), 11 + 9i, var_cld);
+ CHECK_CALL (t6v (i), 12, var_cd);
+ CHECK_CALL (t6v (ci), 13 + 12i, var_cd);
+}
+
+/* Test real and complex argument, real return type. */
+
+float t7f (float x) { return 1 + x; }
+float t7cf (_Complex float x) { return 2 + __real__ x; }
+double t7d (double x) { return 3 + x; }
+double t7cd (_Complex double x) { return 4 + __real__ x; }
+long double t7l (long double x) { return 5 + x; }
+long double t7cl (_Complex long double x) { return 6 + __real__ x; }
+
+#define t7v(x) __builtin_tgmath (t7f, t7d, t7l, t7cf, t7cd, t7cl, x)
+
+static void
+test_7 (void)
+{
+ float f = 1;
+ _Complex float cf = 2 + 3i;
+ double d = 4;
+ _Complex double cd = 5 + 6i;
+ long double ld = 7;
+ _Complex long double cld = 8 + 9i;
+ int i = 10;
+ _Complex int ci = 11 + 12i;
+ CHECK_CALL (t7v (f), 2, var_f);
+ CHECK_CALL (t7v (cf), 4, var_f);
+ CHECK_CALL (t7v (d), 7, var_d);
+ CHECK_CALL (t7v (cd), 9, var_d);
+ CHECK_CALL (t7v (ld), 12, var_ld);
+ CHECK_CALL (t7v (cld), 14, var_ld);
+ CHECK_CALL (t7v (i), 13, var_d);
+ CHECK_CALL (t7v (ci), 15, var_d);
+}
+
+/* Test real and complex argument, real and complex return type. */
+
+float t8f (float x) { return 1 + x; }
+_Complex float t8cf (_Complex float x) { return 2 + x; }
+double t8d (double x) { return 3 + x; }
+_Complex double t8cd (_Complex double x) { return 4 + x; }
+long double t8l (long double x) { return 5 + x; }
+_Complex long double t8cl (_Complex long double x) { return 6 + x; }
+
+#define t8v(x) __builtin_tgmath (t8f, t8d, t8l, t8cf, t8cd, t8cl, x)
+
+static void
+test_8 (void)
+{
+ float f = 1;
+ _Complex float cf = 2 + 3i;
+ double d = 4;
+ _Complex double cd = 5 + 6i;
+ long double ld = 7;
+ _Complex long double cld = 8 + 9i;
+ int i = 10;
+ _Complex int ci = 11 + 12i;
+ CHECK_CALL (t8v (f), 2, var_f);
+ CHECK_CALL (t8v (cf), 4 + 3i, var_cf);
+ CHECK_CALL (t8v (d), 7, var_d);
+ CHECK_CALL (t8v (cd), 9 + 6i, var_cd);
+ CHECK_CALL (t8v (ld), 12, var_ld);
+ CHECK_CALL (t8v (cld), 14 + 9i, var_cld);
+ CHECK_CALL (t8v (i), 13, var_d);
+ CHECK_CALL (t8v (ci), 15 + 12i, var_cd);
+}
+
+/* Test multiple type-generic arguments, real and complex. */
+
+float t9f (float x, float y) { return x + 10 * y; }
+_Complex float t9cf (_Complex float x, _Complex float y) { return x + 100 * y; }
+double t9d (double x, double y) { return x + 1000 * y; }
+_Complex double t9cd (_Complex double x, _Complex double y) { return x + 10000 * y; }
+long double t9l (long double x, long double y) { return x + 100000 * y; }
+_Complex long double t9cl (_Complex long double x, _Complex long double y) { return x + 1000000 * y; }
+
+#define t9v(x, y) __builtin_tgmath (t9f, t9d, t9l, t9cf, t9cd, t9cl, x, y)
+
+static void
+test_9 (void)
+{
+ float f = 1;
+ _Complex float cf = 2 + 3i;
+ double d = 4;
+ _Complex double cd = 5 + 6i;
+ long double ld = 7;
+ _Complex long double cld = 8 + 9i;
+ int i = 10;
+ _Complex int ci = 11 + 12i;
+ CHECK_CALL (t9v (f, f), 11, var_f);
+ CHECK_CALL (t9v (f, cf), 201 + 300i, var_cf);
+ CHECK_CALL (t9v (cf, f), 102 + 3i, var_cf);
+ CHECK_CALL (t9v (f, i), 10001, var_d);
+ CHECK_CALL (t9v (i, f), 1010, var_d);
+ CHECK_CALL (t9v (d, d), 4004, var_d);
+ CHECK_CALL (t9v (d, cd), 50004 + 60000i, var_cd);
+ CHECK_CALL (t9v (ld, i), 1000007, var_ld);
+ CHECK_CALL (t9v (cf, cld), 8000002 + 9000003i, var_cld);
+ CHECK_CALL (t9v (i, i), 10010, var_d);
+ CHECK_CALL (t9v (ci, i), 100011 + 12i, var_cd);
+}
+
+/* Test functions rounding result to narrower type. */
+
+float t10d (double x) { return 1 + x; }
+float t10l (long double x) { return 2 + x; }
+
+#define t10v(x) __builtin_tgmath (t10d, t10l, x)
+
+static void
+test_10 (void)
+{
+ float f = 1;
+ double d = 2;
+ long double ld = 3;
+ short s = 4;
+ CHECK_CALL (t10v (f), 2, var_f);
+ CHECK_CALL (t10v (d), 3, var_f);
+ CHECK_CALL (t10v (ld), 5, var_f);
+ CHECK_CALL (t10v (s), 5, var_f);
+}
+
+int
+main (void)
+{
+ test_1 ();
+ test_2 ();
+ test_3 ();
+ test_4 ();
+ test_5 ();
+ test_6 ();
+ test_7 ();
+ test_8 ();
+ test_9 ();
+ test_10 ();
+ exit (0);
+}
diff --git a/gcc/testsuite/gcc.dg/builtin-tgmath-2.c b/gcc/testsuite/gcc.dg/builtin-tgmath-2.c
new file mode 100644
index 00000000000..c4140cc2bd5
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/builtin-tgmath-2.c
@@ -0,0 +1,51 @@
+/* Test __builtin_tgmath: valid uses, _FloatN types. */
+/* { dg-do run } */
+/* { dg-options "" } */
+/* { dg-add-options float32 } */
+/* { dg-require-effective-target float32_runtime } */
+
+extern void abort (void);
+extern void exit (int);
+
+#define CHECK_CALL(C, E, V) \
+ do \
+ { \
+ if ((C) != (E)) \
+ abort (); \
+ extern __typeof (C) V; \
+ } \
+ while (0)
+
+extern float var_f;
+extern double var_d;
+extern long double var_ld;
+extern _Float32 var_f32;
+
+float t1f (float x) { return x + 1; }
+double t1d (double x) { return x + 2; }
+long double t1l (long double x) { return x + 3; }
+_Float32 t1f32 (_Float32 x) { return x + 4; }
+
+#define t1v(x) __builtin_tgmath (t1f, t1d, t1l, t1f32, x)
+
+static void
+test_1 (void)
+{
+ float f = 1;
+ double d = 2;
+ long double ld = 3;
+ _Float32 f32 = 4;
+ int i = 5;
+ CHECK_CALL (t1v (f), 2, var_f);
+ CHECK_CALL (t1v (d), 4, var_d);
+ CHECK_CALL (t1v (ld), 6, var_ld);
+ CHECK_CALL (t1v (f32), 8, var_f32);
+ CHECK_CALL (t1v (i), 7, var_d);
+}
+
+int
+main (void)
+{
+ test_1 ();
+ exit (0);
+}
diff --git a/gcc/testsuite/gcc.dg/builtin-tgmath-err-1.c b/gcc/testsuite/gcc.dg/builtin-tgmath-err-1.c
new file mode 100644
index 00000000000..9016ec742be
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/builtin-tgmath-err-1.c
@@ -0,0 +1,76 @@
+/* Test __builtin_tgmath: errors that indicate a bad definition of a
+ type-generic macro rather than bad arguments in a call to it. */
+/* { dg-do compile } */
+/* { dg-options "" } */
+
+void *p;
+double d;
+double unprototyped_d ();
+long double unprototyped_ld ();
+double variadic_d (double, ...);
+long double variadic_ld (long double, ...);
+double no_arguments_d (void);
+long double no_arguments_ld (void);
+double f_d (double);
+long double f_ld (long double);
+double many_args (double, double, double, double);
+int f_i_d (double);
+_Complex int f_ci_d (double);
+void * f_p_d (double);
+double f_d_i (int);
+double f_d_ci (_Complex int);
+double f_d_p (void *);
+long double f_ld_d (double);
+_Complex double f_cd_d (double);
+double f_d_f (float);
+double f_d_dd (double, double);
+long double f_ld_ldld (long double, long double);
+float f_f_fd (float, double);
+
+void
+test (void)
+{
+ /* Arguments individually invalid or no consistent number of
+ arguments followed by those arguments. */
+ __builtin_tgmath (); /* { dg-error "too few arguments" } */
+ __builtin_tgmath (f_d); /* { dg-error "too few arguments" } */
+ __builtin_tgmath (f_d, f_ld); /* { dg-error "too few arguments" } */
+ __builtin_tgmath (many_args, many_args, many_args); /* { dg-error "too few arguments" } */
+ __builtin_tgmath (many_args, d, d, d, d); /* { dg-error "too few arguments" } */
+ __builtin_tgmath (f_ld, many_args, d); /* { dg-error "has wrong number of arguments" } */
+ __builtin_tgmath (unprototyped_d, unprototyped_ld, d); /* { dg-error "is unprototyped" } */
+ __builtin_tgmath (f_d, unprototyped_ld, d); /* { dg-error "is unprototyped" } */
+ __builtin_tgmath (variadic_d, variadic_ld, d); /* { dg-error "variable arguments" } */
+ __builtin_tgmath (f_d, variadic_ld, d); /* { dg-error "variable arguments" } */
+ __builtin_tgmath (p, p, p); /* { dg-error "is not a function pointer" } */
+ __builtin_tgmath (f_d, p, p); /* { dg-error "is not a function pointer" } */
+ __builtin_tgmath (no_arguments_d, no_arguments_d, no_arguments_ld); /* { dg-error "has no arguments" } */
+ __builtin_tgmath (f_d, no_arguments_d, no_arguments_ld); /* { dg-error "has no arguments" } */
+
+ /* Invalid varying types of arguments. */
+ __builtin_tgmath (f_i_d, f_ld, 0); /* { dg-error "invalid type-generic return type" } */
+ __builtin_tgmath (f_ci_d, f_ld, 0); /* { dg-error "invalid type-generic return type" } */
+ __builtin_tgmath (f_p_d, f_ld, 0); /* { dg-error "invalid type-generic return type" } */
+ __builtin_tgmath (f_ld, f_i_d, 0); /* { dg-error "invalid type-generic return type" } */
+ __builtin_tgmath (f_ld, f_ci_d, 0); /* { dg-error "invalid type-generic return type" } */
+ __builtin_tgmath (f_ld, f_p_d, 0); /* { dg-error "invalid type-generic return type" } */
+ __builtin_tgmath (f_d_i, f_ld, 0); /* { dg-error "invalid type-generic type for argument" } */
+ __builtin_tgmath (f_d_ci, f_ld, 0); /* { dg-error "invalid type-generic type for argument" } */
+ __builtin_tgmath (f_d_p, f_ld, 0); /* { dg-error "invalid type-generic type for argument" } */
+ __builtin_tgmath (f_ld, f_d_i, 0); /* { dg-error "invalid type-generic type for argument" } */
+ __builtin_tgmath (f_ld, f_d_ci, 0); /* { dg-error "invalid type-generic type for argument" } */
+ __builtin_tgmath (f_ld, f_d_p, 0); /* { dg-error "invalid type-generic type for argument" } */
+
+ /* Arguments same type. */
+ __builtin_tgmath (f_d, f_d, 0); /* { dg-error "all have the same type" } */
+
+ /* Missing or invalid type-generic parameter. */
+ __builtin_tgmath (f_d, f_ld_d, 0); /* { dg-error "lack type-generic parameter" } */
+ __builtin_tgmath (f_d, f_ld, f_cd_d, 0); /* { dg-error "lack type-generic parameter" } */
+ __builtin_tgmath (f_d, f_ld, f_d, 0); /* { dg-error "duplicate type-generic parameter type" } */
+
+ /* Variation not consistent with the identified type-generic
+ parameter. */
+ __builtin_tgmath (f_d, f_ld, f_d_f, 0); /* { dg-error "bad return type for function argument" } */
+ __builtin_tgmath (f_d_dd, f_ld_ldld, f_f_fd, 0, 0); /* { dg-error "bad type for argument" } */
+}
diff --git a/gcc/testsuite/gcc.dg/builtin-tgmath-err-2.c b/gcc/testsuite/gcc.dg/builtin-tgmath-err-2.c
new file mode 100644
index 00000000000..df5655ef402
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/builtin-tgmath-err-2.c
@@ -0,0 +1,19 @@
+/* Test __builtin_tgmath: errors that indicate bad arguments in a call
+ to a type-generic macro, non-DFP. */
+/* { dg-do compile } */
+/* { dg-options "" } */
+
+float f_f (float);
+double f_d (double);
+long double f_ld (long double);
+void *p;
+long double ld;
+_Complex float cf;
+
+void
+test (void)
+{
+ __builtin_tgmath (f_f, f_d, f_ld, p); /* { dg-error "invalid type of argument" } */
+ __builtin_tgmath (f_f, f_d, ld); /* { dg-error "no matching function for type-generic call" } */
+ __builtin_tgmath (f_f, f_d, cf); /* { dg-error "no matching function for type-generic call" } */
+}
diff --git a/gcc/testsuite/gcc.dg/cpp/Wunknown-pragmas-1.c b/gcc/testsuite/gcc.dg/cpp/Wunknown-pragmas-1.c
index 4f6a04be45a..06a244e097d 100644
--- a/gcc/testsuite/gcc.dg/cpp/Wunknown-pragmas-1.c
+++ b/gcc/testsuite/gcc.dg/cpp/Wunknown-pragmas-1.c
@@ -5,25 +5,25 @@
/* Make sure we get warnings in the expected lines. */
-#pragma unknown1 /* { dg-warning "unknown1" "unknown1" } */
+#pragma unknown1 /* { dg-warning "-:unknown1" "unknown1" } */
#define COMMA ,
#define FOO(x) x
#define BAR(x) _Pragma("unknown_before") x
#define BAZ(x) x _Pragma("unknown_after")
-int _Pragma("unknown2") bar1; /* { dg-warning "unknown2" "unknown2" } */
+int _Pragma("unknown2") bar1; /* { dg-warning "-:unknown2" "unknown2" } */
-FOO(int _Pragma("unknown3") bar2); /* { dg-warning "unknown3" "unknown3" } */
+FOO(int _Pragma("unknown3") bar2); /* { dg-warning "-:unknown3" "unknown3" } */
-int BAR(bar3); /* { dg-warning "unknown_before" "unknown_before 1" } */
+int BAR(bar3); /* { dg-warning "-:unknown_before" "unknown_before 1" } */
-BAR(int bar4); /* { dg-warning "unknown_before" "unknown_before 2" } */
+BAR(int bar4); /* { dg-warning "-:unknown_before" "unknown_before 2" } */
-int BAZ(bar5); /* { dg-warning "unknown_after" "unknown_after 1" } */
+int BAZ(bar5); /* { dg-warning "-:unknown_after" "unknown_after 1" } */
-int BAZ(bar6;) /* { dg-warning "unknown_after" "unknown_after 2" } */
+int BAZ(bar6;) /* { dg-warning "-:unknown_after" "unknown_after 2" } */
-FOO(int bar7; _Pragma("unknown4")) /* { dg-warning "unknown4" "unknown4" } */
+FOO(int bar7; _Pragma("unknown4")) /* { dg-warning "-:unknown4" "unknown4" } */
-#pragma unknown5 /* { dg-warning "unknown5" "unknown5" } */
+#pragma unknown5 /* { dg-warning "-:unknown5" "unknown5" } */
diff --git a/gcc/testsuite/gcc.dg/cpp/Wunused.c b/gcc/testsuite/gcc.dg/cpp/Wunused.c
index ac363ad04b8..d6f76288cd7 100644
--- a/gcc/testsuite/gcc.dg/cpp/Wunused.c
+++ b/gcc/testsuite/gcc.dg/cpp/Wunused.c
@@ -15,9 +15,9 @@
#define used3 /* { dg-bogus "used" } */
#define used4 used4 /* { dg-bogus "used" } */
-#define unused5 /* { dg-warning "used" } */
-#define unused6 /* { dg-warning "used" } */
-#define unused7() /* { dg-warning "used" } */
+#define unused5 /* { dg-warning "-:used" } */
+#define unused6 /* { dg-warning "-:used" } */
+#define unused7() /* { dg-warning "-:used" } */
#if defined used1
#endif
diff --git a/gcc/testsuite/gcc.dg/cpp/macsyntx.c b/gcc/testsuite/gcc.dg/cpp/macsyntx.c
index 146dcedab5f..ff7e37a0d06 100644
--- a/gcc/testsuite/gcc.dg/cpp/macsyntx.c
+++ b/gcc/testsuite/gcc.dg/cpp/macsyntx.c
@@ -51,15 +51,15 @@ one(ichi\
two(ichi) /* { dg-error "requires 2" } */
var0() /* OK. */
var0(ichi) /* OK. */
-var1() /* { dg-warning "requires at least one" } */
-var1(ichi) /* { dg-warning "requires at least one" } */
+var1() /* { dg-warning "requires at least one" "" { xfail *-*-* } } */
+var1(ichi) /* { dg-warning "requires at least one" "" { xfail *-*-* } } */
var1(ichi, ni) /* OK. */
/* This tests two oddities of GNU rest args - omitting a comma is OK,
and backtracking a token on pasting an empty rest args. */
#define rest(x, y...) x ## y /* { dg-warning "ISO C" } */
rest(ichi,) /* OK. */
-rest(ichi) /* { dg-warning "requires at least one" } */
+rest(ichi) /* { dg-warning "requires at least one" "" { xfail *-*-* } } */
#if 23 != rest(2, 3) /* OK, no warning. */
#error 23 != 23 !!
#endif
diff --git a/gcc/testsuite/gcc.dg/cpp/misspelled-directive-1.c b/gcc/testsuite/gcc.dg/cpp/misspelled-directive-1.c
index f79670a17cb..d4176144f5d 100644
--- a/gcc/testsuite/gcc.dg/cpp/misspelled-directive-1.c
+++ b/gcc/testsuite/gcc.dg/cpp/misspelled-directive-1.c
@@ -1,4 +1,4 @@
-#ifndef SOME_GUARD /* { dg-error "unterminated" } */
+#ifndef SOME_GUARD /* { dg-error "-:unterminated" } */
#if 1
/* Typo here: "endfi" should have been "endif". */
diff --git a/gcc/testsuite/gcc.dg/cpp/redef2.c b/gcc/testsuite/gcc.dg/cpp/redef2.c
index 1dbc10033ed..439d33a7057 100644
--- a/gcc/testsuite/gcc.dg/cpp/redef2.c
+++ b/gcc/testsuite/gcc.dg/cpp/redef2.c
@@ -17,15 +17,15 @@
#define foo(x) x
#define foo(x)x /* { dg-bogus "redefined" "redefined foo" } */
-/* { dg-warning "redefined" "redef mac" { target *-*-* } 7 }
- { dg-warning "redefined" "redef mac" { target *-*-* } 8 }
- { dg-warning "redefined" "redef mac" { target *-*-* } 9 }
- { dg-warning "redefined" "redef ro" { target *-*-* } 12 }
- { dg-warning "redefined" "redef va" { target *-*-* } 15 }
+/* { dg-warning "-:redefined" "redef mac" { target *-*-* } 7 }
+ { dg-warning "-:redefined" "redef mac" { target *-*-* } 8 }
+ { dg-warning "-:redefined" "redef mac" { target *-*-* } 9 }
+ { dg-warning "-:redefined" "redef ro" { target *-*-* } 12 }
+ { dg-warning "-:redefined" "redef va" { target *-*-* } 15 }
- { dg-message "previous" "prev def mac" { target *-*-* } 6 }
- { dg-message "previous" "prev def mac" { target *-*-* } 7 }
- { dg-message "previous" "prev def mac" { target *-*-* } 8 }
- { dg-message "previous" "prev def ro" { target *-*-* } 11 }
- { dg-message "previous" "prev def va" { target *-*-* } 14 }
+ { dg-message "-:previous" "prev def mac" { target *-*-* } 6 }
+ { dg-message "-:previous" "prev def mac" { target *-*-* } 7 }
+ { dg-message "-:previous" "prev def mac" { target *-*-* } 8 }
+ { dg-message "-:previous" "prev def ro" { target *-*-* } 11 }
+ { dg-message "-:previous" "prev def va" { target *-*-* } 14 }
*/
diff --git a/gcc/testsuite/gcc.dg/cpp/redef3.c b/gcc/testsuite/gcc.dg/cpp/redef3.c
index 1c541a45bb1..4e4ef128b10 100644
--- a/gcc/testsuite/gcc.dg/cpp/redef3.c
+++ b/gcc/testsuite/gcc.dg/cpp/redef3.c
@@ -11,11 +11,11 @@
#define D 1 2
#define E
-/* { dg-warning "redefined" "redef A" { target *-*-* } 7 }
- { dg-warning "redefined" "redef B" { target *-*-* } 9 }
- { dg-warning "redefined" "redef D" { target *-*-* } 11 }
- { dg-warning "redefined" "redef E" { target *-*-* } 12 }
- { dg-message "previous" "prev def A" { target *-*-* } 6 }
- { dg-message "previous" "prev def B" { target *-*-* } 8 }
- { dg-message "previous" "prev def D/E" { target *-*-* } 0 }
+/* { dg-warning "-:redefined" "redef A" { target *-*-* } 7 }
+ { dg-warning "-:redefined" "redef B" { target *-*-* } 9 }
+ { dg-warning "-:redefined" "redef D" { target *-*-* } 11 }
+ { dg-warning "-:redefined" "redef E" { target *-*-* } 12 }
+ { dg-message "-:previous" "prev def A" { target *-*-* } 6 }
+ { dg-message "-:previous" "prev def B" { target *-*-* } 8 }
+ { dg-message "-:previous" "prev def D/E" { target *-*-* } 0 }
*/
diff --git a/gcc/testsuite/gcc.dg/cpp/redef4.c b/gcc/testsuite/gcc.dg/cpp/redef4.c
index b34635b2e42..aa6729b770b 100644
--- a/gcc/testsuite/gcc.dg/cpp/redef4.c
+++ b/gcc/testsuite/gcc.dg/cpp/redef4.c
@@ -4,41 +4,41 @@
/* { dg-do preprocess } */
/* { dg-options "" } */
-#define str(x) #x /* { dg-message "previous definition" } */
-#define str(x) %: x /* { dg-warning "redefined" } */
+#define str(x) #x /* { dg-message "-:previous definition" } */
+#define str(x) %: x /* { dg-warning "-:redefined" } */
#undef str
-#define str(x) #x /* { dg-message "previous definition" } */
-#define str(x) # x /* { dg-warning "redefined" } */
+#define str(x) #x /* { dg-message "-:previous definition" } */
+#define str(x) # x /* { dg-warning "-:redefined" } */
#undef str
-#define str(x) #x /* { dg-message "previous definition" } */
-#define str(x) %: x /* { dg-warning "redefined" } */
+#define str(x) #x /* { dg-message "-:previous definition" } */
+#define str(x) %: x /* { dg-warning "-:redefined" } */
#undef str
-#define str(x) %:x /* { dg-message "previous definition" } */
-#define str(x) #x /* { dg-warning "redefined" } */
+#define str(x) %:x /* { dg-message "-:previous definition" } */
+#define str(x) #x /* { dg-warning "-:redefined" } */
#undef str
-#define str(x) %:x /* { dg-message "previous definition" } */
-#define str(x) %: x /* { dg-warning "redefined" } */
+#define str(x) %:x /* { dg-message "-:previous definition" } */
+#define str(x) %: x /* { dg-warning "-:redefined" } */
#undef str
-#define str(x) %:x /* { dg-message "previous definition" } */
-#define str(x) # x /* { dg-warning "redefined" } */
+#define str(x) %:x /* { dg-message "-:previous definition" } */
+#define str(x) # x /* { dg-warning "-:redefined" } */
#undef str
-#define str(x) %:x /* { dg-message "previous definition" } */
-#define str(x) %: x /* { dg-warning "redefined" } */
+#define str(x) %:x /* { dg-message "-:previous definition" } */
+#define str(x) %: x /* { dg-warning "-:redefined" } */
#undef str
-#define str(x) # x /* { dg-message "previous definition" } */
-#define str(x) #x /* { dg-warning "redefined" } */
+#define str(x) # x /* { dg-message "-:previous definition" } */
+#define str(x) #x /* { dg-warning "-:redefined" } */
#undef str
-#define str(x) # x /* { dg-message "previous definition" } */
-#define str(x) %: x /* { dg-warning "redefined" } */
+#define str(x) # x /* { dg-message "-:previous definition" } */
+#define str(x) %: x /* { dg-warning "-:redefined" } */
#undef str
-#define str(x) # x /* { dg-message "previous definition" } */
-#define str(x) %: x /* { dg-warning "redefined" } */
+#define str(x) # x /* { dg-message "-:previous definition" } */
+#define str(x) %: x /* { dg-warning "-:redefined" } */
#undef str
-#define str(x) %: x /* { dg-message "previous definition" } */
-#define str(x) #x /* { dg-warning "redefined" } */
+#define str(x) %: x /* { dg-message "-:previous definition" } */
+#define str(x) #x /* { dg-warning "-:redefined" } */
#undef str
-#define str(x) %: x /* { dg-message "previous definition" } */
-#define str(x) # x /* { dg-warning "redefined" } */
+#define str(x) %: x /* { dg-message "-:previous definition" } */
+#define str(x) # x /* { dg-warning "-:redefined" } */
#undef str
#define str(x) #x
@@ -54,173 +54,173 @@
#define str(x) %: x
#undef str
-#define astr(x) a#x /* { dg-message "previous definition" } */
-#define astr(x) a%: x /* { dg-warning "redefined" } */
+#define astr(x) a#x /* { dg-message "-:previous definition" } */
+#define astr(x) a%: x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a#x /* { dg-message "previous definition" } */
-#define astr(x) a# x /* { dg-warning "redefined" } */
+#define astr(x) a#x /* { dg-message "-:previous definition" } */
+#define astr(x) a# x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a#x /* { dg-message "previous definition" } */
-#define astr(x) a%: x /* { dg-warning "redefined" } */
+#define astr(x) a#x /* { dg-message "-:previous definition" } */
+#define astr(x) a%: x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a#x /* { dg-message "previous definition" } */
-#define astr(x) a #x /* { dg-warning "redefined" } */
+#define astr(x) a#x /* { dg-message "-:previous definition" } */
+#define astr(x) a #x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a#x /* { dg-message "previous definition" } */
-#define astr(x) a %:x /* { dg-warning "redefined" } */
+#define astr(x) a#x /* { dg-message "-:previous definition" } */
+#define astr(x) a %:x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a#x /* { dg-message "previous definition" } */
-#define astr(x) a # x /* { dg-warning "redefined" } */
+#define astr(x) a#x /* { dg-message "-:previous definition" } */
+#define astr(x) a # x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a#x /* { dg-message "previous definition" } */
-#define astr(x) a %: x /* { dg-warning "redefined" } */
+#define astr(x) a#x /* { dg-message "-:previous definition" } */
+#define astr(x) a %: x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a%:x /* { dg-message "previous definition" } */
-#define astr(x) a#x /* { dg-warning "redefined" } */
+#define astr(x) a%:x /* { dg-message "-:previous definition" } */
+#define astr(x) a#x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a%:x /* { dg-message "previous definition" } */
-#define astr(x) a%: x /* { dg-warning "redefined" } */
+#define astr(x) a%:x /* { dg-message "-:previous definition" } */
+#define astr(x) a%: x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a%:x /* { dg-message "previous definition" } */
-#define astr(x) a# x /* { dg-warning "redefined" } */
+#define astr(x) a%:x /* { dg-message "-:previous definition" } */
+#define astr(x) a# x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a%:x /* { dg-message "previous definition" } */
-#define astr(x) a%: x /* { dg-warning "redefined" } */
+#define astr(x) a%:x /* { dg-message "-:previous definition" } */
+#define astr(x) a%: x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a%:x /* { dg-message "previous definition" } */
-#define astr(x) a #x /* { dg-warning "redefined" } */
+#define astr(x) a%:x /* { dg-message "-:previous definition" } */
+#define astr(x) a #x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a%:x /* { dg-message "previous definition" } */
-#define astr(x) a %:x /* { dg-warning "redefined" } */
+#define astr(x) a%:x /* { dg-message "-:previous definition" } */
+#define astr(x) a %:x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a%:x /* { dg-message "previous definition" } */
-#define astr(x) a # x /* { dg-warning "redefined" } */
+#define astr(x) a%:x /* { dg-message "-:previous definition" } */
+#define astr(x) a # x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a%:x /* { dg-message "previous definition" } */
-#define astr(x) a %: x /* { dg-warning "redefined" } */
+#define astr(x) a%:x /* { dg-message "-:previous definition" } */
+#define astr(x) a %: x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a# x /* { dg-message "previous definition" } */
-#define astr(x) a#x /* { dg-warning "redefined" } */
+#define astr(x) a# x /* { dg-message "-:previous definition" } */
+#define astr(x) a#x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a# x /* { dg-message "previous definition" } */
-#define astr(x) a%: x /* { dg-warning "redefined" } */
+#define astr(x) a# x /* { dg-message "-:previous definition" } */
+#define astr(x) a%: x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a# x /* { dg-message "previous definition" } */
-#define astr(x) a%: x /* { dg-warning "redefined" } */
+#define astr(x) a# x /* { dg-message "-:previous definition" } */
+#define astr(x) a%: x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a# x /* { dg-message "previous definition" } */
-#define astr(x) a #x /* { dg-warning "redefined" } */
+#define astr(x) a# x /* { dg-message "-:previous definition" } */
+#define astr(x) a #x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a# x /* { dg-message "previous definition" } */
-#define astr(x) a %:x /* { dg-warning "redefined" } */
+#define astr(x) a# x /* { dg-message "-:previous definition" } */
+#define astr(x) a %:x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a# x /* { dg-message "previous definition" } */
-#define astr(x) a # x /* { dg-warning "redefined" } */
+#define astr(x) a# x /* { dg-message "-:previous definition" } */
+#define astr(x) a # x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a# x /* { dg-message "previous definition" } */
-#define astr(x) a %: x /* { dg-warning "redefined" } */
+#define astr(x) a# x /* { dg-message "-:previous definition" } */
+#define astr(x) a %: x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a%: x /* { dg-message "previous definition" } */
-#define astr(x) a#x /* { dg-warning "redefined" } */
+#define astr(x) a%: x /* { dg-message "-:previous definition" } */
+#define astr(x) a#x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a%: x /* { dg-message "previous definition" } */
-#define astr(x) a# x /* { dg-warning "redefined" } */
+#define astr(x) a%: x /* { dg-message "-:previous definition" } */
+#define astr(x) a# x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a%: x /* { dg-message "previous definition" } */
-#define astr(x) a #x /* { dg-warning "redefined" } */
+#define astr(x) a%: x /* { dg-message "-:previous definition" } */
+#define astr(x) a #x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a%: x /* { dg-message "previous definition" } */
-#define astr(x) a %:x /* { dg-warning "redefined" } */
+#define astr(x) a%: x /* { dg-message "-:previous definition" } */
+#define astr(x) a %:x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a%: x /* { dg-message "previous definition" } */
-#define astr(x) a # x /* { dg-warning "redefined" } */
+#define astr(x) a%: x /* { dg-message "-:previous definition" } */
+#define astr(x) a # x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a%: x /* { dg-message "previous definition" } */
-#define astr(x) a %: x /* { dg-warning "redefined" } */
+#define astr(x) a%: x /* { dg-message "-:previous definition" } */
+#define astr(x) a %: x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a #x /* { dg-message "previous definition" } */
-#define astr(x) a#x /* { dg-warning "redefined" } */
+#define astr(x) a #x /* { dg-message "-:previous definition" } */
+#define astr(x) a#x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a #x /* { dg-message "previous definition" } */
-#define astr(x) a%: x /* { dg-warning "redefined" } */
+#define astr(x) a #x /* { dg-message "-:previous definition" } */
+#define astr(x) a%: x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a #x /* { dg-message "previous definition" } */
-#define astr(x) a# x /* { dg-warning "redefined" } */
+#define astr(x) a #x /* { dg-message "-:previous definition" } */
+#define astr(x) a# x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a #x /* { dg-message "previous definition" } */
-#define astr(x) a%: x /* { dg-warning "redefined" } */
+#define astr(x) a #x /* { dg-message "-:previous definition" } */
+#define astr(x) a%: x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a #x /* { dg-message "previous definition" } */
-#define astr(x) a %:x /* { dg-warning "redefined" } */
+#define astr(x) a #x /* { dg-message "-:previous definition" } */
+#define astr(x) a %:x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a #x /* { dg-message "previous definition" } */
-#define astr(x) a # x /* { dg-warning "redefined" } */
+#define astr(x) a #x /* { dg-message "-:previous definition" } */
+#define astr(x) a # x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a #x /* { dg-message "previous definition" } */
-#define astr(x) a %: x /* { dg-warning "redefined" } */
+#define astr(x) a #x /* { dg-message "-:previous definition" } */
+#define astr(x) a %: x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a %:x /* { dg-message "previous definition" } */
-#define astr(x) a#x /* { dg-warning "redefined" } */
+#define astr(x) a %:x /* { dg-message "-:previous definition" } */
+#define astr(x) a#x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a %:x /* { dg-message "previous definition" } */
-#define astr(x) a%: x /* { dg-warning "redefined" } */
+#define astr(x) a %:x /* { dg-message "-:previous definition" } */
+#define astr(x) a%: x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a %:x /* { dg-message "previous definition" } */
-#define astr(x) a# x /* { dg-warning "redefined" } */
+#define astr(x) a %:x /* { dg-message "-:previous definition" } */
+#define astr(x) a# x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a %:x /* { dg-message "previous definition" } */
-#define astr(x) a%: x /* { dg-warning "redefined" } */
+#define astr(x) a %:x /* { dg-message "-:previous definition" } */
+#define astr(x) a%: x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a %:x /* { dg-message "previous definition" } */
-#define astr(x) a #x /* { dg-warning "redefined" } */
+#define astr(x) a %:x /* { dg-message "-:previous definition" } */
+#define astr(x) a #x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a %:x /* { dg-message "previous definition" } */
-#define astr(x) a # x /* { dg-warning "redefined" } */
+#define astr(x) a %:x /* { dg-message "-:previous definition" } */
+#define astr(x) a # x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a %:x /* { dg-message "previous definition" } */
-#define astr(x) a %: x /* { dg-warning "redefined" } */
+#define astr(x) a %:x /* { dg-message "-:previous definition" } */
+#define astr(x) a %: x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a # x /* { dg-message "previous definition" } */
-#define astr(x) a#x /* { dg-warning "redefined" } */
+#define astr(x) a # x /* { dg-message "-:previous definition" } */
+#define astr(x) a#x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a # x /* { dg-message "previous definition" } */
-#define astr(x) a%: x /* { dg-warning "redefined" } */
+#define astr(x) a # x /* { dg-message "-:previous definition" } */
+#define astr(x) a%: x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a # x /* { dg-message "previous definition" } */
-#define astr(x) a# x /* { dg-warning "redefined" } */
+#define astr(x) a # x /* { dg-message "-:previous definition" } */
+#define astr(x) a# x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a # x /* { dg-message "previous definition" } */
-#define astr(x) a%: x /* { dg-warning "redefined" } */
+#define astr(x) a # x /* { dg-message "-:previous definition" } */
+#define astr(x) a%: x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a # x /* { dg-message "previous definition" } */
-#define astr(x) a #x /* { dg-warning "redefined" } */
+#define astr(x) a # x /* { dg-message "-:previous definition" } */
+#define astr(x) a #x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a # x /* { dg-message "previous definition" } */
-#define astr(x) a %:x /* { dg-warning "redefined" } */
+#define astr(x) a # x /* { dg-message "-:previous definition" } */
+#define astr(x) a %:x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a # x /* { dg-message "previous definition" } */
-#define astr(x) a %: x /* { dg-warning "redefined" } */
+#define astr(x) a # x /* { dg-message "-:previous definition" } */
+#define astr(x) a %: x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a %: x /* { dg-message "previous definition" } */
-#define astr(x) a#x /* { dg-warning "redefined" } */
+#define astr(x) a %: x /* { dg-message "-:previous definition" } */
+#define astr(x) a#x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a %: x /* { dg-message "previous definition" } */
-#define astr(x) a%: x /* { dg-warning "redefined" } */
+#define astr(x) a %: x /* { dg-message "-:previous definition" } */
+#define astr(x) a%: x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a %: x /* { dg-message "previous definition" } */
-#define astr(x) a# x /* { dg-warning "redefined" } */
+#define astr(x) a %: x /* { dg-message "-:previous definition" } */
+#define astr(x) a# x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a %: x /* { dg-message "previous definition" } */
-#define astr(x) a%: x /* { dg-warning "redefined" } */
+#define astr(x) a %: x /* { dg-message "-:previous definition" } */
+#define astr(x) a%: x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a %: x /* { dg-message "previous definition" } */
-#define astr(x) a #x /* { dg-warning "redefined" } */
+#define astr(x) a %: x /* { dg-message "-:previous definition" } */
+#define astr(x) a #x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a %: x /* { dg-message "previous definition" } */
-#define astr(x) a %:x /* { dg-warning "redefined" } */
+#define astr(x) a %: x /* { dg-message "-:previous definition" } */
+#define astr(x) a %:x /* { dg-warning "-:redefined" } */
#undef astr
-#define astr(x) a %: x /* { dg-message "previous definition" } */
-#define astr(x) a # x /* { dg-warning "redefined" } */
+#define astr(x) a %: x /* { dg-message "-:previous definition" } */
+#define astr(x) a # x /* { dg-warning "-:redefined" } */
#undef astr
#define astr(x) a#x
@@ -248,173 +248,173 @@
#define astr(x) a %: x
#undef astr
-#define cat(x,y) x##y /* { dg-message "previous definition" } */
-#define cat(x,y) x%:%: y /* { dg-warning "redefined" } */
+#define cat(x,y) x##y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x%:%: y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x##y /* { dg-message "previous definition" } */
-#define cat(x,y) x## y /* { dg-warning "redefined" } */
+#define cat(x,y) x##y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x## y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x##y /* { dg-message "previous definition" } */
-#define cat(x,y) x%:%: y /* { dg-warning "redefined" } */
+#define cat(x,y) x##y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x%:%: y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x##y /* { dg-message "previous definition" } */
-#define cat(x,y) x ##y /* { dg-warning "redefined" } */
+#define cat(x,y) x##y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x ##y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x##y /* { dg-message "previous definition" } */
-#define cat(x,y) x %:%:y /* { dg-warning "redefined" } */
+#define cat(x,y) x##y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x %:%:y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x##y /* { dg-message "previous definition" } */
-#define cat(x,y) x ## y /* { dg-warning "redefined" } */
+#define cat(x,y) x##y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x ## y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x##y /* { dg-message "previous definition" } */
-#define cat(x,y) x %:%: y /* { dg-warning "redefined" } */
+#define cat(x,y) x##y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x %:%: y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x%:%:y /* { dg-message "previous definition" } */
-#define cat(x,y) x##y /* { dg-warning "redefined" } */
+#define cat(x,y) x%:%:y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x##y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x%:%:y /* { dg-message "previous definition" } */
-#define cat(x,y) x%:%: y /* { dg-warning "redefined" } */
+#define cat(x,y) x%:%:y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x%:%: y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x%:%:y /* { dg-message "previous definition" } */
-#define cat(x,y) x## y /* { dg-warning "redefined" } */
+#define cat(x,y) x%:%:y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x## y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x%:%:y /* { dg-message "previous definition" } */
-#define cat(x,y) x%:%: y /* { dg-warning "redefined" } */
+#define cat(x,y) x%:%:y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x%:%: y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x%:%:y /* { dg-message "previous definition" } */
-#define cat(x,y) x ##y /* { dg-warning "redefined" } */
+#define cat(x,y) x%:%:y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x ##y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x%:%:y /* { dg-message "previous definition" } */
-#define cat(x,y) x %:%:y /* { dg-warning "redefined" } */
+#define cat(x,y) x%:%:y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x %:%:y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x%:%:y /* { dg-message "previous definition" } */
-#define cat(x,y) x ## y /* { dg-warning "redefined" } */
+#define cat(x,y) x%:%:y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x ## y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x%:%:y /* { dg-message "previous definition" } */
-#define cat(x,y) x %:%: y /* { dg-warning "redefined" } */
+#define cat(x,y) x%:%:y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x %:%: y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x## y /* { dg-message "previous definition" } */
-#define cat(x,y) x##y /* { dg-warning "redefined" } */
+#define cat(x,y) x## y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x##y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x## y /* { dg-message "previous definition" } */
-#define cat(x,y) x%:%: y /* { dg-warning "redefined" } */
+#define cat(x,y) x## y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x%:%: y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x## y /* { dg-message "previous definition" } */
-#define cat(x,y) x%:%: y /* { dg-warning "redefined" } */
+#define cat(x,y) x## y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x%:%: y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x## y /* { dg-message "previous definition" } */
-#define cat(x,y) x ##y /* { dg-warning "redefined" } */
+#define cat(x,y) x## y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x ##y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x## y /* { dg-message "previous definition" } */
-#define cat(x,y) x %:%:y /* { dg-warning "redefined" } */
+#define cat(x,y) x## y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x %:%:y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x## y /* { dg-message "previous definition" } */
-#define cat(x,y) x ## y /* { dg-warning "redefined" } */
+#define cat(x,y) x## y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x ## y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x## y /* { dg-message "previous definition" } */
-#define cat(x,y) x %:%: y /* { dg-warning "redefined" } */
+#define cat(x,y) x## y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x %:%: y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x%:%: y /* { dg-message "previous definition" } */
-#define cat(x,y) x##y /* { dg-warning "redefined" } */
+#define cat(x,y) x%:%: y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x##y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x%:%: y /* { dg-message "previous definition" } */
-#define cat(x,y) x## y /* { dg-warning "redefined" } */
+#define cat(x,y) x%:%: y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x## y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x%:%: y /* { dg-message "previous definition" } */
-#define cat(x,y) x ##y /* { dg-warning "redefined" } */
+#define cat(x,y) x%:%: y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x ##y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x%:%: y /* { dg-message "previous definition" } */
-#define cat(x,y) x %:%:y /* { dg-warning "redefined" } */
+#define cat(x,y) x%:%: y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x %:%:y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x%:%: y /* { dg-message "previous definition" } */
-#define cat(x,y) x ## y /* { dg-warning "redefined" } */
+#define cat(x,y) x%:%: y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x ## y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x%:%: y /* { dg-message "previous definition" } */
-#define cat(x,y) x %:%: y /* { dg-warning "redefined" } */
+#define cat(x,y) x%:%: y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x %:%: y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x ##y /* { dg-message "previous definition" } */
-#define cat(x,y) x##y /* { dg-warning "redefined" } */
+#define cat(x,y) x ##y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x##y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x ##y /* { dg-message "previous definition" } */
-#define cat(x,y) x%:%: y /* { dg-warning "redefined" } */
+#define cat(x,y) x ##y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x%:%: y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x ##y /* { dg-message "previous definition" } */
-#define cat(x,y) x## y /* { dg-warning "redefined" } */
+#define cat(x,y) x ##y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x## y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x ##y /* { dg-message "previous definition" } */
-#define cat(x,y) x%:%: y /* { dg-warning "redefined" } */
+#define cat(x,y) x ##y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x%:%: y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x ##y /* { dg-message "previous definition" } */
-#define cat(x,y) x %:%:y /* { dg-warning "redefined" } */
+#define cat(x,y) x ##y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x %:%:y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x ##y /* { dg-message "previous definition" } */
-#define cat(x,y) x ## y /* { dg-warning "redefined" } */
+#define cat(x,y) x ##y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x ## y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x ##y /* { dg-message "previous definition" } */
-#define cat(x,y) x %:%: y /* { dg-warning "redefined" } */
+#define cat(x,y) x ##y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x %:%: y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x %:%:y /* { dg-message "previous definition" } */
-#define cat(x,y) x##y /* { dg-warning "redefined" } */
+#define cat(x,y) x %:%:y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x##y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x %:%:y /* { dg-message "previous definition" } */
-#define cat(x,y) x%:%: y /* { dg-warning "redefined" } */
+#define cat(x,y) x %:%:y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x%:%: y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x %:%:y /* { dg-message "previous definition" } */
-#define cat(x,y) x## y /* { dg-warning "redefined" } */
+#define cat(x,y) x %:%:y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x## y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x %:%:y /* { dg-message "previous definition" } */
-#define cat(x,y) x%:%: y /* { dg-warning "redefined" } */
+#define cat(x,y) x %:%:y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x%:%: y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x %:%:y /* { dg-message "previous definition" } */
-#define cat(x,y) x ##y /* { dg-warning "redefined" } */
+#define cat(x,y) x %:%:y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x ##y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x %:%:y /* { dg-message "previous definition" } */
-#define cat(x,y) x ## y /* { dg-warning "redefined" } */
+#define cat(x,y) x %:%:y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x ## y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x %:%:y /* { dg-message "previous definition" } */
-#define cat(x,y) x %:%: y /* { dg-warning "redefined" } */
+#define cat(x,y) x %:%:y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x %:%: y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x ## y /* { dg-message "previous definition" } */
-#define cat(x,y) x##y /* { dg-warning "redefined" } */
+#define cat(x,y) x ## y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x##y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x ## y /* { dg-message "previous definition" } */
-#define cat(x,y) x%:%: y /* { dg-warning "redefined" } */
+#define cat(x,y) x ## y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x%:%: y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x ## y /* { dg-message "previous definition" } */
-#define cat(x,y) x## y /* { dg-warning "redefined" } */
+#define cat(x,y) x ## y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x## y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x ## y /* { dg-message "previous definition" } */
-#define cat(x,y) x%:%: y /* { dg-warning "redefined" } */
+#define cat(x,y) x ## y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x%:%: y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x ## y /* { dg-message "previous definition" } */
-#define cat(x,y) x ##y /* { dg-warning "redefined" } */
+#define cat(x,y) x ## y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x ##y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x ## y /* { dg-message "previous definition" } */
-#define cat(x,y) x %:%:y /* { dg-warning "redefined" } */
+#define cat(x,y) x ## y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x %:%:y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x ## y /* { dg-message "previous definition" } */
-#define cat(x,y) x %:%: y /* { dg-warning "redefined" } */
+#define cat(x,y) x ## y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x %:%: y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x %:%: y /* { dg-message "previous definition" } */
-#define cat(x,y) x##y /* { dg-warning "redefined" } */
+#define cat(x,y) x %:%: y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x##y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x %:%: y /* { dg-message "previous definition" } */
-#define cat(x,y) x%:%: y /* { dg-warning "redefined" } */
+#define cat(x,y) x %:%: y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x%:%: y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x %:%: y /* { dg-message "previous definition" } */
-#define cat(x,y) x## y /* { dg-warning "redefined" } */
+#define cat(x,y) x %:%: y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x## y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x %:%: y /* { dg-message "previous definition" } */
-#define cat(x,y) x%:%: y /* { dg-warning "redefined" } */
+#define cat(x,y) x %:%: y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x%:%: y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x %:%: y /* { dg-message "previous definition" } */
-#define cat(x,y) x ##y /* { dg-warning "redefined" } */
+#define cat(x,y) x %:%: y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x ##y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x %:%: y /* { dg-message "previous definition" } */
-#define cat(x,y) x %:%:y /* { dg-warning "redefined" } */
+#define cat(x,y) x %:%: y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x %:%:y /* { dg-warning "-:redefined" } */
#undef cat
-#define cat(x,y) x %:%: y /* { dg-message "previous definition" } */
-#define cat(x,y) x ## y /* { dg-warning "redefined" } */
+#define cat(x,y) x %:%: y /* { dg-message "-:previous definition" } */
+#define cat(x,y) x ## y /* { dg-warning "-:redefined" } */
#undef cat
#define cat(x,y) x##y
@@ -442,28 +442,28 @@
#define cat(x,y) x %:%: y
#undef cat
-#define cat3(x,y,z) x##y##z /* { dg-message "previous definition" } */
-#define cat3(x,y,z) x##y####z /* { dg-warning "redefined" } */
+#define cat3(x,y,z) x##y##z /* { dg-message "-:previous definition" } */
+#define cat3(x,y,z) x##y####z /* { dg-warning "-:redefined" } */
#undef cat3
-#define cat3(x,y,z) x##y####z /* { dg-message "previous definition" } */
-#define cat3(x,y,z) x####y##z /* { dg-warning "redefined" } */
+#define cat3(x,y,z) x##y####z /* { dg-message "-:previous definition" } */
+#define cat3(x,y,z) x####y##z /* { dg-warning "-:redefined" } */
#undef cat3
-#define cat3(x,y,z) x##y####z /* { dg-message "previous definition" } */
-#define cat3(x,y,z) x##y## ##z /* { dg-warning "redefined" } */
+#define cat3(x,y,z) x##y####z /* { dg-message "-:previous definition" } */
+#define cat3(x,y,z) x##y## ##z /* { dg-warning "-:redefined" } */
#undef cat3
-#define cat3(x,y,z) x##y####z /* { dg-message "previous definition" } */
-#define cat3(x,y,z) x##y##%:%:z /* { dg-warning "redefined" } */
+#define cat3(x,y,z) x##y####z /* { dg-message "-:previous definition" } */
+#define cat3(x,y,z) x##y##%:%:z /* { dg-warning "-:redefined" } */
#undef cat3
-#define cat3(x,y,z) x##y######## ####z /* { dg-message "previous definition" } */
-#define cat3(x,y,z) x##y############z /* { dg-warning "redefined" } */
+#define cat3(x,y,z) x##y######## ####z /* { dg-message "-:previous definition" } */
+#define cat3(x,y,z) x##y############z /* { dg-warning "-:redefined" } */
#undef cat3
-#define cat3(x,y,z) x##y############z /* { dg-message "previous definition" } */
-#define cat3(x,y,z) x##y########%:%:##z /* { dg-warning "redefined" } */
+#define cat3(x,y,z) x##y############z /* { dg-message "-:previous definition" } */
+#define cat3(x,y,z) x##y########%:%:##z /* { dg-warning "-:redefined" } */
#undef cat3
#define cat3(x,y,z) x##y##z
diff --git a/gcc/testsuite/gcc.dg/cpp/sysmac1.c b/gcc/testsuite/gcc.dg/cpp/sysmac1.c
index 54f161e020f..55ec200ffff 100644
--- a/gcc/testsuite/gcc.dg/cpp/sysmac1.c
+++ b/gcc/testsuite/gcc.dg/cpp/sysmac1.c
@@ -22,5 +22,5 @@
(str); /* { dg-warning "used with arguments" } */
(sys_str); /* { dg-bogus "used with arguments" } */
-foo (one_arg); /* { dg-warning "requires at least one" } */
+foo (one_arg); /* { dg-warning "requires at least one" "" { xfail *-*-* } } */
sys_foo (one_arg); /* { dg-bogus "requires at least one" } */
diff --git a/gcc/testsuite/gcc.dg/cpp/trad/Wunused.c b/gcc/testsuite/gcc.dg/cpp/trad/Wunused.c
index 403d617f5d0..97465f3967c 100644
--- a/gcc/testsuite/gcc.dg/cpp/trad/Wunused.c
+++ b/gcc/testsuite/gcc.dg/cpp/trad/Wunused.c
@@ -14,9 +14,9 @@
#define used3 /* { dg-bogus "used" } */
#define used4 something /* { dg-bogus "used" } */
-#define unused5 /* { dg-warning "used" } */
-#define unused6 /* { dg-warning "used" } */
-#define unused7() /* { dg-warning "used" } */
+#define unused5 /* { dg-warning "-:used" } */
+#define unused6 /* { dg-warning "-:used" } */
+#define unused7() /* { dg-warning "-:used" } */
#if defined used1
#endif
diff --git a/gcc/testsuite/gcc.dg/cpp/trad/argcount.c b/gcc/testsuite/gcc.dg/cpp/trad/argcount.c
index 208cd44b3b0..7098caf7d76 100644
--- a/gcc/testsuite/gcc.dg/cpp/trad/argcount.c
+++ b/gcc/testsuite/gcc.dg/cpp/trad/argcount.c
@@ -7,15 +7,15 @@
#define g(x, y) x y
#define h()
-f(); /* { dg-bogus "requires 1" "no arg is 1 empty arg" } */
-f( ); /* { dg-bogus "macro" "1 arg to 1 param macro" } */
-f(1,); /* { dg-error "passed 2" "2 args to 1 param macro" } */
-f(1,2); /* { dg-error "passed 2" "2 args to 1 param macro" } */
-h(); /* { dg-bogus "macro" "no arg to 1 param macro" } */
-h( ); /* { dg-error "passed 1" "1 arg to 0 param macro" } */
-h(1,2); /* { dg-error "passed 2" "2 args to 0 param macro" } */
-g(); /* { dg-error "requires 2" "0 args to 2 param macro" } */
-g( ); /* { dg-error "requires 2" "1 args to 2 param macro" } */
-g( ,2); /* { dg-bogus "requires 2" "2 args to 2 param macro" } */
-g(,); /* { dg-bogus "requires 2" "2 args to 2 param macro" } */
-g(1,2,3); /* { dg-error "passed 3" "3 args to 2 param macro" } */
+f(); /* { dg-bogus "-:requires 1" "no arg is 1 empty arg" } */
+f( ); /* { dg-bogus "-:macro" "1 arg to 1 param macro" } */
+f(1,); /* { dg-error "-:passed 2" "2 args to 1 param macro" } */
+f(1,2); /* { dg-error "-:passed 2" "2 args to 1 param macro" } */
+h(); /* { dg-bogus "-:macro" "no arg to 1 param macro" } */
+h( ); /* { dg-error "-:passed 1" "1 arg to 0 param macro" } */
+h(1,2); /* { dg-error "-:passed 2" "2 args to 0 param macro" } */
+g(); /* { dg-error "-:requires 2" "0 args to 2 param macro" } */
+g( ); /* { dg-error "-:requires 2" "1 args to 2 param macro" } */
+g( ,2); /* { dg-bogus "-:requires 2" "2 args to 2 param macro" } */
+g(,); /* { dg-bogus "-:requires 2" "2 args to 2 param macro" } */
+g(1,2,3); /* { dg-error "-:passed 3" "3 args to 2 param macro" } */
diff --git a/gcc/testsuite/gcc.dg/cpp/trad/comment-3.c b/gcc/testsuite/gcc.dg/cpp/trad/comment-3.c
index e2710ad5629..7d1d8252f01 100644
--- a/gcc/testsuite/gcc.dg/cpp/trad/comment-3.c
+++ b/gcc/testsuite/gcc.dg/cpp/trad/comment-3.c
@@ -3,4 +3,4 @@
/* { dg-do preprocess } */
#if 0
-#endif // /* { dg-warning "extra tokens" } */
+#endif // /* { dg-warning "-:extra tokens" } */
diff --git a/gcc/testsuite/gcc.dg/cpp/trad/comment.c b/gcc/testsuite/gcc.dg/cpp/trad/comment.c
index 971a78a6d10..36fa90d3f37 100644
--- a/gcc/testsuite/gcc.dg/cpp/trad/comment.c
+++ b/gcc/testsuite/gcc.dg/cpp/trad/comment.c
@@ -2,4 +2,4 @@
/* { dg-do preprocess } */
-/* { dg-error "unterminated comment" }
+/* { dg-error "-:unterminated comment" }
diff --git a/gcc/testsuite/gcc.dg/cpp/trad/defined.c b/gcc/testsuite/gcc.dg/cpp/trad/defined.c
index 5fa1d93b8aa..fa4c4119d3d 100644
--- a/gcc/testsuite/gcc.dg/cpp/trad/defined.c
+++ b/gcc/testsuite/gcc.dg/cpp/trad/defined.c
@@ -16,7 +16,7 @@
#error REGPARMS should be defined
#endif
-#define defined /* { dg-error "defined" } */
+#define defined /* { dg-error "-:defined" } */
/* No diagnostics, though you could argue there should be. */
#if defined defined
diff --git a/gcc/testsuite/gcc.dg/cpp/trad/directive.c b/gcc/testsuite/gcc.dg/cpp/trad/directive.c
index ee7ebcab031..0a42df62639 100644
--- a/gcc/testsuite/gcc.dg/cpp/trad/directive.c
+++ b/gcc/testsuite/gcc.dg/cpp/trad/directive.c
@@ -12,7 +12,7 @@ HASH
/* Directives with their #s indented are not recognized. */
#if 0 /* { dg-bogus "unterminated" } */
-#wrong /* { dg-error "invalid" } */
+#wrong /* { dg-error "-:invalid" } */
#define foo 2
#define bar + 3
diff --git a/gcc/testsuite/gcc.dg/cpp/trad/funlike-3.c b/gcc/testsuite/gcc.dg/cpp/trad/funlike-3.c
index 5300afba708..2baa99a31c3 100644
--- a/gcc/testsuite/gcc.dg/cpp/trad/funlike-3.c
+++ b/gcc/testsuite/gcc.dg/cpp/trad/funlike-3.c
@@ -5,8 +5,8 @@
#define f(x) x
-#if 2 f(/* { dg-error "unterminated" "unterminated macro in directive" } */
+#if 2 f(/* { dg-error "-:unterminated" "unterminated macro in directive" } */
)
#endif
-f( /* { dg-error "unterminated" "unterminated macro" } */
+f( /* { dg-error "-:unterminated" "unterminated macro" } */
diff --git a/gcc/testsuite/gcc.dg/cpp/trad/funlike.c b/gcc/testsuite/gcc.dg/cpp/trad/funlike.c
index db550d53c76..1af6f9fcf97 100644
--- a/gcc/testsuite/gcc.dg/cpp/trad/funlike.c
+++ b/gcc/testsuite/gcc.dg/cpp/trad/funlike.c
@@ -21,5 +21,5 @@
# error /* { dg-bogus "error" "empty macro" } */
#endif
-#if f paren 6) /* { dg-error "missing binary" "macro-expanded parenthesis" } */
+#if f paren 6) /* { dg-error "-:missing binary" "macro-expanded parenthesis" } */
#endif
diff --git a/gcc/testsuite/gcc.dg/cpp/trad/literals-2.c b/gcc/testsuite/gcc.dg/cpp/trad/literals-2.c
index faa7bd87674..a6f9637c640 100644
--- a/gcc/testsuite/gcc.dg/cpp/trad/literals-2.c
+++ b/gcc/testsuite/gcc.dg/cpp/trad/literals-2.c
@@ -2,7 +2,7 @@
recognized. */
/* { dg-do preprocess } */
-/* { dg-warning "missing terminating" "bad charconst" { target *-*-* } .+2 } */
-/* { dg-error "not valid" "bad charconst" { target *-*-* } .+1 } */
+/* { dg-warning "-:missing terminating" "bad charconst" { target *-*-* } .+2 } */
+/* { dg-error "-:not valid" "bad charconst" { target *-*-* } .+1 } */
#if 'x
#endif
diff --git a/gcc/testsuite/gcc.dg/cpp/trad/macro.c b/gcc/testsuite/gcc.dg/cpp/trad/macro.c
index 164b4ecfee6..6f8aa9ac239 100644
--- a/gcc/testsuite/gcc.dg/cpp/trad/macro.c
+++ b/gcc/testsuite/gcc.dg/cpp/trad/macro.c
@@ -4,7 +4,7 @@
/* { dg-do preprocess } */
#define f(x)
-#define g(x, y...) /* { dg-error "macro parameter list" } */
+#define g(x, y...) /* { dg-error "-:macro parameter list" } */
#if 0
#define f(a,b) /* { dg-bogus "passed 2 arguments" } */
diff --git a/gcc/testsuite/gcc.dg/cpp/trad/pr65238-4.c b/gcc/testsuite/gcc.dg/cpp/trad/pr65238-4.c
index cf2f449133c..3cee7b9888b 100644
--- a/gcc/testsuite/gcc.dg/cpp/trad/pr65238-4.c
+++ b/gcc/testsuite/gcc.dg/cpp/trad/pr65238-4.c
@@ -11,9 +11,9 @@
#if __has_attribute(__has_attribute(unused))
#endif
-/* { dg-error "unterminated argument list invoking macro .__has_attribute." "" {target "*-*-*"} 5 } */
-/* { dg-error "#if with no expression" "" {target "*-*-*"} 5 } */
-/* { dg-error "unterminated argument list invoking macro .__has_attribute." "" {target "*-*-*"} 7 } */
-/* { dg-error "macro .__has_attribute. passed 2 arguments, but takes just 1" "" {target "*-*-*"} 9 } */
-/* { dg-error "missing ... in expression" "" {target "*-*-*"} 9 } */
-/* { dg-error "macro .__has_attribute. requires an identifier" "" {target "*-*-*"} 11 } */
+/* { dg-error "-:unterminated argument list invoking macro .__has_attribute." "" {target "*-*-*"} 5 } */
+/* { dg-error "-:#if with no expression" "" {target "*-*-*"} 5 } */
+/* { dg-error "-:unterminated argument list invoking macro .__has_attribute." "" {target "*-*-*"} 7 } */
+/* { dg-error "-:macro .__has_attribute. passed 2 arguments, but takes just 1" "" {target "*-*-*"} 9 } */
+/* { dg-error "-:missing ... in expression" "" {target "*-*-*"} 9 } */
+/* { dg-error "-:macro .__has_attribute. requires an identifier" "" {target "*-*-*"} 11 } */
diff --git a/gcc/testsuite/gcc.dg/cpp/trad/recurse-1.c b/gcc/testsuite/gcc.dg/cpp/trad/recurse-1.c
index b5fd7af7f5f..31c020bad87 100644
--- a/gcc/testsuite/gcc.dg/cpp/trad/recurse-1.c
+++ b/gcc/testsuite/gcc.dg/cpp/trad/recurse-1.c
@@ -4,7 +4,7 @@
/* { dg-do preprocess } */
#define foo foo
-foo /* { dg-error "detected recursion" } */
+foo /* { dg-error "-:detected recursion" } */
#define bar a bar b
-bar /* { dg-error "detected recursion" } */
+bar /* { dg-error "-:detected recursion" } */
diff --git a/gcc/testsuite/gcc.dg/cpp/trad/recurse-2.c b/gcc/testsuite/gcc.dg/cpp/trad/recurse-2.c
index 5c6550fae2a..3e5929d5a04 100644
--- a/gcc/testsuite/gcc.dg/cpp/trad/recurse-2.c
+++ b/gcc/testsuite/gcc.dg/cpp/trad/recurse-2.c
@@ -4,13 +4,13 @@
/* { dg-do preprocess } */
#define foo() foo()
-foo(); /* { dg-error "detected recursion" } */
+foo(); /* { dg-error "-:detected recursion" } */
#define bar() bar baz() bar
bar(); /* { dg-bogus "detected recursion" } */
#define baz() foo()
-baz(); /* { dg-error "detected recursion" } */
+baz(); /* { dg-error "-:detected recursion" } */
#define a(x) x(a)
-a(a); /* { dg-error "detected recursion" } */
+a(a); /* { dg-error "-:detected recursion" } */
diff --git a/gcc/testsuite/gcc.dg/cpp/trad/redef2.c b/gcc/testsuite/gcc.dg/cpp/trad/redef2.c
index 5fcd5eb32e8..ad10b86fd3d 100644
--- a/gcc/testsuite/gcc.dg/cpp/trad/redef2.c
+++ b/gcc/testsuite/gcc.dg/cpp/trad/redef2.c
@@ -2,31 +2,31 @@
/* { dg-do preprocess } */
-#define foo bar /* { dg-message "previous def" "foo prev def" } */
-#define foo barr /* { dg-warning "redefined" "foo redefined" } */
+#define foo bar /* { dg-message "-:previous def" "foo prev def" } */
+#define foo barr /* { dg-warning "-:redefined" "foo redefined" } */
#undef foo
-#define foo bar /* { dg-message "previous def" "foo prev def 2" } */
-#define foo() bar /* { dg-warning "redefined" "foo redefined 2" } */
+#define foo bar /* { dg-message "-:previous def" "foo prev def 2" } */
+#define foo() bar /* { dg-warning "-:redefined" "foo redefined 2" } */
#undef foo
-#define foo() bar /* { dg-message "previous def" "foo prev def" } */
-#define foo() barr /* { dg-warning "redefined" "foo redefined" } */
+#define foo() bar /* { dg-message "-:previous def" "foo prev def" } */
+#define foo() barr /* { dg-warning "-:redefined" "foo redefined" } */
-#define quux(thud) a thud b /* { dg-message "previous def" "quux prev def" } */
-#define quux(thu) a thud b /* { dg-warning "redefined" "quux redefined" } */
+#define quux(thud) a thud b /* { dg-message "-:previous def" "quux prev def" } */
+#define quux(thu) a thud b /* { dg-warning "-:redefined" "quux redefined" } */
-#define bar(x, y) x+y /* { dg-message "previous def" "bar prev def" } */
-#define bar(x, y) x+x /* { dg-warning "redefined" "bar redefined" } */
+#define bar(x, y) x+y /* { dg-message "-:previous def" "bar prev def" } */
+#define bar(x, y) x+x /* { dg-warning "-:redefined" "bar redefined" } */
-#define bat(x, y) x+y /* { dg-message "previous def" "bat prev def" } */
-#define bat(x, y) x+ y /* { dg-warning "redefined" "bat redefined" } */
+#define bat(x, y) x+y /* { dg-message "-:previous def" "bat prev def" } */
+#define bat(x, y) x+ y /* { dg-warning "-:redefined" "bat redefined" } */
-#define baz(x, y) x+y /* { dg-message "previous def" "baz prev def" } */
-#define baz(x, y) x +y /* { dg-warning "redefined" "baz redefined" } */
+#define baz(x, y) x+y /* { dg-message "-:previous def" "baz prev def" } */
+#define baz(x, y) x +y /* { dg-warning "-:redefined" "baz redefined" } */
-#define f(x, y) "x y" /* { dg-message "previous def" "f prev def" } */
-#define f(x, y) "x y" /* { dg-warning "redefined" "f redefined" } */
+#define f(x, y) "x y" /* { dg-message "-:previous def" "f prev def" } */
+#define f(x, y) "x y" /* { dg-warning "-:redefined" "f redefined" } */
-#define g(x, y) 'x' /* { dg-message "previous def" "g prev def" } */
-#define g(x, y) ' x' /* { dg-warning "redefined" "g redefined" } */
+#define g(x, y) 'x' /* { dg-message "-:previous def" "g prev def" } */
+#define g(x, y) ' x' /* { dg-warning "-:redefined" "g redefined" } */
diff --git a/gcc/testsuite/gcc.dg/cpp/ucnid-11.c b/gcc/testsuite/gcc.dg/cpp/ucnid-11.c
index a44a3eaf421..b6956f54454 100644
--- a/gcc/testsuite/gcc.dg/cpp/ucnid-11.c
+++ b/gcc/testsuite/gcc.dg/cpp/ucnid-11.c
@@ -4,23 +4,23 @@
/* { dg-options "-std=c99 -pedantic-errors" } */
/* Different spelling of UCN in expansion. */
-#define m1 \u00c1 /* { dg-message "previous definition" } */
-#define m1 \u00C1 /* { dg-error "redefined" } */
+#define m1 \u00c1 /* { dg-message "-:previous definition" } */
+#define m1 \u00C1 /* { dg-error "-:redefined" } */
#define m1ok \u00c1
#define m1ok \u00c1
/* Different spelling of UCN in argument name. */
-#define m2(\u00c1) /* { dg-message "previous definition" } */
-#define m2(\u00C1) /* { dg-error "redefined" } */
+#define m2(\u00c1) /* { dg-message "-:previous definition" } */
+#define m2(\u00C1) /* { dg-error "-:redefined" } */
#define m2ok(\u00c1)
#define m2ok(\u00c1)
/* Same spelling in argument name but different spelling when used in
expansion. */
-#define m3(\u00c1) \u00c1 /* { dg-message "previous definition" } */
-#define m3(\u00c1) \u00C1 /* { dg-error "redefined" } */
+#define m3(\u00c1) \u00c1 /* { dg-message "-:previous definition" } */
+#define m3(\u00c1) \u00C1 /* { dg-error "-:redefined" } */
#define m3ok(\u00c1) \u00C1
#define m3ok(\u00c1) \u00C1
diff --git a/gcc/testsuite/gcc.dg/cpp/unc1.c b/gcc/testsuite/gcc.dg/cpp/unc1.c
index 18c306fc89e..d8eb1923103 100644
--- a/gcc/testsuite/gcc.dg/cpp/unc1.c
+++ b/gcc/testsuite/gcc.dg/cpp/unc1.c
@@ -1,7 +1,7 @@
/* Tests for un-terminated conditionals: 1. */
/* { dg-do preprocess } */
-#if 1 /* { dg-error "unterminated" "unterminated #if" } */
+#if 1 /* { dg-error "-:unterminated" "unterminated #if" } */
#ifdef notdef /* { dg-bogus "unterminated" "nested terminated #ifdef" } */
diff --git a/gcc/testsuite/gcc.dg/cpp/unc2.c b/gcc/testsuite/gcc.dg/cpp/unc2.c
index 976d2b181ab..faa4b2905b4 100644
--- a/gcc/testsuite/gcc.dg/cpp/unc2.c
+++ b/gcc/testsuite/gcc.dg/cpp/unc2.c
@@ -1,7 +1,7 @@
/* Tests for unterminated conditionals: 2. */
/* { dg-do preprocess } */
-#ifdef __sparc__ /* { dg-error "unterminated" "unterminated if-elif-elif..." } */
+#ifdef __sparc__ /* { dg-error "-:unterminated" "unterminated if-elif-elif..." } */
sparc
#elif defined __powerpc__
ppc
diff --git a/gcc/testsuite/gcc.dg/cpp/unc3.c b/gcc/testsuite/gcc.dg/cpp/unc3.c
index d5f16f738ac..e20339cf809 100644
--- a/gcc/testsuite/gcc.dg/cpp/unc3.c
+++ b/gcc/testsuite/gcc.dg/cpp/unc3.c
@@ -1,5 +1,5 @@
/* Tests for unterminated conditionals: 3. */
/* { dg-do preprocess } */
-#if 1 /* { dg-error "#else" "unterminated #else" } */
+#if 1 /* { dg-error "-:#else" "unterminated #else" } */
#else
diff --git a/gcc/testsuite/gcc.dg/cpp/unc4.c b/gcc/testsuite/gcc.dg/cpp/unc4.c
index 10c49e9d4db..410e2fcda0c 100644
--- a/gcc/testsuite/gcc.dg/cpp/unc4.c
+++ b/gcc/testsuite/gcc.dg/cpp/unc4.c
@@ -36,4 +36,4 @@ ignored
/* dg.exp doesn't read the included files for tags, so we have to
do them explicitly here. */
-/* { dg-error "#if" "unc1.c: unterminated #if" { target *-*-* } 4 } */
+/* { dg-error "-:#if" "unc1.c: unterminated #if" { target *-*-* } 4 } */
diff --git a/gcc/testsuite/gcc.dg/cpp/undef2.c b/gcc/testsuite/gcc.dg/cpp/undef2.c
index 5614e039b22..189a72dd9c2 100644
--- a/gcc/testsuite/gcc.dg/cpp/undef2.c
+++ b/gcc/testsuite/gcc.dg/cpp/undef2.c
@@ -3,11 +3,11 @@
/* { dg-do preprocess } */
-#undef __DATE__ /* { dg-warning "undefining" "__DATE__" } */
-#undef __TIME__ /* { dg-warning "undefining" "__TIME__" } */
-#undef __FILE__ /* { dg-warning "undefining" "__FILE__" } */
-#undef __LINE__ /* { dg-warning "undefining" "__LINE__" } */
-#undef __STDC__ /* { dg-warning "undefining" "__STDC__" } */
+#undef __DATE__ /* { dg-warning "-:undefining \"__DATE__\"" } */
+#undef __TIME__ /* { dg-warning "-:undefining \"__TIME__\"" } */
+#undef __FILE__ /* { dg-warning "-:undefining \"__FILE__\"" } */
+#undef __LINE__ /* { dg-warning "undefining \"__LINE__\"" } */
+#undef __STDC__ /* { dg-warning "undefining \"__STDC__\"" } */
/* These should be protected from #undef, but aren't, because they
are set with normal #define commands - and on top of that, some
diff --git a/gcc/testsuite/gcc.dg/cpp/warn-redefined-2.c b/gcc/testsuite/gcc.dg/cpp/warn-redefined-2.c
index 3e2e57a79bb..ea5ad8170cd 100644
--- a/gcc/testsuite/gcc.dg/cpp/warn-redefined-2.c
+++ b/gcc/testsuite/gcc.dg/cpp/warn-redefined-2.c
@@ -6,13 +6,13 @@
// { dg-bogus "__TIME__ builtin is not defined" "no-time" { target *-*-* } .-1 }
#endif
-#define __TIME__ "X" // { dg-error "\"__TIME__\" redefined .-Werror=builtin-macro-redefined." }
+#define __TIME__ "X" // { dg-error "-:\"__TIME__\" redefined .-Werror=builtin-macro-redefined." }
#define __TIME__ "Y" // { dg-bogus "-Wbuiltin-macro-redefined" }
- // { dg-warning "\"__TIME__\" redefined" "not-builtin-1" { target *-*-* } .-1 }
- // { dg-message "previous definition" "previous-1" { target *-*-* } 9 }
+ // { dg-warning "-:\"__TIME__\" redefined" "not-builtin-1" { target *-*-* } .-1 }
+ // { dg-message "-:previous definition" "previous-1" { target *-*-* } 9 }
#define X "X"
#define X "Y" // { dg-bogus "-Wbuiltin-macro-redefined" }
- // { dg-warning "\"X\" redefined" "not-builtin-2" { target *-*-* } .-1 }
- // { dg-message "previous definition" "previous-2" { target *-*-* } 15 }
+ // { dg-warning "-:\"X\" redefined" "not-builtin-2" { target *-*-* } .-1 }
+ // { dg-message "-:previous definition" "previous-2" { target *-*-* } 15 }
diff --git a/gcc/testsuite/gcc.dg/cpp/warn-redefined.c b/gcc/testsuite/gcc.dg/cpp/warn-redefined.c
index c562d072e40..e0446689706 100644
--- a/gcc/testsuite/gcc.dg/cpp/warn-redefined.c
+++ b/gcc/testsuite/gcc.dg/cpp/warn-redefined.c
@@ -6,13 +6,13 @@
// { dg-bogus "__TIME__ builtin is not defined" "no-time" { target *-*-* } .-1 }
#endif
-#define __TIME__ "X" // { dg-warning "\"__TIME__\" redefined .-Wbuiltin-macro-redefined." }
+#define __TIME__ "X" // { dg-warning "-:\"__TIME__\" redefined .-Wbuiltin-macro-redefined." }
#define __TIME__ "Y" // { dg-bogus "-Wbuiltin-macro-redefined" }
- // { dg-warning "\"__TIME__\" redefined" "not-builtin-1" { target *-*-* } .-1 }
- // { dg-message "previous definition" "previous-1" { target *-*-* } 9 }
+ // { dg-warning "-:\"__TIME__\" redefined" "not-builtin-1" { target *-*-* } .-1 }
+ // { dg-message "-:previous definition" "previous-1" { target *-*-* } 9 }
#define X "X"
#define X "Y" // { dg-bogus "-Wbuiltin-macro-redefined" }
- // { dg-warning "\"X\" redefined" "not-builtin-2" { target *-*-* } .-1 }
- // { dg-message "previous definition" "previous-2" { target *-*-* } 15 }
+ // { dg-warning "-:\"X\" redefined" "not-builtin-2" { target *-*-* } .-1 }
+ // { dg-message "-:previous definition" "previous-2" { target *-*-* } 15 }
diff --git a/gcc/testsuite/gcc.dg/cpp/warn-unused-macros-2.c b/gcc/testsuite/gcc.dg/cpp/warn-unused-macros-2.c
index d7fe145c8cc..f82d67fe9c2 100644
--- a/gcc/testsuite/gcc.dg/cpp/warn-unused-macros-2.c
+++ b/gcc/testsuite/gcc.dg/cpp/warn-unused-macros-2.c
@@ -1,4 +1,4 @@
// { dg-do preprocess }
// { dg-options "-std=gnu99 -fdiagnostics-show-option -Werror=unused-macros" }
/* { dg-message "some warnings being treated as errors" "" {target "*-*-*"} 0 } */
-#define X X // { dg-error "macro \"X\" is not used .-Werror=unused-macros." }
+#define X X // { dg-error "-:macro \"X\" is not used .-Werror=unused-macros." }
diff --git a/gcc/testsuite/gcc.dg/cpp/warn-unused-macros.c b/gcc/testsuite/gcc.dg/cpp/warn-unused-macros.c
index e1ce94eeee6..b18f506b13b 100644
--- a/gcc/testsuite/gcc.dg/cpp/warn-unused-macros.c
+++ b/gcc/testsuite/gcc.dg/cpp/warn-unused-macros.c
@@ -1,4 +1,4 @@
// { dg-do preprocess }
// { dg-options "-std=gnu99 -fdiagnostics-show-option -Wunused-macros" }
-#define X X // { dg-warning "macro \"X\" is not used .-Wunused-macros." }
+#define X X // { dg-warning "-:macro \"X\" is not used .-Wunused-macros." }
diff --git a/gcc/testsuite/gcc.dg/debug/dwarf2/pr82837.c b/gcc/testsuite/gcc.dg/debug/dwarf2/pr82837.c
new file mode 100644
index 00000000000..743fb28ca41
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/debug/dwarf2/pr82837.c
@@ -0,0 +1,29 @@
+/* PR debug/82837 */
+/* { dg-do compile } */
+/* { dg-options "-O2 -g" } */
+/* { dg-additional-options "-march=athlon" { target ia32 } } */
+/* { dg-additional-options "-fPIE" { target pie } } */
+
+static char b[100];
+static int *c;
+char *e;
+void a(char *f, char *i) {
+ int d = __builtin_object_size(f, 1);
+ __builtin___strcpy_chk(f, i, d);
+}
+void g(void) {
+ int h;
+ switch (*c) {
+ case 8:
+ e = "swapgs";
+ break;
+ case 9:
+ e = "rdtscp";
+ break;
+ default:
+ return;
+ }
+ h = __builtin_strlen(b);
+ a(b + h - 6, e);
+ c++;
+}
diff --git a/gcc/testsuite/gcc.dg/dfp/builtin-tgmath-dfp-err.c b/gcc/testsuite/gcc.dg/dfp/builtin-tgmath-dfp-err.c
new file mode 100644
index 00000000000..b94c760c3ec
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/dfp/builtin-tgmath-dfp-err.c
@@ -0,0 +1,33 @@
+/* Test __builtin_tgmath: errors that indicate bad arguments in a call
+ to a type-generic macro, DFP involved. */
+/* { dg-do compile } */
+/* { dg-options "" } */
+
+float f_f (float);
+double f_d (double);
+long double f_ld (long double);
+_Complex float f_cf (_Complex float);
+_Complex double f_cd (_Complex double);
+_Complex long double f_cld (_Complex long double);
+_Decimal32 f_d32 (_Decimal32);
+_Decimal64 f_d64 (_Decimal64);
+_Decimal128 f_d128 (_Decimal128);
+float f_ff (float, float);
+_Complex float f_cfcf (_Complex float, _Complex float);
+_Decimal32 f_d32d32 (_Decimal32, _Decimal32);
+_Complex float cf;
+float f;
+_Decimal32 d32;
+
+void
+test (void)
+{
+ __builtin_tgmath (f_cf, f_cd, f_cld, d32); /* { dg-error "decimal floating-point argument 1 to complex-only type-generic function" } */
+ __builtin_tgmath (f_f, f_d, f_ld, d32); /* { dg-error "decimal floating-point argument 1 to binary-only type-generic function" } */
+ __builtin_tgmath (f_cfcf, f_d32d32, cf, d32); /* { dg-error "both complex and decimal floating-point arguments to type-generic function" } */
+ __builtin_tgmath (f_ff, f_d32d32, f, d32); /* { dg-error "both binary and decimal floating-point arguments to type-generic function" } */
+ __builtin_tgmath (f_d32, f_d64, f_d128, cf); /* { dg-error "complex argument 1 to decimal-only type-generic function" } */
+ __builtin_tgmath (f_d32, f_d64, f_d128, f); /* { dg-error "binary argument 1 to decimal-only type-generic function" } */
+ __builtin_tgmath (f_cfcf, f_d32d32, d32, cf); /* { dg-error "both complex and decimal floating-point arguments to type-generic function" } */
+ __builtin_tgmath (f_ff, f_d32d32, d32, f); /* { dg-error "both binary and decimal floating-point arguments to type-generic function" } */
+}
diff --git a/gcc/testsuite/gcc.dg/dfp/builtin-tgmath-dfp.c b/gcc/testsuite/gcc.dg/dfp/builtin-tgmath-dfp.c
new file mode 100644
index 00000000000..256a71e68be
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/dfp/builtin-tgmath-dfp.c
@@ -0,0 +1,263 @@
+/* Test __builtin_tgmath: valid uses, decimal floating-point types. */
+/* { dg-do run } */
+/* { dg-options "" } */
+
+extern void abort (void);
+extern void exit (int);
+
+#define CHECK_CALL(C, E, V) \
+ do \
+ { \
+ if ((C) != (E)) \
+ abort (); \
+ extern __typeof (C) V; \
+ } \
+ while (0)
+
+extern float var_f;
+extern double var_d;
+extern long double var_ld;
+extern _Complex float var_cf;
+extern _Complex double var_cd;
+extern _Complex long double var_cld;
+extern _Decimal32 var_d32;
+extern _Decimal64 var_d64;
+extern _Decimal128 var_d128;
+extern int var_i;
+
+/* Test decimal-only function, single argument. */
+
+_Decimal32 t1d32 (_Decimal32 x) { return x + 1; }
+_Decimal64 t1d64 (_Decimal64 x) { return x + 2; }
+_Decimal128 t1d128 (_Decimal128 x) { return x + 3; }
+
+#define t1v(x) __builtin_tgmath (t1d32, t1d64, t1d128, x)
+
+static void
+test_1 (void)
+{
+ _Decimal32 d32 = 32;
+ _Decimal64 d64 = 64;
+ _Decimal128 d128 = 128;
+ int i = 256;
+ CHECK_CALL (t1v (d32), 33, var_d32);
+ CHECK_CALL (t1v (d64), 66, var_d64);
+ CHECK_CALL (t1v (d128), 131, var_d128);
+ CHECK_CALL (t1v (i), 258, var_d64);
+}
+
+/* Test decimal-only function, two arguments. */
+
+_Decimal32 t2d32 (_Decimal32 x, _Decimal32 y) { return 10 * x + y; }
+_Decimal64 t2d64 (_Decimal64 x, _Decimal64 y) { return 100 * x + y;; }
+_Decimal128 t2d128 (_Decimal128 x, _Decimal128 y) { return 1000 * x + y; }
+
+#define t2v(x, y) __builtin_tgmath (t2d32, t2d64, t2d128, x, y)
+
+static void
+test_2 (void)
+{
+ _Decimal32 d32 = 1;
+ _Decimal64 d64 = 2;
+ _Decimal128 d128 = 3;
+ int i = 4;
+ CHECK_CALL (t2v (d32, d32), 11, var_d32);
+ CHECK_CALL (t2v (d64, d64), 202, var_d64);
+ CHECK_CALL (t2v (d32, d64), 102, var_d64);
+ CHECK_CALL (t2v (d128, d64), 3002, var_d128);
+ CHECK_CALL (t2v (d128, i), 3004, var_d128);
+ CHECK_CALL (t2v (i, i), 404, var_d64);
+ CHECK_CALL (t2v (i, d32), 401, var_d64);
+}
+
+/* Test real-only function, single argument. */
+
+float t3f (float x) { return x + 1; }
+double t3d (double x) { return x + 2; }
+long double t3l (long double x) { return x + 3; }
+_Decimal32 t3d32 (_Decimal32 x) { return x + 4; }
+_Decimal64 t3d64 (_Decimal64 x) { return x + 5; }
+_Decimal128 t3d128 (_Decimal128 x) { return x + 6; }
+
+#define t3v(x) __builtin_tgmath (t3f, t3d, t3l, t3d32, t3d64, t3d128, x)
+
+static void
+test_3 (void)
+{
+ float f = 1;
+ double d = 2;
+ long double ld = 3;
+ int i = 4;
+ _Decimal32 d32 = 5;
+ _Decimal64 d64 = 6;
+ _Decimal128 d128 = 7;
+ CHECK_CALL (t3v (f), 2, var_f);
+ CHECK_CALL (t3v (d), 4, var_d);
+ CHECK_CALL (t3v (ld), 6, var_ld);
+ CHECK_CALL (t3v (i), 6, var_d);
+ CHECK_CALL (t3v (d32), 9, var_d32);
+ CHECK_CALL (t3v (d64), 11, var_d64);
+ CHECK_CALL (t3v (d128), 13, var_d128);
+}
+
+/* Test real-and-complex function, single argument. */
+
+float t4f (float x) { return x + 1; }
+double t4d (double x) { return x + 2; }
+long double t4l (long double x) { return x + 3; }
+_Complex float t4cf (_Complex float x) { return x + 4; }
+_Complex double t4cd (_Complex double x) { return x + 5; }
+_Complex long double t4cl (_Complex long double x) { return x + 6; }
+_Decimal32 t4d32 (_Decimal32 x) { return x + 7; }
+_Decimal64 t4d64 (_Decimal64 x) { return x + 8; }
+_Decimal128 t4d128 (_Decimal128 x) { return x + 9; }
+
+#define t4v(x) __builtin_tgmath (t4f, t4d, t4l, t4cf, t4cd, t4cl, t4d32, t4d64, t4d128, x)
+
+static void
+test_4 (void)
+{
+ float f = 1;
+ double d = 2;
+ long double ld = 3;
+ int i = 4;
+ _Complex float cf = 5;
+ _Complex double cd = 6;
+ _Complex long double cld = 7;
+ _Complex int ci = 8;
+ _Decimal32 d32 = 9;
+ _Decimal64 d64 = 10;
+ _Decimal128 d128 = 11;
+ CHECK_CALL (t4v (f), 2, var_f);
+ CHECK_CALL (t4v (d), 4, var_d);
+ CHECK_CALL (t4v (ld), 6, var_ld);
+ CHECK_CALL (t4v (i), 6, var_d);
+ CHECK_CALL (t4v (cf), 9, var_cf);
+ CHECK_CALL (t4v (cd), 11, var_cd);
+ CHECK_CALL (t4v (cld), 13, var_cld);
+ CHECK_CALL (t4v (ci), 13, var_cd);
+ CHECK_CALL (t4v (d32), 16, var_d32);
+ CHECK_CALL (t4v (d64), 18, var_d64);
+ CHECK_CALL (t4v (d128), 20, var_d128);
+}
+
+/* Test real-and-complex function, real return type, single argument. */
+
+float t5f (float x) { return x + 1; }
+double t5d (double x) { return x + 2; }
+long double t5l (long double x) { return x + 3; }
+float t5cf (_Complex float x) { return __real__ x + 4; }
+double t5cd (_Complex double x) { return __real__ x + 5; }
+long double t5cl (_Complex long double x) { return __real__ x + 6; }
+_Decimal32 t5d32 (_Decimal32 x) { return x + 7; }
+_Decimal64 t5d64 (_Decimal64 x) { return x + 8; }
+_Decimal128 t5d128 (_Decimal128 x) { return x + 9; }
+
+#define t5v(x) __builtin_tgmath (t5f, t5d, t5l, t5cf, t5cd, t5cl, t5d32, t5d64, t5d128, x)
+
+static void
+test_5 (void)
+{
+ float f = 1;
+ double d = 2;
+ long double ld = 3;
+ int i = 4;
+ _Complex float cf = 5;
+ _Complex double cd = 6;
+ _Complex long double cld = 7;
+ _Complex int ci = 8;
+ _Decimal32 d32 = 9;
+ _Decimal64 d64 = 10;
+ _Decimal128 d128 = 11;
+ CHECK_CALL (t5v (f), 2, var_f);
+ CHECK_CALL (t5v (d), 4, var_d);
+ CHECK_CALL (t5v (ld), 6, var_ld);
+ CHECK_CALL (t5v (i), 6, var_d);
+ CHECK_CALL (t5v (cf), 9, var_f);
+ CHECK_CALL (t5v (cd), 11, var_d);
+ CHECK_CALL (t5v (cld), 13, var_ld);
+ CHECK_CALL (t5v (ci), 13, var_d);
+ CHECK_CALL (t5v (d32), 16, var_d32);
+ CHECK_CALL (t5v (d64), 18, var_d64);
+ CHECK_CALL (t5v (d128), 20, var_d128);
+}
+
+/* Test real-and-complex function, two arguments. */
+
+float t6f (float x, float y) { return x * 10 + y; }
+double t6d (double x, double y) { return x * 100 + y; }
+long double t6l (long double x, long double y) { return x * 1000 + y; }
+_Complex float t6cf (_Complex float x, _Complex float y) { return x * 10000 + y; }
+_Complex double t6cd (_Complex double x, _Complex double y) { return x * 100000 + y; }
+_Complex long double t6cl (_Complex long double x, _Complex long double y) { return x * 1000000 + y; }
+_Decimal32 t6d32 (_Decimal32 x, _Decimal32 y) { return x * 50 + y; }
+_Decimal64 t6d64 (_Decimal64 x, _Decimal64 y) { return x * 500 + y; }
+_Decimal128 t6d128 (_Decimal128 x, _Decimal128 y) { return x * 5000 + y; }
+
+#define t6v(x, y) __builtin_tgmath (t6f, t6d, t6l, t6cf, t6cd, t6cl, t6d32, t6d64, t6d128, x, y)
+
+static void
+test_6 (void)
+{
+ float f = 1;
+ double d = 2;
+ long double ld = 3;
+ int i = 4;
+ _Complex float cf = 5;
+ _Complex double cd = 6;
+ _Complex long double cld = 7;
+ _Complex int ci = 8;
+ _Decimal32 d32 = 9;
+ _Decimal64 d64 = 10;
+ _Decimal128 d128 = 11;
+ CHECK_CALL (t6v (f, f), 11, var_f);
+ CHECK_CALL (t6v (d, f), 201, var_d);
+ CHECK_CALL (t6v (f, d), 102, var_d);
+ CHECK_CALL (t6v (f, i), 104, var_d);
+ CHECK_CALL (t6v (ld, f), 3001, var_ld);
+ CHECK_CALL (t6v (i, ld), 4003, var_ld);
+ CHECK_CALL (t6v (i, i), 404, var_d);
+ CHECK_CALL (t6v (cf, f), 50001, var_cf);
+ CHECK_CALL (t6v (cf, cf), 50005, var_cf);
+ CHECK_CALL (t6v (cd, cf), 600005, var_cd);
+ CHECK_CALL (t6v (d, cld), 2000007, var_cld);
+ CHECK_CALL (t6v (ci, ci), 800008, var_cd);
+ CHECK_CALL (t6v (ci, f), 800001, var_cd);
+ CHECK_CALL (t6v (d32, d32), 459, var_d32);
+ CHECK_CALL (t6v (d64, i), 5004, var_d64);
+ CHECK_CALL (t6v (i, d32), 2009, var_d64);
+ CHECK_CALL (t6v (d128, d32), 55009, var_d128);
+}
+
+/* Test decimal-only function rounding result to narrower type. */
+
+_Decimal32 t7d64 (_Decimal64 x) { return 1 + x; }
+_Decimal32 t7d128 (_Decimal128 x) { return 2 + x; }
+
+#define t7v(x) __builtin_tgmath (t7d64, t7d128, x)
+
+static void
+test_7 (void)
+{
+ _Decimal32 d32 = 1;
+ _Decimal64 d64 = 2;
+ _Decimal128 d128 = 3;
+ short s = 4;
+ CHECK_CALL (t7v (d32), 2, var_d32);
+ CHECK_CALL (t7v (d64), 3, var_d32);
+ CHECK_CALL (t7v (d128), 5, var_d32);
+ CHECK_CALL (t7v (s), 5, var_d32);
+}
+
+int
+main (void)
+{
+ test_1 ();
+ test_2 ();
+ test_3 ();
+ test_4 ();
+ test_5 ();
+ test_6 ();
+ test_7 ();
+ exit (0);
+}
diff --git a/gcc/testsuite/gcc.dg/div_neg.c b/gcc/testsuite/gcc.dg/div_neg.c
new file mode 100644
index 00000000000..da499cda2fb
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/div_neg.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-optimized" } */
+
+float
+div_neg (float x, float y)
+{
+ return (-x / y) * (x / -y);
+}
+
+/* { dg-final { scan-tree-dump-times " / " 1 "optimized" } } */
diff --git a/gcc/testsuite/gcc.dg/empty-source-2.c b/gcc/testsuite/gcc.dg/empty-source-2.c
index ae36159b5af..e0180ec9475 100644
--- a/gcc/testsuite/gcc.dg/empty-source-2.c
+++ b/gcc/testsuite/gcc.dg/empty-source-2.c
@@ -3,4 +3,4 @@
/* { dg-do compile } */
/* { dg-options "-pedantic" } */
-/* { dg-warning "ISO C forbids an empty translation unit" "empty" } */
+/* { dg-warning "-:ISO C forbids an empty translation unit" "empty" } */
diff --git a/gcc/testsuite/gcc.dg/empty-source-3.c b/gcc/testsuite/gcc.dg/empty-source-3.c
index bcd76ac2905..f8c58b3ef3f 100644
--- a/gcc/testsuite/gcc.dg/empty-source-3.c
+++ b/gcc/testsuite/gcc.dg/empty-source-3.c
@@ -4,4 +4,4 @@
/* { dg-do compile } */
/* { dg-options "-pedantic-errors" } */
-/* { dg-error "ISO C forbids an empty translation unit" "empty" } */
+/* { dg-error "-:ISO C forbids an empty translation unit" "empty" } */
diff --git a/gcc/testsuite/gcc.dg/gomp/macro-4.c b/gcc/testsuite/gcc.dg/gomp/macro-4.c
index 7d20f4cd68d..28d198b1ce3 100644
--- a/gcc/testsuite/gcc.dg/gomp/macro-4.c
+++ b/gcc/testsuite/gcc.dg/gomp/macro-4.c
@@ -10,9 +10,9 @@ void bar (void);
void
foo (void)
{
-#pragma omp p /* { dg-warning "ignoring #pragma omp _Pragma" } */
+#pragma omp p /* { dg-warning "-:ignoring #pragma omp _Pragma" } */
bar ();
- omp_p /* { dg-warning "ignoring #pragma omp _Pragma" } */
+ omp_p /* { dg-warning "-:ignoring #pragma omp _Pragma" } */
bar ();
}
@@ -22,8 +22,8 @@ foo (void)
void
baz (void)
{
-#pragma omp parallel /* { dg-warning "ignoring #pragma omp serial" } */
+#pragma omp parallel /* { dg-warning "-:ignoring #pragma omp serial" } */
bar ();
- omp_parallel /* { dg-warning "ignoring #pragma omp serial" } */
+ omp_parallel /* { dg-warning "-:ignoring #pragma omp serial" } */
bar ();
}
diff --git a/gcc/testsuite/gcc.dg/noncompile/pr35447-1.c b/gcc/testsuite/gcc.dg/noncompile/pr35447-1.c
index 9c31e9bb617..b9bbb733fae 100644
--- a/gcc/testsuite/gcc.dg/noncompile/pr35447-1.c
+++ b/gcc/testsuite/gcc.dg/noncompile/pr35447-1.c
@@ -4,4 +4,4 @@
void foo()
{
({ int i().; }); /* { dg-error "expected" } */
-} /* { dg-error "expected" } */
+} /* { dg-error "-:expected" } */
diff --git a/gcc/testsuite/gcc.dg/plugin/location-overflow-test-1.c b/gcc/testsuite/gcc.dg/plugin/location-overflow-test-1.c
index 7983c035862..1a80a668a0f 100644
--- a/gcc/testsuite/gcc.dg/plugin/location-overflow-test-1.c
+++ b/gcc/testsuite/gcc.dg/plugin/location-overflow-test-1.c
@@ -5,7 +5,7 @@
numbers are available. */
/* Verify that we're in column-less mode. */
-extern unknown_type test; /* { dg-error "0: unknown type name" } */
+extern unknown_type test; /* { dg-error "-:unknown type name" } */
/* PR c++/68819: verify that -Wmisleading-indentation is suppressed. */
@@ -13,7 +13,7 @@ int
fn_1 (int flag)
{
int x = 4, y = 5;
- if (flag) x = 3; y = 2; /* { dg-message "disabled from this point" } */
+ if (flag) x = 3; y = 2; /* { dg-message "-:disabled from this point" } */
return x * y;
}
diff --git a/gcc/testsuite/gcc.dg/pr20245-1.c b/gcc/testsuite/gcc.dg/pr20245-1.c
index 51089c6e996..b5767d88b7b 100644
--- a/gcc/testsuite/gcc.dg/pr20245-1.c
+++ b/gcc/testsuite/gcc.dg/pr20245-1.c
@@ -2,4 +2,4 @@
/* { dg-do compile } */
/* { dg-options "" } */
-void foo() x; /* { dg-error "expected" } */
+void foo() x; /* { dg-error "-:expected" } */
diff --git a/gcc/testsuite/gcc.dg/pr28419.c b/gcc/testsuite/gcc.dg/pr28419.c
index a1aa4bfe8db..9974864034b 100644
--- a/gcc/testsuite/gcc.dg/pr28419.c
+++ b/gcc/testsuite/gcc.dg/pr28419.c
@@ -1,3 +1,4 @@
/* { dg-do compile } */
void foo()
const char* p = __FUNCTION__; /* { dg-error "" } */
+/* { dg-error "-:expected" "" } */
diff --git a/gcc/testsuite/gcc.dg/pr44545.c b/gcc/testsuite/gcc.dg/pr44545.c
index 51983ef76b9..8058261f850 100644
--- a/gcc/testsuite/gcc.dg/pr44545.c
+++ b/gcc/testsuite/gcc.dg/pr44545.c
@@ -1,6 +1,6 @@
/* { dg-do compile } */
/* { dg-options "-O2 -fnon-call-exceptions -ftrapv -fexceptions" } */
-int
+void
DrawChunk(int *tabSize, int x)
{
const int numEnds = 10;
diff --git a/gcc/testsuite/gcc.dg/pr80131-1.c b/gcc/testsuite/gcc.dg/pr80131-1.c
new file mode 100644
index 00000000000..0bfe1f4f3da
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr80131-1.c
@@ -0,0 +1,31 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target int32plus } */
+/* { dg-options "-fdump-tree-gimple" } */
+
+/* Checks the simplification of:
+ 1 << (C - x) to (1 << C) >> x, where C = precision (type) - 1
+ f1 is not simplified but f2, f3 and f4 are. */
+
+__INT64_TYPE__ f1 (__INT64_TYPE__ i)
+{
+ return (__INT64_TYPE__)1 << (31 - i);
+}
+
+__INT64_TYPE__ f2 (__INT64_TYPE__ i)
+{
+ return (__INT64_TYPE__)1 << (63 - i);
+}
+
+__UINT64_TYPE__ f3 (__INT64_TYPE__ i)
+{
+ return (__UINT64_TYPE__)1 << (63 - i);
+}
+
+__INT32_TYPE__ f4 (__INT32_TYPE__ i)
+{
+ return (__INT32_TYPE__)1 << (31 - i);
+}
+
+/* { dg-final { scan-tree-dump-times "= 31 -" 1 "gimple" } } */
+/* { dg-final { scan-tree-dump-times "9223372036854775808 >>" 2 "gimple" } } */
+/* { dg-final { scan-tree-dump "2147483648 >>" "gimple" } } */
diff --git a/gcc/testsuite/gcc.dg/pr82788.c b/gcc/testsuite/gcc.dg/pr82788.c
new file mode 100644
index 00000000000..a8f628fd7f6
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr82788.c
@@ -0,0 +1,4 @@
+/* { dg-do run } */
+/* { dg-options "-O2 -fstack-clash-protection --param stack-clash-protection-probe-interval=10 --param stack-clash-protection-guard-size=12" } */
+/* { dg-require-effective-target supports_stack_clash_protection } */
+int main() { int a[1442]; return 0;}
diff --git a/gcc/testsuite/gcc.dg/pr82863.c b/gcc/testsuite/gcc.dg/pr82863.c
new file mode 100644
index 00000000000..b4028169a96
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr82863.c
@@ -0,0 +1,12 @@
+/* PR c/82167 */
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+typedef long long a;
+a b;
+float
+c ()
+{
+ float d = b > 0;
+ return d;
+}
diff --git a/gcc/testsuite/gcc.dg/pr82916.c b/gcc/testsuite/gcc.dg/pr82916.c
new file mode 100644
index 00000000000..50e467f3244
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr82916.c
@@ -0,0 +1,47 @@
+/* PR bootstrap/82916 */
+/* { dg-do run } */
+/* { dg-options "-O2 -fno-tree-dse" } */
+
+struct A { struct A *next; };
+struct C
+{
+ int *of;
+ struct C *parent, *prev, *next;
+ int depth;
+ int min;
+ struct C *min_occ;
+};
+
+__attribute__((noipa)) struct C *
+foo (int *node)
+{
+ struct A *p = __builtin_malloc (sizeof (struct C));
+ if (!p)
+ return 0;
+ p->next = 0;
+ /* Originally placement new. */
+ struct C *nw = (struct C *)(void *)p;
+ nw->of = node;
+ nw->parent = 0;
+ nw->prev = 0;
+ nw->next = 0;
+ nw->depth = 0;
+ nw->min_occ = nw;
+ nw->min = 0;
+ return nw;
+}
+
+int
+main ()
+{
+ int o;
+ struct C *p = foo (&o);
+ if (p)
+ {
+ if (p->of != &o || p->parent || p->prev || p->next || p->depth
+ || p->min || p->min_occ != p)
+ __builtin_abort ();
+ }
+ __builtin_free (p);
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/pr82929.c b/gcc/testsuite/gcc.dg/pr82929.c
new file mode 100644
index 00000000000..afe9394ab31
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr82929.c
@@ -0,0 +1,18 @@
+/* PR tree-optimization/82929 */
+/* { dg-do compile { target store_merge } } */
+/* { dg-options "-O2 -fdump-tree-store-merging" } */
+
+void
+foo (short *p, short *q, short *r)
+{
+ short a = q[0];
+ short b = q[1];
+ short c = ~a;
+ short d = r[0];
+ short e = r[1];
+ short f = ~b;
+ p[0] = c & d;
+ p[1] = e & f;
+}
+
+/* { dg-final { scan-tree-dump-times "Merging successful" 1 "store-merging" } } */
diff --git a/gcc/testsuite/gcc.dg/rtl/truncated-rtl-file.c b/gcc/testsuite/gcc.dg/rtl/truncated-rtl-file.c
index 4dd8214317b..fffb4d7011a 100644
--- a/gcc/testsuite/gcc.dg/rtl/truncated-rtl-file.c
+++ b/gcc/testsuite/gcc.dg/rtl/truncated-rtl-file.c
@@ -1,2 +1,2 @@
void __RTL test (void)
-{ /* { dg-error "no closing brace" } */
+{ /* { dg-error "-:no closing brace" } */
diff --git a/gcc/testsuite/gcc.dg/store_merging_13.c b/gcc/testsuite/gcc.dg/store_merging_13.c
index d4e9ad2d260..6ee0a9c2c80 100644
--- a/gcc/testsuite/gcc.dg/store_merging_13.c
+++ b/gcc/testsuite/gcc.dg/store_merging_13.c
@@ -104,6 +104,90 @@ f6 (struct S *p, struct S *q)
p->g = pg;
}
+__attribute__((noipa)) void
+f7 (struct S *__restrict p, struct S *__restrict q)
+{
+ p->a |= q->a;
+ p->b |= q->b;
+ p->c |= q->c;
+ p->d |= q->d;
+ p->e |= q->e;
+ p->f |= q->f;
+ p->g |= q->g;
+}
+
+__attribute__((noipa)) void
+f8 (struct S *__restrict p, struct S *__restrict q)
+{
+ p->a &= q->a;
+ p->b &= q->b;
+ p->c &= q->c;
+ p->d &= q->d;
+ p->e &= q->e;
+ p->f &= q->f;
+ p->g &= q->g;
+}
+
+__attribute__((noipa)) void
+f9 (struct S *__restrict p, struct S *__restrict q)
+{
+ p->a ^= q->a;
+ p->b ^= q->b;
+ p->c ^= q->c;
+ p->d ^= q->d;
+ p->e ^= q->e;
+ p->f ^= q->f;
+ p->g ^= q->g;
+}
+
+__attribute__((noipa)) void
+f10 (struct S *__restrict p, struct S *__restrict q)
+{
+ p->a = ~q->a;
+ p->b = ~q->b;
+ p->c = ~q->c;
+ p->d = ~q->d;
+ p->e = ~q->e;
+ p->f = ~q->f;
+ p->g = ~q->g;
+}
+
+__attribute__((noipa)) void
+f11 (struct S *__restrict p, struct S *__restrict q)
+{
+ p->a = p->a | (unsigned char) ~q->a;
+ p->b = p->b | (unsigned char) ~q->b;
+ p->c = p->c | (unsigned short) ~q->c;
+ p->d = p->d | (unsigned char) ~q->d;
+ p->e = p->e | (unsigned char) ~q->e;
+ p->f = p->f | (unsigned char) ~q->f;
+ p->g = p->g | (unsigned char) ~q->g;
+}
+
+__attribute__((noipa)) void
+f12 (struct S *__restrict p, struct S *__restrict q)
+{
+ p->a = p->a & (unsigned char) ~q->a;
+ p->b = p->b & (unsigned char) ~q->b;
+ p->c = p->c & (unsigned short) ~q->c;
+ p->d = p->d & (unsigned char) ~q->d;
+ p->e = p->e & (unsigned char) ~q->e;
+ p->f = p->f & (unsigned char) ~q->f;
+ p->g = p->g & (unsigned char) ~q->g;
+}
+
+__attribute__((noipa)) void
+f13 (struct S *__restrict p, struct S *__restrict q)
+{
+ p->a = p->a ^ (unsigned char) ~q->a;
+ p->b = p->b ^ (unsigned char) ~q->b;
+ p->c = p->c ^ (unsigned short) ~q->c;
+ p->d = p->d ^ (unsigned char) ~q->d;
+ p->e = p->e ^ (unsigned char) ~q->e;
+ p->f = p->f ^ (unsigned char) ~q->f;
+ p->g = p->g ^ (unsigned char) ~q->g;
+}
+
struct S s = { 20, 21, 22, 23, 24, 25, 26, 27 };
struct S t = { 0x71, 0x72, 0x7f04, 0x78, 0x31, 0x32, 0x34, 0xf1f2f3f4f5f6f7f8ULL };
struct S u = { 28, 29, 30, 31, 32, 33, 34, 35 };
@@ -151,7 +235,62 @@ main ()
|| s.e != (40 ^ 0x31) || s.f != (41 ^ 0x32)
|| s.g != (42 ^ 0x34) || s.h != 27)
__builtin_abort ();
+ f3 (&s, &v);
+ f7 (&s, &t);
+ asm volatile ("" : : : "memory");
+ if (s.a != (36 | 0x71) || s.b != (37 | 0x72)
+ || s.c != (38 | 0x7f04) || s.d != (39 | 0x78)
+ || s.e != (40 | 0x31) || s.f != (41 | 0x32)
+ || s.g != (42 | 0x34) || s.h != 27)
+ __builtin_abort ();
+ f3 (&s, &u);
+ f8 (&s, &t);
+ asm volatile ("" : : : "memory");
+ if (s.a != (28 & 0x71) || s.b != (29 & 0x72)
+ || s.c != (30 & 0x7f04) || s.d != (31 & 0x78)
+ || s.e != (32 & 0x31) || s.f != (33 & 0x32)
+ || s.g != (34 & 0x34) || s.h != 27)
+ __builtin_abort ();
+ f2 (&s, &v);
+ f9 (&s, &t);
+ asm volatile ("" : : : "memory");
+ if (s.a != (36 ^ 0x71) || s.b != (37 ^ 0x72)
+ || s.c != (38 ^ 0x7f04) || s.d != (39 ^ 0x78)
+ || s.e != (40 ^ 0x31) || s.f != (41 ^ 0x32)
+ || s.g != (42 ^ 0x34) || s.h != 27)
+ __builtin_abort ();
+ f10 (&s, &u);
+ asm volatile ("" : : : "memory");
+ if (s.a != (unsigned char) ~28 || s.b != (unsigned char) ~29
+ || s.c != (unsigned short) ~30 || s.d != (unsigned char) ~31
+ || s.e != (unsigned char) ~32 || s.f != (unsigned char) ~33
+ || s.g != (unsigned char) ~34 || s.h != 27)
+ __builtin_abort ();
+ f3 (&s, &v);
+ f11 (&s, &t);
+ asm volatile ("" : : : "memory");
+ if (s.a != (36 | (unsigned char) ~0x71) || s.b != (37 | (unsigned char) ~0x72)
+ || s.c != (38 | (unsigned short) ~0x7f04) || s.d != (39 | (unsigned char) ~0x78)
+ || s.e != (40 | (unsigned char) ~0x31) || s.f != (41 | (unsigned char) ~0x32)
+ || s.g != (42 | (unsigned char) ~0x34) || s.h != 27)
+ __builtin_abort ();
+ f3 (&s, &u);
+ f12 (&s, &t);
+ asm volatile ("" : : : "memory");
+ if (s.a != (28 & (unsigned char) ~0x71) || s.b != (29 & (unsigned char) ~0x72)
+ || s.c != (30 & (unsigned short) ~0x7f04) || s.d != (31 & (unsigned char) ~0x78)
+ || s.e != (32 & (unsigned char) ~0x31) || s.f != (33 & (unsigned char) ~0x32)
+ || s.g != (34 & (unsigned char) ~0x34) || s.h != 27)
+ __builtin_abort ();
+ f2 (&s, &v);
+ f13 (&s, &t);
+ asm volatile ("" : : : "memory");
+ if (s.a != (36 ^ (unsigned char) ~0x71) || s.b != (37 ^ (unsigned char) ~0x72)
+ || s.c != (38 ^ (unsigned short) ~0x7f04) || s.d != (39 ^ (unsigned char) ~0x78)
+ || s.e != (40 ^ (unsigned char) ~0x31) || s.f != (41 ^ (unsigned char) ~0x32)
+ || s.g != (42 ^ (unsigned char) ~0x34) || s.h != 27)
+ __builtin_abort ();
return 0;
}
-/* { dg-final { scan-tree-dump-times "Merging successful" 6 "store-merging" } } */
+/* { dg-final { scan-tree-dump-times "Merging successful" 13 "store-merging" } } */
diff --git a/gcc/testsuite/gcc.dg/store_merging_14.c b/gcc/testsuite/gcc.dg/store_merging_14.c
index 49af24951cb..3885acb10a2 100644
--- a/gcc/testsuite/gcc.dg/store_merging_14.c
+++ b/gcc/testsuite/gcc.dg/store_merging_14.c
@@ -104,6 +104,42 @@ f6 (struct S *p, struct S *q)
p->g = pg;
}
+__attribute__((noipa)) void
+f7 (struct S *__restrict p, struct S *__restrict q)
+{
+ p->a |= q->a;
+ p->b |= q->b;
+ p->c |= q->c;
+ p->d |= q->d;
+ p->e |= q->e;
+ p->f |= q->f;
+ p->g |= q->g;
+}
+
+__attribute__((noipa)) void
+f8 (struct S *__restrict p, struct S *__restrict q)
+{
+ p->a &= q->a;
+ p->b &= q->b;
+ p->c &= q->c;
+ p->d &= q->d;
+ p->e &= q->e;
+ p->f &= q->f;
+ p->g &= q->g;
+}
+
+__attribute__((noipa)) void
+f9 (struct S *__restrict p, struct S *__restrict q)
+{
+ p->a ^= q->a;
+ p->b ^= q->b;
+ p->c ^= q->c;
+ p->d ^= q->d;
+ p->e ^= q->e;
+ p->f ^= q->f;
+ p->g ^= q->g;
+}
+
struct S s = { 72, 20, 21, 73, 22, 23, 24, 25, 26, 74, 27 };
struct S t = { 75, 0x71, 0x72, 76, 0x7f04, 0x78, 0x31, 0x32, 0x34, 77, 0xf1f2f3f4f5f6f7f8ULL };
struct S u = { 78, 28, 29, 79, 30, 31, 32, 33, 34, 80, 35 };
@@ -151,7 +187,31 @@ main ()
|| s.e != (40 ^ 0x31) || s.f != (41 ^ 0x32)
|| s.g != (42 ^ 0x34) || s.k != 74 || s.h != 27)
__builtin_abort ();
+ f3 (&s, &v);
+ f7 (&s, &t);
+ asm volatile ("" : : : "memory");
+ if (s.i != 72 || s.a != (36 | 0x71) || s.b != (37 | 0x72) || s.j != 73
+ || s.c != (38 | 0x7f04) || s.d != (39 | 0x78)
+ || s.e != (40 | 0x31) || s.f != (41 | 0x32)
+ || s.g != (42 | 0x34) || s.k != 74 || s.h != 27)
+ __builtin_abort ();
+ f3 (&s, &u);
+ f8 (&s, &t);
+ asm volatile ("" : : : "memory");
+ if (s.i != 72 || s.a != (28 & 0x71) || s.b != (29 & 0x72) || s.j != 73
+ || s.c != (30 & 0x7f04) || s.d != (31 & 0x78)
+ || s.e != (32 & 0x31) || s.f != (33 & 0x32)
+ || s.g != (34 & 0x34) || s.k != 74 || s.h != 27)
+ __builtin_abort ();
+ f2 (&s, &v);
+ f9 (&s, &t);
+ asm volatile ("" : : : "memory");
+ if (s.i != 72 || s.a != (36 ^ 0x71) || s.b != (37 ^ 0x72) || s.j != 73
+ || s.c != (38 ^ 0x7f04) || s.d != (39 ^ 0x78)
+ || s.e != (40 ^ 0x31) || s.f != (41 ^ 0x32)
+ || s.g != (42 ^ 0x34) || s.k != 74 || s.h != 27)
+ __builtin_abort ();
return 0;
}
-/* { dg-final { scan-tree-dump-times "Merging successful" 6 "store-merging" } } */
+/* { dg-final { scan-tree-dump-times "Merging successful" 9 "store-merging" } } */
diff --git a/gcc/testsuite/gcc.dg/store_merging_15.c b/gcc/testsuite/gcc.dg/store_merging_15.c
new file mode 100644
index 00000000000..57075ebea28
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/store_merging_15.c
@@ -0,0 +1,56 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target store_merge } */
+/* { dg-options "-O2 -fdump-tree-store-merging" } */
+
+struct S { unsigned char a, b; unsigned short c; unsigned char d, e, f, g; unsigned long long h; };
+
+__attribute__((noipa)) void
+f1 (struct S *__restrict p, struct S *__restrict q)
+{
+ p->a = ~q->a;
+ p->b = q->b;
+ p->c = ~q->c;
+ p->d = ~q->d;
+ p->e = q->e;
+ p->f = ~q->f;
+ p->g = ~q->g;
+}
+
+__attribute__((noipa)) void
+f2 (struct S *__restrict p, struct S *__restrict q)
+{
+ p->a = ~(unsigned char) (p->a & q->a);
+ p->b = ((unsigned char) ~p->b) & q->b;
+ p->c = p->c & (unsigned short) ~q->c;
+ p->d = p->d & q->d;
+ p->e = p->e & (unsigned char) ~q->e;
+ p->f = p->f & (unsigned char) ~q->f;
+ p->g = ~(unsigned char) (p->g & q->g);
+}
+
+struct S s = { 20, 21, 22, 23, 24, 25, 26, 27 };
+struct S u = { 28, 29, 30, 31, 32, 33, 34, 35 };
+struct S v = { 36, 37, 38, 39, 40, 41, 42, 43 };
+
+int
+main ()
+{
+ asm volatile ("" : : : "memory");
+ f1 (&s, &u);
+ asm volatile ("" : : : "memory");
+ if (s.a != (unsigned char) ~28 || s.b != 29
+ || s.c != (unsigned short) ~30 || s.d != (unsigned char) ~31
+ || s.e != 32 || s.f != (unsigned char) ~33 || s.g != (unsigned char) ~34
+ || s.h != 27)
+ __builtin_abort ();
+ f2 (&u, &v);
+ asm volatile ("" : : : "memory");
+ if (u.a != (unsigned char) ~(28 & 36) || u.b != (((unsigned char) ~29) & 37)
+ || u.c != (30 & (unsigned short) ~38) || u.d != (31 & 39)
+ || u.e != (32 & (unsigned char) ~40) || u.f != (33 & (unsigned char) ~41)
+ || u.g != (unsigned char) ~(34 & 42) || u.h != 35)
+ __builtin_abort ();
+ return 0;
+}
+
+/* { dg-final { scan-tree-dump-times "Merging successful" 2 "store-merging" } } */
diff --git a/gcc/testsuite/gcc.dg/strlenopt-33g.c b/gcc/testsuite/gcc.dg/strlenopt-33g.c
index 0223f82f1d1..7d24d2bfc32 100644
--- a/gcc/testsuite/gcc.dg/strlenopt-33g.c
+++ b/gcc/testsuite/gcc.dg/strlenopt-33g.c
@@ -1,5 +1,4 @@
/* { dg-do run { target *-*-linux* *-*-gnu* } } */
-/* { dg-do run } */
/* { dg-options "-O2 -fdump-tree-strlen" } */
#define USE_GNU
diff --git a/gcc/testsuite/gcc.dg/strncpy-fix-1.c b/gcc/testsuite/gcc.dg/strncpy-fix-1.c
index b8bc916e008..b4fd4aa4877 100644
--- a/gcc/testsuite/gcc.dg/strncpy-fix-1.c
+++ b/gcc/testsuite/gcc.dg/strncpy-fix-1.c
@@ -1,7 +1,7 @@
/* Test that use of strncpy does not result in a "value computed is
not used" warning. */
/* { dg-do compile } */
-/* { dg-options "-O2 -Wall" } */
+/* { dg-options "-O2 -Wall -Wno-stringop-truncation" } */
#include <string.h>
void
diff --git a/gcc/testsuite/gcc.dg/torture/Wsizeof-pointer-memaccess1.c b/gcc/testsuite/gcc.dg/torture/Wsizeof-pointer-memaccess1.c
index f9bc57c4e86..cd9dc72decb 100644
--- a/gcc/testsuite/gcc.dg/torture/Wsizeof-pointer-memaccess1.c
+++ b/gcc/testsuite/gcc.dg/torture/Wsizeof-pointer-memaccess1.c
@@ -1,6 +1,6 @@
/* Test -Wsizeof-pointer-memaccess warnings. */
/* { dg-do compile } */
-/* { dg-options "-Wall -Wno-sizeof-array-argument -Wno-stringop-overflow" } */
+/* { dg-options "-Wall -Wno-sizeof-array-argument -Wno-stringop-overflow -Wno-stringop-truncation" } */
/* Test just twice, once with -O0 non-fortified, once with -O2 fortified. */
/* { dg-skip-if "" { *-*-* } { "*" } { "-O0" "-O2" } } */
/* { dg-skip-if "" { *-*-* } { "-flto" } { "" } } */
@@ -704,12 +704,17 @@ f4 (char *x, char **y, int z, char w[64])
strncat (w, s2, sizeof (w)); /* { dg-warning "call is the same expression as the destination; did you mean to provide an explicit length" } */
stpncpy (w, s1, sizeof (w)); /* { dg-warning "call is the same expression as the destination; did you mean to provide an explicit length" } */
- /* These are correct, no warning. */
+ /* These are pointless when the destination is large enough, and
+ cause overflow otherwise. If the copies are guaranteed to be
+ safe the calls might as well be replaced by strcat(), strcpy(),
+ or memcpy(). */
const char s3[] = "foobarbaz";
const char s4[] = "abcde12345678";
- strncpy (x, s3, sizeof (s3));
- strncat (x, s4, sizeof (s4));
- stpncpy (x, s3, sizeof (s3));
+ strncpy (x, s3, sizeof (s3)); /* { dg-warning "call is the same expression as the source; did you mean to use the size of the destination?" } */
+ strncat (x, s4, sizeof (s4)); /* { dg-warning "call is the same expression as the source; did you mean to use the size of the destination?" } */
+ stpncpy (x, s3, sizeof (s3)); /* { dg-warning "call is the same expression as the source; did you mean to use the size of the destination?" } */
+
+ /* These are correct, no warning. */
y[1] = strndup (s3, sizeof (s3));
z += strncmp (s3, s4, sizeof (s3));
z += strncmp (s3, s4, sizeof (s4));
diff --git a/gcc/testsuite/gcc.dg/torture/pr60092.c b/gcc/testsuite/gcc.dg/torture/pr60092.c
index c23516c73b4..74e7c174a83 100644
--- a/gcc/testsuite/gcc.dg/torture/pr60092.c
+++ b/gcc/testsuite/gcc.dg/torture/pr60092.c
@@ -4,7 +4,6 @@
/* { dg-skip-if "No undefined weak" { nvptx-*-* } } */
/* { dg-additional-options "-Wl,-undefined,dynamic_lookup" { target *-*-darwin* } } */
/* { dg-additional-options "-Wl,-flat_namespace" { target *-*-darwin[89]* } } */
-/* { dg-xfail-run-if "posix_memalign modifies first arg on error" { *-*-solaris2.11* } { "-O0" } } */
typedef __SIZE_TYPE__ size_t;
extern int posix_memalign(void **memptr, size_t alignment, size_t size) __attribute__((weak));
diff --git a/gcc/testsuite/gcc.dg/torture/pr63554.c b/gcc/testsuite/gcc.dg/torture/pr63554.c
index fa06c5a55d1..9162266da2c 100644
--- a/gcc/testsuite/gcc.dg/torture/pr63554.c
+++ b/gcc/testsuite/gcc.dg/torture/pr63554.c
@@ -1,4 +1,5 @@
-/* { dg-do compile } */
+/* PR c/63554 - ice in "execute_todo, at passes.c:1797" with -O3
+ { dg-do compile } */
char *a;
void
@@ -7,3 +8,5 @@ nssutil_ReadSecmodDB (void)
long b = __builtin_object_size (0, 0);
a = __builtin___strncat_chk (a, " ", 1, b);
}
+
+/* { dg-prune-output "\\\[-Wstringop-overflow=]" } */
diff --git a/gcc/testsuite/gcc.dg/torture/pr78305.c b/gcc/testsuite/gcc.dg/torture/pr78305.c
index ccb8c6faa78..36d3620179e 100644
--- a/gcc/testsuite/gcc.dg/torture/pr78305.c
+++ b/gcc/testsuite/gcc.dg/torture/pr78305.c
@@ -1,5 +1,5 @@
-/* { dg-require-effective-target int32plus } */
/* { dg-do run } */
+/* { dg-require-effective-target int32plus } */
int main ()
{
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/bitops-1.c b/gcc/testsuite/gcc.dg/tree-ssa/bitops-1.c
new file mode 100644
index 00000000000..cf2823deb62
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/bitops-1.c
@@ -0,0 +1,72 @@
+/* { dg-do run } */
+/* { dg-options "-O -fdump-tree-optimized-raw" } */
+
+#define DECLS(n,VOL) \
+__attribute__((noinline,noclone)) \
+int f##n(int A,int B){ \
+ VOL int C = A & ~B; \
+ VOL int D = A ^ B; \
+ return C | D; \
+} \
+__attribute__((noinline,noclone)) \
+int g##n(int A,int B){ \
+ VOL int C = A & ~B; \
+ return C ^ ~A; \
+} \
+__attribute__((noinline,noclone)) \
+int h##n(int A,int B){ \
+ VOL int C = A | B; \
+ VOL int D = A ^ B; \
+ return C & ~D; \
+} \
+__attribute__((noinline,noclone)) \
+int i##n(int A,int B){ \
+ VOL int C = A ^ B; \
+ return A | ~C; \
+} \
+__attribute__((noinline,noclone)) \
+int J##n(int A,int B){ \
+ VOL int C = A | B; \
+ VOL int D = A & B; \
+ return C | D; \
+} \
+__attribute__((noinline,noclone)) \
+int k##n(int A,int B){ \
+ VOL int C = A & B; \
+ VOL int D = A ^ B; \
+ return C | ~D; \
+} \
+__attribute__((noinline,noclone)) \
+int l##n(int A,int B){ \
+ VOL int C = A & ~B; \
+ return ~C; \
+} \
+__attribute__((noinline,noclone)) \
+int m##n(int A,int B){ \
+ VOL int C = A & B; \
+ VOL int D = A ^ B; \
+ return C ^ D; \
+}
+
+DECLS(0,)
+DECLS(1,volatile)
+
+int main(){
+ for(int A = 0; A <= 1; ++A)
+ for(int B = 0; B <= 1; ++B)
+ {
+ if (f0 (A, B) != f1 (A, B)) __builtin_abort();
+ if (g0 (A, B) != g1 (A, B)) __builtin_abort();
+ if (h0 (A, B) != h1 (A, B)) __builtin_abort();
+ if (i0 (A, B) != i1 (A, B)) __builtin_abort();
+ if (J0 (A, B) != J1 (A, B)) __builtin_abort();
+ if (k0 (A, B) != k1 (A, B)) __builtin_abort();
+ if (l0 (A, B) != l1 (A, B)) __builtin_abort();
+ if (m0 (A, B) != m1 (A, B)) __builtin_abort();
+ }
+}
+
+/* { dg-final { scan-tree-dump-times "bit_not_expr" 12 "optimized"} } */
+/* { dg-final { scan-tree-dump-times "bit_and_expr" 9 "optimized"} } */
+/* { dg-final { scan-tree-dump-times "bit_ior_expr" 10 "optimized"} } */
+/* { dg-final { scan-tree-dump-times "bit_xor_expr" 9 "optimized"} } */
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/fnsplit-1.c b/gcc/testsuite/gcc.dg/tree-ssa/fnsplit-1.c
index ad5fc101cd8..1b9696dcb11 100644
--- a/gcc/testsuite/gcc.dg/tree-ssa/fnsplit-1.c
+++ b/gcc/testsuite/gcc.dg/tree-ssa/fnsplit-1.c
@@ -19,4 +19,5 @@ main(void)
return 0;
}
/* { dg-final { scan-tree-dump-times "Splitting function at:" 1 "fnsplit"} } */
+/* { dg-final { scan-tree-dump-times "Invalid sum" 0 "fnsplit"} } */
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/fnsplit-2.c b/gcc/testsuite/gcc.dg/tree-ssa/fnsplit-2.c
new file mode 100644
index 00000000000..c00213cc182
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/fnsplit-2.c
@@ -0,0 +1,33 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-fnsplit-blocks-details" } */
+void q (void);
+int b;
+void test (void);
+void
+split_me (int *a)
+{
+ if (__builtin_expect (a==0, 0))
+ do
+ {
+ test();
+ test();
+ test();
+ test();
+ test();
+ }
+ while (b);
+ else
+ q();
+}
+
+int
+main(void)
+{
+ int i;
+ for (i = 0; i < 1000; i++)
+ split_me(&i);
+ return 0;
+}
+
+/* { dg-final { scan-tree-dump-times "Splitting function at:" 1 "fnsplit"} } */
+/* { dg-final { scan-tree-dump-times "Invalid sum" 0 "fnsplit"} } */
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/negminus.c b/gcc/testsuite/gcc.dg/tree-ssa/negminus.c
new file mode 100644
index 00000000000..f857a007983
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/negminus.c
@@ -0,0 +1,21 @@
+/* { dg-do compile } */
+/* { dg-options "-O -fno-rounding-math -fno-signed-zeros -fdump-tree-optimized-raw" } */
+
+double f(double a, double b){
+ double c = a - b;
+ return -c;
+}
+
+int g(unsigned x){
+ unsigned y = ~x;
+ int z = (int) y;
+ return -z;
+}
+
+unsigned h(unsigned a, unsigned b, unsigned c){
+ unsigned d = b - c;
+ unsigned e = a + d;
+ return -e;
+}
+
+/* { dg-final { scan-tree-dump-not "negate_expr" "optimized"} } */
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr82726.c b/gcc/testsuite/gcc.dg/tree-ssa/pr82726.c
new file mode 100644
index 00000000000..22bc59dacc8
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/pr82726.c
@@ -0,0 +1,26 @@
+/* { dg-do compile } */
+/* { dg-options "-O3 --param tree-reassoc-width=4" } */
+/* { dg-additional-options "-mavx2" { target { x86_64-*-* i?86-*-* } } } */
+
+#define N 40
+#define M 128
+unsigned int in[N+M];
+unsigned short out[N];
+
+/* Outer-loop vectorization. */
+
+void
+foo (){
+ int i,j;
+ unsigned int diff;
+
+ for (i = 0; i < N; i++) {
+ diff = 0;
+ for (j = 0; j < M; j+=8) {
+ diff += in[j+i];
+ }
+ out[i]=(unsigned short)diff;
+ }
+
+ return;
+}
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/vrp101.c b/gcc/testsuite/gcc.dg/tree-ssa/vrp101.c
index aad41f91f47..95b4d2b05a8 100644
--- a/gcc/testsuite/gcc.dg/tree-ssa/vrp101.c
+++ b/gcc/testsuite/gcc.dg/tree-ssa/vrp101.c
@@ -10,4 +10,4 @@ int main ()
return 0;
}
-/* { dg-final { scan-tree-dump "<bb 2> \\\[\[0-9.\]+%\\\] \\\[count: \[0-9INV\]*\\\]:\[\n\r \]*return 0;" "optimized" { xfail aarch64*-*-* } } } */
+/* { dg-final { scan-tree-dump "<bb 2> \\\[local count: \[0-9INV\]*\\\]:\[\n\r \]*return 0;" "optimized" } } */
diff --git a/gcc/testsuite/gcc.dg/unclosed-init.c b/gcc/testsuite/gcc.dg/unclosed-init.c
index c0e4dd8da9b..3deb88e777c 100644
--- a/gcc/testsuite/gcc.dg/unclosed-init.c
+++ b/gcc/testsuite/gcc.dg/unclosed-init.c
@@ -1,3 +1,3 @@
int unclosed[] = { /* { dg-message "18: to match this '.'" } */
42
- /* { dg-error "0: expected '.' at end of input" } */
+ /* { dg-error "-: expected '.' at end of input" } */
diff --git a/gcc/testsuite/gcc.dg/vect/bb-slp-pr65935.c b/gcc/testsuite/gcc.dg/vect/bb-slp-pr65935.c
index e78dc46611e..0e4f1a71b6b 100644
--- a/gcc/testsuite/gcc.dg/vect/bb-slp-pr65935.c
+++ b/gcc/testsuite/gcc.dg/vect/bb-slp-pr65935.c
@@ -59,6 +59,4 @@ int main()
/* We should also be able to use 2-lane SLP to initialize the real and
imaginary components in the first loop of main. */
-/* For targets with gather/scatter we can vectorize the unrolled loop
- directly, before SLP runs. That's probably a pessimisation though. */
-/* { dg-final { scan-tree-dump-times "basic block vectorized" 2 "slp1" { xfail vect_gather_scatter } } } */
+/* { dg-final { scan-tree-dump-times "basic block vectorized" 2 "slp1" } } */
diff --git a/gcc/testsuite/gcc.dg/vect/fast-math-slp-38.c b/gcc/testsuite/gcc.dg/vect/fast-math-slp-38.c
index 99ecb793973..7c7acd5bab6 100644
--- a/gcc/testsuite/gcc.dg/vect/fast-math-slp-38.c
+++ b/gcc/testsuite/gcc.dg/vect/fast-math-slp-38.c
@@ -18,5 +18,4 @@ foo (void)
}
/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" } } */
-/* Requires VF <= 4. */
-/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 1 "vect" { xfail { aarch64_sve && { ! vect256 } } } } } */
+/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 1 "vect" } } */
diff --git a/gcc/testsuite/gcc.dg/vect/no-fast-math-vect16.c b/gcc/testsuite/gcc.dg/vect/no-fast-math-vect16.c
index a9a8b864e66..6c97e5f9c12 100644
--- a/gcc/testsuite/gcc.dg/vect/no-fast-math-vect16.c
+++ b/gcc/testsuite/gcc.dg/vect/no-fast-math-vect16.c
@@ -34,4 +34,4 @@ int main (void)
}
/* Requires fast-math. */
-/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" { xfail { ! vect_ieee_add_reduc } } } } */
+/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" { xfail { ! vect_fold_left_plus } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/pr25413a.c b/gcc/testsuite/gcc.dg/vect/pr25413a.c
index a80ca868112..e444b2c3e8e 100644
--- a/gcc/testsuite/gcc.dg/vect/pr25413a.c
+++ b/gcc/testsuite/gcc.dg/vect/pr25413a.c
@@ -123,6 +123,7 @@ int main (void)
return 0;
}
-/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" } } */
+/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" { target { ! vect_scatter_store } } } } */
+/* { dg-final { scan-tree-dump-times "vectorized 2 loops" 1 "vect" { target vect_scatter_store } } } */
/* { dg-final { scan-tree-dump-times "vector alignment may not be reachable" 1 "vect" { target { ! vector_alignment_reachable } } } } */
/* { dg-final { scan-tree-dump-times "Alignment of access forced using versioning" 1 "vect" { target { ! vector_alignment_reachable } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/pr45752.c b/gcc/testsuite/gcc.dg/vect/pr45752.c
index 755205b275a..4ddac7ad509 100644
--- a/gcc/testsuite/gcc.dg/vect/pr45752.c
+++ b/gcc/testsuite/gcc.dg/vect/pr45752.c
@@ -158,4 +158,4 @@ int main (int argc, const char* argv[])
/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" } } */
/* { dg-final { scan-tree-dump-times "gaps requires scalar epilogue loop" 0 "vect" } } */
-/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 2 "vect" { xfail { vect_gather_scatter && { ! vect_perm5_int } } } } } */
+/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 2 "vect" } } */
diff --git a/gcc/testsuite/gcc.dg/vect/pr65947-1.c b/gcc/testsuite/gcc.dg/vect/pr65947-1.c
index bb886137dfd..bf6c098b3ee 100644
--- a/gcc/testsuite/gcc.dg/vect/pr65947-1.c
+++ b/gcc/testsuite/gcc.dg/vect/pr65947-1.c
@@ -41,4 +41,4 @@ main (void)
}
/* { dg-final { scan-tree-dump-times "LOOP VECTORIZED" 2 "vect" } } */
-/* { dg-final { scan-tree-dump-times "Optimizing condition reduction" 4 "vect" } } */
+/* { dg-final { scan-tree-dump-times "optimizing condition reduction" 4 "vect" } } */
diff --git a/gcc/testsuite/gcc.dg/vect/pr65947-10.c b/gcc/testsuite/gcc.dg/vect/pr65947-10.c
index 6016cefc6a1..b58b3456bd4 100644
--- a/gcc/testsuite/gcc.dg/vect/pr65947-10.c
+++ b/gcc/testsuite/gcc.dg/vect/pr65947-10.c
@@ -42,6 +42,6 @@ main (void)
}
/* { dg-final { scan-tree-dump-times "LOOP VECTORIZED" 2 "vect" } } */
-/* { dg-final { scan-tree-dump-times "Optimizing condition reduction with CLASTB" 4 "vect" { target vect_last_reduc } } } */
-/* { dg-final { scan-tree-dump-not "Optimizing condition reduction" "vect" { target { ! vect_last_reduc } } } } */
+/* { dg-final { scan-tree-dump-times "optimizing condition reduction with FOLD_EXTRACT_LAST" 4 "vect" { target vect_fold_extract_last } } } */
+/* { dg-final { scan-tree-dump-not "optimizing condition reduction" "vect" { target { ! vect_fold_extract_last } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/pr65947-12.c b/gcc/testsuite/gcc.dg/vect/pr65947-12.c
index 973105cc251..1c959e16ab8 100644
--- a/gcc/testsuite/gcc.dg/vect/pr65947-12.c
+++ b/gcc/testsuite/gcc.dg/vect/pr65947-12.c
@@ -42,5 +42,5 @@ main (void)
}
/* { dg-final { scan-tree-dump-times "LOOP VECTORIZED" 2 "vect" } } */
-/* { dg-final { scan-tree-dump-times "Optimizing condition reduction with CLASTB" 4 "vect" { target vect_last_reduc } } } */
-/* { dg-final { scan-tree-dump-not "Optimizing condition reduction" "vect" { target { ! vect_last_reduc } } } } */
+/* { dg-final { scan-tree-dump-times "optimizing condition reduction with FOLD_EXTRACT_LAST" 4 "vect" { target vect_fold_extract_last } } } */
+/* { dg-final { scan-tree-dump-not "optimizing condition reduction" "vect" { target { ! vect_fold_extract_last } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/pr65947-13.c b/gcc/testsuite/gcc.dg/vect/pr65947-13.c
index f0735072df0..fc88cbe6227 100644
--- a/gcc/testsuite/gcc.dg/vect/pr65947-13.c
+++ b/gcc/testsuite/gcc.dg/vect/pr65947-13.c
@@ -42,5 +42,5 @@ main (void)
}
/* { dg-final { scan-tree-dump-times "LOOP VECTORIZED" 2 "vect" } } */
-/* { dg-final { scan-tree-dump-times "Optimizing condition reduction with CLASTB" 4 "vect" { target vect_last_reduc } } } */
-/* { dg-final { scan-tree-dump-not "Optimizing condition reduction" "vect" { target { ! vect_last_reduc } } } } */
+/* { dg-final { scan-tree-dump-times "optimizing condition reduction with FOLD_EXTRACT_LAST" 4 "vect" { target vect_fold_extract_last } } } */
+/* { dg-final { scan-tree-dump-not "optimizing condition reduction" "vect" { target { ! vect_fold_extract_last } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/pr65947-14.c b/gcc/testsuite/gcc.dg/vect/pr65947-14.c
index c118f2a1b6e..194e40f280e 100644
--- a/gcc/testsuite/gcc.dg/vect/pr65947-14.c
+++ b/gcc/testsuite/gcc.dg/vect/pr65947-14.c
@@ -1,4 +1,3 @@
-/* { dg-do run { xfail { ! vect_last_reduc } } } */
/* { dg-require-effective-target vect_condition } */
#include "tree-vect.h"
@@ -42,5 +41,5 @@ main (void)
}
/* { dg-final { scan-tree-dump-times "LOOP VECTORIZED" 2 "vect" } } */
-/* { dg-final { scan-tree-dump-times "Optimizing condition reduction based on integer induction" 4 "vect" { target { ! vect_last_reduc } } } }*/
-/* { dg-final { scan-tree-dump-times "Optimizing condition reduction with CLASTB" 4 "vect" { target vect_last_reduc } } } */
+/* { dg-final { scan-tree-dump-times "optimizing condition reduction based on integer induction" 4 "vect" { target { ! vect_fold_extract_last } } } }*/
+/* { dg-final { scan-tree-dump-times "optimizing condition reduction with FOLD_EXTRACT_LAST" 4 "vect" { target vect_fold_extract_last } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/pr65947-2.c b/gcc/testsuite/gcc.dg/vect/pr65947-2.c
index 0dbf9e5622b..569da87ceaa 100644
--- a/gcc/testsuite/gcc.dg/vect/pr65947-2.c
+++ b/gcc/testsuite/gcc.dg/vect/pr65947-2.c
@@ -42,5 +42,5 @@ main (void)
}
/* { dg-final { scan-tree-dump-times "LOOP VECTORIZED" 2 "vect" } } */
-/* { dg-final { scan-tree-dump-times "Optimizing condition reduction with CLASTB" 4 "vect" { target vect_last_reduc } } } */
-/* { dg-final { scan-tree-dump-not "Optimizing condition reduction" "vect" { target { ! vect_last_reduc } } } } */
+/* { dg-final { scan-tree-dump-times "optimizing condition reduction with FOLD_EXTRACT_LAST" 4 "vect" { target vect_fold_extract_last } } } */
+/* { dg-final { scan-tree-dump-not "optimizing condition reduction" "vect" { target { ! vect_fold_extract_last } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/pr65947-3.c b/gcc/testsuite/gcc.dg/vect/pr65947-3.c
index ba5b9c2c76a..05c266686b0 100644
--- a/gcc/testsuite/gcc.dg/vect/pr65947-3.c
+++ b/gcc/testsuite/gcc.dg/vect/pr65947-3.c
@@ -52,5 +52,5 @@ main (void)
}
/* { dg-final { scan-tree-dump-times "LOOP VECTORIZED" 2 "vect" } } */
-/* { dg-final { scan-tree-dump-times "Optimizing condition reduction with CLASTB" 4 "vect" { target vect_last_reduc } } } */
-/* { dg-final { scan-tree-dump-not "Optimizing condition reduction" "vect" { target { ! vect_last_reduc } } } } */
+/* { dg-final { scan-tree-dump-times "optimizing condition reduction with FOLD_EXTRACT_LAST" 4 "vect" { target vect_fold_extract_last } } } */
+/* { dg-final { scan-tree-dump-not "optimizing condition reduction" "vect" { target { ! vect_fold_extract_last } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/pr65947-4.c b/gcc/testsuite/gcc.dg/vect/pr65947-4.c
index a6f92d9757c..0fa50cef31f 100644
--- a/gcc/testsuite/gcc.dg/vect/pr65947-4.c
+++ b/gcc/testsuite/gcc.dg/vect/pr65947-4.c
@@ -41,5 +41,5 @@ main (void)
}
/* { dg-final { scan-tree-dump-times "LOOP VECTORIZED" 2 "vect" } } */
-/* { dg-final { scan-tree-dump-times "Optimizing condition reduction" 4 "vect" } } */
+/* { dg-final { scan-tree-dump-times "optimizing condition reduction" 4 "vect" } } */
diff --git a/gcc/testsuite/gcc.dg/vect/pr65947-5.c b/gcc/testsuite/gcc.dg/vect/pr65947-5.c
index 709f17f80a4..15f5ea8d8fa 100644
--- a/gcc/testsuite/gcc.dg/vect/pr65947-5.c
+++ b/gcc/testsuite/gcc.dg/vect/pr65947-5.c
@@ -50,8 +50,8 @@ main (void)
return 0;
}
-/* { dg-final { scan-tree-dump-times "LOOP VECTORIZED" 1 "vect" { target { ! vect_last_reduc } } } } */
-/* { dg-final { scan-tree-dump-times "LOOP VECTORIZED" 2 "vect" { target vect_last_reduc } } } */
-/* { dg-final { scan-tree-dump "loop size is greater than data size" "vect" { xfail vect_last_reduc } } } */
-/* { dg-final { scan-tree-dump-times "Optimizing condition reduction with CLASTB" 4 "vect" { target vect_last_reduc } } } */
-/* { dg-final { scan-tree-dump-not "Optimizing condition reduction" "vect" { target { ! vect_last_reduc } } } } */
+/* { dg-final { scan-tree-dump-times "LOOP VECTORIZED" 1 "vect" { target { ! vect_fold_extract_last } } } } */
+/* { dg-final { scan-tree-dump-times "LOOP VECTORIZED" 2 "vect" { target vect_fold_extract_last } } } */
+/* { dg-final { scan-tree-dump "loop size is greater than data size" "vect" { xfail vect_fold_extract_last } } } */
+/* { dg-final { scan-tree-dump-times "optimizing condition reduction with FOLD_EXTRACT_LAST" 4 "vect" { target vect_fold_extract_last } } } */
+/* { dg-final { scan-tree-dump-not "optimizing condition reduction" "vect" { target { ! vect_fold_extract_last } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/pr65947-6.c b/gcc/testsuite/gcc.dg/vect/pr65947-6.c
index 7a93326582b..1c760366e71 100644
--- a/gcc/testsuite/gcc.dg/vect/pr65947-6.c
+++ b/gcc/testsuite/gcc.dg/vect/pr65947-6.c
@@ -41,5 +41,5 @@ main (void)
}
/* { dg-final { scan-tree-dump-times "LOOP VECTORIZED" 2 "vect" } } */
-/* { dg-final { scan-tree-dump-times "Optimizing condition reduction with CLASTB" 4 "vect" { target vect_last_reduc } } } */
-/* { dg-final { scan-tree-dump-not "Optimizing condition reduction" "vect" { target { ! vect_last_reduc } } } } */
+/* { dg-final { scan-tree-dump-times "optimizing condition reduction with FOLD_EXTRACT_LAST" 4 "vect" { target vect_fold_extract_last } } } */
+/* { dg-final { scan-tree-dump-not "optimizing condition reduction" "vect" { target { ! vect_fold_extract_last } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/pr65947-9.c b/gcc/testsuite/gcc.dg/vect/pr65947-9.c
index 8ef154d1751..49dc7cb9ed2 100644
--- a/gcc/testsuite/gcc.dg/vect/pr65947-9.c
+++ b/gcc/testsuite/gcc.dg/vect/pr65947-9.c
@@ -45,8 +45,8 @@ main ()
return 0;
}
-/* { dg-final { scan-tree-dump-not "LOOP VECTORIZED" "vect" { target { ! vect_last_reduc } } } } */
-/* { dg-final { scan-tree-dump-times "LOOP VECTORIZED" 1 "vect" { target vect_last_reduc } } } */
-/* { dg-final { scan-tree-dump "loop size is greater than data size" "vect" { target { ! vect_last_reduc } } } } */
-/* { dg-final { scan-tree-dump-times "Optimizing condition reduction with CLASTB" 2 "vect" { target vect_last_reduc } } } */
-/* { dg-final { scan-tree-dump-not "Optimizing condition reduction" "vect" { target { ! vect_last_reduc } } } } */
+/* { dg-final { scan-tree-dump-not "LOOP VECTORIZED" "vect" { target { ! vect_fold_extract_last } } } } */
+/* { dg-final { scan-tree-dump-times "LOOP VECTORIZED" 1 "vect" { target vect_fold_extract_last } } } */
+/* { dg-final { scan-tree-dump "loop size is greater than data size" "vect" { target { ! vect_fold_extract_last } } } } */
+/* { dg-final { scan-tree-dump-times "optimizing condition reduction with FOLD_EXTRACT_LAST" 2 "vect" { target vect_fold_extract_last } } } */
+/* { dg-final { scan-tree-dump-not "optimizing condition reduction" "vect" { target { ! vect_fold_extract_last } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/pr79920.c b/gcc/testsuite/gcc.dg/vect/pr79920.c
index b2640e83091..3ad68788f0a 100644
--- a/gcc/testsuite/gcc.dg/vect/pr79920.c
+++ b/gcc/testsuite/gcc.dg/vect/pr79920.c
@@ -41,5 +41,5 @@ int main()
return 0;
}
-/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" { target { { vect_double && { ! vect_ieee_add_reduc } } && { vect_perm && vect_hw_misalign } } } } } */
-/* { dg-final { scan-tree-dump-times "vectorized 2 loops" 1 "vect" { target { { vect_double && vect_ieee_add_reduc } && { vect_perm && vect_hw_misalign } } } } } */
+/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" { target { { vect_double && { ! vect_fold_left_plus } } && { vect_perm && vect_hw_misalign } } } } } */
+/* { dg-final { scan-tree-dump-times "vectorized 2 loops" 1 "vect" { target { { vect_double && vect_fold_left_plus } && { vect_perm && vect_hw_misalign } } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/slp-13-big-array.c b/gcc/testsuite/gcc.dg/vect/slp-13-big-array.c
index b553b61cc5a..a16656ace00 100644
--- a/gcc/testsuite/gcc.dg/vect/slp-13-big-array.c
+++ b/gcc/testsuite/gcc.dg/vect/slp-13-big-array.c
@@ -131,8 +131,7 @@ int main (void)
return 0;
}
-/* { dg-final { scan-tree-dump-times "vectorized 2 loops" 1 "vect" { target { { vect_interleave && vect_extract_even_odd } && { { ! vect_pack_trunc } && { ! vect_gather_scatter } } } } } } */
-/* { dg-final { scan-tree-dump-times "vectorized 3 loops" 1 "vect" { target vect_gather_scatter } } } */
+/* { dg-final { scan-tree-dump-times "vectorized 2 loops" 1 "vect" { target { { vect_interleave && vect_extract_even_odd } && { ! vect_pack_trunc } } } } } */
/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 2 "vect" { target { ! vect_pack_trunc } } } } */
/* { dg-final { scan-tree-dump-times "vectorized 3 loops" 1 "vect" { target { { vect_interleave && vect_extract_even_odd } && vect_pack_trunc } } } } */
/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 3 "vect" { target vect_pack_trunc xfail vect_variable_length } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/slp-13.c b/gcc/testsuite/gcc.dg/vect/slp-13.c
index 57dc28bafe3..8769d62cfd4 100644
--- a/gcc/testsuite/gcc.dg/vect/slp-13.c
+++ b/gcc/testsuite/gcc.dg/vect/slp-13.c
@@ -125,8 +125,7 @@ int main (void)
return 0;
}
-/* { dg-final { scan-tree-dump-times "vectorized 2 loops" 1 "vect" { target { { vect_interleave && vect_extract_even_odd } && { { ! vect_pack_trunc } && { ! vect_gather_scatter } } } } } } */
-/* { dg-final { scan-tree-dump-times "vectorized 3 loops" 1 "vect" { target vect_gather_scatter } } } */
+/* { dg-final { scan-tree-dump-times "vectorized 2 loops" 1 "vect" { target { { vect_interleave && vect_extract_even_odd } && { ! vect_pack_trunc } } } } } */
/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 2 "vect" { target { ! vect_pack_trunc } } } } */
/* { dg-final { scan-tree-dump-times "vectorized 3 loops" 1 "vect" { target { { vect_interleave && vect_extract_even_odd } && vect_pack_trunc } } } } */
/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 3 "vect" { target vect_pack_trunc xfail vect_variable_length } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/slp-16.c b/gcc/testsuite/gcc.dg/vect/slp-16.c
index a19deb92552..a7da9932c54 100644
--- a/gcc/testsuite/gcc.dg/vect/slp-16.c
+++ b/gcc/testsuite/gcc.dg/vect/slp-16.c
@@ -66,5 +66,5 @@ int main (void)
}
/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" { target vect_int_mult } } } */
-/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 2 "vect" { target vect_int_mult xfail { vect_variable_length && vect_gather_scatter } } } } */
+/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 2 "vect" { target vect_int_mult } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/slp-19c.c b/gcc/testsuite/gcc.dg/vect/slp-19c.c
index cda6a096332..32566cb5e13 100644
--- a/gcc/testsuite/gcc.dg/vect/slp-19c.c
+++ b/gcc/testsuite/gcc.dg/vect/slp-19c.c
@@ -103,7 +103,5 @@ int main (void)
return 0;
}
-/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" { target { ! vect_gather } } } } */
-/* { dg-final { scan-tree-dump-times "vectorized 2 loops" 1 "vect" { target vect_gather } } } */
-/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 1 "vect" { target { ! vect_gather } } } } */
-/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 2 "vect" { target vect_gather } } } */
+/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" } } */
+/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 1 "vect" } } */
diff --git a/gcc/testsuite/gcc.dg/vect/slp-23.c b/gcc/testsuite/gcc.dg/vect/slp-23.c
index 88708e645d6..3cda497db0c 100644
--- a/gcc/testsuite/gcc.dg/vect/slp-23.c
+++ b/gcc/testsuite/gcc.dg/vect/slp-23.c
@@ -109,6 +109,6 @@ int main (void)
/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" { target { ! { vect_strided8 || vect_no_align } } } } } */
/* We fail to vectorize the second loop with variable-length SVE but
fall back to 128-bit vectors, which does use SLP. */
-/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 1 "vect" { target { ! vect_perm } xfail aarch64_sve } } } */
+/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 1 "vect" { target { ! vect_perm } } } } */
/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 2 "vect" { target vect_perm } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/slp-35.c b/gcc/testsuite/gcc.dg/vect/slp-35.c
index 2e8f57c7067..76dd7456d89 100644
--- a/gcc/testsuite/gcc.dg/vect/slp-35.c
+++ b/gcc/testsuite/gcc.dg/vect/slp-35.c
@@ -67,5 +67,5 @@ int main (void)
}
/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" } } */
-/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 1 "vect" { xfail { vect_variable_length && vect_gather_scatter } } } } */
+/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 1 "vect" } } */
diff --git a/gcc/testsuite/gcc.dg/vect/slp-37.c b/gcc/testsuite/gcc.dg/vect/slp-37.c
index 700ffd85f91..b6a044dfd3a 100644
--- a/gcc/testsuite/gcc.dg/vect/slp-37.c
+++ b/gcc/testsuite/gcc.dg/vect/slp-37.c
@@ -58,7 +58,6 @@ int main (void)
return 0;
}
-/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 0 "vect" { target { ! vect_scatter } } } } */
-/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" { target vect_scatter xfail vect_variable_length } } } */
+/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 0 "vect" } } */
/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 0 "vect" } } */
diff --git a/gcc/testsuite/gcc.dg/vect/slp-perm-4.c b/gcc/testsuite/gcc.dg/vect/slp-perm-4.c
index 3a4420c53e4..8457e4f45d6 100644
--- a/gcc/testsuite/gcc.dg/vect/slp-perm-4.c
+++ b/gcc/testsuite/gcc.dg/vect/slp-perm-4.c
@@ -114,4 +114,4 @@ int main (int argc, const char* argv[])
/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" } } */
/* { dg-final { scan-tree-dump-times "gaps requires scalar epilogue loop" 0 "vect" } } */
-/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 1 "vect" { xfail { { ! vect_perm5_int } && vect_gather_scatter } } } } */
+/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 1 "vect" } } */
diff --git a/gcc/testsuite/gcc.dg/vect/vect-71.c b/gcc/testsuite/gcc.dg/vect/vect-71.c
index 2d1a3ffd0ad..f15521176df 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-71.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-71.c
@@ -36,4 +36,4 @@ int main (void)
return main1 ();
}
-/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" { xfail *-*-* } } } */
+/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" { xfail { ! vect_scatter_store } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/vect-reduc-6.c b/gcc/testsuite/gcc.dg/vect/vect-reduc-6.c
index 0f9fc20567e..a9a6021ed2b 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-reduc-6.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-reduc-6.c
@@ -50,5 +50,5 @@ int main (void)
/* need -ffast-math to vectorizer these loops. */
/* ARM NEON passes -ffast-math to these tests, so expect this to fail. */
-/* { dg-final { scan-tree-dump-times "vectorized 0 loops" 1 "vect" { xfail { vect_ieee_add_reduc || arm_neon_ok } } } } */
-/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" { target vect_ieee_add_reduc } } } */
+/* { dg-final { scan-tree-dump-times "vectorized 0 loops" 1 "vect" { target { ! vect_fold_left_plus } xfail arm_neon_ok } } } */
+/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" { target vect_fold_left_plus } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/vect-reduc-or_1.c b/gcc/testsuite/gcc.dg/vect/vect-reduc-or_1.c
index ead9548c4a6..cff3f16107f 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-reduc-or_1.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-reduc-or_1.c
@@ -24,17 +24,17 @@ main (unsigned char argc, char **argv)
check_vect ();
for (i = 0; i < N; i++)
- in[i] = (i + i + 1) & 0xfd;
+ {
+ in[i] = (i + i + 1) & 0xfd;
+ asm volatile ("" ::: "memory");
+ }
for (i = 0; i < N; i++)
{
expected |= in[i];
- asm volatile ("");
+ asm volatile ("" ::: "memory");
}
- /* Prevent constant propagation of the entire loop below. */
- asm volatile ("" : : : "memory");
-
for (i = 0; i < N; i++)
sum |= in[i];
diff --git a/gcc/testsuite/gcc.dg/vect/vect-reduc-or_2.c b/gcc/testsuite/gcc.dg/vect/vect-reduc-or_2.c
index 799ac173e21..cd1af6dc9ae 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-reduc-or_2.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-reduc-or_2.c
@@ -23,17 +23,17 @@ main (unsigned char argc, char **argv)
check_vect ();
for (i = 0; i < N; i++)
- in[i] = (i + i + 1) & 0xfd;
+ {
+ in[i] = (i + i + 1) & 0xfd;
+ asm volatile ("" ::: "memory");
+ }
for (i = 0; i < N; i++)
{
expected |= in[i];
- asm volatile ("");
+ asm volatile ("" ::: "memory");
}
- /* Prevent constant propagation of the entire loop below. */
- asm volatile ("" : : : "memory");
-
for (i = 0; i < N; i++)
sum |= in[i];
diff --git a/gcc/testsuite/gcc.misc-tests/gcov-3.c b/gcc/testsuite/gcc.misc-tests/gcov-3.c
index eb6e4cc46bf..5b07dd74bd1 100644
--- a/gcc/testsuite/gcc.misc-tests/gcov-3.c
+++ b/gcc/testsuite/gcc.misc-tests/gcov-3.c
@@ -1,10 +1,10 @@
+/* { dg-do run { target native } } */
/* { dg-require-effective-target label_values } */
/* Test Gcov with computed gotos.
This is the same as test gcc.c-torture/execute/980526-1.c */
/* { dg-options "-fprofile-arcs -ftest-coverage" } */
-/* { dg-do run { target native } } */
extern void abort (void);
extern void exit (int);
diff --git a/gcc/testsuite/gcc.target/aarch64/bsl-idiom.c b/gcc/testsuite/gcc.target/aarch64/bsl-idiom.c
new file mode 100644
index 00000000000..8151387600f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/bsl-idiom.c
@@ -0,0 +1,88 @@
+/* { dg-do run } */
+/* { dg-options "-O2 -fdump-rtl-combine --save-temps" } */
+
+/* Test that we don't generate BSL when in DImode with values in integer
+ registers, and do generate it where we have values in floating-point
+ registers. This is useful, as it allows us to avoid register moves
+ in the general case.
+
+ We want:
+ eor x0, x0, x1
+ and x0, x0, x2
+ eor x0, x0, x1
+ ret
+
+ Rather than:
+ fmov d2, x0
+ fmov d0, x2
+ fmov d1, x1
+ bsl v0.8b, v2.8b, v1.8b
+ fmov x0, d0
+ ret */
+
+extern void abort (void);
+
+unsigned long long __attribute__ ((noinline))
+foo (unsigned long long a, unsigned long long b, unsigned long long c)
+{
+ return ((a ^ b) & c) ^ b;
+}
+
+unsigned long long __attribute__ ((noinline))
+foo2 (unsigned long long a, unsigned long long b, unsigned long long c)
+{
+ return ((a ^ b) & c) ^ a;
+}
+
+#define force_simd(V1) asm volatile ("mov %d0, %1.d[0]" \
+ : "=w"(V1) \
+ : "w"(V1) \
+ : /* No clobbers */);
+
+unsigned long long __attribute__ ((noinline))
+bar (unsigned long long a, unsigned long long b, unsigned long long c)
+{
+ force_simd (a);
+ force_simd (b);
+ force_simd (c);
+ c = ((a ^ b) & c) ^ b;
+ force_simd (c);
+ return c;
+}
+
+unsigned long long __attribute__ ((noinline))
+bar2 (unsigned long long a, unsigned long long b, unsigned long long c)
+{
+ force_simd (a);
+ force_simd (b);
+ force_simd (c);
+ c = ((a ^ b) & c) ^ a;
+ force_simd (c);
+ return c;
+}
+
+int
+main (int argc, char** argv)
+{
+ unsigned long long a = 0x0123456789abcdefULL;
+ unsigned long long b = 0xfedcba9876543210ULL;
+ unsigned long long c = 0xaabbccddeeff7777ULL;
+ if (foo (a, b, c) != bar (a, b, c))
+ abort ();
+ if (foo2 (a, b, c) != bar2 (a, b, c))
+ abort ();
+ return 0;
+}
+
+/* 2 BSL, 6 FMOV (to floating-point registers), and 2 FMOV (to general
+purpose registers) for the "bar" tests, which should still use BSL. */
+/* { dg-final { scan-assembler-times "bsl\tv\[0-9\]" 2 } } */
+/* { dg-final { scan-assembler-times "fmov\td\[0-9\]" 6 } } */
+/* { dg-final { scan-assembler-times "fmov\tx\[0-9\]" 2 } } */
+
+/* { dg-final { scan-assembler-not "bif\tv\[0-9\]" } } */
+/* { dg-final { scan-assembler-not "bit\tv\[0-9\]" } } */
+
+/* We always match the idiom during combine. */
+/* { dg-final { scan-rtl-dump-times "aarch64_simd_bsldi_internal" 2 "combine" } } */
+/* { dg-final { scan-rtl-dump-times "aarch64_simd_bsldi_alt" 2 "combine" } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/construct_lane_zero_1.c b/gcc/testsuite/gcc.target/aarch64/construct_lane_zero_1.c
new file mode 100644
index 00000000000..d87f3290828
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/construct_lane_zero_1.c
@@ -0,0 +1,37 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+typedef long long v2di __attribute__ ((vector_size (16)));
+typedef double v2df __attribute__ ((vector_size (16)));
+
+v2di
+construct_lanedi (long long *y)
+{
+ v2di x =
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ { 0, y[0] }
+#else
+ { y[0], 0 }
+#endif
+ ;
+ return x;
+}
+
+v2df
+construct_lanedf (double *y)
+{
+ v2df x =
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ { 0.0, y[0] }
+#else
+ { y[0], 0.0 }
+#endif
+ ;
+ return x;
+}
+
+/* Check that creating V2DI and V2DF vectors from a lane with a zero
+ makes use of the D-reg LDR rather than doing explicit lane inserts. */
+
+/* { dg-final { scan-assembler-times "ldr\td\[0-9\]+" 2 } } */
+/* { dg-final { scan-assembler-not "ins\t" } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/copysign-bsl.c b/gcc/testsuite/gcc.target/aarch64/copysign-bsl.c
new file mode 100644
index 00000000000..0ec7109c738
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/copysign-bsl.c
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+/* Test that we can generate DImode BSL when we are using
+ copysign. */
+
+double
+foo (double a, double b)
+{
+ return __builtin_copysign (a, b);
+}
+
+/* { dg-final { scan-assembler "b\(sl|it|if\)\tv\[0-9\]" } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/dwarf-cfa-reg.c b/gcc/testsuite/gcc.target/aarch64/dwarf-cfa-reg.c
index cce88155aca..ae5b3797021 100644
--- a/gcc/testsuite/gcc.target/aarch64/dwarf-cfa-reg.c
+++ b/gcc/testsuite/gcc.target/aarch64/dwarf-cfa-reg.c
@@ -3,7 +3,7 @@
/* { dg-options "-O0 -gdwarf-2" } */
/* { dg-final { scan-assembler ".cfi_restore 30" } } */
/* { dg-final { scan-assembler ".cfi_restore 29" } } */
-/* { dg-final { scan-assembler ".cfi_def_cfa 31, 0" } } */
+/* { dg-final { scan-assembler ".cfi_def_cfa_offset 0" } } */
/* { dg-final { scan-assembler "ret" } } */
int bar (unsigned int);
diff --git a/gcc/testsuite/gcc.target/aarch64/load_v2vec_lanes_1.c b/gcc/testsuite/gcc.target/aarch64/load_v2vec_lanes_1.c
new file mode 100644
index 00000000000..3c31b340154
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/load_v2vec_lanes_1.c
@@ -0,0 +1,26 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+typedef long long v2di __attribute__ ((vector_size (16)));
+typedef double v2df __attribute__ ((vector_size (16)));
+
+v2di
+construct_lanedi (long long *y)
+{
+ v2di x = { y[0], y[1] };
+ return x;
+}
+
+v2df
+construct_lanedf (double *y)
+{
+ v2df x = { y[0], y[1] };
+ return x;
+}
+
+/* We can use the load_pair_lanes<mode> pattern to vec_concat two DI/DF
+ values from consecutive memory into a 2-element vector by using
+ a Q-reg LDR. */
+
+/* { dg-final { scan-assembler-times "ldr\tq\[0-9\]+" 2 } } */
+/* { dg-final { scan-assembler-not "ins\t" } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/store_v2vec_lanes.c b/gcc/testsuite/gcc.target/aarch64/store_v2vec_lanes.c
new file mode 100644
index 00000000000..6810db3c54d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/store_v2vec_lanes.c
@@ -0,0 +1,31 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+typedef long long v2di __attribute__ ((vector_size (16)));
+typedef double v2df __attribute__ ((vector_size (16)));
+
+void
+construct_lane_1 (double *y, v2df *z)
+{
+ double y0 = y[0] + 1;
+ double y1 = y[1] + 2;
+ v2df x = {y0, y1};
+ z[2] = x;
+}
+
+void
+construct_lane_2 (long long *y, v2di *z)
+{
+ long long y0 = y[0] + 1;
+ long long y1 = y[1] + 2;
+ v2di x = {y0, y1};
+ z[2] = x;
+}
+
+/* We can use the load_pair_lanes<mode> pattern to vec_concat two DI/DF
+ values from consecutive memory into a 2-element vector by using
+ a Q-reg LDR. */
+
+/* { dg-final { scan-assembler-times "stp\td\[0-9\]+, d\[0-9\]+" 1 } } */
+/* { dg-final { scan-assembler-times "stp\tx\[0-9\]+, x\[0-9\]+" 1 } } */
+/* { dg-final { scan-assembler-not "ins\t" } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_cap_4.c b/gcc/testsuite/gcc.target/aarch64/sve_cap_4.c
index b22828d621b..c3bf2f326d3 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_cap_4.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_cap_4.c
@@ -36,7 +36,7 @@ LOOP (double)
/* { dg-final { scan-assembler-times {\tstr\td[0-9]+} 1 } } */
/* { dg-final { scan-assembler-times {\tld1w\tz[0-9]+\.s, p[0-7]/z, \[x[0-9]+, z[0-9]+\.s, sxtw 2\]} 4 } } */
-/* { dg-final { scan-assembler-times {\tld1d\tz[0-9]+\.d, p[0-7]/z, \[x[0-9]+, z[0-9]+\.d\]} 4 } } */
+/* { dg-final { scan-assembler-times {\tld1d\tz[0-9]+\.d, p[0-7]/z, \[x[0-9]+, z[0-9]+\.d, lsl 3\]} 4 } } */
/* { dg-final { scan-assembler-times {\tst1w\tz[0-9]+\.s, p[0-7], \[x[0-9]+, z[0-9]+\.s, sxtw 2\]} 2 } } */
-/* { dg-final { scan-assembler-times {\tst1d\tz[0-9]+\.d, p[0-7], \[x[0-9]+, z[0-9]+\.d\]} 2 } } */
+/* { dg-final { scan-assembler-times {\tst1d\tz[0-9]+\.d, p[0-7], \[x[0-9]+, z[0-9]+\.d, lsl 3\]} 2 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_clastb_1.c b/gcc/testsuite/gcc.target/aarch64/sve_clastb_1.c
index a176d9ce251..4651c70afda 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_clastb_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_clastb_1.c
@@ -1,11 +1,11 @@
/* { dg-do assemble } */
-/* { dg-options "-O2 -ftree-vectorize -fno-inline -march=armv8-a+sve --save-temps -fdump-tree-vect-details" } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
#define N 32
/* Simple condition reduction. */
-int
+int __attribute__ ((noinline, noclone))
condition_reduction (int *a, int min_v)
{
int last = 66; /* High start value. */
@@ -17,6 +17,4 @@ condition_reduction (int *a, int min_v)
return last;
}
-/* { dg-final { scan-tree-dump-times "LOOP VECTORIZED" 1 "vect" } } */
-/* { dg-final { scan-tree-dump-times "Optimizing condition reduction with CLASTB." 2 "vect" } } */
/* { dg-final { scan-assembler {\tclastb\tw[0-9]+, p[0-7], w[0-9]+, z[0-9]+\.s} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_clastb_1_run.c b/gcc/testsuite/gcc.target/aarch64/sve_clastb_1_run.c
index 8e6444e4239..0dcba03b61c 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_clastb_1_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_clastb_1_run.c
@@ -1,24 +1,22 @@
/* { dg-do run { target aarch64_sve_hw } } */
-/* { dg-options "-O2 -ftree-vectorize -fno-inline -march=armv8-a+sve" } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
#include "sve_clastb_1.c"
-extern void abort (void) __attribute__ ((noreturn));
-
-int
+int __attribute__ ((optimize (1)))
main (void)
{
int a[N] = {
- 11, -12, 13, 14, 15, 16, 17, 18, 19, 20,
- 1, 2, -3, 4, 5, 6, 7, -8, 9, 10,
- 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
- 31, 32
+ 11, -12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 1, 2, -3, 4, 5, 6, 7, -8, 9, 10,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+ 31, 32
};
int ret = condition_reduction (a, 1);
if (ret != 17)
- abort ();
+ __builtin_abort ();
return 0;
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_clastb_2.c b/gcc/testsuite/gcc.target/aarch64/sve_clastb_2.c
index dcae41f5425..381cbd17577 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_clastb_2.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_clastb_2.c
@@ -1,15 +1,17 @@
/* { dg-do assemble } */
-/* { dg-options "-O2 -ftree-vectorize -fno-inline -march=armv8-a+sve --save-temps -fdump-tree-vect-details" } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
+
+#include <stdint.h>
#if !defined(TYPE)
-#define TYPE unsigned int
+#define TYPE uint32_t
#endif
#define N 254
/* Non-simple condition reduction. */
-TYPE
+TYPE __attribute__ ((noinline, noclone))
condition_reduction (TYPE *a, TYPE min_v)
{
TYPE last = 65;
@@ -21,7 +23,4 @@ condition_reduction (TYPE *a, TYPE min_v)
return last;
}
-/* { dg-final { scan-tree-dump-times "LOOP VECTORIZED" 1 "vect" } } */
-/* { dg-final { scan-tree-dump-not "condition expression based on integer induction." "vect" } } */
-/* { dg-final { scan-tree-dump-times "Optimizing condition reduction with CLASTB." 2 "vect" } } */
/* { dg-final { scan-assembler {\tclastb\tw[0-9]+, p[0-7]+, w[0-9]+, z[0-9]+\.s} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_clastb_2_run.c b/gcc/testsuite/gcc.target/aarch64/sve_clastb_2_run.c
index 0503ba36c3d..0d5187ba3ae 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_clastb_2_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_clastb_2_run.c
@@ -1,25 +1,23 @@
/* { dg-do run { target aarch64_sve_hw } } */
-/* { dg-options "-O2 -ftree-vectorize -fno-inline -march=armv8-a+sve" } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
#include "sve_clastb_2.c"
-extern void abort (void) __attribute__ ((noreturn));
-
-int
+int __attribute__ ((optimize (1)))
main (void)
{
unsigned int a[N] = {
- 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
- 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
- 31, 32
+ 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+ 31, 32
};
- __builtin_memset (a+32, 43, (N-32)*sizeof (int));
+ __builtin_memset (a + 32, 43, (N - 32) * sizeof (int));
unsigned int ret = condition_reduction (a, 16);
if (ret != 10)
- abort ();
+ __builtin_abort ();
return 0;
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_clastb_3.c b/gcc/testsuite/gcc.target/aarch64/sve_clastb_3.c
index 1061194a08e..90a3b938593 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_clastb_3.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_clastb_3.c
@@ -1,11 +1,8 @@
/* { dg-do assemble } */
-/* { dg-options "-O2 -ftree-vectorize -fno-inline -march=armv8-a+sve --save-temps -fdump-tree-vect-details" } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
-#define TYPE unsigned char
+#define TYPE uint8_t
#include "sve_clastb_2.c"
-/* { dg-final { scan-tree-dump-times "LOOP VECTORIZED" 1 "vect" } } */
-/* { dg-final { scan-tree-dump-not "condition expression based on integer induction." "vect" } } */
-/* { dg-final { scan-tree-dump-times "Optimizing condition reduction with CLASTB." 2 "vect" } } */
/* { dg-final { scan-assembler {\tclastb\tw[0-9]+, p[0-7]+, w[0-9]+, z[0-9]+\.b} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_clastb_3_run.c b/gcc/testsuite/gcc.target/aarch64/sve_clastb_3_run.c
index 90c3e4a0cf3..f90fbfc5e9b 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_clastb_3_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_clastb_3_run.c
@@ -1,25 +1,23 @@
/* { dg-do run { target aarch64_sve_hw } } */
-/* { dg-options "-O2 -ftree-vectorize -fno-inline -march=armv8-a+sve" } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
#include "sve_clastb_3.c"
-extern void abort (void) __attribute__ ((noreturn));
-
-int
+int __attribute__ ((optimize (1)))
main (void)
{
unsigned char a[N] = {
- 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
- 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
- 31, 32
+ 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+ 31, 32
};
- __builtin_memset (a+32, 43, N-32);
+ __builtin_memset (a + 32, 43, N - 32);
unsigned char ret = condition_reduction (a, 16);
if (ret != 10)
- abort ();
+ __builtin_abort ();
return 0;
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_clastb_4.c b/gcc/testsuite/gcc.target/aarch64/sve_clastb_4.c
index 698d958693a..dc01b21c273 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_clastb_4.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_clastb_4.c
@@ -1,11 +1,8 @@
/* { dg-do assemble } */
-/* { dg-options "-O2 -ftree-vectorize -fno-inline -march=armv8-a+sve --save-temps -fdump-tree-vect-details" } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
-#define TYPE short
+#define TYPE int16_t
#include "sve_clastb_2.c"
-/* { dg-final { scan-tree-dump-times "LOOP VECTORIZED" 1 "vect" } } */
-/* { dg-final { scan-tree-dump-not "condition expression based on integer induction." "vect" } } */
-/* { dg-final { scan-tree-dump-times "Optimizing condition reduction with CLASTB." 2 "vect" } } */
/* { dg-final { scan-assembler {\tclastb\tw[0-9]+, p[0-7], w[0-9]+, z[0-9]+\.h} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_clastb_4_run.c b/gcc/testsuite/gcc.target/aarch64/sve_clastb_4_run.c
index d0337ab300d..e17199f3672 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_clastb_4_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_clastb_4_run.c
@@ -5,7 +5,7 @@
extern void abort (void) __attribute__ ((noreturn));
-int
+int __attribute__ ((optimize (1)))
main (void)
{
short a[N] = {
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_clastb_5.c b/gcc/testsuite/gcc.target/aarch64/sve_clastb_5.c
index 655f95f410a..aef2a80c68f 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_clastb_5.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_clastb_5.c
@@ -1,11 +1,8 @@
/* { dg-do assemble } */
-/* { dg-options "-O2 -ftree-vectorize -fno-inline -march=armv8-a+sve --save-temps -fdump-tree-vect-details" } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
-#define TYPE long
+#define TYPE uint64_t
#include "sve_clastb_2.c"
-/* { dg-final { scan-tree-dump-times "LOOP VECTORIZED" 1 "vect" } } */
-/* { dg-final { scan-tree-dump-not "condition expression based on integer induction." "vect" } } */
-/* { dg-final { scan-tree-dump-times "Optimizing condition reduction with CLASTB." 2 "vect" } } */
/* { dg-final { scan-assembler {\tclastb\tx[0-9]+, p[0-7], x[0-9]+, z[0-9]+\.d} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_clastb_5_run.c b/gcc/testsuite/gcc.target/aarch64/sve_clastb_5_run.c
index 573787233d8..e251db0bb76 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_clastb_5_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_clastb_5_run.c
@@ -1,25 +1,23 @@
/* { dg-do run { target aarch64_sve_hw } } */
-/* { dg-options "-O2 -ftree-vectorize -fno-inline -march=armv8-a+sve" } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
#include "sve_clastb_5.c"
-extern void abort (void) __attribute__ ((noreturn));
-
-int
+int __attribute__ ((optimize (1)))
main (void)
{
long a[N] = {
- 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
- 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
- 31, 32
+ 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+ 31, 32
};
- __builtin_memset (a+32, 43, (N-32)*sizeof (long));
+ __builtin_memset (a + 32, 43, (N - 32) * sizeof (long));
long ret = condition_reduction (a, 16);
if (ret != 10)
- abort ();
+ __builtin_abort ();
return 0;
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_clastb_6.c b/gcc/testsuite/gcc.target/aarch64/sve_clastb_6.c
index bf1bc1a346a..93fec6396a2 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_clastb_6.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_clastb_6.c
@@ -1,5 +1,5 @@
/* { dg-do assemble } */
-/* { dg-options "-O2 -ftree-vectorize -fno-inline -march=armv8-a+sve --save-temps -fdump-tree-vect-details" } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
#define N 32
@@ -9,7 +9,7 @@
/* Non-integer data types. */
-TYPE
+TYPE __attribute__ ((noinline, noclone))
condition_reduction (TYPE *a, TYPE min_v)
{
TYPE last = 0;
@@ -21,8 +21,4 @@ condition_reduction (TYPE *a, TYPE min_v)
return last;
}
-/* { dg-final { scan-tree-dump-times "LOOP VECTORIZED" 1 "vect" } } */
-/* { dg-final { scan-tree-dump-not "condition expression based on integer induction." "vect" } } */
-/* { dg-final { scan-tree-dump-times "Optimizing condition reduction with CLASTB." 2 "vect" } } */
/* { dg-final { scan-assembler {\tclastb\ts[0-9]+, p[0-7], s[0-9]+, z[0-9]+\.s} } } */
-
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_clastb_6_run.c b/gcc/testsuite/gcc.target/aarch64/sve_clastb_6_run.c
index 4c760daba89..c204ed4c4f0 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_clastb_6_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_clastb_6_run.c
@@ -1,24 +1,22 @@
/* { dg-do run { target aarch64_sve_hw } } */
-/* { dg-options "-O2 -ftree-vectorize -fno-inline -march=armv8-a+sve" } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
#include "sve_clastb_6.c"
-extern void abort (void) __attribute__ ((noreturn));
-
-int
+int __attribute__ ((optimize (1)))
main (void)
{
float a[N] = {
- 11.5, 12.2, 13.22, 14.1, 15.2, 16.3, 17, 18.7, 19, 20,
- 1, 2, 3.3, 4.3333, 5.5, 6.23, 7, 8.63, 9, 10.6,
- 21, 22.12, 23.55, 24.76, 25, 26, 27.34, 28.765, 29, 30,
- 31.111, 32.322
+ 11.5, 12.2, 13.22, 14.1, 15.2, 16.3, 17, 18.7, 19, 20,
+ 1, 2, 3.3, 4.3333, 5.5, 6.23, 7, 8.63, 9, 10.6,
+ 21, 22.12, 23.55, 24.76, 25, 26, 27.34, 28.765, 29, 30,
+ 31.111, 32.322
};
float ret = condition_reduction (a, 16.7);
- if (ret != (float)10.6)
- abort ();
+ if (ret != (float) 10.6)
+ __builtin_abort ();
return 0;
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_clastb_7.c b/gcc/testsuite/gcc.target/aarch64/sve_clastb_7.c
index 12e53b75e8a..d232a87e41d 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_clastb_7.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_clastb_7.c
@@ -1,11 +1,7 @@
/* { dg-do assemble } */
-/* { dg-options "-O2 -ftree-vectorize -fno-inline -march=armv8-a+sve --save-temps -fdump-tree-vect-details" } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
#define TYPE double
#include "sve_clastb_6.c"
-/* { dg-final { scan-tree-dump-times "LOOP VECTORIZED" 1 "vect" } } */
-/* { dg-final { scan-tree-dump-not "condition expression based on integer induction." "vect" } } */
-/* { dg-final { scan-tree-dump-times "Optimizing condition reduction with CLASTB." 2 "vect" } } */
/* { dg-final { scan-assembler {\tclastb\td[0-9]+, p[0-7], d[0-9]+, z[0-9]+\.d} } } */
-
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_clastb_7_run.c b/gcc/testsuite/gcc.target/aarch64/sve_clastb_7_run.c
index d0001a923e8..2f87a4766e0 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_clastb_7_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_clastb_7_run.c
@@ -1,24 +1,22 @@
/* { dg-do run { target aarch64_sve_hw } } */
-/* { dg-options "-O2 -ftree-vectorize -fno-inline -march=armv8-a+sve" } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
#include "sve_clastb_7.c"
-extern void abort (void) __attribute__ ((noreturn));
-
-int
+int __attribute__ ((optimize (1)))
main (void)
{
double a[N] = {
- 11.5, 12.2, 13.22, 14.1, 15.2, 16.3, 17, 18.7, 19, 20,
- 1, 2, 3.3, 4.3333, 5.5, 6.23, 7, 8.63, 9, 10.6,
- 21, 22.12, 23.55, 24.76, 25, 26, 27.34, 28.765, 29, 30,
- 31.111, 32.322
+ 11.5, 12.2, 13.22, 14.1, 15.2, 16.3, 17, 18.7, 19, 20,
+ 1, 2, 3.3, 4.3333, 5.5, 6.23, 7, 8.63, 9, 10.6,
+ 21, 22.12, 23.55, 24.76, 25, 26, 27.34, 28.765, 29, 30,
+ 31.111, 32.322
};
double ret = condition_reduction (a, 16.7);
- if (ret != (double)10.6)
- abort ();
+ if (ret != 10.6)
+ __builtin_abort ();
return 0;
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_const_pred_1.C b/gcc/testsuite/gcc.target/aarch64/sve_const_pred_1.C
index 4937e7f10e5..3f30a527cae 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_const_pred_1.C
+++ b/gcc/testsuite/gcc.target/aarch64/sve_const_pred_1.C
@@ -1,15 +1,15 @@
/* { dg-do compile } */
/* { dg-options "-O2 -march=armv8-a+sve -msve-vector-bits=256" } */
-typedef signed char v32qi __attribute__((vector_size(32)));
+typedef signed char vnx16qi __attribute__((vector_size(32)));
-v32qi
-foo (v32qi x, v32qi y)
+vnx16qi
+foo (vnx16qi x, vnx16qi y)
{
- return (v32qi) { -1, 0, 0, -1, -1, -1, 0, 0,
- -1, -1, -1, -1, 0, 0, 0, 0,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 0, 0, 0, 0, 0, 0, 0, 0 } ? x : y;
+ return (vnx16qi) { -1, 0, 0, -1, -1, -1, 0, 0,
+ -1, -1, -1, -1, 0, 0, 0, 0,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ 0, 0, 0, 0, 0, 0, 0, 0 } ? x : y;
}
/* { dg-final { scan-assembler {\tldr\tp[0-9]+,} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_const_pred_2.C b/gcc/testsuite/gcc.target/aarch64/sve_const_pred_2.C
index 3de4a8ccd00..ec8a0ab9d69 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_const_pred_2.C
+++ b/gcc/testsuite/gcc.target/aarch64/sve_const_pred_2.C
@@ -1,13 +1,13 @@
/* { dg-do compile } */
/* { dg-options "-O2 -march=armv8-a+sve -msve-vector-bits=256" } */
-typedef short v16hi __attribute__((vector_size(32)));
+typedef short vnx8hi __attribute__((vector_size(32)));
-v16hi
-foo (v16hi x, v16hi y)
+vnx8hi
+foo (vnx8hi x, vnx8hi y)
{
- return (v16hi) { -1, 0, 0, -1, -1, -1, 0, 0,
- -1, -1, -1, -1, 0, 0, 0, 0 } ? x : y;
+ return (vnx8hi) { -1, 0, 0, -1, -1, -1, 0, 0,
+ -1, -1, -1, -1, 0, 0, 0, 0 } ? x : y;
}
/* { dg-final { scan-assembler {\tldr\tp[0-9]+,} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_const_pred_3.C b/gcc/testsuite/gcc.target/aarch64/sve_const_pred_3.C
index 8185f7baa76..ab1429d4e40 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_const_pred_3.C
+++ b/gcc/testsuite/gcc.target/aarch64/sve_const_pred_3.C
@@ -1,12 +1,12 @@
/* { dg-do compile } */
/* { dg-options "-O2 -march=armv8-a+sve -msve-vector-bits=256" } */
-typedef int v8si __attribute__((vector_size(32)));
+typedef int vnx4si __attribute__((vector_size(32)));
-v8si
-foo (v8si x, v8si y)
+vnx4si
+foo (vnx4si x, vnx4si y)
{
- return (v8si) { -1, 0, 0, -1, -1, -1, 0, 0 } ? x : y;
+ return (vnx4si) { -1, 0, 0, -1, -1, -1, 0, 0 } ? x : y;
}
/* { dg-final { scan-assembler {\tldr\tp[0-9]+,} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_const_pred_4.C b/gcc/testsuite/gcc.target/aarch64/sve_const_pred_4.C
index b15da8a59e2..3ad39b9df7d 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_const_pred_4.C
+++ b/gcc/testsuite/gcc.target/aarch64/sve_const_pred_4.C
@@ -1,12 +1,12 @@
/* { dg-do compile } */
/* { dg-options "-O2 -march=armv8-a+sve -msve-vector-bits=256" } */
-typedef long long v4di __attribute__((vector_size(32)));
+typedef long long vnx2di __attribute__((vector_size(32)));
-v4di
-foo (v4di x, v4di y)
+vnx2di
+foo (vnx2di x, vnx2di y)
{
- return (v4di) { -1, 0, 0, -1 } ? x : y;
+ return (vnx2di) { -1, 0, 0, -1 } ? x : y;
}
/* { dg-final { scan-assembler {\tldr\tp[0-9]+,} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_dup_lane_1.c b/gcc/testsuite/gcc.target/aarch64/sve_dup_lane_1.c
index ea977207226..8df86eb6b1b 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_dup_lane_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_dup_lane_1.c
@@ -3,13 +3,13 @@
#include <stdint.h>
-typedef int64_t v4di __attribute__((vector_size (32)));
-typedef int32_t v8si __attribute__((vector_size (32)));
-typedef int16_t v16hi __attribute__((vector_size (32)));
-typedef int8_t v32qi __attribute__((vector_size (32)));
-typedef double v4df __attribute__((vector_size (32)));
-typedef float v8sf __attribute__((vector_size (32)));
-typedef _Float16 v16hf __attribute__((vector_size (32)));
+typedef int64_t vnx2di __attribute__((vector_size (32)));
+typedef int32_t vnx4si __attribute__((vector_size (32)));
+typedef int16_t vnx8hi __attribute__((vector_size (32)));
+typedef int8_t vnx16qi __attribute__((vector_size (32)));
+typedef double vnx2df __attribute__((vector_size (32)));
+typedef float vnx4sf __attribute__((vector_size (32)));
+typedef _Float16 vnx8hf __attribute__((vector_size (32)));
#define MASK_2(X) X, X
#define MASK_4(X) MASK_2 (X), MASK_2 (X)
@@ -17,10 +17,10 @@ typedef _Float16 v16hf __attribute__((vector_size (32)));
#define MASK_16(X) MASK_8 (X), MASK_8 (X)
#define MASK_32(X) MASK_16 (X), MASK_16 (X)
-#define INDEX_4 v4di
-#define INDEX_8 v8si
-#define INDEX_16 v16hi
-#define INDEX_32 v32qi
+#define INDEX_4 vnx2di
+#define INDEX_8 vnx4si
+#define INDEX_16 vnx8hi
+#define INDEX_32 vnx16qi
#define DUP_LANE(TYPE, NUNITS, INDEX) \
TYPE dup_##INDEX##_##TYPE (TYPE values1, TYPE values2) \
@@ -30,27 +30,27 @@ typedef _Float16 v16hf __attribute__((vector_size (32)));
}
#define TEST_ALL(T) \
- T (v4di, 4, 0) \
- T (v4di, 4, 2) \
- T (v4di, 4, 3) \
- T (v8si, 8, 0) \
- T (v8si, 8, 5) \
- T (v8si, 8, 7) \
- T (v16hi, 16, 0) \
- T (v16hi, 16, 6) \
- T (v16hi, 16, 15) \
- T (v32qi, 32, 0) \
- T (v32qi, 32, 19) \
- T (v32qi, 32, 31) \
- T (v4df, 4, 0) \
- T (v4df, 4, 2) \
- T (v4df, 4, 3) \
- T (v8sf, 8, 0) \
- T (v8sf, 8, 5) \
- T (v8sf, 8, 7) \
- T (v16hf, 16, 0) \
- T (v16hf, 16, 6) \
- T (v16hf, 16, 15) \
+ T (vnx2di, 4, 0) \
+ T (vnx2di, 4, 2) \
+ T (vnx2di, 4, 3) \
+ T (vnx4si, 8, 0) \
+ T (vnx4si, 8, 5) \
+ T (vnx4si, 8, 7) \
+ T (vnx8hi, 16, 0) \
+ T (vnx8hi, 16, 6) \
+ T (vnx8hi, 16, 15) \
+ T (vnx16qi, 32, 0) \
+ T (vnx16qi, 32, 19) \
+ T (vnx16qi, 32, 31) \
+ T (vnx2df, 4, 0) \
+ T (vnx2df, 4, 2) \
+ T (vnx2df, 4, 3) \
+ T (vnx4sf, 8, 0) \
+ T (vnx4sf, 8, 5) \
+ T (vnx4sf, 8, 7) \
+ T (vnx8hf, 16, 0) \
+ T (vnx8hf, 16, 6) \
+ T (vnx8hf, 16, 15) \
TEST_ALL (DUP_LANE)
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_ext_1.c b/gcc/testsuite/gcc.target/aarch64/sve_ext_1.c
index 1ec51aa2eaf..05bd6dc8f65 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_ext_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_ext_1.c
@@ -3,13 +3,13 @@
#include <stdint.h>
-typedef int64_t v4di __attribute__((vector_size (32)));
-typedef int32_t v8si __attribute__((vector_size (32)));
-typedef int16_t v16hi __attribute__((vector_size (32)));
-typedef int8_t v32qi __attribute__((vector_size (32)));
-typedef double v4df __attribute__((vector_size (32)));
-typedef float v8sf __attribute__((vector_size (32)));
-typedef _Float16 v16hf __attribute__((vector_size (32)));
+typedef int64_t vnx2di __attribute__((vector_size (32)));
+typedef int32_t vnx4si __attribute__((vector_size (32)));
+typedef int16_t vnx8hi __attribute__((vector_size (32)));
+typedef int8_t vnx16qi __attribute__((vector_size (32)));
+typedef double vnx2df __attribute__((vector_size (32)));
+typedef float vnx4sf __attribute__((vector_size (32)));
+typedef _Float16 vnx8hf __attribute__((vector_size (32)));
#define MASK_2(X) X, X + 1
#define MASK_4(X) MASK_2 (X), MASK_2 (X + 2)
@@ -17,10 +17,10 @@ typedef _Float16 v16hf __attribute__((vector_size (32)));
#define MASK_16(X) MASK_8 (X), MASK_8 (X + 8)
#define MASK_32(X) MASK_16 (X), MASK_16 (X + 16)
-#define INDEX_4 v4di
-#define INDEX_8 v8si
-#define INDEX_16 v16hi
-#define INDEX_32 v32qi
+#define INDEX_4 vnx2di
+#define INDEX_8 vnx4si
+#define INDEX_16 vnx8hi
+#define INDEX_32 vnx16qi
#define DUP_LANE(TYPE, NUNITS, INDEX) \
TYPE dup_##INDEX##_##TYPE (TYPE values1, TYPE values2) \
@@ -30,27 +30,27 @@ typedef _Float16 v16hf __attribute__((vector_size (32)));
}
#define TEST_ALL(T) \
- T (v4di, 4, 1) \
- T (v4di, 4, 2) \
- T (v4di, 4, 3) \
- T (v8si, 8, 1) \
- T (v8si, 8, 5) \
- T (v8si, 8, 7) \
- T (v16hi, 16, 1) \
- T (v16hi, 16, 6) \
- T (v16hi, 16, 15) \
- T (v32qi, 32, 1) \
- T (v32qi, 32, 19) \
- T (v32qi, 32, 31) \
- T (v4df, 4, 1) \
- T (v4df, 4, 2) \
- T (v4df, 4, 3) \
- T (v8sf, 8, 1) \
- T (v8sf, 8, 5) \
- T (v8sf, 8, 7) \
- T (v16hf, 16, 1) \
- T (v16hf, 16, 6) \
- T (v16hf, 16, 15) \
+ T (vnx2di, 4, 1) \
+ T (vnx2di, 4, 2) \
+ T (vnx2di, 4, 3) \
+ T (vnx4si, 8, 1) \
+ T (vnx4si, 8, 5) \
+ T (vnx4si, 8, 7) \
+ T (vnx8hi, 16, 1) \
+ T (vnx8hi, 16, 6) \
+ T (vnx8hi, 16, 15) \
+ T (vnx16qi, 32, 1) \
+ T (vnx16qi, 32, 19) \
+ T (vnx16qi, 32, 31) \
+ T (vnx2df, 4, 1) \
+ T (vnx2df, 4, 2) \
+ T (vnx2df, 4, 3) \
+ T (vnx4sf, 8, 1) \
+ T (vnx4sf, 8, 5) \
+ T (vnx4sf, 8, 7) \
+ T (vnx8hf, 16, 1) \
+ T (vnx8hf, 16, 6) \
+ T (vnx8hf, 16, 15) \
TEST_ALL (DUP_LANE)
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_ext_2.c b/gcc/testsuite/gcc.target/aarch64/sve_ext_2.c
index b93574e50f7..047d4c59651 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_ext_2.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_ext_2.c
@@ -1,16 +1,16 @@
/* { dg-do compile } */
/* { dg-options "-O -march=armv8-a+sve -msve-vector-bits=256" } */
-typedef int v8si __attribute__((vector_size (32)));
+typedef int vnx4si __attribute__((vector_size (32)));
void
foo (void)
{
- register v8si x asm ("z0");
- register v8si y asm ("z1");
+ register vnx4si x asm ("z0");
+ register vnx4si y asm ("z1");
asm volatile ("" : "=w" (y));
- x = __builtin_shuffle (y, y, (v8si) { 1, 2, 3, 4, 5, 6, 7, 8 });
+ x = __builtin_shuffle (y, y, (vnx4si) { 1, 2, 3, 4, 5, 6, 7, 8 });
asm volatile ("" :: "w" (x));
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_extract_1.c b/gcc/testsuite/gcc.target/aarch64/sve_extract_1.c
index 1ba277ffa6d..f9cd8d2998e 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_extract_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_extract_1.c
@@ -3,13 +3,13 @@
#include <stdint.h>
-typedef int64_t v4di __attribute__((vector_size (32)));
-typedef int32_t v8si __attribute__((vector_size (32)));
-typedef int16_t v16hi __attribute__((vector_size (32)));
-typedef int8_t v32qi __attribute__((vector_size (32)));
-typedef double v4df __attribute__((vector_size (32)));
-typedef float v8sf __attribute__((vector_size (32)));
-typedef _Float16 v16hf __attribute__((vector_size (32)));
+typedef int64_t vnx2di __attribute__((vector_size (32)));
+typedef int32_t vnx4si __attribute__((vector_size (32)));
+typedef int16_t vnx8hi __attribute__((vector_size (32)));
+typedef int8_t vnx16qi __attribute__((vector_size (32)));
+typedef double vnx2df __attribute__((vector_size (32)));
+typedef float vnx4sf __attribute__((vector_size (32)));
+typedef _Float16 vnx8hf __attribute__((vector_size (32)));
#define EXTRACT(ELT_TYPE, TYPE, INDEX) \
ELT_TYPE permute_##TYPE##_##INDEX (void) \
@@ -20,39 +20,39 @@ typedef _Float16 v16hf __attribute__((vector_size (32)));
}
#define TEST_ALL(T) \
- T (int64_t, v4di, 0) \
- T (int64_t, v4di, 1) \
- T (int64_t, v4di, 2) \
- T (int64_t, v4di, 3) \
- T (int32_t, v8si, 0) \
- T (int32_t, v8si, 1) \
- T (int32_t, v8si, 3) \
- T (int32_t, v8si, 4) \
- T (int32_t, v8si, 7) \
- T (int16_t, v16hi, 0) \
- T (int16_t, v16hi, 1) \
- T (int16_t, v16hi, 7) \
- T (int16_t, v16hi, 8) \
- T (int16_t, v16hi, 15) \
- T (int8_t, v32qi, 0) \
- T (int8_t, v32qi, 1) \
- T (int8_t, v32qi, 15) \
- T (int8_t, v32qi, 16) \
- T (int8_t, v32qi, 31) \
- T (double, v4df, 0) \
- T (double, v4df, 1) \
- T (double, v4df, 2) \
- T (double, v4df, 3) \
- T (float, v8sf, 0) \
- T (float, v8sf, 1) \
- T (float, v8sf, 3) \
- T (float, v8sf, 4) \
- T (float, v8sf, 7) \
- T (_Float16, v16hf, 0) \
- T (_Float16, v16hf, 1) \
- T (_Float16, v16hf, 7) \
- T (_Float16, v16hf, 8) \
- T (_Float16, v16hf, 15)
+ T (int64_t, vnx2di, 0) \
+ T (int64_t, vnx2di, 1) \
+ T (int64_t, vnx2di, 2) \
+ T (int64_t, vnx2di, 3) \
+ T (int32_t, vnx4si, 0) \
+ T (int32_t, vnx4si, 1) \
+ T (int32_t, vnx4si, 3) \
+ T (int32_t, vnx4si, 4) \
+ T (int32_t, vnx4si, 7) \
+ T (int16_t, vnx8hi, 0) \
+ T (int16_t, vnx8hi, 1) \
+ T (int16_t, vnx8hi, 7) \
+ T (int16_t, vnx8hi, 8) \
+ T (int16_t, vnx8hi, 15) \
+ T (int8_t, vnx16qi, 0) \
+ T (int8_t, vnx16qi, 1) \
+ T (int8_t, vnx16qi, 15) \
+ T (int8_t, vnx16qi, 16) \
+ T (int8_t, vnx16qi, 31) \
+ T (double, vnx2df, 0) \
+ T (double, vnx2df, 1) \
+ T (double, vnx2df, 2) \
+ T (double, vnx2df, 3) \
+ T (float, vnx4sf, 0) \
+ T (float, vnx4sf, 1) \
+ T (float, vnx4sf, 3) \
+ T (float, vnx4sf, 4) \
+ T (float, vnx4sf, 7) \
+ T (_Float16, vnx8hf, 0) \
+ T (_Float16, vnx8hf, 1) \
+ T (_Float16, vnx8hf, 7) \
+ T (_Float16, vnx8hf, 8) \
+ T (_Float16, vnx8hf, 15)
TEST_ALL (EXTRACT)
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_extract_2.c b/gcc/testsuite/gcc.target/aarch64/sve_extract_2.c
index b163f28ef28..717546997b3 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_extract_2.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_extract_2.c
@@ -3,13 +3,13 @@
#include <stdint.h>
-typedef int64_t v8di __attribute__((vector_size (64)));
-typedef int32_t v16si __attribute__((vector_size (64)));
-typedef int16_t v32hi __attribute__((vector_size (64)));
-typedef int8_t v64qi __attribute__((vector_size (64)));
-typedef double v8df __attribute__((vector_size (64)));
-typedef float v16sf __attribute__((vector_size (64)));
-typedef _Float16 v32hf __attribute__((vector_size (64)));
+typedef int64_t vnx4di __attribute__((vector_size (64)));
+typedef int32_t vnx8si __attribute__((vector_size (64)));
+typedef int16_t vnx16hi __attribute__((vector_size (64)));
+typedef int8_t vnx32qi __attribute__((vector_size (64)));
+typedef double vnx4df __attribute__((vector_size (64)));
+typedef float vnx8sf __attribute__((vector_size (64)));
+typedef _Float16 vnx16hf __attribute__((vector_size (64)));
#define EXTRACT(ELT_TYPE, TYPE, INDEX) \
ELT_TYPE permute_##TYPE##_##INDEX (void) \
@@ -20,39 +20,39 @@ typedef _Float16 v32hf __attribute__((vector_size (64)));
}
#define TEST_ALL(T) \
- T (int64_t, v8di, 0) \
- T (int64_t, v8di, 1) \
- T (int64_t, v8di, 2) \
- T (int64_t, v8di, 7) \
- T (int32_t, v16si, 0) \
- T (int32_t, v16si, 1) \
- T (int32_t, v16si, 3) \
- T (int32_t, v16si, 4) \
- T (int32_t, v16si, 15) \
- T (int16_t, v32hi, 0) \
- T (int16_t, v32hi, 1) \
- T (int16_t, v32hi, 7) \
- T (int16_t, v32hi, 8) \
- T (int16_t, v32hi, 31) \
- T (int8_t, v64qi, 0) \
- T (int8_t, v64qi, 1) \
- T (int8_t, v64qi, 15) \
- T (int8_t, v64qi, 16) \
- T (int8_t, v64qi, 63) \
- T (double, v8df, 0) \
- T (double, v8df, 1) \
- T (double, v8df, 2) \
- T (double, v8df, 7) \
- T (float, v16sf, 0) \
- T (float, v16sf, 1) \
- T (float, v16sf, 3) \
- T (float, v16sf, 4) \
- T (float, v16sf, 15) \
- T (_Float16, v32hf, 0) \
- T (_Float16, v32hf, 1) \
- T (_Float16, v32hf, 7) \
- T (_Float16, v32hf, 8) \
- T (_Float16, v32hf, 31)
+ T (int64_t, vnx4di, 0) \
+ T (int64_t, vnx4di, 1) \
+ T (int64_t, vnx4di, 2) \
+ T (int64_t, vnx4di, 7) \
+ T (int32_t, vnx8si, 0) \
+ T (int32_t, vnx8si, 1) \
+ T (int32_t, vnx8si, 3) \
+ T (int32_t, vnx8si, 4) \
+ T (int32_t, vnx8si, 15) \
+ T (int16_t, vnx16hi, 0) \
+ T (int16_t, vnx16hi, 1) \
+ T (int16_t, vnx16hi, 7) \
+ T (int16_t, vnx16hi, 8) \
+ T (int16_t, vnx16hi, 31) \
+ T (int8_t, vnx32qi, 0) \
+ T (int8_t, vnx32qi, 1) \
+ T (int8_t, vnx32qi, 15) \
+ T (int8_t, vnx32qi, 16) \
+ T (int8_t, vnx32qi, 63) \
+ T (double, vnx4df, 0) \
+ T (double, vnx4df, 1) \
+ T (double, vnx4df, 2) \
+ T (double, vnx4df, 7) \
+ T (float, vnx8sf, 0) \
+ T (float, vnx8sf, 1) \
+ T (float, vnx8sf, 3) \
+ T (float, vnx8sf, 4) \
+ T (float, vnx8sf, 15) \
+ T (_Float16, vnx16hf, 0) \
+ T (_Float16, vnx16hf, 1) \
+ T (_Float16, vnx16hf, 7) \
+ T (_Float16, vnx16hf, 8) \
+ T (_Float16, vnx16hf, 31)
TEST_ALL (EXTRACT)
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_extract_3.c b/gcc/testsuite/gcc.target/aarch64/sve_extract_3.c
index 87ac2351768..19a22cdd7b7 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_extract_3.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_extract_3.c
@@ -3,13 +3,13 @@
#include <stdint.h>
-typedef int64_t v16di __attribute__((vector_size (128)));
-typedef int32_t v32si __attribute__((vector_size (128)));
-typedef int16_t v64hi __attribute__((vector_size (128)));
-typedef int8_t v128qi __attribute__((vector_size (128)));
-typedef double v16df __attribute__((vector_size (128)));
-typedef float v32sf __attribute__((vector_size (128)));
-typedef _Float16 v64hf __attribute__((vector_size (128)));
+typedef int64_t vnx8di __attribute__((vector_size (128)));
+typedef int32_t vnx16si __attribute__((vector_size (128)));
+typedef int16_t vnx32hi __attribute__((vector_size (128)));
+typedef int8_t vnx64qi __attribute__((vector_size (128)));
+typedef double vnx8df __attribute__((vector_size (128)));
+typedef float vnx16sf __attribute__((vector_size (128)));
+typedef _Float16 vnx32hf __attribute__((vector_size (128)));
#define EXTRACT(ELT_TYPE, TYPE, INDEX) \
ELT_TYPE permute_##TYPE##_##INDEX (void) \
@@ -20,60 +20,60 @@ typedef _Float16 v64hf __attribute__((vector_size (128)));
}
#define TEST_ALL(T) \
- T (int64_t, v16di, 0) \
- T (int64_t, v16di, 1) \
- T (int64_t, v16di, 2) \
- T (int64_t, v16di, 7) \
- T (int64_t, v16di, 8) \
- T (int64_t, v16di, 9) \
- T (int64_t, v16di, 15) \
- T (int32_t, v32si, 0) \
- T (int32_t, v32si, 1) \
- T (int32_t, v32si, 3) \
- T (int32_t, v32si, 4) \
- T (int32_t, v32si, 15) \
- T (int32_t, v32si, 16) \
- T (int32_t, v32si, 21) \
- T (int32_t, v32si, 31) \
- T (int16_t, v64hi, 0) \
- T (int16_t, v64hi, 1) \
- T (int16_t, v64hi, 7) \
- T (int16_t, v64hi, 8) \
- T (int16_t, v64hi, 31) \
- T (int16_t, v64hi, 32) \
- T (int16_t, v64hi, 47) \
- T (int16_t, v64hi, 63) \
- T (int8_t, v128qi, 0) \
- T (int8_t, v128qi, 1) \
- T (int8_t, v128qi, 15) \
- T (int8_t, v128qi, 16) \
- T (int8_t, v128qi, 63) \
- T (int8_t, v128qi, 64) \
- T (int8_t, v128qi, 100) \
- T (int8_t, v128qi, 127) \
- T (double, v16df, 0) \
- T (double, v16df, 1) \
- T (double, v16df, 2) \
- T (double, v16df, 7) \
- T (double, v16df, 8) \
- T (double, v16df, 9) \
- T (double, v16df, 15) \
- T (float, v32sf, 0) \
- T (float, v32sf, 1) \
- T (float, v32sf, 3) \
- T (float, v32sf, 4) \
- T (float, v32sf, 15) \
- T (float, v32sf, 16) \
- T (float, v32sf, 21) \
- T (float, v32sf, 31) \
- T (_Float16, v64hf, 0) \
- T (_Float16, v64hf, 1) \
- T (_Float16, v64hf, 7) \
- T (_Float16, v64hf, 8) \
- T (_Float16, v64hf, 31) \
- T (_Float16, v64hf, 32) \
- T (_Float16, v64hf, 47) \
- T (_Float16, v64hf, 63)
+ T (int64_t, vnx8di, 0) \
+ T (int64_t, vnx8di, 1) \
+ T (int64_t, vnx8di, 2) \
+ T (int64_t, vnx8di, 7) \
+ T (int64_t, vnx8di, 8) \
+ T (int64_t, vnx8di, 9) \
+ T (int64_t, vnx8di, 15) \
+ T (int32_t, vnx16si, 0) \
+ T (int32_t, vnx16si, 1) \
+ T (int32_t, vnx16si, 3) \
+ T (int32_t, vnx16si, 4) \
+ T (int32_t, vnx16si, 15) \
+ T (int32_t, vnx16si, 16) \
+ T (int32_t, vnx16si, 21) \
+ T (int32_t, vnx16si, 31) \
+ T (int16_t, vnx32hi, 0) \
+ T (int16_t, vnx32hi, 1) \
+ T (int16_t, vnx32hi, 7) \
+ T (int16_t, vnx32hi, 8) \
+ T (int16_t, vnx32hi, 31) \
+ T (int16_t, vnx32hi, 32) \
+ T (int16_t, vnx32hi, 47) \
+ T (int16_t, vnx32hi, 63) \
+ T (int8_t, vnx64qi, 0) \
+ T (int8_t, vnx64qi, 1) \
+ T (int8_t, vnx64qi, 15) \
+ T (int8_t, vnx64qi, 16) \
+ T (int8_t, vnx64qi, 63) \
+ T (int8_t, vnx64qi, 64) \
+ T (int8_t, vnx64qi, 100) \
+ T (int8_t, vnx64qi, 127) \
+ T (double, vnx8df, 0) \
+ T (double, vnx8df, 1) \
+ T (double, vnx8df, 2) \
+ T (double, vnx8df, 7) \
+ T (double, vnx8df, 8) \
+ T (double, vnx8df, 9) \
+ T (double, vnx8df, 15) \
+ T (float, vnx16sf, 0) \
+ T (float, vnx16sf, 1) \
+ T (float, vnx16sf, 3) \
+ T (float, vnx16sf, 4) \
+ T (float, vnx16sf, 15) \
+ T (float, vnx16sf, 16) \
+ T (float, vnx16sf, 21) \
+ T (float, vnx16sf, 31) \
+ T (_Float16, vnx32hf, 0) \
+ T (_Float16, vnx32hf, 1) \
+ T (_Float16, vnx32hf, 7) \
+ T (_Float16, vnx32hf, 8) \
+ T (_Float16, vnx32hf, 31) \
+ T (_Float16, vnx32hf, 32) \
+ T (_Float16, vnx32hf, 47) \
+ T (_Float16, vnx32hf, 63)
TEST_ALL (EXTRACT)
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_fdiv_1.c b/gcc/testsuite/gcc.target/aarch64/sve_fdiv_1.c
index b193726ea0a..5934b2dfb12 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_fdiv_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_fdiv_1.c
@@ -1,9 +1,9 @@
/* { dg-do assemble } */
/* { dg-options "-O3 -march=armv8-a+sve -msve-vector-bits=256 --save-temps" } */
-typedef _Float16 v16hf __attribute__((vector_size(32)));
-typedef float v8sf __attribute__((vector_size(32)));
-typedef double v4df __attribute__((vector_size(32)));
+typedef _Float16 vnx8hf __attribute__((vector_size(32)));
+typedef float vnx4sf __attribute__((vector_size(32)));
+typedef double vnx2df __attribute__((vector_size(32)));
#define DO_OP(TYPE) \
void vdiv_##TYPE (TYPE *x, TYPE y) \
@@ -29,9 +29,9 @@ void vdivr_##TYPE (TYPE *x, TYPE y) \
*x = dst; \
}
-DO_OP (v16hf)
-DO_OP (v8sf)
-DO_OP (v4df)
+DO_OP (vnx8hf)
+DO_OP (vnx4sf)
+DO_OP (vnx2df)
/* { dg-final { scan-assembler-times {\tfdiv\tz[0-9]+\.h, p[0-7]/m, z[0-9]+\.h, z[0-9]+\.h\n} 1 } } */
/* { dg-final { scan-assembler-times {\tfdivr\tz[0-9]+\.h, p[0-7]/m, z[0-9]+\.h, z[0-9]+\.h\n} 1 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_fmad_1.c b/gcc/testsuite/gcc.target/aarch64/sve_fmad_1.c
index 2b1dbb087bc..7b1575f9ee4 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_fmad_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_fmad_1.c
@@ -1,9 +1,9 @@
/* { dg-do assemble } */
/* { dg-options "-O3 -march=armv8-a+sve -msve-vector-bits=256 --save-temps" } */
-typedef _Float16 v16hf __attribute__((vector_size(32)));
-typedef float v8sf __attribute__((vector_size(32)));
-typedef double v4df __attribute__((vector_size(32)));
+typedef _Float16 vnx8hf __attribute__((vector_size(32)));
+typedef float vnx4sf __attribute__((vector_size(32)));
+typedef double vnx2df __attribute__((vector_size(32)));
#define DO_OP(TYPE) \
void vmad##TYPE (TYPE *x, TYPE y, TYPE z) \
@@ -20,9 +20,9 @@ void vmad##TYPE (TYPE *x, TYPE y, TYPE z) \
*x = dst; \
}
-DO_OP (v16hf)
-DO_OP (v8sf)
-DO_OP (v4df)
+DO_OP (vnx8hf)
+DO_OP (vnx4sf)
+DO_OP (vnx2df)
/* { dg-final { scan-assembler-times {\tfmad\tz0\.h, p[0-7]/m, z2\.h, z4\.h\n} 1 } } */
/* { dg-final { scan-assembler-times {\tfmad\tz0\.s, p[0-7]/m, z2\.s, z4\.s\n} 1 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_fmla_1.c b/gcc/testsuite/gcc.target/aarch64/sve_fmla_1.c
index d5e4df266bf..381af4c8517 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_fmla_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_fmla_1.c
@@ -1,9 +1,9 @@
/* { dg-do assemble } */
/* { dg-options "-O3 -march=armv8-a+sve -msve-vector-bits=256 --save-temps" } */
-typedef _Float16 v16hf __attribute__((vector_size(32)));
-typedef float v8sf __attribute__((vector_size(32)));
-typedef double v4df __attribute__((vector_size(32)));
+typedef _Float16 vnx8hf __attribute__((vector_size(32)));
+typedef float vnx4sf __attribute__((vector_size(32)));
+typedef double vnx2df __attribute__((vector_size(32)));
#define DO_OP(TYPE) \
void vmad##TYPE (TYPE *x, TYPE y, TYPE z) \
@@ -20,9 +20,9 @@ void vmad##TYPE (TYPE *x, TYPE y, TYPE z) \
*x = dst; \
}
-DO_OP (v16hf)
-DO_OP (v8sf)
-DO_OP (v4df)
+DO_OP (vnx8hf)
+DO_OP (vnx4sf)
+DO_OP (vnx2df)
/* { dg-final { scan-assembler-times {\tfmla\tz0\.h, p[0-7]/m, z2\.h, z4\.h\n} 1 } } */
/* { dg-final { scan-assembler-times {\tfmla\tz0\.s, p[0-7]/m, z2\.s, z4\.s\n} 1 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_fmls_1.c b/gcc/testsuite/gcc.target/aarch64/sve_fmls_1.c
index c3f2c8a5823..744d0bb7bcc 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_fmls_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_fmls_1.c
@@ -1,9 +1,9 @@
/* { dg-do assemble } */
/* { dg-options "-O3 -march=armv8-a+sve -msve-vector-bits=256 --save-temps" } */
-typedef _Float16 v16hf __attribute__((vector_size(32)));
-typedef float v8sf __attribute__((vector_size(32)));
-typedef double v4df __attribute__((vector_size(32)));
+typedef _Float16 vnx8hf __attribute__((vector_size(32)));
+typedef float vnx4sf __attribute__((vector_size(32)));
+typedef double vnx2df __attribute__((vector_size(32)));
#define DO_OP(TYPE) \
void vmad##TYPE (TYPE *x, TYPE y, TYPE z) \
@@ -20,9 +20,9 @@ void vmad##TYPE (TYPE *x, TYPE y, TYPE z) \
*x = dst; \
}
-DO_OP (v16hf)
-DO_OP (v8sf)
-DO_OP (v4df)
+DO_OP (vnx8hf)
+DO_OP (vnx4sf)
+DO_OP (vnx2df)
/* { dg-final { scan-assembler-times {\tfmls\tz0\.h, p[0-7]/m, z2\.h, z4\.h\n} 1 } } */
/* { dg-final { scan-assembler-times {\tfmls\tz0\.s, p[0-7]/m, z2\.s, z4\.s\n} 1 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_fmsb_1.c b/gcc/testsuite/gcc.target/aarch64/sve_fmsb_1.c
index 30e1895c8d5..e1251bd9cf6 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_fmsb_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_fmsb_1.c
@@ -1,9 +1,9 @@
/* { dg-do assemble } */
/* { dg-options "-O3 -march=armv8-a+sve -msve-vector-bits=256 --save-temps" } */
-typedef _Float16 v16hf __attribute__((vector_size(32)));
-typedef float v8sf __attribute__((vector_size(32)));
-typedef double v4df __attribute__((vector_size(32)));
+typedef _Float16 vnx8hf __attribute__((vector_size(32)));
+typedef float vnx4sf __attribute__((vector_size(32)));
+typedef double vnx2df __attribute__((vector_size(32)));
#define DO_OP(TYPE) \
void vmad##TYPE (TYPE *x, TYPE y, TYPE z) \
@@ -20,9 +20,9 @@ void vmad##TYPE (TYPE *x, TYPE y, TYPE z) \
*x = dst; \
}
-DO_OP (v16hf)
-DO_OP (v8sf)
-DO_OP (v4df)
+DO_OP (vnx8hf)
+DO_OP (vnx4sf)
+DO_OP (vnx2df)
/* { dg-final { scan-assembler-times {\tfmsb\tz0\.h, p[0-7]/m, z2\.h, z4\.h\n} 1 } } */
/* { dg-final { scan-assembler-times {\tfmsb\tz0\.s, p[0-7]/m, z2\.s, z4\.s\n} 1 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_fnmad_1.c b/gcc/testsuite/gcc.target/aarch64/sve_fnmad_1.c
index 84a95187314..238bd852117 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_fnmad_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_fnmad_1.c
@@ -1,9 +1,9 @@
/* { dg-do assemble } */
/* { dg-options "-O3 -march=armv8-a+sve -msve-vector-bits=256 --save-temps" } */
-typedef _Float16 v16hf __attribute__((vector_size(32)));
-typedef float v8sf __attribute__((vector_size(32)));
-typedef double v4df __attribute__((vector_size(32)));
+typedef _Float16 vnx8hf __attribute__((vector_size(32)));
+typedef float vnx4sf __attribute__((vector_size(32)));
+typedef double vnx2df __attribute__((vector_size(32)));
#define DO_OP(TYPE) \
void vmad##TYPE (TYPE *x, TYPE y, TYPE z) \
@@ -20,9 +20,9 @@ void vmad##TYPE (TYPE *x, TYPE y, TYPE z) \
*x = dst; \
}
-DO_OP (v16hf)
-DO_OP (v8sf)
-DO_OP (v4df)
+DO_OP (vnx8hf)
+DO_OP (vnx4sf)
+DO_OP (vnx2df)
/* { dg-final { scan-assembler-times {\tfnmad\tz0\.h, p[0-7]/m, z2\.h, z4\.h\n} 1 } } */
/* { dg-final { scan-assembler-times {\tfnmad\tz0\.s, p[0-7]/m, z2\.s, z4\.s\n} 1 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_fnmla_1.c b/gcc/testsuite/gcc.target/aarch64/sve_fnmla_1.c
index dcc4811f1d8..f258a7454da 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_fnmla_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_fnmla_1.c
@@ -1,9 +1,9 @@
/* { dg-do assemble } */
/* { dg-options "-O3 -march=armv8-a+sve -msve-vector-bits=256 --save-temps" } */
-typedef _Float16 v16hf __attribute__((vector_size(32)));
-typedef float v8sf __attribute__((vector_size(32)));
-typedef double v4df __attribute__((vector_size(32)));
+typedef _Float16 vnx8hf __attribute__((vector_size(32)));
+typedef float vnx4sf __attribute__((vector_size(32)));
+typedef double vnx2df __attribute__((vector_size(32)));
#define DO_OP(TYPE) \
void vmad##TYPE (TYPE *x, TYPE y, TYPE z) \
@@ -20,9 +20,9 @@ void vmad##TYPE (TYPE *x, TYPE y, TYPE z) \
*x = dst; \
}
-DO_OP (v16hf)
-DO_OP (v8sf)
-DO_OP (v4df)
+DO_OP (vnx8hf)
+DO_OP (vnx4sf)
+DO_OP (vnx2df)
/* { dg-final { scan-assembler-times {\tfnmla\tz0\.h, p[0-7]/m, z2\.h, z4\.h\n} 1 } } */
/* { dg-final { scan-assembler-times {\tfnmla\tz0\.s, p[0-7]/m, z2\.s, z4\.s\n} 1 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_fnmls_1.c b/gcc/testsuite/gcc.target/aarch64/sve_fnmls_1.c
index 7a89399f4be..4d859d4b0a1 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_fnmls_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_fnmls_1.c
@@ -1,9 +1,9 @@
/* { dg-do assemble } */
/* { dg-options "-O3 -march=armv8-a+sve -msve-vector-bits=256 --save-temps" } */
-typedef _Float16 v16hf __attribute__((vector_size(32)));
-typedef float v8sf __attribute__((vector_size(32)));
-typedef double v4df __attribute__((vector_size(32)));
+typedef _Float16 vnx8hf __attribute__((vector_size(32)));
+typedef float vnx4sf __attribute__((vector_size(32)));
+typedef double vnx2df __attribute__((vector_size(32)));
#define DO_OP(TYPE) \
void vmad##TYPE (TYPE *x, TYPE y, TYPE z) \
@@ -20,9 +20,9 @@ void vmad##TYPE (TYPE *x, TYPE y, TYPE z) \
*x = dst; \
}
-DO_OP (v16hf)
-DO_OP (v8sf)
-DO_OP (v4df)
+DO_OP (vnx8hf)
+DO_OP (vnx4sf)
+DO_OP (vnx2df)
/* { dg-final { scan-assembler-times {\tfnmls\tz0\.h, p[0-7]/m, z2\.h, z4\.h\n} 1 } } */
/* { dg-final { scan-assembler-times {\tfnmls\tz0\.s, p[0-7]/m, z2\.s, z4\.s\n} 1 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_fnmsb_1.c b/gcc/testsuite/gcc.target/aarch64/sve_fnmsb_1.c
index 6c95b0abc8e..2510a6f2831 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_fnmsb_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_fnmsb_1.c
@@ -1,9 +1,9 @@
/* { dg-do assemble } */
/* { dg-options " -O3 -march=armv8-a+sve -msve-vector-bits=256 --save-temps" } */
-typedef _Float16 v16hf __attribute__((vector_size(32)));
-typedef float v8sf __attribute__((vector_size(32)));
-typedef double v4df __attribute__((vector_size(32)));
+typedef _Float16 vnx8hf __attribute__((vector_size(32)));
+typedef float vnx4sf __attribute__((vector_size(32)));
+typedef double vnx2df __attribute__((vector_size(32)));
#define DO_OP(TYPE) \
void vmad##TYPE (TYPE *x, TYPE y, TYPE z) \
@@ -20,9 +20,9 @@ void vmad##TYPE (TYPE *x, TYPE y, TYPE z) \
*x = dst; \
}
-DO_OP (v16hf)
-DO_OP (v8sf)
-DO_OP (v4df)
+DO_OP (vnx8hf)
+DO_OP (vnx4sf)
+DO_OP (vnx2df)
/* { dg-final { scan-assembler-times {\tfnmsb\tz0\.h, p[0-7]/m, z2\.h, z4\.h\n} 1 } } */
/* { dg-final { scan-assembler-times {\tfnmsb\tz0\.s, p[0-7]/m, z2\.s, z4\.s\n} 1 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_gather_load_1.c b/gcc/testsuite/gcc.target/aarch64/sve_gather_load_1.c
index 096a969d756..6ed5c06bd51 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_gather_load_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_gather_load_1.c
@@ -1,72 +1,32 @@
/* { dg-do assemble } */
-/* { dg-options "-O3 -march=armv8-a+sve --save-temps" } */
-
-void gather_load64(unsigned long * restrict dst, unsigned long * restrict src, unsigned long * restrict indices, int count)
-{
- for (int i=0; i<count; i++)
- dst[i] = src[indices[i]];
-}
-
-void gather_load32(unsigned int * restrict dst, unsigned int * restrict src, unsigned int * restrict indices, int count)
-{
- for (int i=0; i<count; i++)
- dst[i] = src[indices[i]];
-}
-
-void gather_load16(unsigned short * restrict dst, unsigned short * restrict src, unsigned short * restrict indices, int count)
-{
- for (int i=0; i<count; i++)
- dst[i] = src[indices[i]];
-}
-
-void gather_load8(unsigned char * restrict dst, unsigned char * restrict src, unsigned char * restrict indices, int count)
-{
- for (int i=0; i<count; i++)
- dst[i] = src[indices[i]];
-}
-
-void gather_load64s(signed long * restrict dst, signed long * restrict src, unsigned long * restrict indices, int count)
-{
- for (int i=0; i<count; i++)
- dst[i] = src[indices[i]];
-}
-
-void gather_load32s(signed int * restrict dst, signed int * restrict src, unsigned int * restrict indices, int count)
-{
- for (int i=0; i<count; i++)
- dst[i] = src[indices[i]];
-}
-
-void gather_load16s(signed short * restrict dst, signed short * restrict src, unsigned short * restrict indices, int count)
-{
- for (int i=0; i<count; i++)
- dst[i] = src[indices[i]];
-}
-
-void gather_load8s(signed char * restrict dst, signed char * restrict src, unsigned char * restrict indices, int count)
-{
- for (int i=0; i<count; i++)
- dst[i] = src[indices[i]];
-}
-
-void gather_load_double(double * restrict dst, double * restrict src, unsigned long * restrict indices, int count)
-{
- for (int i=0; i<count; i++)
- dst[i] = src[indices[i]];
-}
-
-void gather_load_float(float * restrict dst, float * restrict src, unsigned int * restrict indices, int count)
-{
- for (int i=0; i<count; i++)
- dst[i] = src[indices[i]];
-}
-
-/* { dg-final { scan-assembler-times "ld1d\\tz\[0-9\]+.d, p\[0-9\]+/z, \\\[x\[0-9\]+, z\[0-9\]+.d, lsl 3\\\]" 3 } } */
-/* { dg-final { scan-assembler-not "ld1d\\tz\[0-9\]+.s, p\[0-9\]+/z, \\\[x\[0-9\]+, z\[0-9\]+.s, sxtw 3\\\]" } } */
-/* { dg-final { scan-assembler-not "ld1w\\tz\[0-9\]+.d, p\[0-9\]+/z, \\\[x\[0-9\]+, z\[0-9\]+.d, sxtw 2\\\]" } } */
-/* { dg-final { scan-assembler-times "ld1w\\tz\[0-9\]+.s, p\[0-9\]+/z, \\\[x\[0-9\]+, z\[0-9\]+.s, uxtw 2\\\]" 3 } } */
-/* { dg-final { scan-assembler-not "ld1w\\tz\[0-9\]+.s, p\[0-9\]+/z, \\\[x\[0-9\]+, z\[0-9\]+.s, sxtw 2\\\]" } } */
-/* { dg-final { scan-assembler-not "ld1h\\tz\[0-9\]+.d, p\[0-9\]+/z, \\\[x\[0-9\]+, z\[0-9\]+.d, sxtw 1\\\]" } } */
-/* { dg-final { scan-assembler-not "ld1h\\tz\[0-9\]+.s, p\[0-9\]+/z, \\\[x\[0-9\]+, z\[0-9\]+.s, sxtw 1\\\]" } } */
-/* { dg-final { scan-assembler-not "ld1b\\tz\[0-9\]+.d, p\[0-9\]+/z, \\\[x\[0-9\]+, z\[0-9\]+.d, sxtw\\\ ]" } } */
-/* { dg-final { scan-assembler-not "ld1b\\tz\[0-9\]+.s, p\[0-9\]+/z, \\\[x\[0-9\]+, z\[0-9\]+.s, sxtw\\\ ]" } } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
+
+#include <stdint.h>
+
+#ifndef INDEX32
+#define INDEX32 int32_t
+#define INDEX64 int64_t
+#endif
+
+/* Invoked 18 times for each data size. */
+#define TEST_LOOP(DATA_TYPE, BITS) \
+ void __attribute__ ((noinline, noclone)) \
+ f_##DATA_TYPE (DATA_TYPE *restrict dest, DATA_TYPE *restrict src, \
+ INDEX##BITS *indices, int n) \
+ { \
+ for (int i = 9; i < n; ++i) \
+ dest[i] += src[indices[i]]; \
+ }
+
+#define TEST_ALL(T) \
+ T (int32_t, 32) \
+ T (uint32_t, 32) \
+ T (float, 32) \
+ T (int64_t, 64) \
+ T (uint64_t, 64) \
+ T (double, 64)
+
+TEST_ALL (TEST_LOOP)
+
+/* { dg-final { scan-assembler-times {\tld1w\tz[0-9]+\.s, p[0-7]/z, \[x[0-9]+, z[0-9]+.s, sxtw 2\]\n} 3 } } */
+/* { dg-final { scan-assembler-times {\tld1d\tz[0-9]+\.d, p[0-7]/z, \[x[0-9]+, z[0-9]+.d, lsl 3\]\n} 3 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_gather_load_10.c b/gcc/testsuite/gcc.target/aarch64/sve_gather_load_10.c
deleted file mode 100644
index b31b4508114..00000000000
--- a/gcc/testsuite/gcc.target/aarch64/sve_gather_load_10.c
+++ /dev/null
@@ -1,72 +0,0 @@
-/* { dg-do assemble } */
-/* { dg-options "-O3 -march=armv8-a+sve --save-temps" } */
-
-void gather_load64(unsigned long * restrict dst, unsigned long * restrict src, signed long * restrict indices, int count)
-{
- for (int i=0; i<count; i++)
- dst[i] = src[indices[i]];
-}
-
-void gather_load32(unsigned int * restrict dst, unsigned int * restrict src, signed int * restrict indices, int count)
-{
- for (int i=0; i<count; i++)
- dst[i] = src[indices[i]];
-}
-
-void gather_load16(unsigned short * restrict dst, unsigned short * restrict src, signed short * restrict indices, int count)
-{
- for (int i=0; i<count; i++)
- dst[i] = src[indices[i]];
-}
-
-void gather_load8(unsigned char * restrict dst, unsigned char * restrict src, signed char * restrict indices, int count)
-{
- for (int i=0; i<count; i++)
- dst[i] = src[indices[i]];
-}
-
-void gather_load64s(signed long * restrict dst, signed long * restrict src, signed long * restrict indices, int count)
-{
- for (int i=0; i<count; i++)
- dst[i] = src[indices[i]];
-}
-
-void gather_load32s(signed int * restrict dst, signed int * restrict src, signed int * restrict indices, int count)
-{
- for (int i=0; i<count; i++)
- dst[i] = src[indices[i]];
-}
-
-void gather_load16s(signed short * restrict dst, signed short * restrict src, signed short * restrict indices, int count)
-{
- for (int i=0; i<count; i++)
- dst[i] = src[indices[i]];
-}
-
-void gather_load8s(signed char * restrict dst, signed char * restrict src, signed char * restrict indices, int count)
-{
- for (int i=0; i<count; i++)
- dst[i] = src[indices[i]];
-}
-
-void gather_load_double(double * restrict dst, double * restrict src, signed long * restrict indices, int count)
-{
- for (int i=0; i<count; i++)
- dst[i] = src[indices[i]];
-}
-
-void gather_load_float(float * restrict dst, float * restrict src, signed int * restrict indices, int count)
-{
- for (int i=0; i<count; i++)
- dst[i] = src[indices[i]];
-}
-
-/* { dg-final { scan-assembler-times "ld1d\\tz\[0-9\]+.d, p\[0-9\]+/z, \\\[x\[0-9\]+, z\[0-9\]+.d, lsl 3\\\]" 3 } } */
-/* { dg-final { scan-assembler-not "ld1d\\tz\[0-9\]+.s, p\[0-9\]+/z, \\\[x\[0-9\]+, z\[0-9\]+.s, sxtw 3\\\]" } } */
-/* { dg-final { scan-assembler-not "ld1w\\tz\[0-9\]+.d, p\[0-9\]+/z, \\\[x\[0-9\]+, z\[0-9\]+.d, sxtw 2\\\]" } } */
-/* { dg-final { scan-assembler-not "ld1w\\tz\[0-9\]+.s, p\[0-9\]+/z, \\\[x\[0-9\]+, z\[0-9\]+.s, uxtw 2\\\]" } } */
-/* { dg-final { scan-assembler-times "ld1w\\tz\[0-9\]+.s, p\[0-9\]+/z, \\\[x\[0-9\]+, z\[0-9\]+.s, sxtw 2\\\]" 3 } } */
-/* { dg-final { scan-assembler-not "ld1h\\tz\[0-9\]+.d, p\[0-9\]+/z, \\\[x\[0-9\]+, z\[0-9\]+.d, sxtw 1\\\]" } } */
-/* { dg-final { scan-assembler-not "ld1h\\tz\[0-9\]+.s, p\[0-9\]+/z, \\\[x\[0-9\]+, z\[0-9\]+.s, sxtw 1\\\]" } } */
-/* { dg-final { scan-assembler-not "ld1b\\tz\[0-9\]+.d, p\[0-9\]+/z, \\\[x\[0-9\]+, z\[0-9\]+.d, sxtw\\\ ]" } } */
-/* { dg-final { scan-assembler-not "ld1b\\tz\[0-9\]+.s, p\[0-9\]+/z, \\\[x\[0-9\]+, z\[0-9\]+.s, sxtw\\\ ]" } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_gather_load_11.c b/gcc/testsuite/gcc.target/aarch64/sve_gather_load_11.c
deleted file mode 100644
index d8a85396eb4..00000000000
--- a/gcc/testsuite/gcc.target/aarch64/sve_gather_load_11.c
+++ /dev/null
@@ -1,14 +0,0 @@
-/* { dg-do assemble } */
-/* { dg-options "-O3 -march=armv8-a+sve --save-temps" } */
-
-void
-f (double *restrict a, double *restrict b, short *c, int *d, int n)
-{
- for (int i = 0; i < n; i++)
- a[i] = b[c[i] + d[i]];
-}
-
-/* { dg-final { scan-assembler-times {\tld1h\tz[0-9]+.h,} 1 } } */
-/* { dg-final { scan-assembler-times {\tld1w\tz[0-9]+.s,} 2 } } */
-/* { dg-final { scan-assembler-times {\tld1d\tz[0-9]+.d,} 4 } } */
-/* { dg-final { scan-assembler-times {\tst1d\tz[0-9]+.d,} 4 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_gather_load_2.c b/gcc/testsuite/gcc.target/aarch64/sve_gather_load_2.c
index 9b62b12904e..4e348db3bf1 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_gather_load_2.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_gather_load_2.c
@@ -1,72 +1,10 @@
/* { dg-do assemble } */
/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
-void gather_loadu64_s16(unsigned long * restrict dst, unsigned long * restrict src,
- short int * restrict indices, short n)
-{
- for (short i=0; i<n; i++)
- dst[i] = src[indices[i]];
-}
+#define INDEX32 uint32_t
+#define INDEX64 uint64_t
-void gather_loadu64_u16(unsigned long * restrict dst, unsigned long * restrict src,
- unsigned short int * restrict indices, short n)
-{
- for (short i=0; i<n; i++)
- dst[i] = src[indices[i]];
-}
+#include "sve_gather_load_1.c"
-void gather_loadd_s16(double * restrict dst, double * restrict src,
- short * restrict indices, short n)
-{
- for (short i=0; i<n; i++)
- dst[i] = src[indices[i]];
-}
-
-void gather_loadd_u16(double * restrict dst, double * restrict src,
- unsigned short * restrict indices, short n)
-{
- for (short i=0; i<n; i++)
- dst[i] = src[indices[i]];
-}
-
-void gather_loadu64_s32(unsigned long * restrict dst, unsigned long * restrict src,
- int * restrict indices, int n)
-{
- for (int i=0; i<n; i++)
- dst[i] = src[indices[i]];
-}
-
-void gather_loadu64_u32(unsigned long * restrict dst, unsigned long * restrict src,
- unsigned int * restrict indices, int n)
-{
- for (int i=0; i<n; i++)
- dst[i] = src[indices[i]];
-}
-
-void gather_loadd_s32(double * restrict dst, double * restrict src,
- int * restrict indices, int n)
-{
- for (int i=0; i<n; i++)
- dst[i] = src[indices[i]];
-}
-
-void gather_loadd_u32(double * restrict dst, double * restrict src,
- unsigned int * restrict indices, int n)
-{
- for (int i=0; i<n; i++)
- dst[i] = src[indices[i]];
-}
-
-/* At present we only use unpacks for the 32/64 combinations. */
-/* { dg-final { scan-assembler-times {\tpunpklo\tp[0-9]+\.h, p[0-9]+\.b} 4 } } */
-/* { dg-final { scan-assembler-times {\tpunpkhi\tp[0-9]+\.h, p[0-9]+\.b} 4 } } */
-
-/* { dg-final { scan-assembler-times {\tsunpklo\tz[0-9]+\.s, z[0-9]+\.h} 2 } } */
-/* { dg-final { scan-assembler-times {\tsunpkhi\tz[0-9]+\.s, z[0-9]+\.h} 2 } } */
-/* { dg-final { scan-assembler-times {\tsunpklo\tz[0-9]+\.d, z[0-9]+\.s} 6 } } */
-/* { dg-final { scan-assembler-times {\tsunpkhi\tz[0-9]+\.d, z[0-9]+\.s} 6 } } */
-/* { dg-final { scan-assembler-times {\tuunpklo\tz[0-9]+\.s, z[0-9]+\.h} 2 } } */
-/* { dg-final { scan-assembler-times {\tuunpkhi\tz[0-9]+\.s, z[0-9]+\.h} 2 } } */
-/* { dg-final { scan-assembler-times {\tuunpklo\tz[0-9]+\.d, z[0-9]+\.s} 6 } } */
-/* { dg-final { scan-assembler-times {\tuunpkhi\tz[0-9]+\.d, z[0-9]+\.s} 6 } } */
-/* { dg-final { scan-assembler-times {\tld1d\tz[0-9]+.d, p[0-7]/z, \[x[0-9]+, z[0-9]+.d, lsl 3\]} 24 } } */
+/* { dg-final { scan-assembler-times {\tld1w\tz[0-9]+\.s, p[0-7]/z, \[x[0-9]+, z[0-9]+.s, uxtw 2\]\n} 3 } } */
+/* { dg-final { scan-assembler-times {\tld1d\tz[0-9]+\.d, p[0-7]/z, \[x[0-9]+, z[0-9]+.d, lsl 3\]\n} 3 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_gather_load_3.c b/gcc/testsuite/gcc.target/aarch64/sve_gather_load_3.c
index 0a8f802ce56..a113a0faeb9 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_gather_load_3.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_gather_load_3.c
@@ -1,45 +1,32 @@
/* { dg-do assemble } */
-/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve -ffast-math --save-temps" } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
-#define TEST_LOOP(NAME, DATA_TYPE, INDEX_TYPE) \
- DATA_TYPE __attribute__ ((noinline)) \
- NAME (char *data, INDEX_TYPE *indices, int n) \
- { \
- DATA_TYPE sum = 0; \
- for (int i = 0; i < n; ++i) \
- sum += *(DATA_TYPE *) (data + indices[i]); \
- return sum; \
- }
+#include <stdint.h>
-#define TEST32(NAME, DATA_TYPE) \
- TEST_LOOP (NAME ## _u8, DATA_TYPE, unsigned char) \
- TEST_LOOP (NAME ## _u16, DATA_TYPE, unsigned short) \
- TEST_LOOP (NAME ## _u32, DATA_TYPE, unsigned int) \
- TEST_LOOP (NAME ## _s8, DATA_TYPE, signed char) \
- TEST_LOOP (NAME ## _s16, DATA_TYPE, signed short) \
- TEST_LOOP (NAME ## _s32, DATA_TYPE, signed int)
+#ifndef INDEX32
+#define INDEX32 int32_t
+#define INDEX64 int64_t
+#endif
-#define TEST64(NAME, DATA_TYPE) \
- TEST_LOOP (NAME ## _s8, DATA_TYPE, signed char) \
- TEST_LOOP (NAME ## _u8, DATA_TYPE, unsigned char) \
- TEST_LOOP (NAME ## _s16, DATA_TYPE, short) \
- TEST_LOOP (NAME ## _u16, DATA_TYPE, unsigned short) \
- TEST_LOOP (NAME ## _s32, DATA_TYPE, int) \
- TEST_LOOP (NAME ## _u32, DATA_TYPE, unsigned int) \
- TEST_LOOP (NAME ## _s64, DATA_TYPE, long) \
- TEST_LOOP (NAME ## _u64, DATA_TYPE, unsigned long)
+/* Invoked 18 times for each data size. */
+#define TEST_LOOP(DATA_TYPE, BITS) \
+ void __attribute__ ((noinline, noclone)) \
+ f_##DATA_TYPE (DATA_TYPE *restrict dest, DATA_TYPE *restrict src, \
+ INDEX##BITS *indices, int n) \
+ { \
+ for (int i = 9; i < n; ++i) \
+ dest[i] += *(DATA_TYPE *) ((char *) src + indices[i]); \
+ }
-TEST32 (f_s32, int)
-TEST32 (f_u32, unsigned int)
-TEST32 (f_f32, float)
+#define TEST_ALL(T) \
+ T (int32_t, 32) \
+ T (uint32_t, 32) \
+ T (float, 32) \
+ T (int64_t, 64) \
+ T (uint64_t, 64) \
+ T (double, 64)
-TEST64 (f_s64, long)
-TEST64 (f_u64, unsigned long)
-TEST64 (f_f64, double)
+TEST_ALL (TEST_LOOP)
-/* (4 + 2 + 1) * 3 */
-/* { dg-final { scan-assembler-times {\tld1w\tz[0-9]+\.s, p[0-7]/z, \[x[0-9]+, z[0-9]+\.s, sxtw\]} 21 } } */
-/* (4 + 2 + 1) * 3 */
-/* { dg-final { scan-assembler-times {\tld1w\tz[0-9]+\.s, p[0-7]/z, \[x[0-9]+, z[0-9]+\.s, uxtw\]} 21 } } */
-/* (8 + 8 + 4 + 4 + 2 + 2 + 1 + 1) * 3 */
-/* { dg-final { scan-assembler-times {\tld1d\tz[0-9]+\.d, p[0-7]/z, \[x[0-9]+, z[0-9]+\.d\]} 90 } } */
+/* { dg-final { scan-assembler-times {\tld1w\tz[0-9]+\.s, p[0-7]/z, \[x[0-9]+, z[0-9]+.s, sxtw\]\n} 3 } } */
+/* { dg-final { scan-assembler-times {\tld1d\tz[0-9]+\.d, p[0-7]/z, \[x[0-9]+, z[0-9]+.d\]\n} 3 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_gather_load_3_run.c b/gcc/testsuite/gcc.target/aarch64/sve_gather_load_3_run.c
deleted file mode 100644
index baa90d5d5fc..00000000000
--- a/gcc/testsuite/gcc.target/aarch64/sve_gather_load_3_run.c
+++ /dev/null
@@ -1,41 +0,0 @@
-/* { dg-do run { target aarch64_sve_hw } } */
-/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve -ffast-math" } */
-
-#include "sve_gather_load_3.c"
-
-extern void abort (void);
-
-#define N 57
-
-#undef TEST_LOOP
-#define TEST_LOOP(NAME, DATA_TYPE, INDEX_TYPE) \
- { \
- INDEX_TYPE indices[N]; \
- DATA_TYPE data[N * 2]; \
- for (int i = 0; i < N * 2; ++i) \
- data[i] = (i / 2) * 4 + i % 2; \
- DATA_TYPE sum = 0; \
- for (int i = 0; i < N; ++i) \
- { \
- INDEX_TYPE j = (i * 3 / 2) * sizeof (DATA_TYPE); \
- j &= (1ULL << (sizeof (INDEX_TYPE) * 8 - 1)) - 1; \
- sum += data[j / sizeof (DATA_TYPE)]; \
- indices[i] = j; \
- } \
- DATA_TYPE res = NAME ((char *) data, indices, N); \
- if (res != sum) \
- abort (); \
- }
-
-int __attribute__ ((optimize (1)))
-main ()
-{
- TEST32 (f_s32, int)
- TEST32 (f_u32, unsigned int)
- TEST32 (f_f32, float)
-
- TEST64 (f_s64, long)
- TEST64 (f_u64, unsigned long)
- TEST64 (f_f64, double)
- return 0;
-}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_gather_load_4.c b/gcc/testsuite/gcc.target/aarch64/sve_gather_load_4.c
index 4d0da987d30..5382e523689 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_gather_load_4.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_gather_load_4.c
@@ -1,18 +1,10 @@
/* { dg-do assemble } */
-/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve -ffast-math --save-temps" } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
-#define TEST_LOOP(NAME, TYPE) \
- TYPE __attribute__ ((noinline)) \
- NAME (TYPE **indices, int n) \
- { \
- TYPE sum = 0; \
- for (int i = 0; i < n; ++i) \
- sum += *indices[i]; \
- return sum; \
- }
+#define INDEX32 uint32_t
+#define INDEX64 uint64_t
-TEST_LOOP (f_s64, long)
-TEST_LOOP (f_u64, unsigned long)
-TEST_LOOP (f_f64, double)
+#include "sve_gather_load_3.c"
-/* { dg-final { scan-assembler-times {\tld1d\tz[0-9]+\.d, p[0-7]/z, \[z[0-9]+\.d\]} 3 } } */
+/* { dg-final { scan-assembler-times {\tld1w\tz[0-9]+\.s, p[0-7]/z, \[x[0-9]+, z[0-9]+.s, uxtw\]\n} 3 } } */
+/* { dg-final { scan-assembler-times {\tld1d\tz[0-9]+\.d, p[0-7]/z, \[x[0-9]+, z[0-9]+.d\]\n} 3 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_gather_load_4_run.c b/gcc/testsuite/gcc.target/aarch64/sve_gather_load_4_run.c
deleted file mode 100644
index 00d3dea6acd..00000000000
--- a/gcc/testsuite/gcc.target/aarch64/sve_gather_load_4_run.c
+++ /dev/null
@@ -1,35 +0,0 @@
-/* { dg-do run { target aarch64_sve_hw } } */
-/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve -ffast-math" } */
-
-#include "sve_gather_load_4.c"
-
-extern void abort (void);
-
-#define N 57
-
-#undef TEST_LOOP
-#define TEST_LOOP(NAME, TYPE) \
- { \
- TYPE *ptrs[N]; \
- TYPE data[N * 2]; \
- for (int i = 0; i < N * 2; ++i) \
- data[i] = (i / 2) * 4 + i % 2; \
- TYPE sum = 0; \
- for (int i = 0; i < N; ++i) \
- { \
- ptrs[i] = &data[i * 3 / 2]; \
- sum += *ptrs[i]; \
- } \
- TYPE res = NAME (ptrs, N); \
- if (res != sum) \
- abort (); \
- }
-
-int __attribute__ ((optimize (1)))
-main ()
-{
- TEST_LOOP (f_s64, long)
- TEST_LOOP (f_u64, unsigned long)
- TEST_LOOP (f_f64, double)
- return 0;
-}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_gather_load_5.c b/gcc/testsuite/gcc.target/aarch64/sve_gather_load_5.c
index 0aaf9553a11..8e4f689243b 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_gather_load_5.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_gather_load_5.c
@@ -1,113 +1,23 @@
/* { dg-do assemble } */
/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
-#define GATHER_LOAD1(OBJTYPE,STRIDETYPE,STRIDE)\
-void gather_load1##OBJTYPE##STRIDETYPE##STRIDE (OBJTYPE * restrict dst,\
- OBJTYPE * restrict src,\
- STRIDETYPE count)\
-{\
- for (STRIDETYPE i=0; i<count; i++)\
- dst[i] = src[i * STRIDE];\
-}
-
-#define GATHER_LOAD2(OBJTYPE,STRIDETYPE)\
-void gather_load2##OBJTYPE##STRIDETYPE (OBJTYPE * restrict dst,\
- OBJTYPE * restrict src,\
- STRIDETYPE stride,\
- STRIDETYPE count)\
-{\
- for (STRIDETYPE i=0; i<count; i++)\
- dst[i] = src[i * stride];\
-}
-
-#define GATHER_LOAD3(OBJTYPE,STRIDETYPE)\
-void gather_load3s5##OBJTYPE##STRIDETYPE\
- (OBJTYPE * restrict d1, OBJTYPE * restrict d2, OBJTYPE * restrict d3,\
- OBJTYPE * restrict d4, OBJTYPE * restrict d5, OBJTYPE * restrict src,\
- STRIDETYPE count)\
-{\
- const STRIDETYPE STRIDE = 5;\
- for (STRIDETYPE i=0; i<count; i++)\
- {\
- d1[i] = src[0 + (i * STRIDE)];\
- d2[i] = src[1 + (i * STRIDE)];\
- d3[i] = src[2 + (i * STRIDE)];\
- d4[i] = src[3 + (i * STRIDE)];\
- d5[i] = src[4 + (i * STRIDE)];\
- }\
-}
-
-#define GATHER_LOAD4(OBJTYPE,STRIDETYPE,STRIDE)\
-void gather_load4##OBJTYPE##STRIDETYPE##STRIDE (OBJTYPE * restrict dst,\
- OBJTYPE * restrict src,\
- STRIDETYPE count)\
-{\
- for (STRIDETYPE i=0; i<count; i++)\
- {\
- *dst = *src;\
- dst += 1;\
- src += STRIDE;\
- }\
-}
-
-#define GATHER_LOAD5(OBJTYPE,STRIDETYPE)\
-void gather_load5##OBJTYPE##STRIDETYPE (OBJTYPE * restrict dst,\
- OBJTYPE * restrict src,\
- STRIDETYPE stride,\
- STRIDETYPE count)\
-{\
- for (STRIDETYPE i=0; i<count; i++)\
- {\
- *dst = *src;\
- dst += 1;\
- src += stride;\
- }\
-}
-
-GATHER_LOAD1 (double, long, 5)
-GATHER_LOAD1 (double, long, 8)
-GATHER_LOAD1 (double, long, 21)
-GATHER_LOAD1 (double, long, 1009)
-
-GATHER_LOAD1 (float, int, 5)
-GATHER_LOAD1 (float, int, 8)
-GATHER_LOAD1 (float, int, 21)
-GATHER_LOAD1 (float, int, 1009)
-
-GATHER_LOAD2 (double, long)
-GATHER_LOAD2 (float, int)
-
-GATHER_LOAD3 (double, long)
-GATHER_LOAD3 (float, int)
-
-GATHER_LOAD4 (double, long, 5)
-
-/* NOTE: We can't vectorize GATHER_LOAD4 (float, int, 5) because we can't prove
- that the offsets used for the gather load won't overflow. */
-
-GATHER_LOAD5 (double, long)
-GATHER_LOAD5 (float, int)
-
-/* Widened forms. */
-GATHER_LOAD1 (double, int, 5)
-GATHER_LOAD1 (double, int, 8)
-GATHER_LOAD1 (double, short, 5)
-GATHER_LOAD1 (double, short, 8)
-
-GATHER_LOAD1 (float, short, 5)
-GATHER_LOAD1 (float, short, 8)
-
-GATHER_LOAD2 (double, int)
-GATHER_LOAD2 (float, short)
-
-GATHER_LOAD4 (double, int, 5)
-GATHER_LOAD4 (float, short, 5)
-
-GATHER_LOAD5 (double, int)
-
-/* TODO: We generate abysmal code for this even though we don't use gathers. */
-/*GATHER_LOAD5 (float, short)*/
-
-/* { dg-final { scan-assembler-times "ld1d\\tz\[0-9\]+.d, p\[0-9\]+/z, \\\[x\[0-9\]+, z\[0-9\]+.d\\\]" 19 } } */
-/* { dg-final { scan-assembler-times "ld1w\\tz\[0-9\]+.s, p\[0-9\]+/z, \\\[x\[0-9\]+, z\[0-9\]+.s, sxtw 2\\\]" 12 } } */
-/* { dg-final { scan-assembler-times "ld1w\\tz\[0-9\]+.s, p\[0-9\]+/z, \\\[x\[0-9\]+, z\[0-9\]+.s, sxtw\\\]" 3 } } */
+#include <stdint.h>
+
+/* Invoked 18 times for each data size. */
+#define TEST_LOOP(DATA_TYPE) \
+ void __attribute__ ((noinline, noclone)) \
+ f_##DATA_TYPE (DATA_TYPE *restrict dest, DATA_TYPE *restrict *src, \
+ int n) \
+ { \
+ for (int i = 9; i < n; ++i) \
+ dest[i] += *src[i]; \
+ }
+
+#define TEST_ALL(T) \
+ T (int64_t) \
+ T (uint64_t) \
+ T (double)
+
+TEST_ALL (TEST_LOOP)
+
+/* { dg-final { scan-assembler-times {\tld1d\tz[0-9]+\.d, p[0-7]/z, \[z[0-9]+.d\]\n} 3 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_gather_load_5_run.c b/gcc/testsuite/gcc.target/aarch64/sve_gather_load_5_run.c
deleted file mode 100644
index 7608f9b569b..00000000000
--- a/gcc/testsuite/gcc.target/aarch64/sve_gather_load_5_run.c
+++ /dev/null
@@ -1,161 +0,0 @@
-/* { dg-do run { target { aarch64_sve_hw } } } */
-/* { dg-options "-O2 -ftree-vectorize -fno-inline -march=armv8-a+sve" } */
-
-#include <unistd.h>
-
-extern void abort (void);
-extern void *memset(void *, int, size_t);
-
-#include "sve_gather_load_5.c"
-
-#define NUM_DST_ELEMS 13
-#define NUM_SRC_ELEMS(STRIDE) (NUM_DST_ELEMS * STRIDE)
-
-#define TEST_GATHER_LOAD_COMMON1(FUN,OBJTYPE,STRIDETYPE,STRIDE)\
-{\
- OBJTYPE real_src[1 + NUM_SRC_ELEMS (STRIDE)]\
- __attribute__((aligned (32)));\
- OBJTYPE real_dst[1 + NUM_DST_ELEMS]\
- __attribute__((aligned (32)));\
- memset (real_src, 0, (1 + NUM_SRC_ELEMS (STRIDE)) * sizeof (OBJTYPE));\
- memset (real_dst, 0, (1 + NUM_DST_ELEMS) * sizeof (OBJTYPE));\
- OBJTYPE *src = &real_src[1];\
- OBJTYPE *dst = &real_dst[1];\
- for (STRIDETYPE i = 0; i < NUM_DST_ELEMS; i++)\
- src[i * STRIDE] = i;\
- FUN##OBJTYPE##STRIDETYPE##STRIDE \
- (dst, src, NUM_DST_ELEMS); \
- for (STRIDETYPE i = 0; i < NUM_DST_ELEMS; i++)\
- if (dst[i] != i)\
- abort ();\
-}
-
-#define TEST_GATHER_LOAD_COMMON2(FUN,OBJTYPE,STRIDETYPE,STRIDE)\
-{\
- OBJTYPE real_src[1 + NUM_SRC_ELEMS (STRIDE)]\
- __attribute__((aligned (32)));\
- OBJTYPE real_dst[1 + NUM_DST_ELEMS]\
- __attribute__((aligned (32)));\
- memset (real_src, 0, (1 + NUM_SRC_ELEMS (STRIDE)) * sizeof (OBJTYPE));\
- memset (real_dst, 0, (1 + NUM_DST_ELEMS) * sizeof (OBJTYPE));\
- OBJTYPE *src = &real_src[1];\
- OBJTYPE *dst = &real_dst[1];\
- for (STRIDETYPE i = 0; i < NUM_DST_ELEMS; i++)\
- src[i * STRIDE] = i;\
- FUN##OBJTYPE##STRIDETYPE \
- (dst, src, STRIDE, NUM_DST_ELEMS); \
- for (STRIDETYPE i = 0; i < NUM_DST_ELEMS; i++)\
- if (dst[i] != i)\
- abort ();\
-}
-
-#define TEST_GATHER_LOAD1(OBJTYPE,STRIDETYPE,STRIDE) \
- TEST_GATHER_LOAD_COMMON1 (gather_load1, OBJTYPE, STRIDETYPE, STRIDE)
-
-#define TEST_GATHER_LOAD2(OBJTYPE,STRIDETYPE,STRIDE) \
- TEST_GATHER_LOAD_COMMON2 (gather_load2, OBJTYPE, STRIDETYPE, STRIDE)
-
-#define TEST_GATHER_LOAD3(OBJTYPE,STRIDETYPE)\
-{\
- OBJTYPE real_src[1 + NUM_SRC_ELEMS (5)]\
- __attribute__((aligned (32)));\
- OBJTYPE real_dst1[1 + NUM_DST_ELEMS]\
- __attribute__((aligned (32)));\
- OBJTYPE real_dst2[1 + NUM_DST_ELEMS]\
- __attribute__((aligned (32)));\
- OBJTYPE real_dst3[1 + NUM_DST_ELEMS]\
- __attribute__((aligned (32)));\
- OBJTYPE real_dst4[1 + NUM_DST_ELEMS]\
- __attribute__((aligned (32)));\
- OBJTYPE real_dst5[1 + NUM_DST_ELEMS]\
- __attribute__((aligned (32)));\
- memset (real_src, 0, (1 + NUM_SRC_ELEMS (5)) * sizeof (OBJTYPE));\
- memset (real_dst1, 0, (1 + NUM_DST_ELEMS) * sizeof (OBJTYPE));\
- memset (real_dst2, 0, (1 + NUM_DST_ELEMS) * sizeof (OBJTYPE));\
- memset (real_dst3, 0, (1 + NUM_DST_ELEMS) * sizeof (OBJTYPE));\
- memset (real_dst4, 0, (1 + NUM_DST_ELEMS) * sizeof (OBJTYPE));\
- memset (real_dst5, 0, (1 + NUM_DST_ELEMS) * sizeof (OBJTYPE));\
- OBJTYPE *src = &real_src[1];\
- OBJTYPE *dst1 = &real_dst1[1];\
- OBJTYPE *dst2 = &real_dst2[1];\
- OBJTYPE *dst3 = &real_dst3[1];\
- OBJTYPE *dst4 = &real_dst4[1];\
- OBJTYPE *dst5 = &real_dst5[1];\
- for (STRIDETYPE i = 0; i < NUM_SRC_ELEMS (5); i++)\
- src[i] = i;\
- gather_load3s5##OBJTYPE##STRIDETYPE \
- (dst1, dst2, dst3, dst4, dst5, src, NUM_DST_ELEMS); \
- for (STRIDETYPE i = 0; i < NUM_DST_ELEMS; i++)\
- {\
- STRIDETYPE base = i * 5;\
- if (dst1[i] != base)\
- abort ();\
- if (dst2[i] != (base + 1))\
- abort ();\
- if (dst3[i] != (base + 2))\
- abort ();\
- if (dst4[i] != (base + 3))\
- abort ();\
- if (dst5[i] != (base + 4))\
- abort ();\
- }\
-}
-
-#define TEST_GATHER_LOAD4(OBJTYPE,STRIDETYPE,STRIDE) \
- TEST_GATHER_LOAD_COMMON1 (gather_load4, OBJTYPE, STRIDETYPE, STRIDE)
-
-#define TEST_GATHER_LOAD5(OBJTYPE,STRIDETYPE,STRIDE) \
- TEST_GATHER_LOAD_COMMON2 (gather_load5, OBJTYPE, STRIDETYPE, STRIDE)
-
-int __attribute__ ((optimize (1)))
-main ()
-{
- TEST_GATHER_LOAD1 (double, long, 5);
- TEST_GATHER_LOAD1 (double, long, 8);
- TEST_GATHER_LOAD1 (double, long, 21);
-
- TEST_GATHER_LOAD1 (float, int, 5);
- TEST_GATHER_LOAD1 (float, int, 8);
- TEST_GATHER_LOAD1 (float, int, 21);
-
- TEST_GATHER_LOAD2 (double, long, 5);
- TEST_GATHER_LOAD2 (double, long, 8);
- TEST_GATHER_LOAD2 (double, long, 21);
-
- TEST_GATHER_LOAD2 (float, int, 5);
- TEST_GATHER_LOAD2 (float, int, 8);
- TEST_GATHER_LOAD2 (float, int, 21);
-
- TEST_GATHER_LOAD3 (double, long);
- TEST_GATHER_LOAD3 (float, int);
-
- TEST_GATHER_LOAD4 (double, long, 5);
-
- TEST_GATHER_LOAD5 (double, long, 5);
- TEST_GATHER_LOAD5 (float, int, 5);
-
- /* Widened forms. */
- TEST_GATHER_LOAD1 (double, int, 5)
- TEST_GATHER_LOAD1 (double, int, 8)
- TEST_GATHER_LOAD1 (double, short, 5)
- TEST_GATHER_LOAD1 (double, short, 8)
-
- TEST_GATHER_LOAD1 (float, short, 5)
- TEST_GATHER_LOAD1 (float, short, 8)
-
- TEST_GATHER_LOAD2 (double, int, 5);
- TEST_GATHER_LOAD2 (double, int, 8);
- TEST_GATHER_LOAD2 (double, int, 21);
-
- TEST_GATHER_LOAD2 (float, short, 5);
- TEST_GATHER_LOAD2 (float, short, 8);
- TEST_GATHER_LOAD2 (float, short, 21);
-
- TEST_GATHER_LOAD4 (double, int, 5);
- TEST_GATHER_LOAD4 (float, short, 5);
-
- TEST_GATHER_LOAD5 (double, int, 5);
-
- return 0;
-}
-
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_gather_load_6.c b/gcc/testsuite/gcc.target/aarch64/sve_gather_load_6.c
index 68b0b4d59b6..745e00f1e50 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_gather_load_6.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_gather_load_6.c
@@ -1,14 +1,36 @@
-/* { dg-do compile } */
-/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
-
-void
-foo (double *__restrict y, double *__restrict x1,
- double *__restrict x2, int m)
-{
- for (int i = 0; i < 256; ++i)
- y[i * m] = x1[i * m] + x2[i * m];
-}
-
-/* { dg-final { scan-assembler-times {\tindex\tz[0-9]+\.d, #0, x[0-9]+} 1 } } */
-/* { dg-final { scan-assembler-times {\tadd\tz[0-9]+\.d, z[0-9]+\.d, z[0-9]+\.d} 1 } } */
-/* { dg-final { scan-assembler-not {\torr\tz[0-9]+} } } */
+/* { dg-do assemble } */
+/* { dg-options "-O2 -ftree-vectorize -fwrapv -march=armv8-a+sve --save-temps" } */
+
+#include <stdint.h>
+
+#ifndef INDEX32
+#define INDEX16 int16_t
+#define INDEX32 int32_t
+#endif
+
+/* Invoked 18 times for each data size. */
+#define TEST_LOOP(DATA_TYPE, BITS) \
+ void __attribute__ ((noinline, noclone)) \
+ f_##DATA_TYPE (DATA_TYPE *restrict dest, DATA_TYPE *restrict src, \
+ INDEX##BITS *indices, INDEX##BITS mask, int n) \
+ { \
+ for (int i = 9; i < n; ++i) \
+ dest[i] = src[(INDEX##BITS) (indices[i] | mask)]; \
+ }
+
+#define TEST_ALL(T) \
+ T (int32_t, 16) \
+ T (uint32_t, 16) \
+ T (float, 16) \
+ T (int64_t, 32) \
+ T (uint64_t, 32) \
+ T (double, 32)
+
+TEST_ALL (TEST_LOOP)
+
+/* { dg-final { scan-assembler-times {\tsunpkhi\tz[0-9]+\.s, z[0-9]+\.h\n} 3 } } */
+/* { dg-final { scan-assembler-times {\tsunpklo\tz[0-9]+\.s, z[0-9]+\.h\n} 3 } } */
+/* { dg-final { scan-assembler-times {\tsunpkhi\tz[0-9]+\.d, z[0-9]+\.s\n} 3 } } */
+/* { dg-final { scan-assembler-times {\tsunpklo\tz[0-9]+\.d, z[0-9]+\.s\n} 3 } } */
+/* { dg-final { scan-assembler-times {\tld1w\tz[0-9]+\.s, p[0-7]/z, \[x[0-9]+, z[0-9]+.s, sxtw 2\]\n} 6 } } */
+/* { dg-final { scan-assembler-times {\tld1d\tz[0-9]+\.d, p[0-7]/z, \[x[0-9]+, z[0-9]+.d, lsl 3\]\n} 6 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_gather_load_7.c b/gcc/testsuite/gcc.target/aarch64/sve_gather_load_7.c
index 788aeb08df2..8f2dfb75149 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_gather_load_7.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_gather_load_7.c
@@ -1,15 +1,15 @@
-/* { dg-do compile } */
-/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
+/* { dg-do assemble } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
-void
-foo (double *x, int m)
-{
- for (int i = 0; i < 256; ++i)
- x[i * m] += x[i * m];
-}
+#define INDEX16 uint16_t
+#define INDEX32 uint32_t
-/* { dg-final { scan-assembler-times {\tcbz\tw1,} 1 } } */
-/* { dg-final { scan-assembler-times {\tld1d\tz[0-9]+\.d, } 1 } } */
-/* { dg-final { scan-assembler-times {\tst1d\tz[0-9]+\.d, } 1 } } */
-/* { dg-final { scan-assembler-times {\tldr\t} 1 } } */
-/* { dg-final { scan-assembler-times {\tstr\t} 1 } } */
+#include "sve_gather_load_6.c"
+
+/* { dg-final { scan-assembler-times {\tuunpkhi\tz[0-9]+\.s, z[0-9]+\.h\n} 3 } } */
+/* { dg-final { scan-assembler-times {\tuunpklo\tz[0-9]+\.s, z[0-9]+\.h\n} 3 } } */
+/* { dg-final { scan-assembler-times {\tuunpkhi\tz[0-9]+\.d, z[0-9]+\.s\n} 3 } } */
+/* { dg-final { scan-assembler-times {\tuunpklo\tz[0-9]+\.d, z[0-9]+\.s\n} 3 } } */
+/* Either extension type is OK here. */
+/* { dg-final { scan-assembler-times {\tld1w\tz[0-9]+\.s, p[0-7]/z, \[x[0-9]+, z[0-9]+.s, [us]xtw 2\]\n} 6 } } */
+/* { dg-final { scan-assembler-times {\tld1d\tz[0-9]+\.d, p[0-7]/z, \[x[0-9]+, z[0-9]+.d, lsl 3\]\n} 6 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_gather_load_8.c b/gcc/testsuite/gcc.target/aarch64/sve_gather_load_8.c
deleted file mode 100644
index 0c0cf73be55..00000000000
--- a/gcc/testsuite/gcc.target/aarch64/sve_gather_load_8.c
+++ /dev/null
@@ -1,19 +0,0 @@
-/* { dg-do compile } */
-/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
-
-void
-f (int *__restrict a,
- int *__restrict b,
- int *__restrict c,
- int count)
-{
- for (int i = 0; i < count; ++i)
- a[i] = (b[i * 4] + b[i * 4 + 1] + b[i * 4 + 2]
- + c[i * 5] + c[i * 5 + 3]);
-}
-
-/* There must be a final scalar iteration because b[(count - 1) * 4 + 3]
- is not accessed by the original code. */
-/* { dg-final { scan-assembler-times {\tld4w\t{z[0-9]+.*}} 1 } } */
-/* { dg-final { scan-assembler {\tldr\t} } } */
-/* { dg-final { scan-assembler {\tstr\t} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_gather_load_9.c b/gcc/testsuite/gcc.target/aarch64/sve_gather_load_9.c
deleted file mode 100644
index dad798c8106..00000000000
--- a/gcc/testsuite/gcc.target/aarch64/sve_gather_load_9.c
+++ /dev/null
@@ -1,18 +0,0 @@
-/* { dg-do compile } */
-/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
-
-void
-f (int *__restrict a,
- int *__restrict b,
- int *__restrict c,
- int count)
-{
- for (int i = 0; i < count; ++i)
- a[i] = (b[i * 4] + b[i * 4 + 1] + b[i * 4 + 3]
- + c[i * 5] + c[i * 5 + 3]);
-}
-
-/* There's no need for a scalar tail here. */
-/* { dg-final { scan-assembler-times {\tld4w\t{z[0-9]+.*}} 1 } } */
-/* { dg-final { scan-assembler-not {\tldr\t} } } */
-/* { dg-final { scan-assembler-not {\tstr\t} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_index_offset_1.c b/gcc/testsuite/gcc.target/aarch64/sve_index_offset_1.c
new file mode 100644
index 00000000000..9c4bb37f04e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve_index_offset_1.c
@@ -0,0 +1,54 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve -msve-vector-bits=256" } */
+
+#define SIZE (15 * 8 + 3)
+
+#define DEF_INDEX_OFFSET(SIGNED, TYPE, ITERTYPE) \
+void __attribute__ ((noinline, noclone)) \
+set_##SIGNED##_##TYPE##_##ITERTYPE (SIGNED TYPE *restrict out, \
+ SIGNED TYPE *restrict in) \
+{ \
+ SIGNED ITERTYPE i; \
+ for (i = 0; i < SIZE; i++) \
+ { \
+ out[i] = in[i]; \
+ } \
+} \
+void __attribute__ ((noinline, noclone)) \
+set_##SIGNED##_##TYPE##_##ITERTYPE##_var (SIGNED TYPE *restrict out, \
+ SIGNED TYPE *restrict in, \
+ SIGNED ITERTYPE n) \
+{ \
+ SIGNED ITERTYPE i; \
+ for (i = 0; i < n; i++) \
+ { \
+ out[i] = in[i]; \
+ } \
+}
+
+#define TEST_TYPE(T, SIGNED, TYPE) \
+ T (SIGNED, TYPE, char) \
+ T (SIGNED, TYPE, short) \
+ T (SIGNED, TYPE, int) \
+ T (SIGNED, TYPE, long)
+
+#define TEST_ALL(T) \
+ TEST_TYPE (T, signed, long) \
+ TEST_TYPE (T, unsigned, long) \
+ TEST_TYPE (T, signed, int) \
+ TEST_TYPE (T, unsigned, int) \
+ TEST_TYPE (T, signed, short) \
+ TEST_TYPE (T, unsigned, short) \
+ TEST_TYPE (T, signed, char) \
+ TEST_TYPE (T, unsigned, char)
+
+TEST_ALL (DEF_INDEX_OFFSET)
+
+/* { dg-final { scan-assembler-times "ld1d\\tz\[0-9\]+.d, p\[0-9\]+/z, \\\[x\[0-9\]+, x\[0-9\]+, lsl 3\\\]" 16 } } */
+/* { dg-final { scan-assembler-times "st1d\\tz\[0-9\]+.d, p\[0-9\]+, \\\[x\[0-9\]+, x\[0-9\]+, lsl 3\\\]" 16 } } */
+/* { dg-final { scan-assembler-times "ld1w\\tz\[0-9\]+.s, p\[0-9\]+/z, \\\[x\[0-9\]+, x\[0-9\]+, lsl 2\\\]" 16 } } */
+/* { dg-final { scan-assembler-times "st1w\\tz\[0-9\]+.s, p\[0-9\]+, \\\[x\[0-9\]+, x\[0-9\]+, lsl 2\\\]" 16 } } */
+/* { dg-final { scan-assembler-times "ld1h\\tz\[0-9\]+.h, p\[0-9\]+/z, \\\[x\[0-9\]+, x\[0-9\]+, lsl 1\\\]" 16 } } */
+/* { dg-final { scan-assembler-times "st1h\\tz\[0-9\]+.h, p\[0-9\]+, \\\[x\[0-9\]+, x\[0-9\]+, lsl 1\\\]" 16 } } */
+/* { dg-final { scan-assembler-times "ld1b\\tz\[0-9\]+.b, p\[0-9\]+/z, \\\[x\[0-9\]+, x\[0-9\]+\\\]" 16 } } */
+/* { dg-final { scan-assembler-times "st1b\\tz\[0-9\]+.b, p\[0-9\]+, \\\[x\[0-9\]+, x\[0-9\]+\\\]" 16 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_index_offset_1_run.c b/gcc/testsuite/gcc.target/aarch64/sve_index_offset_1_run.c
new file mode 100644
index 00000000000..276d259ac3f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve_index_offset_1_run.c
@@ -0,0 +1,34 @@
+/* { dg-do run { target aarch64_sve_hw } } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve -msve-vector-bits=256" { target aarch64_sve256_hw } } */
+
+#include "sve_index_offset_1.c"
+
+#define TEST_INDEX_OFFSET(SIGNED, TYPE, ITERTYPE) \
+{ \
+ SIGNED TYPE out[SIZE + 1]; \
+ SIGNED TYPE in1[SIZE + 1]; \
+ SIGNED TYPE in2[SIZE + 1]; \
+ for (int i = 0; i < SIZE + 1; ++i) \
+ { \
+ in1[i] = (i * 4) ^ i; \
+ in2[i] = (i * 2) ^ i; \
+ asm volatile ("" ::: "memory"); \
+ } \
+ out[SIZE] = 42; \
+ set_##SIGNED##_##TYPE##_##ITERTYPE (out, in1); \
+ if (0 != __builtin_memcmp (out, in1, SIZE * sizeof (TYPE))) \
+ __builtin_abort (); \
+ set_##SIGNED##_##TYPE##_##ITERTYPE##_var (out, in2, SIZE); \
+ if (0 != __builtin_memcmp (out, in2, SIZE * sizeof (TYPE))) \
+ __builtin_abort (); \
+ if (out[SIZE] != 42) \
+ __builtin_abort (); \
+}
+
+int __attribute__ ((optimize (1)))
+main (void)
+{
+ TEST_ALL (TEST_INDEX_OFFSET);
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_indexoffset_1.c b/gcc/testsuite/gcc.target/aarch64/sve_indexoffset_1.c
deleted file mode 100644
index 949449cde9f..00000000000
--- a/gcc/testsuite/gcc.target/aarch64/sve_indexoffset_1.c
+++ /dev/null
@@ -1,49 +0,0 @@
-/* { dg-do compile } */
-/* { dg-options "-std=c99 -ftree-vectorize -O2 -fno-inline -march=armv8-a+sve -msve-vector-bits=256" } */
-
-#define SIZE 15*8+3
-
-#define INDEX_OFFSET_TEST_1(SIGNED, TYPE, ITERTYPE) \
-void set_##SIGNED##_##TYPE##_##ITERTYPE (SIGNED TYPE *__restrict out, \
- SIGNED TYPE *__restrict in) \
-{ \
- SIGNED ITERTYPE i; \
- for (i = 0; i < SIZE; i++) \
- { \
- out[i] = in[i]; \
- } \
-} \
-void set_##SIGNED##_##TYPE##_##ITERTYPE##_var (SIGNED TYPE *__restrict out, \
- SIGNED TYPE *__restrict in, \
- SIGNED ITERTYPE n) \
-{\
- SIGNED ITERTYPE i;\
- for (i = 0; i < n; i++)\
- {\
- out[i] = in[i];\
- }\
-}
-
-#define INDEX_OFFSET_TEST(SIGNED, TYPE)\
- INDEX_OFFSET_TEST_1 (SIGNED, TYPE, char) \
- INDEX_OFFSET_TEST_1 (SIGNED, TYPE, short) \
- INDEX_OFFSET_TEST_1 (SIGNED, TYPE, int) \
- INDEX_OFFSET_TEST_1 (SIGNED, TYPE, long)
-
-INDEX_OFFSET_TEST (signed, long)
-INDEX_OFFSET_TEST (unsigned, long)
-INDEX_OFFSET_TEST (signed, int)
-INDEX_OFFSET_TEST (unsigned, int)
-INDEX_OFFSET_TEST (signed, short)
-INDEX_OFFSET_TEST (unsigned, short)
-INDEX_OFFSET_TEST (signed, char)
-INDEX_OFFSET_TEST (unsigned, char)
-
-/* { dg-final { scan-assembler-times "ld1d\\tz\[0-9\]+.d, p\[0-9\]+/z, \\\[x\[0-9\]+, x\[0-9\]+, lsl 3\\\]" 16 } } */
-/* { dg-final { scan-assembler-times "st1d\\tz\[0-9\]+.d, p\[0-9\]+, \\\[x\[0-9\]+, x\[0-9\]+, lsl 3\\\]" 16 } } */
-/* { dg-final { scan-assembler-times "ld1w\\tz\[0-9\]+.s, p\[0-9\]+/z, \\\[x\[0-9\]+, x\[0-9\]+, lsl 2\\\]" 16 } } */
-/* { dg-final { scan-assembler-times "st1w\\tz\[0-9\]+.s, p\[0-9\]+, \\\[x\[0-9\]+, x\[0-9\]+, lsl 2\\\]" 16 } } */
-/* { dg-final { scan-assembler-times "ld1h\\tz\[0-9\]+.h, p\[0-9\]+/z, \\\[x\[0-9\]+, x\[0-9\]+, lsl 1\\\]" 16 } } */
-/* { dg-final { scan-assembler-times "st1h\\tz\[0-9\]+.h, p\[0-9\]+, \\\[x\[0-9\]+, x\[0-9\]+, lsl 1\\\]" 16 } } */
-/* { dg-final { scan-assembler-times "ld1b\\tz\[0-9\]+.b, p\[0-9\]+/z, \\\[x\[0-9\]+, x\[0-9\]+\\\]" 16 } } */
-/* { dg-final { scan-assembler-times "st1b\\tz\[0-9\]+.b, p\[0-9\]+, \\\[x\[0-9\]+, x\[0-9\]+\\\]" 16 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_indexoffset_1_run.c b/gcc/testsuite/gcc.target/aarch64/sve_indexoffset_1_run.c
deleted file mode 100644
index d6b2646798c..00000000000
--- a/gcc/testsuite/gcc.target/aarch64/sve_indexoffset_1_run.c
+++ /dev/null
@@ -1,48 +0,0 @@
-/* { dg-do run { target aarch64_sve_hw } } */
-/* { dg-options "-std=c99 -ftree-vectorize -O2 -fno-inline -march=armv8-a+sve" } */
-/* { dg-options "-std=c99 -ftree-vectorize -O2 -fno-inline -march=armv8-a+sve -msve-vector-bits=256" { target aarch64_sve256_hw } } */
-
-#include "sve_indexoffset_1.c"
-
-#include <string.h>
-
-#define CALL_INDEX_OFFSET_TEST_1(SIGNED, TYPE, ITERTYPE)\
-{\
- SIGNED TYPE out[SIZE + 1];\
- SIGNED TYPE in1[SIZE + 1];\
- SIGNED TYPE in2[SIZE + 1];\
- for (int i = 0; i < SIZE + 1; ++i)\
- {\
- in1[i] = (i * 4) ^ i;\
- in2[i] = (i * 2) ^ i;\
- }\
- out[SIZE] = 42;\
- set_##SIGNED##_##TYPE##_##ITERTYPE (out, in1); \
- if (0 != memcmp (out, in1, SIZE * sizeof (TYPE)))\
- return 1;\
- set_##SIGNED##_##TYPE##_##ITERTYPE##_var (out, in2, SIZE); \
- if (0 != memcmp (out, in2, SIZE * sizeof (TYPE)))\
- return 1;\
- if (out[SIZE] != 42)\
- return 1;\
-}
-
-#define CALL_INDEX_OFFSET_TEST(SIGNED, TYPE)\
- CALL_INDEX_OFFSET_TEST_1 (SIGNED, TYPE, char) \
- CALL_INDEX_OFFSET_TEST_1 (SIGNED, TYPE, short) \
- CALL_INDEX_OFFSET_TEST_1 (SIGNED, TYPE, int) \
- CALL_INDEX_OFFSET_TEST_1 (SIGNED, TYPE, long)
-
-int
-main (void)
-{
- CALL_INDEX_OFFSET_TEST (signed, long)
- CALL_INDEX_OFFSET_TEST (unsigned, long)
- CALL_INDEX_OFFSET_TEST (signed, int)
- CALL_INDEX_OFFSET_TEST (unsigned, int)
- CALL_INDEX_OFFSET_TEST (signed, short)
- CALL_INDEX_OFFSET_TEST (unsigned, short)
- CALL_INDEX_OFFSET_TEST (signed, char)
- CALL_INDEX_OFFSET_TEST (unsigned, char)
- return 0;
-}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_ld1r_1.C b/gcc/testsuite/gcc.target/aarch64/sve_ld1r_1.C
deleted file mode 100644
index 4c196684364..00000000000
--- a/gcc/testsuite/gcc.target/aarch64/sve_ld1r_1.C
+++ /dev/null
@@ -1,56 +0,0 @@
-/* { dg-do compile } */
-/* { dg-options "-std=c++11 -O3 -fno-inline -march=armv8-a+sve -fno-tree-loop-distribute-patterns" } */
-
-#include <stdint.h>
-
-#define NUM_ELEMS(TYPE) (1024 / sizeof (TYPE))
-
-#define DEF_LOAD_BROADCAST(TYPE)\
-void set_##TYPE (TYPE *__restrict__ a, TYPE *__restrict__ b)\
-{\
- for (int i = 0; i < NUM_ELEMS (TYPE); i++)\
- a[i] = *b;\
-}\
-
-#define DEF_LOAD_BROADCAST_IMM(TYPE,IMM,SUFFIX)\
-void set_##TYPE##SUFFIX (TYPE *__restrict__ a)\
-{\
- for (int i = 0; i < NUM_ELEMS (TYPE); i++)\
- a[i] = IMM;\
-}\
-
-/* --- VALID --- */
-
-DEF_LOAD_BROADCAST (int8_t)
-DEF_LOAD_BROADCAST (int16_t)
-DEF_LOAD_BROADCAST (int32_t)
-DEF_LOAD_BROADCAST (int64_t)
-
-DEF_LOAD_BROADCAST_IMM (int16_t, 129, imm_129)
-DEF_LOAD_BROADCAST_IMM (int32_t, 129, imm_129)
-DEF_LOAD_BROADCAST_IMM (int64_t, 129, imm_129)
-
-DEF_LOAD_BROADCAST_IMM (int16_t, -130, imm_m130)
-DEF_LOAD_BROADCAST_IMM (int32_t, -130, imm_m130)
-DEF_LOAD_BROADCAST_IMM (int64_t, -130, imm_m130)
-
-DEF_LOAD_BROADCAST_IMM (int16_t, 0x1234, imm_0x1234)
-DEF_LOAD_BROADCAST_IMM (int32_t, 0x1234, imm_0x1234)
-DEF_LOAD_BROADCAST_IMM (int64_t, 0x1234, imm_0x1234)
-
-DEF_LOAD_BROADCAST_IMM (int16_t, 0xFEDC, imm_0xFEDC)
-DEF_LOAD_BROADCAST_IMM (int32_t, 0xFEDC, imm_0xFEDC)
-DEF_LOAD_BROADCAST_IMM (int64_t, 0xFEDC, imm_0xFEDC)
-
-DEF_LOAD_BROADCAST_IMM (int32_t, 0x12345678, imm_0x12345678)
-DEF_LOAD_BROADCAST_IMM (int64_t, 0x12345678, imm_0x12345678)
-
-DEF_LOAD_BROADCAST_IMM (int32_t, 0xF2345678, imm_0xF2345678)
-DEF_LOAD_BROADCAST_IMM (int64_t, 0xF2345678, imm_0xF2345678)
-
-DEF_LOAD_BROADCAST_IMM (int64_t, int64_t (0xFEBA716B12371765), imm_FEBA716B12371765)
-
-/* { dg-final { scan-assembler-times {\tld1rb\tz[0-9]+\.b, p[0-7]/z, } 1 } } */
-/* { dg-final { scan-assembler-times {\tld1rh\tz[0-9]+\.h, p[0-7]/z, } 5 } } */
-/* { dg-final { scan-assembler-times {\tld1rw\tz[0-9]+\.s, p[0-7]/z, } 7 } } */
-/* { dg-final { scan-assembler-times {\tld1rd\tz[0-9]+\.d, p[0-7]/z, } 8 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_ld1r_1_run.C b/gcc/testsuite/gcc.target/aarch64/sve_ld1r_1_run.C
deleted file mode 100644
index 8e954f3e32c..00000000000
--- a/gcc/testsuite/gcc.target/aarch64/sve_ld1r_1_run.C
+++ /dev/null
@@ -1,64 +0,0 @@
-/* { dg-do run { target aarch64_sve_hw } } */
-/* { dg-options "-std=c++11 -O3 -fno-inline -march=armv8-a+sve -fno-tree-loop-distribute-patterns" } */
-
-#include "sve_ld1r_1.C"
-
-#include <stdlib.h>
-#include <stdio.h>
-
-#define TEST_LOAD_BROADCAST(TYPE,IMM)\
- {\
- TYPE v[NUM_ELEMS (TYPE)];\
- TYPE temp = 0;\
- set_##TYPE (v, IMM);\
- for (int i = 0; i < NUM_ELEMS (TYPE); i++ )\
- temp += v[i];\
- result += temp;\
- }\
-
-#define TEST_LOAD_BROADCAST_IMM(TYPE,IMM,SUFFIX)\
- {\
- TYPE v[NUM_ELEMS (TYPE)];\
- TYPE temp = 0;\
- set_##TYPE##SUFFIX (v);\
- for (int i = 0; i < NUM_ELEMS (TYPE); i++ )\
- temp += v[i];\
- result += temp;\
- }\
-
-int main (int argc, char **argv)
-{
- long long int result = 0;
-
- TEST_LOAD_BROADCAST_IMM (int16_t, 129, imm_129)
- TEST_LOAD_BROADCAST_IMM (int32_t, 129, imm_129)
- TEST_LOAD_BROADCAST_IMM (int64_t, 129, imm_129)
-
- TEST_LOAD_BROADCAST_IMM (int16_t, -130, imm_m130)
- TEST_LOAD_BROADCAST_IMM (int32_t, -130, imm_m130)
- TEST_LOAD_BROADCAST_IMM (int64_t, -130, imm_m130)
-
- TEST_LOAD_BROADCAST_IMM (int16_t, 0x1234, imm_0x1234)
- TEST_LOAD_BROADCAST_IMM (int32_t, 0x1234, imm_0x1234)
- TEST_LOAD_BROADCAST_IMM (int64_t, 0x1234, imm_0x1234)
-
- TEST_LOAD_BROADCAST_IMM (int16_t, int16_t (0xFEDC), imm_0xFEDC)
- TEST_LOAD_BROADCAST_IMM (int32_t, 0xFEDC, imm_0xFEDC)
- TEST_LOAD_BROADCAST_IMM (int64_t, 0xFEDC, imm_0xFEDC)
-
- TEST_LOAD_BROADCAST_IMM (int32_t, 0x12345678, imm_0x12345678)
- TEST_LOAD_BROADCAST_IMM (int64_t, 0x12345678, imm_0x12345678)
-
- TEST_LOAD_BROADCAST_IMM (int32_t, 0xF2345678, imm_0xF2345678)
- TEST_LOAD_BROADCAST_IMM (int64_t, 0xF2345678, imm_0xF2345678)
-
- TEST_LOAD_BROADCAST_IMM (int64_t, int64_t (0xFEBA716B12371765),
- imm_FEBA716B12371765)
-
- if (result != int64_t (6717319005707226880))
- {
- fprintf (stderr, "result = %lld\n", result);
- abort ();
- }
- return 0;
-}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_ld1r_2.c b/gcc/testsuite/gcc.target/aarch64/sve_ld1r_2.c
new file mode 100644
index 00000000000..89d5f4289de
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve_ld1r_2.c
@@ -0,0 +1,61 @@
+/* { dg-do compile } */
+/* { dg-options "-O3 -march=armv8-a+sve -fno-tree-loop-distribute-patterns" } */
+
+#include <stdint.h>
+
+#define NUM_ELEMS(TYPE) (1024 / sizeof (TYPE))
+
+#define DEF_LOAD_BROADCAST(TYPE) \
+ void __attribute__ ((noinline, noclone)) \
+ set_##TYPE (TYPE *restrict a, TYPE *restrict b) \
+ { \
+ for (int i = 0; i < NUM_ELEMS (TYPE); i++) \
+ a[i] = *b; \
+ }
+
+#define DEF_LOAD_BROADCAST_IMM(TYPE, IMM, SUFFIX) \
+ void __attribute__ ((noinline, noclone)) \
+ set_##TYPE##_##SUFFIX (TYPE *a) \
+ { \
+ for (int i = 0; i < NUM_ELEMS (TYPE); i++) \
+ a[i] = IMM; \
+ }
+
+#define FOR_EACH_LOAD_BROADCAST(T) \
+ T (int8_t) \
+ T (int16_t) \
+ T (int32_t) \
+ T (int64_t)
+
+#define FOR_EACH_LOAD_BROADCAST_IMM(T) \
+ T (int16_t, 129, imm_129) \
+ T (int32_t, 129, imm_129) \
+ T (int64_t, 129, imm_129) \
+ \
+ T (int16_t, -130, imm_m130) \
+ T (int32_t, -130, imm_m130) \
+ T (int64_t, -130, imm_m130) \
+ \
+ T (int16_t, 0x1234, imm_0x1234) \
+ T (int32_t, 0x1234, imm_0x1234) \
+ T (int64_t, 0x1234, imm_0x1234) \
+ \
+ T (int16_t, 0xFEDC, imm_0xFEDC) \
+ T (int32_t, 0xFEDC, imm_0xFEDC) \
+ T (int64_t, 0xFEDC, imm_0xFEDC) \
+ \
+ T (int32_t, 0x12345678, imm_0x12345678) \
+ T (int64_t, 0x12345678, imm_0x12345678) \
+ \
+ T (int32_t, 0xF2345678, imm_0xF2345678) \
+ T (int64_t, 0xF2345678, imm_0xF2345678) \
+ \
+ T (int64_t, (int64_t) 0xFEBA716B12371765, imm_FEBA716B12371765)
+
+FOR_EACH_LOAD_BROADCAST (DEF_LOAD_BROADCAST)
+FOR_EACH_LOAD_BROADCAST_IMM (DEF_LOAD_BROADCAST_IMM)
+
+/* { dg-final { scan-assembler-times {\tld1rb\tz[0-9]+\.b, p[0-7]/z, } 1 } } */
+/* { dg-final { scan-assembler-times {\tld1rh\tz[0-9]+\.h, p[0-7]/z, } 5 } } */
+/* { dg-final { scan-assembler-times {\tld1rw\tz[0-9]+\.s, p[0-7]/z, } 7 } } */
+/* { dg-final { scan-assembler-times {\tld1rd\tz[0-9]+\.d, p[0-7]/z, } 8 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_ld1r_2_run.c b/gcc/testsuite/gcc.target/aarch64/sve_ld1r_2_run.c
new file mode 100644
index 00000000000..510b2eca517
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve_ld1r_2_run.c
@@ -0,0 +1,38 @@
+/* { dg-do run { target aarch64_sve_hw } } */
+/* { dg-options "-O3 -march=armv8-a+sve -fno-tree-loop-distribute-patterns" } */
+
+#include "sve_ld1r_2.c"
+
+#define TEST_LOAD_BROADCAST(TYPE) \
+ { \
+ TYPE v[NUM_ELEMS (TYPE)]; \
+ TYPE val = 99; \
+ set_##TYPE (v, &val); \
+ for (int i = 0; i < NUM_ELEMS (TYPE); i++) \
+ { \
+ if (v[i] != (TYPE) 99) \
+ __builtin_abort (); \
+ asm volatile ("" ::: "memory"); \
+ } \
+ }
+
+#define TEST_LOAD_BROADCAST_IMM(TYPE, IMM, SUFFIX) \
+ { \
+ TYPE v[NUM_ELEMS (TYPE)]; \
+ set_##TYPE##_##SUFFIX (v); \
+ for (int i = 0; i < NUM_ELEMS (TYPE); i++ ) \
+ { \
+ if (v[i] != (TYPE) IMM) \
+ __builtin_abort (); \
+ asm volatile ("" ::: "memory"); \
+ } \
+ }
+
+int __attribute__ ((optimize (1)))
+main (int argc, char **argv)
+{
+ FOR_EACH_LOAD_BROADCAST (TEST_LOAD_BROADCAST)
+ FOR_EACH_LOAD_BROADCAST_IMM (TEST_LOAD_BROADCAST_IMM)
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_live_1.c b/gcc/testsuite/gcc.target/aarch64/sve_live_1.c
index 2d92708fbd2..407d1277c50 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_live_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_live_1.c
@@ -1,19 +1,41 @@
/* { dg-do assemble } */
-/* { dg-options "-O2 -ftree-vectorize -fno-inline -fno-tree-scev-cprop -march=armv8-a+sve --save-temps -fdump-tree-vect-details" } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
-int
-liveloop (int start, int n, int *x)
-{
- int i = start;
- int j;
+#include <stdint.h>
- for (j = 0; j < n; ++j)
- {
- i += 1;
- x[j] = i;
- }
- return i;
-}
+#define EXTRACT_LAST(TYPE) \
+ TYPE __attribute__ ((noinline, noclone)) \
+ test_##TYPE (TYPE *x, int n, TYPE value) \
+ { \
+ TYPE last; \
+ for (int j = 0; j < n; ++j) \
+ { \
+ last = x[j]; \
+ x[j] = last * value; \
+ } \
+ return last; \
+ }
-/* { dg-final { scan-tree-dump-times "LOOP VECTORIZED" 1 "vect" } } */
-/* { dg-final { scan-tree-dump-times "Using a fully-masked loop" 1 "vect" } } */
+#define TEST_ALL(T) \
+ T (uint8_t) \
+ T (uint16_t) \
+ T (uint32_t) \
+ T (uint64_t) \
+ T (_Float16) \
+ T (float) \
+ T (double)
+
+TEST_ALL (EXTRACT_LAST)
+
+/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7].b, } 2 } } */
+/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7].h, } 4 } } */
+/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7].s, } 4 } } */
+/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7].d, } 4 } } */
+
+/* { dg-final { scan-assembler-times {\tlastb\tw[0-9]+, p[0-7], z[0-9]+\.b\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tlastb\tw[0-9]+, p[0-7], z[0-9]+\.h\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tlastb\tw[0-9]+, p[0-7], z[0-9]+\.s\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tlastb\tx[0-9]+, p[0-7], z[0-9]+\.d\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tlastb\th[0-9]+, p[0-7], z[0-9]+\.h\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tlastb\ts[0-9]+, p[0-7], z[0-9]+\.s\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tlastb\td[0-9]+, p[0-7], z[0-9]+\.d\n} 1 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_live_1_run.c b/gcc/testsuite/gcc.target/aarch64/sve_live_1_run.c
index 99f0be353aa..2a1f6df4788 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_live_1_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_live_1_run.c
@@ -1,29 +1,35 @@
/* { dg-do run { target { aarch64_sve_hw } } } */
/* { dg-options "-O2 -ftree-vectorize -fno-inline -march=armv8-a+sve" } */
-extern void abort(void);
-#include <string.h>
-
#include "sve_live_1.c"
-#define MAX 62
-#define START 27
-
-int main (void)
+#define N 107
+#define OP 70
+
+#define TEST_LOOP(TYPE) \
+ { \
+ TYPE a[N]; \
+ for (int i = 0; i < N; ++i) \
+ { \
+ a[i] = i * 2 + (i % 3); \
+ asm volatile ("" ::: "memory"); \
+ } \
+ TYPE expected = a[N - 1]; \
+ TYPE res = test_##TYPE (a, N, OP); \
+ if (res != expected) \
+ __builtin_abort (); \
+ for (int i = 0; i < N; ++i) \
+ { \
+ TYPE old = i * 2 + (i % 3); \
+ if (a[i] != (TYPE) (old * (TYPE) OP)) \
+ __builtin_abort (); \
+ asm volatile ("" ::: "memory"); \
+ } \
+ }
+
+int __attribute__ ((optimize (1)))
+main (void)
{
- int a[MAX];
- int i;
-
- memset (a, 0, MAX*sizeof (int));
-
- int ret = liveloop (START, MAX, a);
-
- if (ret != 89)
- abort ();
-
- for (i=0; i<MAX; i++)
- {
- if (a[i] != i+START+1)
- abort ();
- }
-} \ No newline at end of file
+ TEST_ALL (TEST_LOOP);
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_live_2.c b/gcc/testsuite/gcc.target/aarch64/sve_live_2.c
deleted file mode 100644
index 06d95fa8ea6..00000000000
--- a/gcc/testsuite/gcc.target/aarch64/sve_live_2.c
+++ /dev/null
@@ -1,19 +0,0 @@
-/* { dg-do assemble } */
-/* { dg-options "-O2 -ftree-vectorize -fno-inline -fno-tree-scev-cprop -march=armv8-a+sve --save-temps -fdump-tree-vect-details" } */
-
-int
-liveloop (int start, int n, int * __restrict__ x, char * __restrict__ y)
-{
- int i = start;
- int j;
-
- for (j = 0; j < n; ++j)
- {
- i += 1;
- x[j] = y[j] + 1;
- }
- return i;
-}
-
-/* { dg-final { scan-tree-dump-times "LOOP VECTORIZED" 1 "vect" } } */
-/* { dg-final { scan-tree-dump-times "Can't use a fully-masked loop because ncopies is greater than 1" 1 "vect" } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_live_2_run.c b/gcc/testsuite/gcc.target/aarch64/sve_live_2_run.c
deleted file mode 100644
index e7924e020cb..00000000000
--- a/gcc/testsuite/gcc.target/aarch64/sve_live_2_run.c
+++ /dev/null
@@ -1,32 +0,0 @@
-/* { dg-do run { target { aarch64_sve_hw } } } */
-/* { dg-options "-O2 -ftree-vectorize -fno-inline -march=armv8-a+sve" } */
-
-extern void abort(void);
-#include <string.h>
-#include <stdio.h>
-
-#include "sve_live_2.c"
-
-#define MAX 193
-#define START 84
-
-int main (void)
-{
- int a[MAX];
- char b[MAX];
- int i;
-
- memset (a, 0, MAX*sizeof (int));
- memset (b, 23, MAX*sizeof (char));
-
- int ret = liveloop (START, MAX, a, b);
-
- if (ret != 277)
- abort ();
-
- for (i=0; i<MAX; i++)
- {
- if (a[i] != 24)
- abort ();
- }
-} \ No newline at end of file
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_load_const_offset_1.c b/gcc/testsuite/gcc.target/aarch64/sve_load_const_offset_1.c
index 0bc757907cf..882da83237e 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_load_const_offset_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_load_const_offset_1.c
@@ -3,10 +3,10 @@
#include <stdint.h>
-typedef int64_t v4di __attribute__ ((vector_size (32)));
-typedef int32_t v8si __attribute__ ((vector_size (32)));
-typedef int16_t v16hi __attribute__ ((vector_size (32)));
-typedef int8_t v32qi __attribute__ ((vector_size (32)));
+typedef int64_t vnx2di __attribute__ ((vector_size (32)));
+typedef int32_t vnx4si __attribute__ ((vector_size (32)));
+typedef int16_t vnx8hi __attribute__ ((vector_size (32)));
+typedef int8_t vnx16qi __attribute__ ((vector_size (32)));
#define TEST_TYPE(TYPE) \
void sve_load_##TYPE##_neg9 (TYPE *a) \
@@ -45,10 +45,10 @@ typedef int8_t v32qi __attribute__ ((vector_size (32)));
asm volatile ("" :: "w" (x)); \
}
-TEST_TYPE (v4di)
-TEST_TYPE (v8si)
-TEST_TYPE (v16hi)
-TEST_TYPE (v32qi)
+TEST_TYPE (vnx2di)
+TEST_TYPE (vnx4si)
+TEST_TYPE (vnx8hi)
+TEST_TYPE (vnx16qi)
/* { dg-final { scan-assembler-times {\tsub\tx[0-9]+, x0, #288\n} 4 } } */
/* { dg-final { scan-assembler-times {\tadd\tx[0-9]+, x0, 16\n} 4 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_load_const_offset_2.c b/gcc/testsuite/gcc.target/aarch64/sve_load_const_offset_2.c
index a0ced0d9be4..78cfc7a9bd8 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_load_const_offset_2.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_load_const_offset_2.c
@@ -1,11 +1,11 @@
/* { dg-do assemble } */
-/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve -save-temps -msve-vector-bits=256" } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve -save-temps" } */
void
-f (unsigned int *restrict a, unsigned char *restrict b, int n)
+f (unsigned int *restrict a, signed char *restrict b, signed char mask, int n)
{
for (int i = 0; i < n; ++i)
- a[i] += b[i];
+ a[i] += (signed char) (b[i] | mask);
}
/* { dg-final { scan-assembler-times {\tld1w\tz[0-9]+\.s, p[0-7]/z, \[x[0-9]+\]\n} 1 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_load_const_offset_3.c b/gcc/testsuite/gcc.target/aarch64/sve_load_const_offset_3.c
index 00731d995c8..51732b03784 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_load_const_offset_3.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_load_const_offset_3.c
@@ -1,12 +1,7 @@
/* { dg-do assemble } */
-/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve -save-temps" } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve -save-temps -msve-vector-bits=256" } */
-void
-f (unsigned int *restrict a, unsigned char *restrict b, int n)
-{
- for (int i = 0; i < n; ++i)
- a[i] += b[i];
-}
+#include "sve_load_const_offset_2.c"
/* { dg-final { scan-assembler-times {\tld1w\tz[0-9]+\.s, p[0-7]/z, \[x[0-9]+\]\n} 1 } } */
/* { dg-final { scan-assembler-times {\tld1w\tz[0-9]+\.s, p[0-7]/z, \[x[0-9]+, #1, mul vl\]\n} 1 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_load_scalar_offset_1.c b/gcc/testsuite/gcc.target/aarch64/sve_load_scalar_offset_1.c
index 9163702db1d..f1c37d388f9 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_load_scalar_offset_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_load_scalar_offset_1.c
@@ -3,65 +3,65 @@
#include <stdint.h>
-typedef int64_t v4di __attribute__ ((vector_size (32)));
-typedef int32_t v8si __attribute__ ((vector_size (32)));
-typedef int16_t v16hi __attribute__ ((vector_size (32)));
-typedef int8_t v32qi __attribute__ ((vector_size (32)));
+typedef int64_t vnx2di __attribute__ ((vector_size (32)));
+typedef int32_t vnx4si __attribute__ ((vector_size (32)));
+typedef int16_t vnx8hi __attribute__ ((vector_size (32)));
+typedef int8_t vnx16qi __attribute__ ((vector_size (32)));
void sve_load_64_u_lsl (uint64_t *a)
{
register unsigned long i asm("x1");
asm volatile ("" : "=r" (i));
- asm volatile ("" :: "w" (*(v4di *)&a[i]));
+ asm volatile ("" :: "w" (*(vnx2di *)&a[i]));
}
void sve_load_64_s_lsl (int64_t *a)
{
register long i asm("x1");
asm volatile ("" : "=r" (i));
- asm volatile ("" :: "w" (*(v4di *)&a[i]));
+ asm volatile ("" :: "w" (*(vnx2di *)&a[i]));
}
void sve_load_32_u_lsl (uint32_t *a)
{
register unsigned long i asm("x1");
asm volatile ("" : "=r" (i));
- asm volatile ("" :: "w" (*(v8si *)&a[i]));
+ asm volatile ("" :: "w" (*(vnx4si *)&a[i]));
}
void sve_load_32_s_lsl (int32_t *a)
{
register long i asm("x1");
asm volatile ("" : "=r" (i));
- asm volatile ("" :: "w" (*(v8si *)&a[i]));
+ asm volatile ("" :: "w" (*(vnx4si *)&a[i]));
}
void sve_load_16_z_lsl (uint16_t *a)
{
register unsigned long i asm("x1");
asm volatile ("" : "=r" (i));
- asm volatile ("" :: "w" (*(v16hi *)&a[i]));
+ asm volatile ("" :: "w" (*(vnx8hi *)&a[i]));
}
void sve_load_16_s_lsl (int16_t *a)
{
register long i asm("x1");
asm volatile ("" : "=r" (i));
- asm volatile ("" :: "w" (*(v16hi *)&a[i]));
+ asm volatile ("" :: "w" (*(vnx8hi *)&a[i]));
}
void sve_load_8_z (uint8_t *a)
{
register unsigned long i asm("x1");
asm volatile ("" : "=r" (i));
- asm volatile ("" :: "w" (*(v32qi *)&a[i]));
+ asm volatile ("" :: "w" (*(vnx16qi *)&a[i]));
}
void sve_load_8_s (int8_t *a)
{
register long i asm("x1");
asm volatile ("" : "=r" (i));
- asm volatile ("" :: "w" (*(v32qi *)&a[i]));
+ asm volatile ("" :: "w" (*(vnx16qi *)&a[i]));
}
/* { dg-final { scan-assembler-times {\tld1d\tz0\.d, p[0-7]/z, \[x0, x1, lsl 3\]\n} 2 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_loop_add_4_run.c b/gcc/testsuite/gcc.target/aarch64/sve_loop_add_4_run.c
index 2d11a221e93..0f918a4155f 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_loop_add_4_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_loop_add_4_run.c
@@ -10,7 +10,10 @@
{ \
TYPE a[N]; \
for (int i = 0; i < N; ++i) \
- a[i] = i * i + i % 5; \
+ { \
+ a[i] = i * i + i % 5; \
+ asm volatile ("" ::: "memory"); \
+ } \
test_##TYPE##_##NAME (a, BASE, N); \
for (int i = 0; i < N; ++i) \
{ \
@@ -20,7 +23,7 @@
} \
}
-int
+int __attribute__ ((optimize (1)))
main (void)
{
TEST_ALL (TEST_LOOP)
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_mad_1.c b/gcc/testsuite/gcc.target/aarch64/sve_mad_1.c
index ccb20b4191f..551b451495d 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_mad_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_mad_1.c
@@ -3,10 +3,10 @@
#include <stdint.h>
-typedef int8_t v32qi __attribute__((vector_size(32)));
-typedef int16_t v16hi __attribute__((vector_size(32)));
-typedef int32_t v8si __attribute__((vector_size(32)));
-typedef int64_t v4di __attribute__((vector_size(32)));
+typedef int8_t vnx16qi __attribute__((vector_size(32)));
+typedef int16_t vnx8hi __attribute__((vector_size(32)));
+typedef int32_t vnx4si __attribute__((vector_size(32)));
+typedef int64_t vnx2di __attribute__((vector_size(32)));
#define DO_OP(TYPE) \
void vmla_##TYPE (TYPE *x, TYPE y, TYPE z) \
@@ -23,10 +23,10 @@ void vmla_##TYPE (TYPE *x, TYPE y, TYPE z) \
*x = dst; \
}
-DO_OP (v32qi)
-DO_OP (v16hi)
-DO_OP (v8si)
-DO_OP (v4di)
+DO_OP (vnx16qi)
+DO_OP (vnx8hi)
+DO_OP (vnx4si)
+DO_OP (vnx2di)
/* { dg-final { scan-assembler-times {\tmad\tz0\.b, p[0-7]/m, z2\.b, z4\.b} 1 } } */
/* { dg-final { scan-assembler-times {\tmad\tz0\.h, p[0-7]/m, z2\.h, z4\.h} 1 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_1.c b/gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_1.c
index 4d47bce14fd..469e3c670d3 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_1.c
@@ -1,37 +1,52 @@
-/* { dg-do compile } */
-/* { dg-options "-O2 -ftree-vectorize -fno-inline -march=armv8-a+sve" } */
+/* { dg-do assemble } */
+/* { dg-options "-O2 -ftree-vectorize -ffast-math -march=armv8-a+sve --save-temps" } */
#include <stdint.h>
-#define INVALID_INDEX(TYPE) ((TYPE) 107)
-#define IS_VALID_INDEX(TYPE, VAL) (VAL < INVALID_INDEX (TYPE))
-#define ODD(VAL) (VAL & 0x1)
-
-/* TODO: This is a bit ugly for floating point types as it involves FP<>INT
- conversions, but I can't find another way of auto-vectorizing the code to
- make use of SVE gather instructions. */
-#define DEF_MASK_GATHER_LOAD(OUTTYPE,LOOKUPTYPE,INDEXTYPE)\
-void fun_##OUTTYPE##LOOKUPTYPE##INDEXTYPE (OUTTYPE *__restrict out,\
- LOOKUPTYPE *__restrict lookup,\
- INDEXTYPE *__restrict index, int n)\
-{\
- int i;\
- for (i = 0; i < n; ++i)\
- {\
- INDEXTYPE x = index[i];\
- if (IS_VALID_INDEX (INDEXTYPE, x))\
- x = lookup[x];\
- out[i] = x;\
- }\
-}\
-
-DEF_MASK_GATHER_LOAD (int32_t, int32_t, int32_t)
-DEF_MASK_GATHER_LOAD (int64_t, int64_t, int64_t)
-DEF_MASK_GATHER_LOAD (uint32_t, uint32_t, uint32_t)
-DEF_MASK_GATHER_LOAD (uint64_t, uint64_t, uint64_t)
-DEF_MASK_GATHER_LOAD (float, float, int32_t)
-DEF_MASK_GATHER_LOAD (double, double, int64_t)
-
-/* { dg-final { scan-assembler-times "ld1d\\tz\[0-9\]+.d, p\[0-9\]+/z, \\\[x\[0-9\]+, z\[0-9\]+.d, lsl 3\\\]" 3 } } */
-/* { dg-final { scan-assembler-times "ld1w\\tz\[0-9\]+.s, p\[0-9\]+/z, \\\[x\[0-9\]+, z\[0-9\]+.s, uxtw 2\\\]" 1 } } */
-/* { dg-final { scan-assembler-times "ld1w\\tz\[0-9\]+.s, p\[0-9\]+/z, \\\[x\[0-9\]+, z\[0-9\]+.s, sxtw 2\\\]" 2 } } */
+#ifndef INDEX32
+#define INDEX32 int32_t
+#define INDEX64 int64_t
+#endif
+
+#define TEST_LOOP(DATA_TYPE, CMP_TYPE, BITS) \
+ void \
+ f_##DATA_TYPE##_##CMP_TYPE \
+ (DATA_TYPE *restrict dest, DATA_TYPE *restrict src, \
+ CMP_TYPE *cmp1, CMP_TYPE *cmp2, INDEX##BITS *indices, int n) \
+ { \
+ for (int i = 0; i < n; ++i) \
+ if (cmp1[i] == cmp2[i]) \
+ dest[i] += src[indices[i]]; \
+ }
+
+#define TEST32(T, DATA_TYPE) \
+ T (DATA_TYPE, int32_t, 32) \
+ T (DATA_TYPE, uint32_t, 32) \
+ T (DATA_TYPE, float, 32)
+
+#define TEST64(T, DATA_TYPE) \
+ T (DATA_TYPE, int64_t, 64) \
+ T (DATA_TYPE, uint64_t, 64) \
+ T (DATA_TYPE, double, 64)
+
+#define TEST_ALL(T) \
+ TEST32 (T, int32_t) \
+ TEST32 (T, uint32_t) \
+ TEST32 (T, float) \
+ TEST64 (T, int64_t) \
+ TEST64 (T, uint64_t) \
+ TEST64 (T, double)
+
+TEST_ALL (TEST_LOOP)
+
+/* { dg-final { scan-assembler-times {\tld1w\tz[0-9]+\.s, p[0-7]/z, \[x[0-9]+, x[0-9]+, lsl 2\]\n} 36 } } */
+/* { dg-final { scan-assembler-times {\tcmpeq\tp[0-7]\.s, p[0-7]/z, z[0-9]+\.s, z[0-9]+\.s\n} 6 } } */
+/* { dg-final { scan-assembler-times {\tfcmeq\tp[0-7]\.s, p[0-7]/z, z[0-9]+\.s, z[0-9]+\.s\n} 3 } } */
+/* { dg-final { scan-assembler-times {\tld1w\tz[0-9]+\.s, p[0-7]/z, \[x[0-9]+, z[0-9]+\.s, sxtw 2\]\n} 9 } } */
+/* { dg-final { scan-assembler-times {\tst1w\tz[0-9]+\.s, p[0-7], \[x[0-9]+, x[0-9]+, lsl 2\]\n} 9 } } */
+
+/* { dg-final { scan-assembler-times {\tld1d\tz[0-9]+\.d, p[0-7]/z, \[x[0-9]+, x[0-9]+, lsl 3\]\n} 36 } } */
+/* { dg-final { scan-assembler-times {\tcmpeq\tp[0-7]\.d, p[0-7]/z, z[0-9]+\.d, z[0-9]+\.d\n} 6 } } */
+/* { dg-final { scan-assembler-times {\tfcmeq\tp[0-7]\.d, p[0-7]/z, z[0-9]+\.d, z[0-9]+\.d\n} 3 } } */
+/* { dg-final { scan-assembler-times {\tld1d\tz[0-9]+\.d, p[0-7]/z, \[x[0-9]+, z[0-9]+\.d, lsl 3\]\n} 9 } } */
+/* { dg-final { scan-assembler-times {\tst1d\tz[0-9]+\.d, p[0-7], \[x[0-9]+, x[0-9]+, lsl 3\]\n} 9 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_1_run.c b/gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_1_run.c
deleted file mode 100644
index 89ccf3e35a4..00000000000
--- a/gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_1_run.c
+++ /dev/null
@@ -1,72 +0,0 @@
-/* { dg-do run { target { aarch64_sve_hw } } } */
-/* { dg-options "-O2 -ftree-vectorize -fno-inline -march=armv8-a+sve" } */
-
-#include "sve_mask_gather_load_1.c"
-
-#include <stdio.h>
-
-extern void abort ();
-
-/* TODO: Support widening forms of gather loads and test them here. */
-
-#define NUM_ELEMS(TYPE) (32 / sizeof (TYPE))
-
-#define INDEX_VEC_INIT(INDEXTYPE)\
- INDEXTYPE index_##INDEXTYPE[NUM_ELEMS (INDEXTYPE)];\
-
-#define VEC_INIT(OUTTYPE,LOOKUPTYPE,INDEXTYPE)\
- LOOKUPTYPE lookup_##LOOKUPTYPE[NUM_ELEMS (LOOKUPTYPE)];\
- OUTTYPE out_##OUTTYPE[NUM_ELEMS (OUTTYPE)];\
- {\
- int i;\
- for (i = 0; i < NUM_ELEMS (INDEXTYPE); i++)\
- {\
- lookup_##LOOKUPTYPE [i] = i * 2;\
- index_##INDEXTYPE [i] = ODD (i) ? i : INVALID_INDEX (INDEXTYPE);\
- }\
- }
-
-#define TEST_MASK_GATHER_LOAD(OUTTYPE,LOOKUPTYPE,INDEXTYPE)\
- fun_##OUTTYPE##LOOKUPTYPE##INDEXTYPE\
- (out_##OUTTYPE, lookup_##LOOKUPTYPE, index_##INDEXTYPE,\
- NUM_ELEMS (INDEXTYPE));\
- {\
- int i;\
- for (i = 0; i < NUM_ELEMS (OUTTYPE); i++)\
- {\
- if (ODD (i) && out_##OUTTYPE[i] != (i * 2))\
- break;\
- else if (!ODD (i) && out_##OUTTYPE[i] != INVALID_INDEX (INDEXTYPE))\
- break;\
- }\
- if (i < NUM_ELEMS (OUTTYPE))\
- {\
- fprintf (stderr, "out_" # OUTTYPE "[%d] = %d\n",\
- i, (int) out_##OUTTYPE[i]);\
- abort ();\
- }\
- }
-
-int main()
-{
- INDEX_VEC_INIT (int32_t)
- INDEX_VEC_INIT (int64_t)
- INDEX_VEC_INIT (uint32_t)
- INDEX_VEC_INIT (uint64_t)
-
- VEC_INIT (int32_t, int32_t, int32_t)
- VEC_INIT (int64_t, int64_t, int64_t)
- VEC_INIT (uint32_t, uint32_t, uint32_t)
- VEC_INIT (uint64_t, uint64_t, uint64_t)
- VEC_INIT (float, float, int32_t)
- VEC_INIT (double, double, int64_t)
-
- TEST_MASK_GATHER_LOAD (int32_t, int32_t, int32_t)
- TEST_MASK_GATHER_LOAD (int64_t, int64_t, int64_t)
- TEST_MASK_GATHER_LOAD (uint32_t, uint32_t, uint32_t)
- TEST_MASK_GATHER_LOAD (uint64_t, uint64_t, uint64_t)
- TEST_MASK_GATHER_LOAD (float, float, int32_t)
- TEST_MASK_GATHER_LOAD (double, double, int64_t)
-
- return 0;
-}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_2.c b/gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_2.c
index 48db58ffefd..8dd48462b51 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_2.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_2.c
@@ -1,60 +1,19 @@
/* { dg-do assemble } */
-/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
+/* { dg-options "-O2 -ftree-vectorize -ffast-math -march=armv8-a+sve --save-temps" } */
-#include <stdint.h>
+#define INDEX32 uint32_t
+#define INDEX64 uint64_t
-#define NUM_ELEMS(TYPE) (4 * (32 / sizeof (TYPE)))
-#define INVALID_INDEX(TYPE) ((TYPE) 107)
-#define IS_VALID_INDEX(TYPE, VAL) (VAL < INVALID_INDEX (TYPE))
+#include "sve_mask_gather_load_1.c"
-/* TODO: This is a bit ugly for floating point types as it involves FP<>INT
- conversions, but I can't find another way of auto-vectorizing the code to
- make use of SVE gather instructions. */
-#define DEF_MASK_GATHER_LOAD(OUTTYPE,LOOKUPTYPE,INDEXTYPE)\
-void fun_##OUTTYPE##LOOKUPTYPE##INDEXTYPE (OUTTYPE *__restrict out,\
- LOOKUPTYPE *__restrict lookup,\
- INDEXTYPE *__restrict index, INDEXTYPE n)\
-{\
- INDEXTYPE i;\
- for (i = 0; i < n; ++i)\
- {\
- LOOKUPTYPE x = index[i];\
- if (IS_VALID_INDEX (LOOKUPTYPE, x))\
- x = lookup[x];\
- out[i] = x;\
- }\
-}\
+/* { dg-final { scan-assembler-times {\tld1w\tz[0-9]+\.s, p[0-7]/z, \[x[0-9]+, x[0-9]+, lsl 2\]\n} 36 } } */
+/* { dg-final { scan-assembler-times {\tcmpeq\tp[0-7]\.s, p[0-7]/z, z[0-9]+\.s, z[0-9]+\.s\n} 6 } } */
+/* { dg-final { scan-assembler-times {\tfcmeq\tp[0-7]\.s, p[0-7]/z, z[0-9]+\.s, z[0-9]+\.s\n} 3 } } */
+/* { dg-final { scan-assembler-times {\tld1w\tz[0-9]+\.s, p[0-7]/z, \[x[0-9]+, z[0-9]+\.s, uxtw 2\]\n} 9 } } */
+/* { dg-final { scan-assembler-times {\tst1w\tz[0-9]+\.s, p[0-7], \[x[0-9]+, x[0-9]+, lsl 2\]\n} 9 } } */
-DEF_MASK_GATHER_LOAD (int32_t, int32_t, int8_t)
-DEF_MASK_GATHER_LOAD (int64_t, int64_t, int8_t)
-DEF_MASK_GATHER_LOAD (int32_t, int32_t, int16_t)
-DEF_MASK_GATHER_LOAD (int64_t, int64_t, int16_t)
-DEF_MASK_GATHER_LOAD (int64_t, int64_t, int32_t)
-DEF_MASK_GATHER_LOAD (uint32_t, uint32_t, uint8_t)
-DEF_MASK_GATHER_LOAD (uint64_t, uint64_t, uint8_t)
-DEF_MASK_GATHER_LOAD (uint32_t, uint32_t, uint16_t)
-DEF_MASK_GATHER_LOAD (uint64_t, uint64_t, uint16_t)
-DEF_MASK_GATHER_LOAD (uint64_t, uint64_t, uint32_t)
-
-/* At present we only use predicate unpacks when the index type is
- half the size of the result type. */
-/* { dg-final { scan-assembler-times "\tpunpklo\\tp\[0-9\]+\.h, p\[0-9\]+\.b" 4 } } */
-/* { dg-final { scan-assembler-times "\tpunpkhi\\tp\[0-9\]+\.h, p\[0-9\]+\.b" 4 } } */
-
-/* { dg-final { scan-assembler-times "\tsunpklo\\tz\[0-9\]+\.h, z\[0-9\]+\.b" 2 } } */
-/* { dg-final { scan-assembler-times "\tsunpkhi\\tz\[0-9\]+\.h, z\[0-9\]+\.b" 2 } } */
-/* { dg-final { scan-assembler-times "\tsunpklo\\tz\[0-9\]+\.s, z\[0-9\]+\.h" 6 } } */
-/* { dg-final { scan-assembler-times "\tsunpkhi\\tz\[0-9\]+\.s, z\[0-9\]+\.h" 6 } } */
-/* { dg-final { scan-assembler-times "\tsunpklo\\tz\[0-9\]+\.d, z\[0-9\]+\.s" 7 } } */
-/* { dg-final { scan-assembler-times "\tsunpkhi\\tz\[0-9\]+\.d, z\[0-9\]+\.s" 7 } } */
-
-/* { dg-final { scan-assembler-times "\tuunpklo\\tz\[0-9\]+\.h, z\[0-9\]+\.b" 2 } } */
-/* { dg-final { scan-assembler-times "\tuunpkhi\\tz\[0-9\]+\.h, z\[0-9\]+\.b" 2 } } */
-/* { dg-final { scan-assembler-times "\tuunpklo\\tz\[0-9\]+\.s, z\[0-9\]+\.h" 6 } } */
-/* { dg-final { scan-assembler-times "\tuunpkhi\\tz\[0-9\]+\.s, z\[0-9\]+\.h" 6 } } */
-/* { dg-final { scan-assembler-times "\tuunpklo\\tz\[0-9\]+\.d, z\[0-9\]+\.s" 7 } } */
-/* { dg-final { scan-assembler-times "\tuunpkhi\\tz\[0-9\]+\.d, z\[0-9\]+\.s" 7 } } */
-
-/* { dg-final { scan-assembler-times "ld1w\\tz\[0-9\]+.s, p\[0-9\]+/z, \\\[x\[0-9\]+, z\[0-9\]+.s, uxtw 2\\\]" 6 } } */
-/* { dg-final { scan-assembler-times "ld1w\\tz\[0-9\]+.s, p\[0-9\]+/z, \\\[x\[0-9\]+, z\[0-9\]+.s, sxtw 2\\\]" 6 } } */
-/* { dg-final { scan-assembler-times "ld1d\\tz\[0-9\]+.d, p\[0-9\]+/z, \\\[x\[0-9\]+, z\[0-9\]+.d, lsl 3\\\]" 28 } } */
+/* { dg-final { scan-assembler-times {\tld1d\tz[0-9]+\.d, p[0-7]/z, \[x[0-9]+, x[0-9]+, lsl 3\]\n} 36 } } */
+/* { dg-final { scan-assembler-times {\tcmpeq\tp[0-7]\.d, p[0-7]/z, z[0-9]+\.d, z[0-9]+\.d\n} 6 } } */
+/* { dg-final { scan-assembler-times {\tfcmeq\tp[0-7]\.d, p[0-7]/z, z[0-9]+\.d, z[0-9]+\.d\n} 3 } } */
+/* { dg-final { scan-assembler-times {\tld1d\tz[0-9]+\.d, p[0-7]/z, \[x[0-9]+, z[0-9]+\.d, lsl 3\]\n} 9 } } */
+/* { dg-final { scan-assembler-times {\tst1d\tz[0-9]+\.d, p[0-7], \[x[0-9]+, x[0-9]+, lsl 3\]\n} 9 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_2_run.c b/gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_2_run.c
deleted file mode 100644
index c5280546206..00000000000
--- a/gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_2_run.c
+++ /dev/null
@@ -1,98 +0,0 @@
-/* { dg-do run { target { aarch64_sve_hw } } } */
-/* { dg-options "-O2 -ftree-vectorize -fno-inline -march=armv8-a+sve" } */
-
-#include "sve_mask_gather_load_2.c"
-
-#include <stdio.h>
-
-extern void abort ();
-
-#define ODD(VAL) (VAL & 0x1)
-#define INDEX_VEC_INIT(INDEXTYPE)\
- INDEXTYPE index_##INDEXTYPE[NUM_ELEMS (int8_t)];\
-
-#define VEC_INIT(OUTTYPE,LOOKUPTYPE,INDEXTYPE)\
- LOOKUPTYPE lookup_##LOOKUPTYPE[NUM_ELEMS (OUTTYPE)];\
- OUTTYPE out_##OUTTYPE[NUM_ELEMS (OUTTYPE)];\
- {\
- int i;\
- for (i = 0; i < NUM_ELEMS (OUTTYPE); i++)\
- {\
- lookup_##LOOKUPTYPE [i] = i * 2;\
- index_##INDEXTYPE [i] = ODD (i) ? i : INVALID_INDEX (INDEXTYPE);\
- }\
- }
-
-#define TEST_MASK_GATHER_LOAD(OUTTYPE,LOOKUPTYPE,INDEXTYPE)\
- fun_##OUTTYPE##LOOKUPTYPE##INDEXTYPE\
- (out_##OUTTYPE, lookup_##LOOKUPTYPE, index_##INDEXTYPE,\
- NUM_ELEMS (OUTTYPE));\
- {\
- int i;\
- for (i = 0; i < NUM_ELEMS (OUTTYPE); i++)\
- {\
- if (ODD (i) && out_##OUTTYPE[i] != (i * 2))\
- break;\
- else if (!ODD (i) && out_##OUTTYPE[i] != INVALID_INDEX (OUTTYPE))\
- break;\
- }\
- if (i < NUM_ELEMS (OUTTYPE))\
- {\
- fprintf (stderr, "out_" # OUTTYPE "[%d] = %d\n",\
- i, (int) out_##OUTTYPE[i]);\
- abort ();\
- }\
- }
-
-int main()
-{
- INDEX_VEC_INIT (int8_t)
- INDEX_VEC_INIT (int16_t)
- INDEX_VEC_INIT (int32_t)
- INDEX_VEC_INIT (uint8_t)
- INDEX_VEC_INIT (uint16_t)
- INDEX_VEC_INIT (uint32_t)
-
- {
- VEC_INIT (int32_t, int32_t, int8_t)
- TEST_MASK_GATHER_LOAD (int32_t, int32_t, int8_t)
- }
- {
- VEC_INIT (int64_t, int64_t, int8_t)
- TEST_MASK_GATHER_LOAD (int64_t, int64_t, int8_t)
- }
- {
- VEC_INIT (int32_t, int32_t, int16_t)
- TEST_MASK_GATHER_LOAD (int32_t, int32_t, int16_t)
- }
- {
- VEC_INIT (int64_t, int64_t, int16_t)
- TEST_MASK_GATHER_LOAD (int64_t, int64_t, int16_t)
- }
- {
- VEC_INIT (int64_t, int64_t, int32_t)
- TEST_MASK_GATHER_LOAD (int64_t, int64_t, int32_t)
- }
- {
- VEC_INIT (uint32_t, uint32_t, uint8_t)
- TEST_MASK_GATHER_LOAD (uint32_t, uint32_t, uint8_t)
- }
- {
- VEC_INIT (uint64_t, uint64_t, uint8_t)
- TEST_MASK_GATHER_LOAD (uint64_t, uint64_t, uint8_t)
- }
- {
- VEC_INIT (uint32_t, uint32_t, uint16_t)
- TEST_MASK_GATHER_LOAD (uint32_t, uint32_t, uint16_t)
- }
- {
- VEC_INIT (uint64_t, uint64_t, uint16_t)
- TEST_MASK_GATHER_LOAD (uint64_t, uint64_t, uint16_t)
- }
- {
- VEC_INIT (uint64_t, uint64_t, uint32_t)
- TEST_MASK_GATHER_LOAD (uint64_t, uint64_t, uint32_t)
- }
-
- return 0;
-}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_3.c b/gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_3.c
index 2965760e058..b370f532f2c 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_3.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_3.c
@@ -1,29 +1,52 @@
/* { dg-do assemble } */
/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve -ffast-math --save-temps" } */
-#define TEST_LOOP(NAME, DATA_TYPE, INDEX_TYPE) \
- DATA_TYPE __attribute__ ((noinline)) \
- NAME (char *data, INDEX_TYPE *indices, signed char n) \
+#include <stdint.h>
+
+#ifndef INDEX32
+#define INDEX32 int32_t
+#define INDEX64 int64_t
+#endif
+
+#define TEST_LOOP(DATA_TYPE, CMP_TYPE, BITS) \
+ void \
+ f_##DATA_TYPE##_##CMP_TYPE \
+ (DATA_TYPE *restrict dest, DATA_TYPE *restrict src, \
+ CMP_TYPE *cmp1, CMP_TYPE *cmp2, INDEX##BITS *indices, int n) \
{ \
- DATA_TYPE sum = 0; \
- for (signed char i = 0; i < n; ++i) \
- { \
- INDEX_TYPE index = indices[i]; \
- sum += (index & 16 ? *(DATA_TYPE *) (data + index) : 1); \
- } \
- return sum; \
+ for (int i = 0; i < n; ++i) \
+ if (cmp1[i] == cmp2[i]) \
+ dest[i] += *(DATA_TYPE *) ((char *) src + indices[i]); \
}
-TEST_LOOP (f_s32, int, unsigned int)
-TEST_LOOP (f_u32, unsigned int, unsigned int)
-TEST_LOOP (f_f32, float, unsigned int)
+#define TEST32(T, DATA_TYPE) \
+ T (DATA_TYPE, int32_t, 32) \
+ T (DATA_TYPE, uint32_t, 32) \
+ T (DATA_TYPE, float, 32)
+
+#define TEST64(T, DATA_TYPE) \
+ T (DATA_TYPE, int64_t, 64) \
+ T (DATA_TYPE, uint64_t, 64) \
+ T (DATA_TYPE, double, 64)
+
+#define TEST_ALL(T) \
+ TEST32 (T, int32_t) \
+ TEST32 (T, uint32_t) \
+ TEST32 (T, float) \
+ TEST64 (T, int64_t) \
+ TEST64 (T, uint64_t) \
+ TEST64 (T, double)
+
+TEST_ALL (TEST_LOOP)
-TEST_LOOP (f_s64_s64, long, long)
-TEST_LOOP (f_s64_u64, long, unsigned long)
-TEST_LOOP (f_u64_s64, unsigned long, long)
-TEST_LOOP (f_u64_u64, unsigned long, unsigned long)
-TEST_LOOP (f_f64_s64, double, long)
-TEST_LOOP (f_f64_u64, double, unsigned long)
+/* { dg-final { scan-assembler-times {\tld1w\tz[0-9]+\.s, p[0-7]/z, \[x[0-9]+, x[0-9]+, lsl 2\]\n} 36 } } */
+/* { dg-final { scan-assembler-times {\tcmpeq\tp[0-7]\.s, p[0-7]/z, z[0-9]+\.s, z[0-9]+\.s\n} 6 } } */
+/* { dg-final { scan-assembler-times {\tfcmeq\tp[0-7]\.s, p[0-7]/z, z[0-9]+\.s, z[0-9]+\.s\n} 3 } } */
+/* { dg-final { scan-assembler-times {\tld1w\tz[0-9]+\.s, p[0-7]/z, \[x[0-9]+, z[0-9]+\.s, sxtw\]\n} 9 } } */
+/* { dg-final { scan-assembler-times {\tst1w\tz[0-9]+\.s, p[0-7], \[x[0-9]+, x[0-9]+, lsl 2\]\n} 9 } } */
-/* { dg-final { scan-assembler-times {\tld1w\tz[0-9]+\.s, p[0-7]/z, \[x[0-9]+, z[0-9]+\.s, uxtw\]} 3 } } */
-/* { dg-final { scan-assembler-times {\tld1d\tz[0-9]+\.d, p[0-7]/z, \[x[0-9]+, z[0-9]+\.d\]} 6 } } */
+/* { dg-final { scan-assembler-times {\tld1d\tz[0-9]+\.d, p[0-7]/z, \[x[0-9]+, x[0-9]+, lsl 3\]\n} 36 } } */
+/* { dg-final { scan-assembler-times {\tcmpeq\tp[0-7]\.d, p[0-7]/z, z[0-9]+\.d, z[0-9]+\.d\n} 6 } } */
+/* { dg-final { scan-assembler-times {\tfcmeq\tp[0-7]\.d, p[0-7]/z, z[0-9]+\.d, z[0-9]+\.d\n} 3 } } */
+/* { dg-final { scan-assembler-times {\tld1d\tz[0-9]+\.d, p[0-7]/z, \[x[0-9]+, z[0-9]+\.d\]\n} 9 } } */
+/* { dg-final { scan-assembler-times {\tst1d\tz[0-9]+\.d, p[0-7], \[x[0-9]+, x[0-9]+, lsl 3\]\n} 9 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_3_run.c b/gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_3_run.c
deleted file mode 100644
index aa73c81ffca..00000000000
--- a/gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_3_run.c
+++ /dev/null
@@ -1,47 +0,0 @@
-/* { dg-do run { target aarch64_sve_hw } } */
-/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve -ffast-math" } */
-
-#include "sve_mask_gather_load_3.c"
-
-extern void abort (void);
-
-#define N 57
-
-#undef TEST_LOOP
-#define TEST_LOOP(NAME, DATA_TYPE, INDEX_TYPE) \
- { \
- INDEX_TYPE indices[N]; \
- DATA_TYPE data[N * 2]; \
- for (int i = 0; i < N * 2; ++i) \
- data[i] = (i / 2) * 4 + i % 2; \
- DATA_TYPE sum = 0; \
- for (int i = 0; i < N; ++i) \
- { \
- INDEX_TYPE j = (i * 3 / 2) * sizeof (DATA_TYPE); \
- j &= (1ULL << (sizeof (INDEX_TYPE) * 8 - 1)) - 1; \
- if (j & 16) \
- sum += data[j / sizeof (DATA_TYPE)]; \
- else \
- sum += 1; \
- indices[i] = j; \
- } \
- DATA_TYPE res = NAME ((char *) data, indices, N); \
- if (res != sum) \
- abort (); \
- }
-
-int __attribute__ ((optimize (1)))
-main ()
-{
- TEST_LOOP (f_s32, int, unsigned int)
- TEST_LOOP (f_u32, unsigned int, unsigned int)
- TEST_LOOP (f_f32, float, unsigned int)
-
- TEST_LOOP (f_s64_s64, long, long)
- TEST_LOOP (f_s64_u64, long, unsigned long)
- TEST_LOOP (f_u64_s64, unsigned long, long)
- TEST_LOOP (f_u64_u64, unsigned long, unsigned long)
- TEST_LOOP (f_f64_s64, double, long)
- TEST_LOOP (f_f64_u64, double, unsigned long)
- return 0;
-}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_4.c b/gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_4.c
index 38bb5275e59..0464e9343a3 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_4.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_4.c
@@ -1,18 +1,19 @@
/* { dg-do assemble } */
/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve -ffast-math --save-temps" } */
-#define TEST_LOOP(NAME, TYPE) \
- TYPE __attribute__ ((noinline)) \
- NAME (TYPE **indices, long *mask, int n) \
- { \
- TYPE sum = 0; \
- for (int i = 0; i < n; ++i) \
- sum += mask[i] ? *indices[i] : 1; \
- return sum; \
- }
+#define INDEX32 uint32_t
+#define INDEX64 uint64_t
-TEST_LOOP (f_s64, long)
-TEST_LOOP (f_u64, unsigned long)
-TEST_LOOP (f_f64, double)
+#include "sve_mask_gather_load_3.c"
-/* { dg-final { scan-assembler-times {\tld1d\tz[0-9]+\.d, p[0-7]/z, \[z[0-9]+\.d\]} 3 } } */
+/* { dg-final { scan-assembler-times {\tld1w\tz[0-9]+\.s, p[0-7]/z, \[x[0-9]+, x[0-9]+, lsl 2\]\n} 36 } } */
+/* { dg-final { scan-assembler-times {\tcmpeq\tp[0-7]\.s, p[0-7]/z, z[0-9]+\.s, z[0-9]+\.s\n} 6 } } */
+/* { dg-final { scan-assembler-times {\tfcmeq\tp[0-7]\.s, p[0-7]/z, z[0-9]+\.s, z[0-9]+\.s\n} 3 } } */
+/* { dg-final { scan-assembler-times {\tld1w\tz[0-9]+\.s, p[0-7]/z, \[x[0-9]+, z[0-9]+\.s, uxtw\]\n} 9 } } */
+/* { dg-final { scan-assembler-times {\tst1w\tz[0-9]+\.s, p[0-7], \[x[0-9]+, x[0-9]+, lsl 2\]\n} 9 } } */
+
+/* { dg-final { scan-assembler-times {\tld1d\tz[0-9]+\.d, p[0-7]/z, \[x[0-9]+, x[0-9]+, lsl 3\]\n} 36 } } */
+/* { dg-final { scan-assembler-times {\tcmpeq\tp[0-7]\.d, p[0-7]/z, z[0-9]+\.d, z[0-9]+\.d\n} 6 } } */
+/* { dg-final { scan-assembler-times {\tfcmeq\tp[0-7]\.d, p[0-7]/z, z[0-9]+\.d, z[0-9]+\.d\n} 3 } } */
+/* { dg-final { scan-assembler-times {\tld1d\tz[0-9]+\.d, p[0-7]/z, \[x[0-9]+, z[0-9]+\.d\]\n} 9 } } */
+/* { dg-final { scan-assembler-times {\tst1d\tz[0-9]+\.d, p[0-7], \[x[0-9]+, x[0-9]+, lsl 3\]\n} 9 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_4_run.c b/gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_4_run.c
deleted file mode 100644
index 8a6320a002c..00000000000
--- a/gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_4_run.c
+++ /dev/null
@@ -1,37 +0,0 @@
-/* { dg-do run { target aarch64_sve_hw } } */
-/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve -ffast-math" } */
-
-#include "sve_mask_gather_load_4.c"
-
-extern void abort (void);
-
-#define N 57
-
-#undef TEST_LOOP
-#define TEST_LOOP(NAME, TYPE) \
- { \
- TYPE *ptrs[N]; \
- TYPE data[N * 2]; \
- long mask[N]; \
- for (int i = 0; i < N * 2; ++i) \
- data[i] = (i / 2) * 4 + i % 2; \
- TYPE sum = 0; \
- for (int i = 0; i < N; ++i) \
- { \
- mask[i] = i & 15; \
- ptrs[i] = &data[i * 3 / 2]; \
- sum += mask[i] ? *ptrs[i] : 1; \
- } \
- TYPE res = NAME (ptrs, mask, N); \
- if (res != sum) \
- abort (); \
- }
-
-int __attribute__ ((optimize (1)))
-main ()
-{
- TEST_LOOP (f_s64, long)
- TEST_LOOP (f_u64, unsigned long)
- TEST_LOOP (f_f64, double)
- return 0;
-}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_5.c b/gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_5.c
index abb38e40f72..831d594654a 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_5.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_5.c
@@ -1,120 +1,38 @@
/* { dg-do assemble } */
-/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
-
-#define MASK_GATHER_LOAD1(OBJTYPE,MASKTYPE,STRIDETYPE,STRIDE)\
-void mgather_load1##OBJTYPE##STRIDETYPE##STRIDE (OBJTYPE * restrict dst,\
- OBJTYPE * restrict src,\
- MASKTYPE * restrict masks,\
- STRIDETYPE count)\
-{\
- for (STRIDETYPE i=0; i<count; i++)\
- if (masks[i * STRIDE])\
- dst[i] = src[i * STRIDE];\
-}
-
-#define MASK_GATHER_LOAD2(OBJTYPE,MASKTYPE,STRIDETYPE)\
-void mgather_load2##OBJTYPE##STRIDETYPE (OBJTYPE * restrict dst,\
- OBJTYPE * restrict src,\
- MASKTYPE * restrict masks,\
- STRIDETYPE stride,\
- STRIDETYPE count)\
-{\
- for (STRIDETYPE i=0; i<count; i++)\
- if (masks[i * stride])\
- dst[i] = src[i * stride];\
-}
-
-#define MASK_GATHER_LOAD3(OBJTYPE,MASKTYPE,STRIDETYPE)\
-void mgather_load3s5##OBJTYPE##STRIDETYPE\
- (OBJTYPE * restrict d1, OBJTYPE * restrict d2, OBJTYPE * restrict d3,\
- OBJTYPE * restrict d4, OBJTYPE * restrict d5, OBJTYPE * restrict src,\
- MASKTYPE * restrict masks, STRIDETYPE count)\
-{\
- const STRIDETYPE STRIDE = 5;\
- for (STRIDETYPE i=0; i<count; i++)\
- if (masks[i * STRIDE])\
- {\
- d1[i] = src[0 + (i * STRIDE)];\
- d2[i] = src[1 + (i * STRIDE)];\
- d3[i] = src[2 + (i * STRIDE)];\
- d4[i] = src[3 + (i * STRIDE)];\
- d5[i] = src[4 + (i * STRIDE)];\
- }\
-}
-
-#define MASK_GATHER_LOAD4(OBJTYPE,MASKTYPE,STRIDETYPE,STRIDE)\
-void mgather_load4##OBJTYPE##STRIDETYPE##STRIDE (OBJTYPE * restrict dst,\
- OBJTYPE * restrict src,\
- MASKTYPE * restrict masks,\
- STRIDETYPE count)\
-{\
- for (STRIDETYPE i=0; i<count; i++)\
- {\
- if (masks[i * STRIDE])\
- *dst = *src;\
- dst += 1;\
- src += STRIDE;\
- }\
-}
-
-#define MASK_GATHER_LOAD5(OBJTYPE,MASKTYPE,STRIDETYPE)\
-void mgather_load5##OBJTYPE##STRIDETYPE (OBJTYPE * restrict dst,\
- OBJTYPE * restrict src,\
- MASKTYPE * restrict masks,\
- STRIDETYPE stride,\
- STRIDETYPE count)\
-{\
- for (STRIDETYPE i=0; i<count; i++)\
- {\
- if (masks[i * stride])\
- *dst = *src;\
- dst += 1;\
- src += stride;\
- }\
-}
-
-MASK_GATHER_LOAD1 (double, long, long, 5)
-MASK_GATHER_LOAD1 (double, long, long, 8)
-MASK_GATHER_LOAD1 (double, long, long, 21)
-MASK_GATHER_LOAD1 (double, long, long, 1009)
-
-MASK_GATHER_LOAD1 (float, int, int, 5)
-MASK_GATHER_LOAD1 (float, int, int, 8)
-MASK_GATHER_LOAD1 (float, int, int, 21)
-MASK_GATHER_LOAD1 (float, int, int, 1009)
-
-MASK_GATHER_LOAD2 (double, long, long)
-MASK_GATHER_LOAD2 (float, int, int)
-
-MASK_GATHER_LOAD3 (double, long, long)
-MASK_GATHER_LOAD3 (float, int, int)
-
-MASK_GATHER_LOAD4 (double, long, long, 5)
-
-/* NOTE: We can't vectorize MASK_GATHER_LOAD4 (float, int, int, 5) because we
- can't prove that the offsets used for the gather load won't overflow. */
-
-MASK_GATHER_LOAD5 (double, long, long)
-MASK_GATHER_LOAD5 (float, int, int)
-
-/* Widened forms. */
-MASK_GATHER_LOAD1 (double, long, int, 5)
-MASK_GATHER_LOAD1 (double, long, int, 8)
-MASK_GATHER_LOAD1 (double, long, short, 5)
-MASK_GATHER_LOAD1 (double, long, short, 8)
-
-MASK_GATHER_LOAD1 (float, int, short, 5)
-MASK_GATHER_LOAD1 (float, int, short, 8)
-
-MASK_GATHER_LOAD2 (double, long, int)
-MASK_GATHER_LOAD2 (float, int, short)
-
-MASK_GATHER_LOAD4 (double, long, int, 5)
-MASK_GATHER_LOAD4 (float, int, short, 5)
-
-MASK_GATHER_LOAD5 (double, long, int)
-
-/* Loads including masks. */
-/* { dg-final { scan-assembler-times "ld1d\\tz\[0-9\]+.d, p\[0-9\]+/z, \\\[x\[0-9\]+, z\[0-9\]+.d\\\]" 34 } } */
-/* { dg-final { scan-assembler-times "ld1w\\tz\[0-9\]+.s, p\[0-9\]+/z, \\\[x\[0-9\]+, z\[0-9\]+.s, sxtw 2\\\]" 20 } } */
-/* { dg-final { scan-assembler-times "ld1w\\tz\[0-9\]+.s, p\[0-9\]+/z, \\\[x\[0-9\]+, z\[0-9\]+.s, sxtw\\\]" 6 } } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve -ffast-math --save-temps" } */
+
+#include <stdint.h>
+
+#ifndef INDEX32
+#define INDEX32 int32_t
+#define INDEX64 int64_t
+#endif
+
+#define TEST_LOOP(DATA_TYPE, CMP_TYPE) \
+ void \
+ f_##DATA_TYPE##_##CMP_TYPE \
+ (DATA_TYPE *restrict dest, DATA_TYPE *restrict *restrict src, \
+ CMP_TYPE *cmp1, CMP_TYPE *cmp2, int n) \
+ { \
+ for (int i = 0; i < n; ++i) \
+ if (cmp1[i] == cmp2[i]) \
+ dest[i] += *src[i]; \
+ }
+
+#define TEST_TYPE(T, DATA_TYPE) \
+ T (DATA_TYPE, int64_t) \
+ T (DATA_TYPE, uint64_t) \
+ T (DATA_TYPE, double)
+
+#define TEST_ALL(T) \
+ TEST_TYPE (T, int64_t) \
+ TEST_TYPE (T, uint64_t) \
+ TEST_TYPE (T, double)
+
+TEST_ALL (TEST_LOOP)
+
+/* { dg-final { scan-assembler-times {\tld1d\tz[0-9]+\.d, p[0-7]/z, \[x[0-9]+, x[0-9]+, lsl 3\]\n} 36 } } */
+/* { dg-final { scan-assembler-times {\tcmpeq\tp[0-7]\.d, p[0-7]/z, z[0-9]+\.d, z[0-9]+\.d\n} 6 } } */
+/* { dg-final { scan-assembler-times {\tfcmeq\tp[0-7]\.d, p[0-7]/z, z[0-9]+\.d, z[0-9]+\.d\n} 3 } } */
+/* { dg-final { scan-assembler-times {\tld1d\tz[0-9]+\.d, p[0-7]/z, \[z[0-9]+\.d\]\n} 9 } } */
+/* { dg-final { scan-assembler-times {\tst1d\tz[0-9]+\.d, p[0-7], \[x[0-9]+, x[0-9]+, lsl 3\]\n} 9 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_5_run.c b/gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_5_run.c
deleted file mode 100644
index 445c47f23ac..00000000000
--- a/gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_5_run.c
+++ /dev/null
@@ -1,177 +0,0 @@
-/* { dg-do run { target { aarch64_sve_hw } } } */
-/* { dg-options "-O2 -ftree-vectorize -fno-inline -march=armv8-a+sve" } */
-
-#include <unistd.h>
-
-extern void abort (void);
-extern void *memset(void *, int, size_t);
-
-#include "sve_mask_gather_load_5.c"
-
-#define NUM_DST_ELEMS 13
-#define NUM_SRC_ELEMS(STRIDE) (NUM_DST_ELEMS * STRIDE)
-
-#define MASKED_VALUE 3
-
-#define TEST_MASK_GATHER_LOAD_COMMON1(FUN,OBJTYPE,MASKTYPE,STRIDETYPE,STRIDE)\
-{\
- OBJTYPE real_src[1 + NUM_SRC_ELEMS (STRIDE)]\
- __attribute__((aligned (32)));\
- OBJTYPE real_dst[1 + NUM_DST_ELEMS]\
- __attribute__((aligned (32)));\
- MASKTYPE masks[NUM_SRC_ELEMS (STRIDE)];\
- memset (real_src, 0, (1 + NUM_SRC_ELEMS (STRIDE)) * sizeof (OBJTYPE));\
- memset (masks, 0, (NUM_SRC_ELEMS (STRIDE)) * sizeof (MASKTYPE));\
- real_dst[0] = 0;\
- OBJTYPE *src = &real_src[1];\
- OBJTYPE *dst = &real_dst[1];\
- for (STRIDETYPE i = 0; i < NUM_DST_ELEMS; i++)\
- {\
- src[i * STRIDE] = i;\
- dst[i] = MASKED_VALUE;\
- masks[i * STRIDE] = i & 0x1;\
- }\
- FUN##OBJTYPE##STRIDETYPE##STRIDE \
- (dst, src, masks, NUM_DST_ELEMS); \
- for (STRIDETYPE i = 0; i < NUM_DST_ELEMS; i++)\
- if (dst[i] != (masks[i * STRIDE] ? i : MASKED_VALUE))\
- abort ();\
-}
-
-#define TEST_MASK_GATHER_LOAD_COMMON2(FUN,OBJTYPE,MASKTYPE,STRIDETYPE,STRIDE)\
-{\
- OBJTYPE real_src[1 + NUM_SRC_ELEMS (STRIDE)]\
- __attribute__((aligned (32)));\
- OBJTYPE real_dst[1 + NUM_DST_ELEMS]\
- __attribute__((aligned (32)));\
- MASKTYPE masks[NUM_SRC_ELEMS (STRIDE)];\
- memset (real_src, 0, (1 + NUM_SRC_ELEMS (STRIDE)) * sizeof (OBJTYPE));\
- memset (masks, 0, (NUM_SRC_ELEMS (STRIDE)) * sizeof (MASKTYPE));\
- real_dst[0] = 0;\
- OBJTYPE *src = &real_src[1];\
- OBJTYPE *dst = &real_dst[1];\
- for (STRIDETYPE i = 0; i < NUM_DST_ELEMS; i++)\
- {\
- src[i * STRIDE] = i;\
- dst[i] = MASKED_VALUE;\
- masks[i * STRIDE] = i & 0x1;\
- }\
- FUN##OBJTYPE##STRIDETYPE \
- (dst, src, masks, STRIDE, NUM_DST_ELEMS); \
- for (STRIDETYPE i = 0; i < NUM_DST_ELEMS; i++)\
- if (dst[i] != (masks[i * STRIDE] ? i : MASKED_VALUE))\
- abort ();\
-}
-
-#define TEST_MASK_GATHER_LOAD1(OBJTYPE,MASKTYPE,STRIDETYPE,STRIDE) \
- TEST_MASK_GATHER_LOAD_COMMON1 (mgather_load1, OBJTYPE, MASKTYPE, \
- STRIDETYPE, STRIDE)
-
-#define TEST_MASK_GATHER_LOAD2(OBJTYPE,MASKTYPE,STRIDETYPE,STRIDE) \
- TEST_MASK_GATHER_LOAD_COMMON2 (mgather_load2, OBJTYPE, MASKTYPE, \
- STRIDETYPE, STRIDE)
-
-#define TEST_MASK_GATHER_LOAD3(OBJTYPE,MASKTYPE,STRIDETYPE)\
-{\
- OBJTYPE real_src[1 + NUM_SRC_ELEMS (5)]\
- __attribute__((aligned (32)));\
- OBJTYPE real_dst1[1 + NUM_DST_ELEMS]\
- __attribute__((aligned (32)));\
- OBJTYPE real_dst2[1 + NUM_DST_ELEMS]\
- __attribute__((aligned (32)));\
- OBJTYPE real_dst3[1 + NUM_DST_ELEMS]\
- __attribute__((aligned (32)));\
- OBJTYPE real_dst4[1 + NUM_DST_ELEMS]\
- __attribute__((aligned (32)));\
- OBJTYPE real_dst5[1 + NUM_DST_ELEMS]\
- __attribute__((aligned (32)));\
- MASKTYPE masks[NUM_SRC_ELEMS (5)];\
- memset (real_src, 0, (1 + NUM_SRC_ELEMS (5)) * sizeof (OBJTYPE));\
- memset (masks, 0, (NUM_SRC_ELEMS (5)) * sizeof (MASKTYPE));\
- real_dst1[0] = real_dst2[0] = real_dst3[0] = real_dst4[0] = real_dst5[0] = 0;\
- OBJTYPE *src = &real_src[1];\
- OBJTYPE *dst1 = &real_dst1[1];\
- OBJTYPE *dst2 = &real_dst2[1];\
- OBJTYPE *dst3 = &real_dst3[1];\
- OBJTYPE *dst4 = &real_dst4[1];\
- OBJTYPE *dst5 = &real_dst5[1];\
- for (STRIDETYPE i = 0; i < NUM_SRC_ELEMS (5); i++)\
- src[i] = i;\
- for (STRIDETYPE i = 0; i < NUM_DST_ELEMS; i++)\
- {\
- dst1[i] = MASKED_VALUE;\
- dst2[i] = MASKED_VALUE;\
- dst3[i] = MASKED_VALUE;\
- dst4[i] = MASKED_VALUE;\
- dst5[i] = MASKED_VALUE;\
- masks[i * 5] = i & 0x1;\
- }\
- mgather_load3s5##OBJTYPE##STRIDETYPE \
- (dst1, dst2, dst3, dst4, dst5, src, masks, NUM_DST_ELEMS); \
- for (STRIDETYPE i = 0; i < NUM_DST_ELEMS; i++)\
- {\
- STRIDETYPE base = i * 5;\
- if (dst1[i] != (masks[base] ? base : MASKED_VALUE))\
- abort ();\
- if (dst2[i] != (masks[base] ? (base + 1) : MASKED_VALUE))\
- abort ();\
- if (dst3[i] != (masks[base] ? (base + 2) : MASKED_VALUE))\
- abort ();\
- if (dst4[i] != (masks[base] ? (base + 3) : MASKED_VALUE))\
- abort ();\
- if (dst5[i] != (masks[base] ? (base + 4) : MASKED_VALUE))\
- abort ();\
- }\
-}
-
-#define TEST_MASK_GATHER_LOAD4(OBJTYPE,MASKTYPE,STRIDETYPE,STRIDE) \
- TEST_MASK_GATHER_LOAD_COMMON1 (mgather_load4, OBJTYPE, MASKTYPE, \
- STRIDETYPE, STRIDE)
-
-#define TEST_MASK_GATHER_LOAD5(OBJTYPE,MASKTYPE,STRIDETYPE,STRIDE) \
- TEST_MASK_GATHER_LOAD_COMMON2 (mgather_load5, OBJTYPE, MASKTYPE, \
- STRIDETYPE, STRIDE)
-
-int main ()
-{
- TEST_MASK_GATHER_LOAD1 (double, long, long, 5);
- TEST_MASK_GATHER_LOAD1 (double, long, long, 8);
- TEST_MASK_GATHER_LOAD1 (double, long, long, 21);
-
- TEST_MASK_GATHER_LOAD1 (float, int, int, 5);
- TEST_MASK_GATHER_LOAD1 (float, int, int, 8);
- TEST_MASK_GATHER_LOAD1 (float, int, int, 21);
-
- TEST_MASK_GATHER_LOAD2 (double, long, long, 5);
- TEST_MASK_GATHER_LOAD2 (double, long, long, 8);
- TEST_MASK_GATHER_LOAD2 (double, long, long, 21);
-
- TEST_MASK_GATHER_LOAD3 (double, long, long);
- TEST_MASK_GATHER_LOAD3 (float, int, int);
-
- TEST_MASK_GATHER_LOAD4 (double, long, long, 5);
-
- TEST_MASK_GATHER_LOAD5 (double, long, long, 5);
- TEST_MASK_GATHER_LOAD5 (float, int, int, 5);
-
- /* Widened forms. */
- TEST_MASK_GATHER_LOAD1 (double, long, int, 5)
- TEST_MASK_GATHER_LOAD1 (double, long, int, 8)
- TEST_MASK_GATHER_LOAD1 (double, long, short, 5)
- TEST_MASK_GATHER_LOAD1 (double, long, short, 8)
-
- TEST_MASK_GATHER_LOAD1 (float, int, short, 5)
- TEST_MASK_GATHER_LOAD1 (float, int, short, 8)
-
- TEST_MASK_GATHER_LOAD2 (double, long, int, 5);
- TEST_MASK_GATHER_LOAD2 (double, long, int, 8);
- TEST_MASK_GATHER_LOAD2 (double, long, int, 21);
-
- TEST_MASK_GATHER_LOAD4 (double, long, int, 5);
- TEST_MASK_GATHER_LOAD4 (float, int, short, 5);
-
- TEST_MASK_GATHER_LOAD5 (double, long, int, 5);
-
- return 0;
-}
-
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_6.c b/gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_6.c
new file mode 100644
index 00000000000..64eb0c46278
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_6.c
@@ -0,0 +1,38 @@
+/* { dg-do assemble } */
+/* { dg-options "-O2 -ftree-vectorize -ffast-math -march=armv8-a+sve --save-temps" } */
+
+#include <stdint.h>
+
+#define TEST_LOOP(DATA_TYPE, CMP_TYPE, INDEX_TYPE) \
+ void \
+ f_##DATA_TYPE##_##CMP_TYPE##_##INDEX_TYPE \
+ (DATA_TYPE *restrict dest, DATA_TYPE *restrict src, \
+ CMP_TYPE *cmp1, CMP_TYPE *cmp2, INDEX_TYPE *indices, int n) \
+ { \
+ for (int i = 0; i < n; ++i) \
+ if (cmp1[i] == cmp2[i]) \
+ dest[i] += src[indices[i]]; \
+ }
+
+#define TEST32(T, DATA_TYPE) \
+ T (DATA_TYPE, int64_t, int32_t) \
+ T (DATA_TYPE, uint64_t, int32_t) \
+ T (DATA_TYPE, double, int32_t) \
+ T (DATA_TYPE, int64_t, uint32_t) \
+ T (DATA_TYPE, uint64_t, uint32_t) \
+ T (DATA_TYPE, double, uint32_t)
+
+#define TEST_ALL(T) \
+ TEST32 (T, int32_t) \
+ TEST32 (T, uint32_t) \
+ TEST32 (T, float)
+
+TEST_ALL (TEST_LOOP)
+
+/* { dg-final { scan-assembler-times {\tld1d\tz[0-9]+\.d, p[0-7]/z, \[x[0-9]+, x[0-9]+, lsl 3\]\n} 72 } } */
+/* { dg-final { scan-assembler-times {\tcmpeq\tp[0-7]\.d, p[0-7]/z, z[0-9]+\.d, z[0-9]+\.d\n} 24 } } */
+/* { dg-final { scan-assembler-times {\tfcmeq\tp[0-7]\.d, p[0-7]/z, z[0-9]+\.d, z[0-9]+\.d\n} 12 } } */
+/* { dg-final { scan-assembler-times {\tld1w\tz[0-9]+\.s, p[0-7]/z, \[x[0-9]+, x[0-9]+, lsl 2\]\n} 36 } } */
+/* { dg-final { scan-assembler-times {\tld1w\tz[0-9]+\.s, p[0-7]/z, \[x[0-9]+, z[0-9]+\.s, sxtw 2\]\n} 9 } } */
+/* { dg-final { scan-assembler-times {\tld1w\tz[0-9]+\.s, p[0-7]/z, \[x[0-9]+, z[0-9]+\.s, uxtw 2\]\n} 9 } } */
+/* { dg-final { scan-assembler-times {\tst1w\tz[0-9]+\.s, p[0-7], \[x[0-9]+, x[0-9]+, lsl 2\]\n} 18 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_7.c b/gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_7.c
new file mode 100644
index 00000000000..4a8b38e13af
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve_mask_gather_load_7.c
@@ -0,0 +1,53 @@
+/* { dg-do assemble } */
+/* { dg-options "-O2 -ftree-vectorize -ffast-math -march=armv8-a+sve --save-temps" } */
+
+#include <stdint.h>
+
+#define TEST_LOOP(DATA_TYPE, CMP_TYPE, INDEX_TYPE) \
+ void \
+ f_##DATA_TYPE##_##CMP_TYPE##_##INDEX_TYPE \
+ (DATA_TYPE *restrict dest, DATA_TYPE *restrict src, \
+ CMP_TYPE *cmp1, CMP_TYPE *cmp2, INDEX_TYPE *indices, int n) \
+ { \
+ for (int i = 0; i < n; ++i) \
+ if (cmp1[i] == cmp2[i]) \
+ dest[i] += src[indices[i]]; \
+ }
+
+#define TEST32(T, DATA_TYPE) \
+ T (DATA_TYPE, int16_t, int32_t) \
+ T (DATA_TYPE, uint16_t, int32_t) \
+ T (DATA_TYPE, _Float16, int32_t) \
+ T (DATA_TYPE, int16_t, uint32_t) \
+ T (DATA_TYPE, uint16_t, uint32_t) \
+ T (DATA_TYPE, _Float16, uint32_t)
+
+#define TEST64(T, DATA_TYPE) \
+ T (DATA_TYPE, int32_t, int64_t) \
+ T (DATA_TYPE, uint32_t, int64_t) \
+ T (DATA_TYPE, float, int64_t) \
+ T (DATA_TYPE, int32_t, uint64_t) \
+ T (DATA_TYPE, uint32_t, uint64_t) \
+ T (DATA_TYPE, float, uint64_t)
+
+#define TEST_ALL(T) \
+ TEST32 (T, int32_t) \
+ TEST32 (T, uint32_t) \
+ TEST32 (T, float) \
+ TEST64 (T, int64_t) \
+ TEST64 (T, uint64_t) \
+ TEST64 (T, double)
+
+TEST_ALL (TEST_LOOP)
+
+/* { dg-final { scan-assembler-times {\tld1h\tz[0-9]+\.h, p[0-7]/z, \[x[0-9]+, x[0-9]+, lsl 1\]\n} 36 } } */
+/* { dg-final { scan-assembler-times {\tcmpeq\tp[0-7]\.h, p[0-7]/z, z[0-9]+\.h, z[0-9]+\.h\n} 12 } } */
+/* { dg-final { scan-assembler-times {\tfcmeq\tp[0-7]\.h, p[0-7]/z, z[0-9]+\.h, z[0-9]+\.h\n} 6 } } */
+/* { dg-final { scan-assembler-times {\tld1w\tz[0-9]+\.s, p[0-7]/z, \[x[0-9]+, z[0-9]+\.s, sxtw 2\]\n} 18 } } */
+/* { dg-final { scan-assembler-times {\tld1w\tz[0-9]+\.s, p[0-7]/z, \[x[0-9]+, z[0-9]+\.s, uxtw 2\]\n} 18 } } */
+
+/* Also used for the TEST32 indices. */
+/* { dg-final { scan-assembler-times {\tld1w\tz[0-9]+\.s, p[0-7]/z, \[x[0-9]+, x[0-9]+, lsl 2\]\n} 72 } } */
+/* { dg-final { scan-assembler-times {\tcmpeq\tp[0-7]\.s, p[0-7]/z, z[0-9]+\.s, z[0-9]+\.s\n} 12 } } */
+/* { dg-final { scan-assembler-times {\tfcmeq\tp[0-7]\.s, p[0-7]/z, z[0-9]+\.s, z[0-9]+\.s\n} 6 } } */
+/* { dg-final { scan-assembler-times {\tld1d\tz[0-9]+\.d, p[0-7]/z, \[x[0-9]+, z[0-9]+\.d, lsl 3\]\n} 36 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_mask_scatter_store_1.c b/gcc/testsuite/gcc.target/aarch64/sve_mask_scatter_store_1.c
index a7f2995a6cd..562bdb720de 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_mask_scatter_store_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_mask_scatter_store_1.c
@@ -1,124 +1,51 @@
/* { dg-do assemble } */
-/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
-
-#define MASK_SCATTER_STORE1(OBJTYPE,MASKTYPE,STRIDETYPE,STRIDE)\
-void mscatter_store1##OBJTYPE##STRIDETYPE##STRIDE (OBJTYPE * restrict dst,\
- OBJTYPE * restrict src,\
- MASKTYPE * restrict masks,\
- STRIDETYPE count)\
-{\
- for (STRIDETYPE i=0; i<count; i++)\
- if (masks[i * STRIDE])\
- dst[i * STRIDE] = src[i];\
-}
-
-#define MASK_SCATTER_STORE2(OBJTYPE,MASKTYPE,STRIDETYPE)\
-void mscatter_store2##OBJTYPE##STRIDETYPE (OBJTYPE * restrict dst,\
- OBJTYPE * restrict src,\
- MASKTYPE * restrict masks,\
- STRIDETYPE stride,\
- STRIDETYPE count)\
-{\
- for (STRIDETYPE i=0; i<count; i++)\
- if (masks[i * stride])\
- dst[i * stride] = src[i];\
-}
-
-#define MASK_SCATTER_STORE3(OBJTYPE,MASKTYPE,STRIDETYPE)\
-void mscatter_store3s5##OBJTYPE##STRIDETYPE\
- (OBJTYPE * restrict dst, OBJTYPE * restrict s1, OBJTYPE * restrict s2,\
- OBJTYPE * restrict s3, OBJTYPE * restrict s4, OBJTYPE * restrict s5,\
- MASKTYPE * restrict masks, STRIDETYPE count)\
-{\
- const STRIDETYPE STRIDE = 5;\
- for (STRIDETYPE i=0; i<count; i++)\
- if (masks[i * STRIDE])\
- {\
- dst[0 + (i * STRIDE)] = s1[i];\
- dst[1 + (i * STRIDE)] = s2[i];\
- dst[2 + (i * STRIDE)] = s3[i];\
- dst[3 + (i * STRIDE)] = s4[i];\
- dst[4 + (i * STRIDE)] = s5[i];\
- }\
-}
-
-#define MASK_SCATTER_STORE4(OBJTYPE,MASKTYPE,STRIDETYPE,STRIDE)\
-void mscatter_store4##OBJTYPE##STRIDETYPE##STRIDE (OBJTYPE * restrict dst,\
- OBJTYPE * restrict src,\
- MASKTYPE * restrict masks,\
- STRIDETYPE count)\
-{\
- for (STRIDETYPE i=0; i<count; i++)\
- {\
- if (masks[i * STRIDE])\
- *dst = *src;\
- dst += STRIDE;\
- src += 1;\
- }\
-}
-
-#define MASK_SCATTER_STORE5(OBJTYPE,MASKTYPE,STRIDETYPE)\
-void mscatter_store5##OBJTYPE##STRIDETYPE (OBJTYPE * restrict dst,\
- OBJTYPE * restrict src,\
- MASKTYPE * restrict masks,\
- STRIDETYPE stride,\
- STRIDETYPE count)\
-{\
- for (STRIDETYPE i=0; i<count; i++)\
- {\
- if (masks[i * stride])\
- *dst = *src;\
- dst += stride;\
- src += 1;\
- }\
-}
-
-MASK_SCATTER_STORE1 (double, long, long, 5)
-MASK_SCATTER_STORE1 (double, long, long, 8)
-MASK_SCATTER_STORE1 (double, long, long, 21)
-MASK_SCATTER_STORE1 (double, long, long, 1009)
-
-MASK_SCATTER_STORE1 (float, int, int, 5)
-
-MASK_SCATTER_STORE1 (float, int, int, 8)
-MASK_SCATTER_STORE1 (float, int, int, 21)
-MASK_SCATTER_STORE1 (float, int, int, 1009)
-
-MASK_SCATTER_STORE2 (double, long, long)
-MASK_SCATTER_STORE2 (float, int, int)
-
-MASK_SCATTER_STORE3 (double, long, long)
-MASK_SCATTER_STORE3 (float, int, int)
-
-MASK_SCATTER_STORE4 (double, long, long, 5)
-/* NOTE: We can't vectorize MASK_SCATTER_STORE4 (float, int, int, 3) because we
- can't prove that the offsets used for the gather load won't overflow. */
-
-MASK_SCATTER_STORE5 (double, long, long)
-MASK_SCATTER_STORE5 (float, int, int)
-
-/* Widened forms. */
-MASK_SCATTER_STORE1 (double, long, int, 5)
-MASK_SCATTER_STORE1 (double, long, int, 8)
-MASK_SCATTER_STORE1 (double, long, short, 5)
-MASK_SCATTER_STORE1 (double, long, short, 8)
-
-MASK_SCATTER_STORE1 (float, int, short, 5)
-MASK_SCATTER_STORE1 (float, int, short, 8)
-
-MASK_SCATTER_STORE2 (double, long, int)
-MASK_SCATTER_STORE2 (float, int, short)
-
-MASK_SCATTER_STORE4 (double, long, int, 5)
-MASK_SCATTER_STORE4 (float, int, short, 5)
-
-MASK_SCATTER_STORE5 (double, long, int)
-
-/* Gather loads are for the masks. */
-/* { dg-final { scan-assembler-times "ld1d\\tz\[0-9\]+.d, p\[0-9\]+/z, \\\[x\[0-9\]+, z\[0-9\]+.d\\\]" 15 } } */
-/* { dg-final { scan-assembler-times "ld1w\\tz\[0-9\]+.s, p\[0-9\]+/z, \\\[x\[0-9\]+, z\[0-9\]+.s, sxtw 2\\\]" 8 } } */
-/* { dg-final { scan-assembler-times "ld1w\\tz\[0-9\]+.s, p\[0-9\]+/z, \\\[x\[0-9\]+, z\[0-9\]+.s, sxtw\\\]" 3 } } */
-
-/* { dg-final { scan-assembler-times "st1d\\tz\[0-9\]+.d, p\[0-9\]+, \\\[x\[0-9\]+, z\[0-9\]+.d\\\]" 19 } } */
-/* { dg-final { scan-assembler-times "st1w\\tz\[0-9\]+.s, p\[0-9\]+, \\\[x\[0-9\]+, z\[0-9\]+.s, sxtw 2\\\]" 12 } } */
-/* { dg-final { scan-assembler-times "st1w\\tz\[0-9\]+.s, p\[0-9\]+, \\\[x\[0-9\]+, z\[0-9\]+.s, sxtw\\\]" 3 } } */
+/* { dg-options "-O2 -ftree-vectorize -ffast-math -march=armv8-a+sve --save-temps" } */
+
+#include <stdint.h>
+
+#ifndef INDEX32
+#define INDEX32 int32_t
+#define INDEX64 int64_t
+#endif
+
+#define TEST_LOOP(DATA_TYPE, CMP_TYPE, BITS) \
+ void \
+ f_##DATA_TYPE##_##CMP_TYPE \
+ (DATA_TYPE *restrict dest, DATA_TYPE *restrict src, \
+ CMP_TYPE *restrict cmp1, CMP_TYPE *restrict cmp2, \
+ INDEX##BITS *restrict indices, int n) \
+ { \
+ for (int i = 0; i < n; ++i) \
+ if (cmp1[i] == cmp2[i]) \
+ dest[indices[i]] = src[i] + 1; \
+ }
+
+#define TEST32(T, DATA_TYPE) \
+ T (DATA_TYPE, int32_t, 32) \
+ T (DATA_TYPE, uint32_t, 32) \
+ T (DATA_TYPE, float, 32)
+
+#define TEST64(T, DATA_TYPE) \
+ T (DATA_TYPE, int64_t, 64) \
+ T (DATA_TYPE, uint64_t, 64) \
+ T (DATA_TYPE, double, 64)
+
+#define TEST_ALL(T) \
+ TEST32 (T, int32_t) \
+ TEST32 (T, uint32_t) \
+ TEST32 (T, float) \
+ TEST64 (T, int64_t) \
+ TEST64 (T, uint64_t) \
+ TEST64 (T, double)
+
+TEST_ALL (TEST_LOOP)
+
+/* { dg-final { scan-assembler-times {\tld1w\tz[0-9]+\.s, p[0-7]/z, \[x[0-9]+, x[0-9]+, lsl 2\]\n} 36 } } */
+/* { dg-final { scan-assembler-times {\tcmpeq\tp[0-7]\.s, p[0-7]/z, z[0-9]+\.s, z[0-9]+\.s\n} 6 } } */
+/* { dg-final { scan-assembler-times {\tfcmeq\tp[0-7]\.s, p[0-7]/z, z[0-9]+\.s, z[0-9]+\.s\n} 3 } } */
+/* { dg-final { scan-assembler-times {\tst1w\tz[0-9]+\.s, p[0-7], \[x[0-9]+, z[0-9]+\.s, sxtw 2\]\n} 9 } } */
+
+/* { dg-final { scan-assembler-times {\tld1d\tz[0-9]+\.d, p[0-7]/z, \[x[0-9]+, x[0-9]+, lsl 3\]\n} 36 } } */
+/* { dg-final { scan-assembler-times {\tcmpeq\tp[0-7]\.d, p[0-7]/z, z[0-9]+\.d, z[0-9]+\.d\n} 6 } } */
+/* { dg-final { scan-assembler-times {\tfcmeq\tp[0-7]\.d, p[0-7]/z, z[0-9]+\.d, z[0-9]+\.d\n} 3 } } */
+/* { dg-final { scan-assembler-times {\tst1d\tz[0-9]+\.d, p[0-7], \[x[0-9]+, z[0-9]+\.d, lsl 3\]\n} 9 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_mask_scatter_store_1_run.c b/gcc/testsuite/gcc.target/aarch64/sve_mask_scatter_store_1_run.c
deleted file mode 100644
index 3222d420763..00000000000
--- a/gcc/testsuite/gcc.target/aarch64/sve_mask_scatter_store_1_run.c
+++ /dev/null
@@ -1,186 +0,0 @@
-/* { dg-do run { target { aarch64_sve_hw } } } */
-/* { dg-options "-O2 -ftree-vectorize -fno-inline -march=armv8-a+sve" } */
-
-#include <unistd.h>
-#include <stdio.h>
-
-extern void abort (void);
-extern void *memset(void *, int, size_t);
-
-#include "sve_mask_scatter_store_1.c"
-
-#define NUM_SRC_ELEMS 13
-#define NUM_DST_ELEMS(STRIDE) (NUM_SRC_ELEMS * STRIDE)
-
-#define MASKED_VALUE 3
-
-#define TEST_MASK_SCATTER_STORE_COMMON1(FUN,OBJTYPE,MASKTYPE,STRIDETYPE,STRIDE)\
-{\
- OBJTYPE real_src[1 + NUM_SRC_ELEMS]\
- __attribute__((aligned (32)));\
- OBJTYPE real_dst[1 + NUM_DST_ELEMS (STRIDE)]\
- __attribute__((aligned (32)));\
- MASKTYPE masks[NUM_DST_ELEMS (STRIDE)];\
- memset (masks, 0, (NUM_DST_ELEMS (STRIDE)) * sizeof (MASKTYPE));\
- real_src[0] = 0;\
- OBJTYPE *src = &real_src[1];\
- OBJTYPE *dst = &real_dst[1];\
- for (STRIDETYPE i = 0; i < NUM_SRC_ELEMS; i++)\
- {\
- src[i] = i;\
- masks[i * STRIDE] = i & 0x1;\
- }\
- for (STRIDETYPE i = 0; i < NUM_DST_ELEMS (STRIDE); i++)\
- dst[i] = MASKED_VALUE;\
- FUN##OBJTYPE##STRIDETYPE##STRIDE (dst, src, masks, NUM_SRC_ELEMS); \
- for (STRIDETYPE i = 0; i < NUM_SRC_ELEMS; i++)\
- if (dst[i * STRIDE] != (masks[i * STRIDE] ? i : MASKED_VALUE))\
- abort ();\
-}
-
-#define TEST_MASK_SCATTER_STORE_COMMON2(FUN,OBJTYPE,MASKTYPE,STRIDETYPE,STRIDE)\
-{\
- OBJTYPE real_src[1 + NUM_SRC_ELEMS]\
- __attribute__((aligned (32)));\
- OBJTYPE real_dst[1 + NUM_DST_ELEMS (STRIDE)]\
- __attribute__((aligned (32)));\
- MASKTYPE masks[NUM_DST_ELEMS (STRIDE)];\
- memset (masks, 0, (NUM_DST_ELEMS (STRIDE)) * sizeof (MASKTYPE));\
- real_src[0] = 0;\
- OBJTYPE *src = &real_src[1];\
- OBJTYPE *dst = &real_dst[1];\
- for (STRIDETYPE i = 0; i < NUM_SRC_ELEMS; i++)\
- {\
- src[i] = i;\
- masks[i * STRIDE] = i & 0x1;\
- }\
- for (STRIDETYPE i = 0; i < NUM_DST_ELEMS (STRIDE); i++)\
- dst[i] = MASKED_VALUE;\
- FUN##OBJTYPE##STRIDETYPE (dst, src, masks, STRIDE, NUM_SRC_ELEMS); \
- for (STRIDETYPE i = 0; i < NUM_SRC_ELEMS; i++)\
- if (dst[i * STRIDE] != (masks[i * STRIDE] ? i : MASKED_VALUE))\
- abort ();\
-}
-
-#define TEST_MASK_SCATTER_STORE1(OBJTYPE,MASKTYPE,STRIDETYPE,STRIDE) \
- TEST_MASK_SCATTER_STORE_COMMON1 (mscatter_store1, OBJTYPE, MASKTYPE, \
- STRIDETYPE, STRIDE)
-
-#define TEST_MASK_SCATTER_STORE2(OBJTYPE,MASKTYPE,STRIDETYPE,STRIDE) \
- TEST_MASK_SCATTER_STORE_COMMON2 (mscatter_store2, OBJTYPE, MASKTYPE, \
- STRIDETYPE, STRIDE)
-
-#define TEST_MASK_SCATTER_STORE3(OBJTYPE,MASKTYPE,STRIDETYPE)\
-{\
- OBJTYPE real_src1[1 + NUM_SRC_ELEMS]\
- __attribute__((aligned (32)));\
- OBJTYPE real_src2[1 + NUM_SRC_ELEMS]\
- __attribute__((aligned (32)));\
- OBJTYPE real_src3[1 + NUM_SRC_ELEMS]\
- __attribute__((aligned (32)));\
- OBJTYPE real_src4[1 + NUM_SRC_ELEMS]\
- __attribute__((aligned (32)));\
- OBJTYPE real_src5[1 + NUM_SRC_ELEMS]\
- __attribute__((aligned (32)));\
- OBJTYPE real_dst[1 + NUM_DST_ELEMS (5)]\
- __attribute__((aligned (32)));\
- MASKTYPE masks[NUM_DST_ELEMS (5)];\
- memset (masks, 0, (NUM_DST_ELEMS (5)) * sizeof (MASKTYPE));\
- real_src1[0] = real_src2[0] = real_src3[0] = real_src4[0] = real_src5[0] = 0;\
- OBJTYPE *src1 = &real_src1[1];\
- OBJTYPE *src2 = &real_src2[1];\
- OBJTYPE *src3 = &real_src3[1];\
- OBJTYPE *src4 = &real_src4[1];\
- OBJTYPE *src5 = &real_src5[1];\
- OBJTYPE *dst = &real_dst[1];\
- for (STRIDETYPE i = 0; i < NUM_SRC_ELEMS; i++)\
- {\
- STRIDETYPE base = i * 5;\
- src1[i] = base;\
- src2[i] = base + 1;\
- src3[i] = base + 2;\
- src4[i] = base + 3;\
- src5[i] = base + 4;\
- masks[i * 5] = i & 0x1;\
- }\
- for (STRIDETYPE i = 0; i < NUM_DST_ELEMS (5); i++)\
- dst[i] = MASKED_VALUE;\
- mscatter_store3s5##OBJTYPE##STRIDETYPE \
- (dst, src1, src2, src3, src4, src5, masks, NUM_SRC_ELEMS); \
- for (STRIDETYPE i = 0; i < NUM_SRC_ELEMS; i++)\
- {\
- STRIDETYPE base = i * 5;\
- if (dst[base] != (masks[i * 5] ? base : MASKED_VALUE))\
- abort ();\
- if (dst[base + 1] != (masks[i * 5] ? (base + 1) : MASKED_VALUE))\
- abort ();\
- if (dst[base + 2] != (masks[i * 5] ? (base + 2) : MASKED_VALUE))\
- abort ();\
- if (dst[base + 3] != (masks[i * 5] ? (base + 3) : MASKED_VALUE))\
- abort ();\
- if (dst[base + 4] != (masks[i * 5] ? (base + 4) : MASKED_VALUE))\
- abort ();\
- }\
-}
-
-#define TEST_MASK_SCATTER_STORE4(OBJTYPE,MASKTYPE,STRIDETYPE,STRIDE) \
- TEST_MASK_SCATTER_STORE_COMMON1 (mscatter_store4, OBJTYPE, MASKTYPE, \
- STRIDETYPE, STRIDE)
-
-#define TEST_MASK_SCATTER_STORE5(OBJTYPE,MASKTYPE,STRIDETYPE,STRIDE) \
- TEST_MASK_SCATTER_STORE_COMMON2 (mscatter_store5, OBJTYPE, MASKTYPE, \
- STRIDETYPE, STRIDE)
-
-int __attribute__ ((optimize (1)))
-main ()
-{
- TEST_MASK_SCATTER_STORE1 (double, long, long, 5);
-
- TEST_MASK_SCATTER_STORE1 (double, long, long, 8);
- TEST_MASK_SCATTER_STORE1 (double, long, long, 21);
-
- TEST_MASK_SCATTER_STORE1 (float, int, int, 5);
- TEST_MASK_SCATTER_STORE1 (float, int, int, 8);
- TEST_MASK_SCATTER_STORE1 (float, int, int, 21);
-
- TEST_MASK_SCATTER_STORE2 (double, long, long, 5);
- TEST_MASK_SCATTER_STORE2 (double, long, long, 8);
- TEST_MASK_SCATTER_STORE2 (double, long, long, 21);
-
- TEST_MASK_SCATTER_STORE2 (float, int, int, 5);
- TEST_MASK_SCATTER_STORE2 (float, int, int, 8);
- TEST_MASK_SCATTER_STORE2 (float, int, int, 21);
-
- TEST_MASK_SCATTER_STORE3 (double, long, long);
- TEST_MASK_SCATTER_STORE3 (float, int, int);
-
- TEST_MASK_SCATTER_STORE4 (double, long, long, 5);
-
- TEST_MASK_SCATTER_STORE5 (double, long, long, 5);
- TEST_MASK_SCATTER_STORE5 (float, int, int, 5);
-
- /* Widened forms. */
- TEST_MASK_SCATTER_STORE1 (double, long, int, 5)
- TEST_MASK_SCATTER_STORE1 (double, long, int, 8)
- TEST_MASK_SCATTER_STORE1 (double, long, short, 5)
- TEST_MASK_SCATTER_STORE1 (double, long, short, 8)
-
- TEST_MASK_SCATTER_STORE1 (float, int, short, 5)
- TEST_MASK_SCATTER_STORE1 (float, int, short, 8)
-
- TEST_MASK_SCATTER_STORE2 (double, long, int, 5);
- TEST_MASK_SCATTER_STORE2 (double, long, int, 8);
- TEST_MASK_SCATTER_STORE2 (double, long, int, 21);
-
- TEST_MASK_SCATTER_STORE2 (float, int, short, 5);
- TEST_MASK_SCATTER_STORE2 (float, int, short, 8);
- TEST_MASK_SCATTER_STORE2 (float, int, short, 21);
-
- TEST_MASK_SCATTER_STORE4 (double, long, int, 5);
- TEST_MASK_SCATTER_STORE4 (float, int, short, 5);
-
- TEST_MASK_SCATTER_STORE5 (double, long, int, 5);
-
- return 0;
-}
-
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_mask_scatter_store_2.c b/gcc/testsuite/gcc.target/aarch64/sve_mask_scatter_store_2.c
new file mode 100644
index 00000000000..c0f291673dc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve_mask_scatter_store_2.c
@@ -0,0 +1,17 @@
+/* { dg-do assemble } */
+/* { dg-options "-O2 -ftree-vectorize -ffast-math -march=armv8-a+sve --save-temps" } */
+
+#define INDEX32 uint32_t
+#define INDEX64 uint64_t
+
+#include "sve_mask_scatter_store_1.c"
+
+/* { dg-final { scan-assembler-times {\tld1w\tz[0-9]+\.s, p[0-7]/z, \[x[0-9]+, x[0-9]+, lsl 2\]\n} 36 } } */
+/* { dg-final { scan-assembler-times {\tcmpeq\tp[0-7]\.s, p[0-7]/z, z[0-9]+\.s, z[0-9]+\.s\n} 6 } } */
+/* { dg-final { scan-assembler-times {\tfcmeq\tp[0-7]\.s, p[0-7]/z, z[0-9]+\.s, z[0-9]+\.s\n} 3 } } */
+/* { dg-final { scan-assembler-times {\tst1w\tz[0-9]+\.s, p[0-7], \[x[0-9]+, z[0-9]+\.s, uxtw 2\]\n} 9 } } */
+
+/* { dg-final { scan-assembler-times {\tld1d\tz[0-9]+\.d, p[0-7]/z, \[x[0-9]+, x[0-9]+, lsl 3\]\n} 36 } } */
+/* { dg-final { scan-assembler-times {\tcmpeq\tp[0-7]\.d, p[0-7]/z, z[0-9]+\.d, z[0-9]+\.d\n} 6 } } */
+/* { dg-final { scan-assembler-times {\tfcmeq\tp[0-7]\.d, p[0-7]/z, z[0-9]+\.d, z[0-9]+\.d\n} 3 } } */
+/* { dg-final { scan-assembler-times {\tst1d\tz[0-9]+\.d, p[0-7], \[x[0-9]+, z[0-9]+\.d, lsl 3\]\n} 9 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_1.c b/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_1.c
index 4a6247db978..9eff539c1d8 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_1.c
@@ -1,8 +1,8 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -ftree-vectorize -fno-tree-dce -ffast-math -march=armv8-a+sve" } */
+/* { dg-options "-O2 -ftree-vectorize -ffast-math -march=armv8-a+sve" } */
#define TEST_LOOP(NAME, OUTTYPE, INTYPE, MASKTYPE) \
- void __attribute__((weak)) \
+ void __attribute__ ((noinline, noclone)) \
NAME##_2 (OUTTYPE *__restrict dest, INTYPE *__restrict src, \
MASKTYPE *__restrict cond, int n) \
{ \
@@ -28,6 +28,7 @@
TEST1 (NAME##_i16, unsigned short) \
TEST1 (NAME##_i32, int) \
TEST1 (NAME##_i64, unsigned long) \
+ TEST2 (NAME##_f16_f16, _Float16, _Float16) \
TEST2 (NAME##_f32_f32, float, float) \
TEST2 (NAME##_f64_f64, double, double)
@@ -44,10 +45,10 @@ TEST (test)
/* Mask | 8 16 32 64
-------+------------
Out 8 | 2 2 2 2
- 16 | 2 1 1 1
+ 16 | 2 1 1 1 x2 (for half float)
32 | 2 1 1 1
64 | 2 1 1 1. */
-/* { dg-final { scan-assembler-times {\tld2h\t.z[0-9]} 23 } } */
+/* { dg-final { scan-assembler-times {\tld2h\t.z[0-9]} 28 } } */
/* Mask | 8 16 32 64
-------+------------
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_1_run.c b/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_1_run.c
index 626b78c29e1..72086145290 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_1_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_1_run.c
@@ -1,12 +1,10 @@
/* { dg-do run { target aarch64_sve_hw } } */
-/* { dg-options "-O2 -ftree-vectorize -fno-tree-dce -ffast-math -march=armv8-a+sve" } */
+/* { dg-options "-O2 -ftree-vectorize -ffast-math -march=armv8-a+sve" } */
#include "sve_mask_struct_load_1.c"
#define N 100
-volatile int x;
-
#undef TEST_LOOP
#define TEST_LOOP(NAME, OUTTYPE, INTYPE, MASKTYPE) \
{ \
@@ -17,6 +15,7 @@ volatile int x;
{ \
out[i] = i * 7 / 2; \
mask[i] = i % 5 <= i % 3; \
+ asm volatile ("" ::: "memory"); \
} \
for (int i = 0; i < N * 2; ++i) \
in[i] = i * 9 / 2; \
@@ -27,11 +26,11 @@ volatile int x;
OUTTYPE if_false = i * 7 / 2; \
if (out[i] != (mask[i] ? if_true : if_false)) \
__builtin_abort (); \
- x += 1; \
+ asm volatile ("" ::: "memory"); \
} \
}
-int
+int __attribute__ ((optimize (1)))
main (void)
{
TEST (test);
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_2.c b/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_2.c
index 0004e673d49..fe69b96e35a 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_2.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_2.c
@@ -1,8 +1,8 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -ftree-vectorize -fno-tree-dce -ffast-math -march=armv8-a+sve" } */
+/* { dg-options "-O2 -ftree-vectorize -ffast-math -march=armv8-a+sve" } */
#define TEST_LOOP(NAME, OUTTYPE, INTYPE, MASKTYPE) \
- void __attribute__((weak)) \
+ void __attribute__ ((noinline, noclone)) \
NAME##_3 (OUTTYPE *__restrict dest, INTYPE *__restrict src, \
MASKTYPE *__restrict cond, int n) \
{ \
@@ -30,6 +30,7 @@
TEST1 (NAME##_i16, unsigned short) \
TEST1 (NAME##_i32, int) \
TEST1 (NAME##_i64, unsigned long) \
+ TEST2 (NAME##_f16_f16, _Float16, _Float16) \
TEST2 (NAME##_f32_f32, float, float) \
TEST2 (NAME##_f64_f64, double, double)
@@ -46,10 +47,10 @@ TEST (test)
/* Mask | 8 16 32 64
-------+------------
Out 8 | 2 2 2 2
- 16 | 2 1 1 1
+ 16 | 2 1 1 1 x2 (for _Float16)
32 | 2 1 1 1
64 | 2 1 1 1. */
-/* { dg-final { scan-assembler-times {\tld3h\t.z[0-9]} 23 } } */
+/* { dg-final { scan-assembler-times {\tld3h\t.z[0-9]} 28 } } */
/* Mask | 8 16 32 64
-------+------------
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_2_run.c b/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_2_run.c
index 86219b4a191..a9784676efb 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_2_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_2_run.c
@@ -1,12 +1,10 @@
/* { dg-do run { target aarch64_sve_hw } } */
-/* { dg-options "-O2 -ftree-vectorize -fno-tree-dce -ffast-math -march=armv8-a+sve" } */
+/* { dg-options "-O2 -ftree-vectorize -ffast-math -march=armv8-a+sve" } */
#include "sve_mask_struct_load_2.c"
#define N 100
-volatile int x;
-
#undef TEST_LOOP
#define TEST_LOOP(NAME, OUTTYPE, INTYPE, MASKTYPE) \
{ \
@@ -17,6 +15,7 @@ volatile int x;
{ \
out[i] = i * 7 / 2; \
mask[i] = i % 5 <= i % 3; \
+ asm volatile ("" ::: "memory"); \
} \
for (int i = 0; i < N * 3; ++i) \
in[i] = i * 9 / 2; \
@@ -29,11 +28,11 @@ volatile int x;
OUTTYPE if_false = i * 7 / 2; \
if (out[i] != (mask[i] ? if_true : if_false)) \
__builtin_abort (); \
- x += 1; \
+ asm volatile ("" ::: "memory"); \
} \
}
-int
+int __attribute__ ((optimize (1)))
main (void)
{
TEST (test);
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_3.c b/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_3.c
index 5f784e7dd36..b8bdd51459f 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_3.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_3.c
@@ -1,8 +1,8 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -ftree-vectorize -fno-tree-dce -ffast-math -march=armv8-a+sve" } */
+/* { dg-options "-O2 -ftree-vectorize -ffast-math -march=armv8-a+sve" } */
#define TEST_LOOP(NAME, OUTTYPE, INTYPE, MASKTYPE) \
- void __attribute__((weak)) \
+ void __attribute__ ((noinline, noclone)) \
NAME##_4 (OUTTYPE *__restrict dest, INTYPE *__restrict src, \
MASKTYPE *__restrict cond, int n) \
{ \
@@ -31,6 +31,7 @@
TEST1 (NAME##_i16, unsigned short) \
TEST1 (NAME##_i32, int) \
TEST1 (NAME##_i64, unsigned long) \
+ TEST2 (NAME##_f16_f16, _Float16, _Float16) \
TEST2 (NAME##_f32_f32, float, float) \
TEST2 (NAME##_f64_f64, double, double)
@@ -47,10 +48,10 @@ TEST (test)
/* Mask | 8 16 32 64
-------+------------
Out 8 | 2 2 2 2
- 16 | 2 1 1 1
+ 16 | 2 1 1 1 x2 (for half float)
32 | 2 1 1 1
64 | 2 1 1 1. */
-/* { dg-final { scan-assembler-times {\tld4h\t.z[0-9]} 23 } } */
+/* { dg-final { scan-assembler-times {\tld4h\t.z[0-9]} 28 } } */
/* Mask | 8 16 32 64
-------+------------
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_3_run.c b/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_3_run.c
index 51bd38e2890..f168d656af9 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_3_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_3_run.c
@@ -1,12 +1,10 @@
/* { dg-do run { target aarch64_sve_hw } } */
-/* { dg-options "-O2 -ftree-vectorize -fno-tree-dce -ffast-math -march=armv8-a+sve" } */
+/* { dg-options "-O2 -ftree-vectorize -ffast-math -march=armv8-a+sve" } */
#include "sve_mask_struct_load_3.c"
#define N 100
-volatile int x;
-
#undef TEST_LOOP
#define TEST_LOOP(NAME, OUTTYPE, INTYPE, MASKTYPE) \
{ \
@@ -17,6 +15,7 @@ volatile int x;
{ \
out[i] = i * 7 / 2; \
mask[i] = i % 5 <= i % 3; \
+ asm volatile ("" ::: "memory"); \
} \
for (int i = 0; i < N * 4; ++i) \
in[i] = i * 9 / 2; \
@@ -30,11 +29,11 @@ volatile int x;
OUTTYPE if_false = i * 7 / 2; \
if (out[i] != (mask[i] ? if_true : if_false)) \
__builtin_abort (); \
- x += 1; \
+ asm volatile ("" ::: "memory"); \
} \
}
-int
+int __attribute__ ((optimize (1)))
main (void)
{
TEST (test);
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_4.c b/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_4.c
index 6608558d3ff..2b319229d1f 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_4.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_4.c
@@ -1,8 +1,8 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -ftree-vectorize -fno-tree-dce -ffast-math -march=armv8-a+sve" } */
+/* { dg-options "-O2 -ftree-vectorize -ffast-math -march=armv8-a+sve" } */
#define TEST_LOOP(NAME, OUTTYPE, INTYPE, MASKTYPE) \
- void \
+ void __attribute__ ((noinline, noclone)) \
NAME##_3 (OUTTYPE *__restrict dest, INTYPE *__restrict src, \
MASKTYPE *__restrict cond, int n) \
{ \
@@ -28,6 +28,7 @@
TEST1 (NAME##_i16, unsigned short) \
TEST1 (NAME##_i32, int) \
TEST1 (NAME##_i64, unsigned long) \
+ TEST2 (NAME##_f16_f16, _Float16, _Float16) \
TEST2 (NAME##_f32_f32, float, float) \
TEST2 (NAME##_f64_f64, double, double)
@@ -44,10 +45,10 @@ TEST (test)
/* Mask | 8 16 32 64
-------+------------
Out 8 | 2 2 2 2
- 16 | 2 1 1 1
+ 16 | 2 1 1 1 x2 (for half float)
32 | 2 1 1 1
64 | 2 1 1 1. */
-/* { dg-final { scan-assembler-times {\tld3h\t.z[0-9]} 23 } } */
+/* { dg-final { scan-assembler-times {\tld3h\t.z[0-9]} 28 } } */
/* Mask | 8 16 32 64
-------+------------
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_5.c b/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_5.c
index 003cf650d7d..a81c647004f 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_5.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_5.c
@@ -1,8 +1,8 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -ftree-vectorize -fno-tree-dce -ffast-math -march=armv8-a+sve" } */
+/* { dg-options "-O2 -ftree-vectorize -ffast-math -march=armv8-a+sve" } */
#define TEST_LOOP(NAME, OUTTYPE, INTYPE, MASKTYPE) \
- void \
+ void __attribute__ ((noinline, noclone)) \
NAME##_4 (OUTTYPE *__restrict dest, INTYPE *__restrict src, \
MASKTYPE *__restrict cond, int n) \
{ \
@@ -28,6 +28,7 @@
TEST1 (NAME##_i16, unsigned short) \
TEST1 (NAME##_i32, int) \
TEST1 (NAME##_i64, unsigned long) \
+ TEST2 (NAME##_f16_f16, _Float16, _Float16) \
TEST2 (NAME##_f32_f32, float, float) \
TEST2 (NAME##_f64_f64, double, double)
@@ -44,10 +45,10 @@ TEST (test)
/* Mask | 8 16 32 64
-------+------------
Out 8 | 2 2 2 2
- 16 | 2 1 1 1
+ 16 | 2 1 1 1 x2 (for half float)
32 | 2 1 1 1
64 | 2 1 1 1. */
-/* { dg-final { scan-assembler-times {\tld4h\t.z[0-9]} 23 } } */
+/* { dg-final { scan-assembler-times {\tld4h\t.z[0-9]} 28 } } */
/* Mask | 8 16 32 64
-------+------------
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_6.c b/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_6.c
index a6161f31536..b6e3f55d7e8 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_6.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_6.c
@@ -1,8 +1,8 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -ftree-vectorize -fno-tree-dce -ffast-math -march=armv8-a+sve" } */
+/* { dg-options "-O2 -ftree-vectorize -ffast-math -march=armv8-a+sve" } */
#define TEST_LOOP(NAME, OUTTYPE, INTYPE, MASKTYPE) \
- void \
+ void __attribute__ ((noinline, noclone)) \
NAME##_2 (OUTTYPE *__restrict dest, INTYPE *__restrict src, \
MASKTYPE *__restrict cond, int n) \
{ \
@@ -28,6 +28,7 @@
TEST1 (NAME##_i16, unsigned short) \
TEST1 (NAME##_i32, int) \
TEST1 (NAME##_i64, unsigned long) \
+ TEST2 (NAME##_f16_f16, _Float16, _Float16) \
TEST2 (NAME##_f32_f32, float, float) \
TEST2 (NAME##_f64_f64, double, double)
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_7.c b/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_7.c
index 75a3e43f267..da97e2795a9 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_7.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_7.c
@@ -1,8 +1,8 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -ftree-vectorize -fno-tree-dce -ffast-math -march=armv8-a+sve" } */
+/* { dg-options "-O2 -ftree-vectorize -ffast-math -march=armv8-a+sve" } */
#define TEST_LOOP(NAME, OUTTYPE, INTYPE, MASKTYPE) \
- void \
+ void __attribute__ ((noinline, noclone)) \
NAME##_3 (OUTTYPE *__restrict dest, INTYPE *__restrict src, \
MASKTYPE *__restrict cond, int n) \
{ \
@@ -28,6 +28,7 @@
TEST1 (NAME##_i16, unsigned short) \
TEST1 (NAME##_i32, int) \
TEST1 (NAME##_i64, unsigned long) \
+ TEST2 (NAME##_f16_f16, _Float16, _Float16) \
TEST2 (NAME##_f32_f32, float, float) \
TEST2 (NAME##_f64_f64, double, double)
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_8.c b/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_8.c
index e87ad0bc074..c3884b0b074 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_8.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_load_8.c
@@ -1,8 +1,8 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -ftree-vectorize -fno-tree-dce -ffast-math -march=armv8-a+sve" } */
+/* { dg-options "-O2 -ftree-vectorize -ffast-math -march=armv8-a+sve" } */
#define TEST_LOOP(NAME, OUTTYPE, INTYPE, MASKTYPE) \
- void \
+ void __attribute__ ((noinline, noclone)) \
NAME##_4 (OUTTYPE *__restrict dest, INTYPE *__restrict src, \
MASKTYPE *__restrict cond, int n) \
{ \
@@ -28,6 +28,7 @@
TEST1 (NAME##_i16, unsigned short) \
TEST1 (NAME##_i32, int) \
TEST1 (NAME##_i64, unsigned long) \
+ TEST2 (NAME##_f16_f16, _Float16, _Float16) \
TEST2 (NAME##_f32_f32, float, float) \
TEST2 (NAME##_f64_f64, double, double)
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_store_1.c b/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_store_1.c
index 966968d4b91..9af479f478d 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_store_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_store_1.c
@@ -2,16 +2,19 @@
/* { dg-options "-O2 -ftree-vectorize -ffast-math -march=armv8-a+sve" } */
#define TEST_LOOP(NAME, OUTTYPE, INTYPE, MASKTYPE) \
- void __attribute__((weak)) \
+ void __attribute__ ((noinline, noclone)) \
NAME##_2 (OUTTYPE *__restrict dest, INTYPE *__restrict src, \
- MASKTYPE *__restrict cond, int n) \
+ MASKTYPE *__restrict cond, INTYPE bias, int n) \
{ \
for (int i = 0; i < n; ++i) \
- if (cond[i]) \
- { \
- dest[i * 2] = src[i]; \
- dest[i * 2 + 1] = src[i]; \
- } \
+ { \
+ INTYPE value = src[i] + bias; \
+ if (cond[i]) \
+ { \
+ dest[i * 2] = value; \
+ dest[i * 2 + 1] = value; \
+ } \
+ } \
}
#define TEST2(NAME, OUTTYPE, INTYPE) \
@@ -31,6 +34,7 @@
TEST1 (NAME##_i16, unsigned short) \
TEST1 (NAME##_i32, int) \
TEST1 (NAME##_i64, unsigned long) \
+ TEST2 (NAME##_f16_f16, _Float16, _Float16) \
TEST2 (NAME##_f32_f32, float, float) \
TEST2 (NAME##_f64_f64, double, double)
@@ -47,10 +51,10 @@ TEST (test)
/* Mask | 8 16 32 64
-------+------------
In 8 | 2 2 2 2
- 16 | 2 1 1 1
+ 16 | 2 1 1 1 x2 (for _Float16)
32 | 2 1 1 1
64 | 2 1 1 1. */
-/* { dg-final { scan-assembler-times {\tst2h\t.z[0-9]} 23 } } */
+/* { dg-final { scan-assembler-times {\tst2h\t.z[0-9]} 28 } } */
/* Mask | 8 16 32 64
-------+------------
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_store_1_run.c b/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_store_1_run.c
index fd48a4c96f9..f472e1da01d 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_store_1_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_store_1_run.c
@@ -1,12 +1,10 @@
/* { dg-do run { target aarch64_sve_hw } } */
-/* { dg-options "-O2 -ftree-vectorize -fno-tree-dce -ffast-math -march=armv8-a+sve" } */
+/* { dg-options "-O2 -ftree-vectorize -ffast-math -march=armv8-a+sve" } */
#include "sve_mask_struct_store_1.c"
#define N 100
-volatile int x;
-
#undef TEST_LOOP
#define TEST_LOOP(NAME, OUTTYPE, INTYPE, MASKTYPE) \
{ \
@@ -17,21 +15,22 @@ volatile int x;
{ \
in[i] = i * 7 / 2; \
mask[i] = i % 5 <= i % 3; \
+ asm volatile ("" ::: "memory"); \
} \
for (int i = 0; i < N * 2; ++i) \
out[i] = i * 9 / 2; \
- NAME##_2 (out, in, mask, N); \
+ NAME##_2 (out, in, mask, 17, N); \
for (int i = 0; i < N * 2; ++i) \
{ \
- OUTTYPE if_true = in[i / 2]; \
+ OUTTYPE if_true = (INTYPE) (in[i / 2] + 17); \
OUTTYPE if_false = i * 9 / 2; \
if (out[i] != (mask[i / 2] ? if_true : if_false)) \
__builtin_abort (); \
- x += 1; \
+ asm volatile ("" ::: "memory"); \
} \
}
-int
+int __attribute__ ((optimize (1)))
main (void)
{
TEST (test);
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_store_2.c b/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_store_2.c
index 5359c6a457a..b817a095abe 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_store_2.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_store_2.c
@@ -2,17 +2,20 @@
/* { dg-options "-O2 -ftree-vectorize -ffast-math -march=armv8-a+sve" } */
#define TEST_LOOP(NAME, OUTTYPE, INTYPE, MASKTYPE) \
- void __attribute__((weak)) \
+ void __attribute__ ((noinline, noclone)) \
NAME##_3 (OUTTYPE *__restrict dest, INTYPE *__restrict src, \
- MASKTYPE *__restrict cond, int n) \
+ MASKTYPE *__restrict cond, INTYPE bias, int n) \
{ \
for (int i = 0; i < n; ++i) \
- if (cond[i]) \
- { \
- dest[i * 3] = src[i]; \
- dest[i * 3 + 1] = src[i]; \
- dest[i * 3 + 2] = src[i]; \
- } \
+ { \
+ INTYPE value = src[i] + bias; \
+ if (cond[i]) \
+ { \
+ dest[i * 3] = value; \
+ dest[i * 3 + 1] = value; \
+ dest[i * 3 + 2] = value; \
+ } \
+ } \
}
#define TEST2(NAME, OUTTYPE, INTYPE) \
@@ -32,6 +35,7 @@
TEST1 (NAME##_i16, unsigned short) \
TEST1 (NAME##_i32, int) \
TEST1 (NAME##_i64, unsigned long) \
+ TEST2 (NAME##_f16_f16, _Float16, _Float16) \
TEST2 (NAME##_f32_f32, float, float) \
TEST2 (NAME##_f64_f64, double, double)
@@ -48,10 +52,10 @@ TEST (test)
/* Mask | 8 16 32 64
-------+------------
In 8 | 2 2 2 2
- 16 | 2 1 1 1
+ 16 | 2 1 1 1 x2 (for _Float16)
32 | 2 1 1 1
64 | 2 1 1 1. */
-/* { dg-final { scan-assembler-times {\tst3h\t.z[0-9]} 23 } } */
+/* { dg-final { scan-assembler-times {\tst3h\t.z[0-9]} 28 } } */
/* Mask | 8 16 32 64
-------+------------
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_store_2_run.c b/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_store_2_run.c
index f8845ebd7ec..c1771d52298 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_store_2_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_store_2_run.c
@@ -1,12 +1,10 @@
/* { dg-do run { target aarch64_sve_hw } } */
-/* { dg-options "-O2 -ftree-vectorize -fno-tree-dce -ffast-math -march=armv8-a+sve" } */
+/* { dg-options "-O2 -ftree-vectorize -ffast-math -march=armv8-a+sve" } */
#include "sve_mask_struct_store_2.c"
#define N 100
-volatile int x;
-
#undef TEST_LOOP
#define TEST_LOOP(NAME, OUTTYPE, INTYPE, MASKTYPE) \
{ \
@@ -17,21 +15,22 @@ volatile int x;
{ \
in[i] = i * 7 / 2; \
mask[i] = i % 5 <= i % 3; \
+ asm volatile ("" ::: "memory"); \
} \
for (int i = 0; i < N * 3; ++i) \
out[i] = i * 9 / 2; \
- NAME##_3 (out, in, mask, N); \
+ NAME##_3 (out, in, mask, 11, N); \
for (int i = 0; i < N * 3; ++i) \
{ \
- OUTTYPE if_true = in[i / 3]; \
+ OUTTYPE if_true = (INTYPE) (in[i / 3] + 11); \
OUTTYPE if_false = i * 9 / 2; \
if (out[i] != (mask[i / 3] ? if_true : if_false)) \
__builtin_abort (); \
- x += 1; \
+ asm volatile ("" ::: "memory"); \
} \
}
-int
+int __attribute__ ((optimize (1)))
main (void)
{
TEST (test);
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_store_3.c b/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_store_3.c
index cc614847e7e..d604bd77efe 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_store_3.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_store_3.c
@@ -1,19 +1,22 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -ftree-vectorize -fno-vect-cost-model -march=armv8-a+sve" } */
+/* { dg-options "-O2 -ftree-vectorize -ffast-math -march=armv8-a+sve" } */
#define TEST_LOOP(NAME, OUTTYPE, INTYPE, MASKTYPE) \
- void __attribute__((weak)) \
+ void __attribute__ ((noinline, noclone)) \
NAME##_4 (OUTTYPE *__restrict dest, INTYPE *__restrict src, \
- MASKTYPE *__restrict cond, int n) \
+ MASKTYPE *__restrict cond, INTYPE bias, int n) \
{ \
for (int i = 0; i < n; ++i) \
- if (cond[i]) \
- { \
- dest[i * 4] = src[i]; \
- dest[i * 4 + 1] = src[i]; \
- dest[i * 4 + 2] = src[i]; \
- dest[i * 4 + 3] = src[i]; \
- } \
+ { \
+ INTYPE value = src[i] + bias; \
+ if (cond[i]) \
+ { \
+ dest[i * 4] = value; \
+ dest[i * 4 + 1] = value; \
+ dest[i * 4 + 2] = value; \
+ dest[i * 4 + 3] = value; \
+ } \
+ } \
}
#define TEST2(NAME, OUTTYPE, INTYPE) \
@@ -33,6 +36,7 @@
TEST1 (NAME##_i16, unsigned short) \
TEST1 (NAME##_i32, int) \
TEST1 (NAME##_i64, unsigned long) \
+ TEST2 (NAME##_f16_f16, _Float16, _Float16) \
TEST2 (NAME##_f32_f32, float, float) \
TEST2 (NAME##_f64_f64, double, double)
@@ -49,10 +53,10 @@ TEST (test)
/* Mask | 8 16 32 64
-------+------------
In 8 | 2 2 2 2
- 16 | 2 1 1 1
+ 16 | 2 1 1 1 x2 (for half float)
32 | 2 1 1 1
64 | 2 1 1 1. */
-/* { dg-final { scan-assembler-times {\tst4h\t.z[0-9]} 23 } } */
+/* { dg-final { scan-assembler-times {\tst4h\t.z[0-9]} 28 } } */
/* Mask | 8 16 32 64
-------+------------
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_store_3_run.c b/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_store_3_run.c
index f845818fa4d..cbac3da9db2 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_store_3_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_store_3_run.c
@@ -1,12 +1,10 @@
/* { dg-do run { target aarch64_sve_hw } } */
-/* { dg-options "-O2 -ftree-vectorize -fno-tree-dce -ffast-math -march=armv8-a+sve" } */
+/* { dg-options "-O2 -ftree-vectorize -ffast-math -march=armv8-a+sve" } */
#include "sve_mask_struct_store_3.c"
#define N 100
-volatile int x;
-
#undef TEST_LOOP
#define TEST_LOOP(NAME, OUTTYPE, INTYPE, MASKTYPE) \
{ \
@@ -17,21 +15,22 @@ volatile int x;
{ \
in[i] = i * 7 / 2; \
mask[i] = i % 5 <= i % 3; \
+ asm volatile ("" ::: "memory"); \
} \
for (int i = 0; i < N * 4; ++i) \
out[i] = i * 9 / 2; \
- NAME##_4 (out, in, mask, N); \
+ NAME##_4 (out, in, mask, 42, N); \
for (int i = 0; i < N * 4; ++i) \
{ \
- OUTTYPE if_true = in[i / 4]; \
+ OUTTYPE if_true = (INTYPE) (in[i / 4] + 42); \
OUTTYPE if_false = i * 9 / 2; \
if (out[i] != (mask[i / 4] ? if_true : if_false)) \
__builtin_abort (); \
- x += 1; \
+ asm volatile ("" ::: "memory"); \
} \
}
-int
+int __attribute__ ((optimize (1)))
main (void)
{
TEST (test);
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_store_4.c b/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_store_4.c
index ac2df82c539..9b4e75554f9 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_store_4.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_mask_struct_store_4.c
@@ -2,7 +2,7 @@
/* { dg-options "-O2 -ftree-vectorize -ffast-math -march=armv8-a+sve" } */
#define TEST_LOOP(NAME, OUTTYPE, INTYPE, MASKTYPE) \
- void __attribute__((weak)) \
+ void __attribute__ ((noinline, noclone)) \
NAME##_2 (OUTTYPE *__restrict dest, INTYPE *__restrict src, \
MASKTYPE *__restrict cond, int n) \
{ \
@@ -32,6 +32,7 @@
TEST1 (NAME##_i16, unsigned short) \
TEST1 (NAME##_i32, int) \
TEST1 (NAME##_i64, unsigned long) \
+ TEST2 (NAME##_f16_f16, _Float16, _Float16) \
TEST2 (NAME##_f32_f32, float, float) \
TEST2 (NAME##_f64_f64, double, double)
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_mla_1.c b/gcc/testsuite/gcc.target/aarch64/sve_mla_1.c
index a4d705e38ba..a2e671de3d3 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_mla_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_mla_1.c
@@ -3,10 +3,10 @@
#include <stdint.h>
-typedef int8_t v32qi __attribute__((vector_size(32)));
-typedef int16_t v16hi __attribute__((vector_size(32)));
-typedef int32_t v8si __attribute__((vector_size(32)));
-typedef int64_t v4di __attribute__((vector_size(32)));
+typedef int8_t vnx16qi __attribute__((vector_size(32)));
+typedef int16_t vnx8hi __attribute__((vector_size(32)));
+typedef int32_t vnx4si __attribute__((vector_size(32)));
+typedef int64_t vnx2di __attribute__((vector_size(32)));
#define DO_OP(TYPE) \
void vmla_##TYPE (TYPE *x, TYPE y, TYPE z) \
@@ -23,10 +23,10 @@ void vmla_##TYPE (TYPE *x, TYPE y, TYPE z) \
*x = dst; \
}
-DO_OP (v32qi)
-DO_OP (v16hi)
-DO_OP (v8si)
-DO_OP (v4di)
+DO_OP (vnx16qi)
+DO_OP (vnx8hi)
+DO_OP (vnx4si)
+DO_OP (vnx2di)
/* { dg-final { scan-assembler-times {\tmla\tz0\.b, p[0-7]/m, z2\.b, z4\.b\n} 1 } } */
/* { dg-final { scan-assembler-times {\tmla\tz0\.h, p[0-7]/m, z2\.h, z4\.h\n} 1 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_mls_1.c b/gcc/testsuite/gcc.target/aarch64/sve_mls_1.c
index b7cc1dba087..fb4454a1426 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_mls_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_mls_1.c
@@ -3,10 +3,10 @@
#include <stdint.h>
-typedef int8_t v32qi __attribute__((vector_size(32)));
-typedef int16_t v16hi __attribute__((vector_size(32)));
-typedef int32_t v8si __attribute__((vector_size(32)));
-typedef int64_t v4di __attribute__((vector_size(32)));
+typedef int8_t vnx16qi __attribute__((vector_size(32)));
+typedef int16_t vnx8hi __attribute__((vector_size(32)));
+typedef int32_t vnx4si __attribute__((vector_size(32)));
+typedef int64_t vnx2di __attribute__((vector_size(32)));
#define DO_OP(TYPE) \
void vmla_##TYPE (TYPE *x, TYPE y, TYPE z) \
@@ -23,10 +23,10 @@ void vmla_##TYPE (TYPE *x, TYPE y, TYPE z) \
*x = dst; \
}
-DO_OP (v32qi)
-DO_OP (v16hi)
-DO_OP (v8si)
-DO_OP (v4di)
+DO_OP (vnx16qi)
+DO_OP (vnx8hi)
+DO_OP (vnx4si)
+DO_OP (vnx2di)
/* { dg-final { scan-assembler-times {\tmls\tz0\.b, p[0-7]/m, z2\.b, z4\.b\n} 1 } } */
/* { dg-final { scan-assembler-times {\tmls\tz0\.h, p[0-7]/m, z2\.h, z4\.h\n} 1 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_mov_rr_1.c b/gcc/testsuite/gcc.target/aarch64/sve_mov_rr_1.c
index a38375af017..756263253c0 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_mov_rr_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_mov_rr_1.c
@@ -3,9 +3,9 @@
void sve_copy_rr (void)
{
- typedef int v8si __attribute__((vector_size(32)));
- register v8si x asm ("z1");
- register v8si y asm ("z2");
+ typedef int vnx4si __attribute__((vector_size(32)));
+ register vnx4si x asm ("z1");
+ register vnx4si y asm ("z2");
asm volatile ("#foo" : "=w" (x));
y = x;
asm volatile ("#foo" :: "w" (y));
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_msb_1.c b/gcc/testsuite/gcc.target/aarch64/sve_msb_1.c
index fc05837a920..38aab512376 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_msb_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_msb_1.c
@@ -3,10 +3,10 @@
#include <stdint.h>
-typedef int8_t v32qi __attribute__((vector_size(32)));
-typedef int16_t v16hi __attribute__((vector_size(32)));
-typedef int32_t v8si __attribute__((vector_size(32)));
-typedef int64_t v4di __attribute__((vector_size(32)));
+typedef int8_t vnx16qi __attribute__((vector_size(32)));
+typedef int16_t vnx8hi __attribute__((vector_size(32)));
+typedef int32_t vnx4si __attribute__((vector_size(32)));
+typedef int64_t vnx2di __attribute__((vector_size(32)));
#define DO_OP(TYPE) \
void vmla_##TYPE (TYPE *x, TYPE y, TYPE z) \
@@ -23,10 +23,10 @@ void vmla_##TYPE (TYPE *x, TYPE y, TYPE z) \
*x = dst; \
}
-DO_OP (v32qi)
-DO_OP (v16hi)
-DO_OP (v8si)
-DO_OP (v4di)
+DO_OP (vnx16qi)
+DO_OP (vnx8hi)
+DO_OP (vnx4si)
+DO_OP (vnx2di)
/* { dg-final { scan-assembler-times {\tmsb\tz0\.b, p[0-7]/m, z2\.b, z4\.b} 1 } } */
/* { dg-final { scan-assembler-times {\tmsb\tz0\.h, p[0-7]/m, z2\.h, z4\.h} 1 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_nopeel_1.c b/gcc/testsuite/gcc.target/aarch64/sve_nopeel_1.c
index 8f50308ebd5..a87fdd2aed2 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_nopeel_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_nopeel_1.c
@@ -1,36 +1,39 @@
/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve -msve-vector-bits=256" } */
-#define TEST(NAME, TYPE, ITYPE) \
+#include <stdint.h>
+
+#define TEST(NAME, TYPE) \
void \
- NAME##1 (TYPE *x, ITYPE n) \
+ NAME##1 (TYPE *x, int n) \
{ \
- for (ITYPE i = 0; i < n; ++i) \
+ for (int i = 0; i < n; ++i) \
x[i] += 1; \
} \
TYPE NAME##_array[1024]; \
void \
NAME##2 (void) \
{ \
- for (ITYPE i = 1; i < 200; ++i) \
+ for (int i = 1; i < 200; ++i) \
NAME##_array[i] += 1; \
}
-TEST (sc, signed char, unsigned char)
-TEST (uc, unsigned char, unsigned char)
-TEST (ss, signed short, unsigned short)
-TEST (us, unsigned short, signed short)
-TEST (si, signed int, signed int)
-TEST (ui, unsigned int, unsigned int)
-TEST (sl, signed long, unsigned long)
-TEST (ul, unsigned long, signed long)
-TEST (f, float, int)
-TEST (d, double, long)
+TEST (s8, int8_t)
+TEST (u8, uint8_t)
+TEST (s16, int16_t)
+TEST (u16, uint16_t)
+TEST (s32, int32_t)
+TEST (u32, uint32_t)
+TEST (s64, int64_t)
+TEST (u64, uint64_t)
+TEST (f16, _Float16)
+TEST (f32, float)
+TEST (f64, double)
/* No scalar memory accesses. */
/* { dg-final { scan-assembler-not {[wx][0-9]*, \[} } } */
/* 2 for each NAME##1 test, one in the header and one in the main loop
and 1 for each NAME##2 test, in the main loop only. */
/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.b,} 6 } } */
-/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.h,} 6 } } */
+/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.h,} 9 } } */
/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.s,} 9 } } */
/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.d,} 9 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_peel_ind_1.c b/gcc/testsuite/gcc.target/aarch64/sve_peel_ind_1.c
index a39f8241f46..23b1b2a51e5 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_peel_ind_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_peel_ind_1.c
@@ -9,7 +9,7 @@
int x[N] __attribute__((aligned(32)));
-void __attribute__((weak))
+void __attribute__((noinline, noclone))
foo (void)
{
unsigned int v = 0;
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_peel_ind_1_run.c b/gcc/testsuite/gcc.target/aarch64/sve_peel_ind_1_run.c
index 1ebaeea2bb9..6ed98ec075c 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_peel_ind_1_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_peel_ind_1_run.c
@@ -1,12 +1,10 @@
/* { dg-do run { target aarch64_sve_hw } } */
/* { dg-options "-O3 -march=armv8-a+sve -mtune=thunderx" } */
-/* { dg-options "-O3 -march=armv8-a+sve -msve-vector-bits=256 -mtune=thunderx" { target aarch64_sve256_hw } } */
+/* { dg-options "-O3 -march=armv8-a+sve -mtune=thunderx -msve-vector-bits=256" { target aarch64_sve256_hw } } */
#include "sve_peel_ind_1.c"
-volatile int y;
-
-int
+int __attribute__ ((optimize (1)))
main (void)
{
foo ();
@@ -14,7 +12,7 @@ main (void)
{
if (x[i] != (i < START || i >= END ? 0 : (i - START) * 5))
__builtin_abort ();
- y++;
+ asm volatile ("" ::: "memory");
}
return 0;
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_peel_ind_2.c b/gcc/testsuite/gcc.target/aarch64/sve_peel_ind_2.c
index 9ef8c7f85e4..af1a5aaa0ec 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_peel_ind_2.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_peel_ind_2.c
@@ -9,7 +9,7 @@
int x[N] __attribute__((aligned(32)));
-void __attribute__((weak))
+void __attribute__((noinline, noclone))
foo (void)
{
for (unsigned int i = START; i < END; ++i)
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_peel_ind_2_run.c b/gcc/testsuite/gcc.target/aarch64/sve_peel_ind_2_run.c
index b3e56bbbb7c..5565c32a888 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_peel_ind_2_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_peel_ind_2_run.c
@@ -1,12 +1,10 @@
/* { dg-do run { target aarch64_sve_hw } } */
/* { dg-options "-O3 -march=armv8-a+sve -mtune=thunderx" } */
-/* { dg-options "-O3 -march=armv8-a+sve -msve-vector-bits=256 -mtune=thunderx" { target aarch64_sve256_hw } } */
+/* { dg-options "-O3 -march=armv8-a+sve -mtune=thunderx -msve-vector-bits=256" { target aarch64_sve256_hw } } */
#include "sve_peel_ind_2.c"
-volatile int y;
-
-int
+int __attribute__ ((optimize (1)))
main (void)
{
foo ();
@@ -14,7 +12,7 @@ main (void)
{
if (x[i] != (i < START || i >= END ? 0 : i))
__builtin_abort ();
- y++;
+ asm volatile ("" ::: "memory");
}
return 0;
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_peel_ind_3.c b/gcc/testsuite/gcc.target/aarch64/sve_peel_ind_3.c
index 97a29f18361..a2602e781a1 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_peel_ind_3.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_peel_ind_3.c
@@ -9,7 +9,7 @@
int x[MAX_START][N] __attribute__((aligned(32)));
-void __attribute__((weak))
+void __attribute__((noinline, noclone))
foo (int start)
{
for (int i = start; i < start + COUNT; ++i)
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_peel_ind_3_run.c b/gcc/testsuite/gcc.target/aarch64/sve_peel_ind_3_run.c
index 9851c1cce64..ee8061a1163 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_peel_ind_3_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_peel_ind_3_run.c
@@ -1,12 +1,10 @@
/* { dg-do run { target aarch64_sve_hw } } */
/* { dg-options "-O3 -march=armv8-a+sve -mtune=thunderx" } */
-/* { dg-options "-O3 -march=armv8-a+sve -msve-vector-bits=256 -mtune=thunderx" { target aarch64_sve256_hw } } */
+/* { dg-options "-O3 -march=armv8-a+sve -mtune=thunderx -msve-vector-bits=256" { target aarch64_sve256_hw } } */
#include "sve_peel_ind_3.c"
-volatile int y;
-
-int
+int __attribute__ ((optimize (1)))
main (void)
{
for (int start = 0; start < MAX_START; ++start)
@@ -16,7 +14,7 @@ main (void)
{
if (x[start][i] != (i < start || i >= start + COUNT ? 0 : i))
__builtin_abort ();
- y++;
+ asm volatile ("" ::: "memory");
}
}
return 0;
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_peel_ind_4.c b/gcc/testsuite/gcc.target/aarch64/sve_peel_ind_4.c
index e5c55877341..6ab089522fb 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_peel_ind_4.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_peel_ind_4.c
@@ -6,7 +6,7 @@
#define START 1
#define END 505
-void __attribute__((weak))
+void __attribute__((noinline, noclone))
foo (double *x)
{
double v = 10.0;
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_peel_ind_4_run.c b/gcc/testsuite/gcc.target/aarch64/sve_peel_ind_4_run.c
index 60be4a038de..3764457ffcc 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_peel_ind_4_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_peel_ind_4_run.c
@@ -1,17 +1,18 @@
/* { dg-do run { target aarch64_sve_hw } } */
-/* { dg-options "-Ofast -march=armv8-a+sve -mtune=thunderx -fno-vect-cost-model" } */
-/* { dg-options "-Ofast -march=armv8-a+sve -msve-vector-bits=256 -mtune=thunderx -fno-vect-cost-model" { target aarch64_sve256_hw } } */
+/* { dg-options "-Ofast -march=armv8-a+sve -mtune=thunderx" } */
+/* { dg-options "-Ofast -march=armv8-a+sve -mtune=thunderx -mtune=thunderx" { target aarch64_sve256_hw } } */
#include "sve_peel_ind_4.c"
-volatile int y;
-
-int
+int __attribute__ ((optimize (1)))
main (void)
{
double x[END + 1];
for (int i = 0; i < END + 1; ++i)
- x[i] = i;
+ {
+ x[i] = i;
+ asm volatile ("" ::: "memory");
+ }
foo (x);
for (int i = 0; i < END + 1; ++i)
{
@@ -22,7 +23,7 @@ main (void)
expected = 10 + (i - START) * 5;
if (x[i] != expected)
__builtin_abort ();
- y++;
+ asm volatile ("" ::: "memory");
}
return 0;
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_reduc_strict_1.C b/gcc/testsuite/gcc.target/aarch64/sve_reduc_strict_1.C
deleted file mode 100644
index 53e10bcea01..00000000000
--- a/gcc/testsuite/gcc.target/aarch64/sve_reduc_strict_1.C
+++ /dev/null
@@ -1,48 +0,0 @@
-/* { dg-do compile } */
-/* { dg-options "-std=c++11 -O2 -ftree-vectorize -fno-inline -march=armv8-a+sve" } */
-
-#include <math.h>
-
-#define NUM_ELEMS(TYPE) (int)(5 * (256 / sizeof (TYPE)) + 3)
-
-#define DEF_REDUC_PLUS(TYPE)\
-TYPE reduc_plus_##TYPE (TYPE *__restrict__ a, TYPE *__restrict__ b)\
-{\
- TYPE r = 0, q = 3;\
- for (int i = 0; i < NUM_ELEMS(TYPE); i++)\
- {\
- r += a[i];\
- q -= b[i];\
- }\
- return r * q;\
-}\
-
-DEF_REDUC_PLUS (float)
-DEF_REDUC_PLUS (double)
-
-#define DEF_REDUC_MAXMIN(TYPE,FUN)\
-TYPE reduc_##FUN (TYPE *__restrict__ a, TYPE *__restrict__ b)\
-{\
- TYPE r = a[0], q = b[0];\
- for (int i = 0; i < NUM_ELEMS(TYPE); i++)\
- {\
- r = FUN (a[i], r);\
- q = FUN (b[i], q);\
- }\
- return r * q;\
-}\
-
-DEF_REDUC_MAXMIN (float, fmaxf)
-DEF_REDUC_MAXMIN (double, fmax)
-DEF_REDUC_MAXMIN (float, fminf)
-DEF_REDUC_MAXMIN (double, fmin)
-
-
-/* { dg-final { scan-assembler-times {\tfadda\ts[0-9]+, p[0-7], s[0-9]+, z[0-9]+\.s} 2 } } */
-/* { dg-final { scan-assembler-times {\tfadda\td[0-9]+, p[0-7], d[0-9]+, z[0-9]+\.d} 2 } } */
-
-/* { dg-final { scan-assembler-times {\tfmaxnmv\ts[0-9]+, p[0-7], z[0-9]+\.s} 2 { xfail *-*-* } } } */
-/* { dg-final { scan-assembler-times {\tfmaxnmv\td[0-9]+, p[0-7], z[0-9]+\.d} 2 { xfail *-*-* } } } */
-/* { dg-final { scan-assembler-times {\tfminnmv\ts[0-9]+, p[0-7], z[0-9]+\.s} 2 { xfail *-*-* } } } */
-/* { dg-final { scan-assembler-times {\tfminnmv\td[0-9]+, p[0-7], z[0-9]+\.d} 2 { xfail *-*-* } } } */
-
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_reduc_strict_1.c b/gcc/testsuite/gcc.target/aarch64/sve_reduc_strict_1.c
new file mode 100644
index 00000000000..eb3e7e656d7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve_reduc_strict_1.c
@@ -0,0 +1,28 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
+
+#define NUM_ELEMS(TYPE) ((int)(5 * (256 / sizeof (TYPE)) + 3))
+
+#define DEF_REDUC_PLUS(TYPE) \
+ TYPE __attribute__ ((noinline, noclone)) \
+ reduc_plus_##TYPE (TYPE *a, TYPE *b) \
+ { \
+ TYPE r = 0, q = 3; \
+ for (int i = 0; i < NUM_ELEMS(TYPE); i++) \
+ { \
+ r += a[i]; \
+ q -= b[i]; \
+ } \
+ return r * q; \
+ }
+
+#define TEST_ALL(T) \
+ T (_Float16) \
+ T (float) \
+ T (double)
+
+TEST_ALL (DEF_REDUC_PLUS)
+
+/* { dg-final { scan-assembler-times {\tfadda\th[0-9]+, p[0-7], h[0-9]+, z[0-9]+\.h} 2 } } */
+/* { dg-final { scan-assembler-times {\tfadda\ts[0-9]+, p[0-7], s[0-9]+, z[0-9]+\.s} 2 } } */
+/* { dg-final { scan-assembler-times {\tfadda\td[0-9]+, p[0-7], d[0-9]+, z[0-9]+\.d} 2 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_reduc_strict_1_run.C b/gcc/testsuite/gcc.target/aarch64/sve_reduc_strict_1_run.C
deleted file mode 100644
index 769d25165ea..00000000000
--- a/gcc/testsuite/gcc.target/aarch64/sve_reduc_strict_1_run.C
+++ /dev/null
@@ -1,47 +0,0 @@
-/* { dg-do run { target { aarch64_sve_hw } } } */
-/* { dg-options "-std=c++11 -O2 -ftree-vectorize -fno-inline -march=armv8-a+sve" } */
-
-#include "sve_reduc_strict_1.C"
-#include <stdlib.h>
-#include <stdio.h>
-#include <math.h>
-
-#define DEF_INIT_VECTOR(TYPE)\
- TYPE a_##TYPE[NUM_ELEMS (TYPE)];\
- TYPE b_##TYPE[NUM_ELEMS (TYPE)];\
- for (int i = 0; i < NUM_ELEMS (TYPE); i++ )\
- {\
- a_##TYPE[i] = (i * 2) * (i & 1 ? 1 : -1);\
- b_##TYPE[i] = (i * 3) * (i & 1 ? 1 : -1);\
- }
-
-#define TEST_REDUC_PLUS(RES,TYPE) (RES) += reduc_plus_##TYPE (a_##TYPE, b_##TYPE);
-#define TEST_REDUC_MAX(RES,TYPE) (RES) += reduc_fmax (a_##TYPE, b_##TYPE);
-#define TEST_REDUC_MAXF(RES,TYPE) (RES) += reduc_fmaxf (a_##TYPE, b_##TYPE);
-#define TEST_REDUC_MIN(RES,TYPE) (RES) += reduc_fmin (a_##TYPE, b_##TYPE);
-#define TEST_REDUC_MINF(RES,TYPE) (RES) += reduc_fminf (a_##TYPE, b_##TYPE);
-
-int main ()
-{
- double result = 0.0;
- DEF_INIT_VECTOR (float)
- DEF_INIT_VECTOR (double)
-
- TEST_REDUC_PLUS (result, float)
- TEST_REDUC_PLUS (result, double)
-
- TEST_REDUC_MINF (result, float)
- TEST_REDUC_MIN (result, double)
-
- TEST_REDUC_MAXF (result, float)
- TEST_REDUC_MAX (result, double)
-
- if (result != double (1356996))
- {
- fprintf (stderr, "result = %1.16lf\n", result);
- abort ();
- }
-
- return 0;
-}
-
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_reduc_strict_1_run.c b/gcc/testsuite/gcc.target/aarch64/sve_reduc_strict_1_run.c
new file mode 100644
index 00000000000..4c810d4a337
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve_reduc_strict_1_run.c
@@ -0,0 +1,29 @@
+/* { dg-do run { target { aarch64_sve_hw } } } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
+
+#include "sve_reduc_strict_1.c"
+
+#define TEST_REDUC_PLUS(TYPE) \
+ { \
+ TYPE a[NUM_ELEMS (TYPE)]; \
+ TYPE b[NUM_ELEMS (TYPE)]; \
+ TYPE r = 0, q = 3; \
+ for (int i = 0; i < NUM_ELEMS (TYPE); i++) \
+ { \
+ a[i] = (i * 0.1) * (i & 1 ? 1 : -1); \
+ b[i] = (i * 0.3) * (i & 1 ? 1 : -1); \
+ r += a[i]; \
+ q -= b[i]; \
+ asm volatile ("" ::: "memory"); \
+ } \
+ TYPE res = reduc_plus_##TYPE (a, b); \
+ if (res != r * q) \
+ __builtin_abort (); \
+ }
+
+int __attribute__ ((optimize (1)))
+main ()
+{
+ TEST_ALL (TEST_REDUC_PLUS);
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_reduc_strict_2.C b/gcc/testsuite/gcc.target/aarch64/sve_reduc_strict_2.C
deleted file mode 100644
index 542918abeb8..00000000000
--- a/gcc/testsuite/gcc.target/aarch64/sve_reduc_strict_2.C
+++ /dev/null
@@ -1,48 +0,0 @@
-/* { dg-do compile } */
-/* FIXME: With -O3 we don't generate reductions as the compiler unrolls the outer loop
- and processes the rows in parallel, performing in order reductions on the inner loop. */
-/* { dg-options "-std=c++11 -O2 -ftree-vectorize -fno-inline -march=armv8-a+sve" } */
-
-#include <math.h>
-
-#define NUM_ELEMS(TYPE) (int)(5 * (256 / sizeof (TYPE)) + 3)
-
-/* TODO: Test with inner loop = n * NUM_ELEMS(TYPE). */
-#define DEF_REDUC_PLUS(TYPE)\
-void reduc_plus_##TYPE (TYPE (*__restrict__ a)[NUM_ELEMS(TYPE)], TYPE *__restrict__ r, int n)\
-{\
- for (int i = 0; i < n; i++)\
- {\
- r[i] = 0;\
- for (int j = 0; j < NUM_ELEMS(TYPE); j++)\
- r[i] += a[i][j];\
- }\
-}\
-
-DEF_REDUC_PLUS (float)
-DEF_REDUC_PLUS (double)
-
-#define DEF_REDUC_MAXMIN(TYPE,FUN)\
-void reduc_##FUN (TYPE (*__restrict__ a)[NUM_ELEMS(TYPE)], TYPE *__restrict__ r, int n)\
-{\
- for (int i = 0; i < n; i++)\
- {\
- r[i] = a[i][0];\
- for (int j = 0; j < NUM_ELEMS(TYPE); j++)\
- r[i] = FUN (a[i][j], r[i]);\
- }\
-}\
-
-DEF_REDUC_MAXMIN (float, fmaxf)
-DEF_REDUC_MAXMIN (double, fmax)
-DEF_REDUC_MAXMIN (float, fminf)
-DEF_REDUC_MAXMIN (double, fmin)
-
-/* { dg-final { scan-assembler-times {\tfadda\ts[0-9]+, p[0-7], s[0-9]+, z[0-9]+\.s} 1 } } */
-/* { dg-final { scan-assembler-times {\tfadda\td[0-9]+, p[0-7], d[0-9]+, z[0-9]+\.d} 1 } } */
-
-/* { dg-final { scan-assembler-times {\tfmaxnmv\ts[0-9]+, p[0-7], z[0-9]+\.s} 1 { xfail *-*-* } } } */
-/* { dg-final { scan-assembler-times {\tfmaxnmv\td[0-9]+, p[0-7], z[0-9]+\.d} 1 { xfail *-*-* } } } */
-/* { dg-final { scan-assembler-times {\tfminnmv\ts[0-9]+, p[0-7], z[0-9]+\.s} 1 { xfail *-*-* } } } */
-/* { dg-final { scan-assembler-times {\tfminnmv\td[0-9]+, p[0-7], z[0-9]+\.d} 1 { xfail *-*-* } } } */
-
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_reduc_strict_2.c b/gcc/testsuite/gcc.target/aarch64/sve_reduc_strict_2.c
new file mode 100644
index 00000000000..672be8f793e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve_reduc_strict_2.c
@@ -0,0 +1,28 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
+
+#define NUM_ELEMS(TYPE) ((int) (5 * (256 / sizeof (TYPE)) + 3))
+
+#define DEF_REDUC_PLUS(TYPE) \
+void __attribute__ ((noinline, noclone)) \
+reduc_plus_##TYPE (TYPE (*restrict a)[NUM_ELEMS(TYPE)], \
+ TYPE *restrict r, int n) \
+{ \
+ for (int i = 0; i < n; i++) \
+ { \
+ r[i] = 0; \
+ for (int j = 0; j < NUM_ELEMS(TYPE); j++) \
+ r[i] += a[i][j]; \
+ } \
+}
+
+#define TEST_ALL(T) \
+ T (_Float16) \
+ T (float) \
+ T (double)
+
+TEST_ALL (DEF_REDUC_PLUS)
+
+/* { dg-final { scan-assembler-times {\tfadda\th[0-9]+, p[0-7], h[0-9]+, z[0-9]+\.h} 1 } } */
+/* { dg-final { scan-assembler-times {\tfadda\ts[0-9]+, p[0-7], s[0-9]+, z[0-9]+\.s} 1 } } */
+/* { dg-final { scan-assembler-times {\tfadda\td[0-9]+, p[0-7], d[0-9]+, z[0-9]+\.d} 1 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_reduc_strict_2_run.C b/gcc/testsuite/gcc.target/aarch64/sve_reduc_strict_2_run.C
deleted file mode 100644
index 86a930c7d33..00000000000
--- a/gcc/testsuite/gcc.target/aarch64/sve_reduc_strict_2_run.C
+++ /dev/null
@@ -1,59 +0,0 @@
-/* { dg-do run { target { aarch64_sve_hw } } } */
-/* { dg-options "-std=c++11 -O2 -ftree-vectorize -fno-inline -march=armv8-a+sve" } */
-
-#include "sve_reduc_strict_2.C"
-#include <stdlib.h>
-#include <stdio.h>
-#include <math.h>
-
-#define NROWS 5
-
-#define DEF_INIT_VECTOR(TYPE)\
- TYPE mat_##TYPE[NROWS][NUM_ELEMS (TYPE)];\
- TYPE r_##TYPE[NROWS];\
- for (int i = 0; i < NROWS; i++)\
- for (int j = 0; j < NUM_ELEMS (TYPE); j++ )\
- mat_##TYPE[i][j] = i + (j * 2) * (j & 1 ? 1 : -1);\
-
-#define TEST_REDUC_PLUS(TYPE) reduc_plus_##TYPE (mat_##TYPE, r_##TYPE, NROWS);
-#define TEST_REDUC_MAXF reduc_fmaxf (mat_float, r_float, NROWS);
-#define TEST_REDUC_MAX reduc_fmax (mat_double, r_double, NROWS);
-#define TEST_REDUC_MINF reduc_fminf (mat_float, r_float, NROWS);
-#define TEST_REDUC_MIN reduc_fmin (mat_double, r_double, NROWS);
-
-#define SUM_VECTOR(RES, TYPE)\
- for (int i = 0; i < NROWS; i++)\
- (RES) += r_##TYPE[i];
-
-#define SUM_FLOAT_RESULT(RES)\
- SUM_VECTOR (RES, float);\
- SUM_VECTOR (RES, double);\
-
-int main ()
-{
- double resultF = 0.0;
- DEF_INIT_VECTOR (float)
- DEF_INIT_VECTOR (double)
-
- TEST_REDUC_PLUS (float)
- TEST_REDUC_PLUS (double)
- SUM_FLOAT_RESULT (resultF);
-
- TEST_REDUC_MAXF
- TEST_REDUC_MAX
- SUM_FLOAT_RESULT (resultF);
-
- TEST_REDUC_MINF
- TEST_REDUC_MIN
- SUM_FLOAT_RESULT (resultF);
-
- if (resultF != double (2460))
- {
- fprintf (stderr, "resultF = %1.16lf\n", resultF);
- abort ();
- }
-
- return 0;
-}
-
-
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_reduc_strict_2_run.c b/gcc/testsuite/gcc.target/aarch64/sve_reduc_strict_2_run.c
new file mode 100644
index 00000000000..4741e6acb14
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve_reduc_strict_2_run.c
@@ -0,0 +1,31 @@
+/* { dg-do run { target { aarch64_sve_hw } } } */
+/* { dg-options "-O2 -ftree-vectorize -fno-inline -march=armv8-a+sve" } */
+
+#include "sve_reduc_strict_2.c"
+
+#define NROWS 5
+
+#define TEST_REDUC_PLUS(TYPE) \
+ { \
+ TYPE a[NROWS][NUM_ELEMS (TYPE)]; \
+ TYPE r[NROWS]; \
+ TYPE expected[NROWS] = {}; \
+ for (int i = 0; i < NROWS; ++i) \
+ for (int j = 0; j < NUM_ELEMS (TYPE); ++j) \
+ { \
+ a[i][j] = (i * 0.1 + j * 0.6) * (j & 1 ? 1 : -1); \
+ expected[i] += a[i][j]; \
+ asm volatile ("" ::: "memory"); \
+ } \
+ reduc_plus_##TYPE (a, r, NROWS); \
+ for (int i = 0; i < NROWS; ++i) \
+ if (r[i] != expected[i]) \
+ __builtin_abort (); \
+ }
+
+int __attribute__ ((optimize (1)))
+main ()
+{
+ TEST_ALL (TEST_REDUC_PLUS);
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_reduc_strict_3.C b/gcc/testsuite/gcc.target/aarch64/sve_reduc_strict_3.c
index 338aa614b47..ebed8e697c1 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_reduc_strict_3.C
+++ b/gcc/testsuite/gcc.target/aarch64/sve_reduc_strict_3.c
@@ -1,12 +1,13 @@
/* { dg-do compile } */
-/* { dg-options "-std=c++11 -O2 -ftree-vectorize -fno-inline -march=armv8-a+sve -msve-vector-bits=256 -fdump-tree-vect-details" } */
+/* { dg-options "-O2 -ftree-vectorize -fno-inline -march=armv8-a+sve -msve-vector-bits=256 -fdump-tree-vect-details" } */
double mat[100][4];
double mat2[100][8];
double mat3[100][12];
double mat4[100][3];
-double slp_reduc_plus (int n)
+double
+slp_reduc_plus (int n)
{
double tmp = 0.0;
for (int i = 0; i < n; i++)
@@ -19,7 +20,8 @@ double slp_reduc_plus (int n)
return tmp;
}
-double slp_reduc_plus2 (int n)
+double
+slp_reduc_plus2 (int n)
{
double tmp = 0.0;
for (int i = 0; i < n; i++)
@@ -36,7 +38,8 @@ double slp_reduc_plus2 (int n)
return tmp;
}
-double slp_reduc_plus3 (int n)
+double
+slp_reduc_plus3 (int n)
{
double tmp = 0.0;
for (int i = 0; i < n; i++)
@@ -57,7 +60,8 @@ double slp_reduc_plus3 (int n)
return tmp;
}
-void slp_non_chained_reduc (int n, double * __restrict__ out)
+void
+slp_non_chained_reduc (int n, double * restrict out)
{
for (int i = 0; i < 3; i++)
out[i] = 0;
@@ -73,7 +77,8 @@ void slp_non_chained_reduc (int n, double * __restrict__ out)
/* Strict FP reductions shouldn't be used for the outer loops, only the
inner loops. */
-float double_reduc1 (float (*__restrict__ i)[16])
+float
+double_reduc1 (float (*restrict i)[16])
{
float l = 0;
@@ -83,7 +88,8 @@ float double_reduc1 (float (*__restrict__ i)[16])
return l;
}
-float double_reduc2 (float *__restrict__ i)
+float
+double_reduc2 (float *restrict i)
{
float l = 0;
@@ -98,7 +104,8 @@ float double_reduc2 (float *__restrict__ i)
return l;
}
-float double_reduc3 (float *__restrict__ i, float *__restrict__ j)
+float
+double_reduc3 (float *restrict i, float *restrict j)
{
float k = 0, l = 0;
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_rev_1.c b/gcc/testsuite/gcc.target/aarch64/sve_rev_1.c
new file mode 100644
index 00000000000..7c4290a2dc3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve_rev_1.c
@@ -0,0 +1,49 @@
+/* { dg-do assemble } */
+/* { dg-options "-O -march=armv8-a+sve -msve-vector-bits=256 --save-temps" } */
+
+#include <stdint.h>
+
+typedef int8_t vnx16qi __attribute__((vector_size (32)));
+typedef int16_t vnx8hi __attribute__((vector_size (32)));
+typedef int32_t vnx4si __attribute__((vector_size (32)));
+typedef int64_t vnx2di __attribute__((vector_size (32)));
+typedef _Float16 vnx8hf __attribute__((vector_size (32)));
+typedef float vnx4sf __attribute__((vector_size (32)));
+typedef double vnx2df __attribute__((vector_size (32)));
+
+#define MASK_2(X, Y) (Y) - 1 - (X), (Y) - 2 - (X)
+#define MASK_4(X, Y) MASK_2 (X, Y), MASK_2 (X + 2, Y)
+#define MASK_8(X, Y) MASK_4 (X, Y), MASK_4 (X + 4, Y)
+#define MASK_16(X, Y) MASK_8 (X, Y), MASK_8 (X + 8, Y)
+#define MASK_32(X, Y) MASK_16 (X, Y), MASK_16 (X + 16, Y)
+
+#define INDEX_32 vnx16qi
+#define INDEX_16 vnx8hi
+#define INDEX_8 vnx4si
+#define INDEX_4 vnx2di
+
+#define PERMUTE(TYPE, NUNITS) \
+ TYPE permute_##TYPE (TYPE values1, TYPE values2) \
+ { \
+ return __builtin_shuffle \
+ (values1, values2, \
+ ((INDEX_##NUNITS) { MASK_##NUNITS (0, NUNITS) })); \
+ }
+
+#define TEST_ALL(T) \
+ T (vnx16qi, 32) \
+ T (vnx8hi, 16) \
+ T (vnx4si, 8) \
+ T (vnx2di, 4) \
+ T (vnx8hf, 16) \
+ T (vnx4sf, 8) \
+ T (vnx2df, 4)
+
+TEST_ALL (PERMUTE)
+
+/* { dg-final { scan-assembler-not {\ttbl\t} } } */
+
+/* { dg-final { scan-assembler-times {\trev\tz[0-9]+\.b, z[0-9]+\.b\n} 1 } } */
+/* { dg-final { scan-assembler-times {\trev\tz[0-9]+\.h, z[0-9]+\.h\n} 2 } } */
+/* { dg-final { scan-assembler-times {\trev\tz[0-9]+\.s, z[0-9]+\.s\n} 2 } } */
+/* { dg-final { scan-assembler-times {\trev\tz[0-9]+\.d, z[0-9]+\.d\n} 2 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_revb_1.c b/gcc/testsuite/gcc.target/aarch64/sve_revb_1.c
index 9307200fb05..709fd3b37b4 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_revb_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_revb_1.c
@@ -3,7 +3,7 @@
#include <stdint.h>
-typedef int8_t v32qi __attribute__((vector_size (32)));
+typedef int8_t vnx16qi __attribute__((vector_size (32)));
#define MASK_2(X, Y) (X) ^ (Y), (X + 1) ^ (Y)
#define MASK_4(X, Y) MASK_2 (X, Y), MASK_2 (X + 2, Y)
@@ -11,7 +11,7 @@ typedef int8_t v32qi __attribute__((vector_size (32)));
#define MASK_16(X, Y) MASK_8 (X, Y), MASK_8 (X + 8, Y)
#define MASK_32(X, Y) MASK_16 (X, Y), MASK_16 (X + 16, Y)
-#define INDEX_32 v32qi
+#define INDEX_32 vnx16qi
#define PERMUTE(TYPE, NUNITS, REV_NUNITS) \
TYPE permute_##TYPE##_##REV_NUNITS (TYPE values1, TYPE values2) \
@@ -22,9 +22,9 @@ typedef int8_t v32qi __attribute__((vector_size (32)));
}
#define TEST_ALL(T) \
- T (v32qi, 32, 2) \
- T (v32qi, 32, 4) \
- T (v32qi, 32, 8)
+ T (vnx16qi, 32, 2) \
+ T (vnx16qi, 32, 4) \
+ T (vnx16qi, 32, 8)
TEST_ALL (PERMUTE)
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_revh_1.c b/gcc/testsuite/gcc.target/aarch64/sve_revh_1.c
index fb238373c4e..fe3533cf6db 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_revh_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_revh_1.c
@@ -3,8 +3,8 @@
#include <stdint.h>
-typedef uint16_t v16hi __attribute__((vector_size (32)));
-typedef _Float16 v16hf __attribute__((vector_size (32)));
+typedef uint16_t vnx8hi __attribute__((vector_size (32)));
+typedef _Float16 vnx8hf __attribute__((vector_size (32)));
#define MASK_2(X, Y) (X) ^ (Y), (X + 1) ^ (Y)
#define MASK_4(X, Y) MASK_2 (X, Y), MASK_2 (X + 2, Y)
@@ -12,7 +12,7 @@ typedef _Float16 v16hf __attribute__((vector_size (32)));
#define MASK_16(X, Y) MASK_8 (X, Y), MASK_8 (X + 8, Y)
#define MASK_32(X, Y) MASK_16 (X, Y), MASK_16 (X + 16, Y)
-#define INDEX_16 v16hi
+#define INDEX_16 vnx8hi
#define PERMUTE(TYPE, NUNITS, REV_NUNITS) \
TYPE permute_##TYPE##_##REV_NUNITS (TYPE values1, TYPE values2) \
@@ -23,10 +23,10 @@ typedef _Float16 v16hf __attribute__((vector_size (32)));
}
#define TEST_ALL(T) \
- T (v16hi, 16, 2) \
- T (v16hi, 16, 4) \
- T (v16hf, 16, 2) \
- T (v16hf, 16, 4)
+ T (vnx8hi, 16, 2) \
+ T (vnx8hi, 16, 4) \
+ T (vnx8hf, 16, 2) \
+ T (vnx8hf, 16, 4)
TEST_ALL (PERMUTE)
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_revw_1.c b/gcc/testsuite/gcc.target/aarch64/sve_revw_1.c
index 4834e2c2b01..a6b95f52880 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_revw_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_revw_1.c
@@ -3,14 +3,14 @@
#include <stdint.h>
-typedef uint32_t v8si __attribute__((vector_size (32)));
-typedef float v8sf __attribute__((vector_size (32)));
+typedef uint32_t vnx4si __attribute__((vector_size (32)));
+typedef float vnx4sf __attribute__((vector_size (32)));
#define MASK_2(X, Y) (X) ^ (Y), (X + 1) ^ (Y)
#define MASK_4(X, Y) MASK_2 (X, Y), MASK_2 (X + 2, Y)
#define MASK_8(X, Y) MASK_4 (X, Y), MASK_4 (X + 4, Y)
-#define INDEX_8 v8si
+#define INDEX_8 vnx4si
#define PERMUTE(TYPE, NUNITS, REV_NUNITS) \
TYPE permute_##TYPE##_##REV_NUNITS (TYPE values1, TYPE values2) \
@@ -21,8 +21,8 @@ typedef float v8sf __attribute__((vector_size (32)));
}
#define TEST_ALL(T) \
- T (v8si, 8, 2) \
- T (v8sf, 8, 2)
+ T (vnx4si, 8, 2) \
+ T (vnx4sf, 8, 2)
TEST_ALL (PERMUTE)
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_scatter_store_1.c b/gcc/testsuite/gcc.target/aarch64/sve_scatter_store_1.c
index 2270be2bd29..43a7e831cae 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_scatter_store_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_scatter_store_1.c
@@ -1,109 +1,31 @@
/* { dg-do assemble } */
/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
-#define SCATTER_STORE1(OBJTYPE,STRIDETYPE,STRIDE)\
-void scatter_store1##OBJTYPE##STRIDETYPE##STRIDE (OBJTYPE * restrict dst,\
- OBJTYPE * restrict src,\
- STRIDETYPE count)\
-{\
- for (STRIDETYPE i=0; i<count; i++)\
- dst[i * STRIDE] = src[i];\
-}
-
-#define SCATTER_STORE2(OBJTYPE,STRIDETYPE)\
-void scatter_store2##OBJTYPE##STRIDETYPE (OBJTYPE * restrict dst,\
- OBJTYPE * restrict src,\
- STRIDETYPE stride,\
- STRIDETYPE count)\
-{\
- for (STRIDETYPE i=0; i<count; i++)\
- dst[i * stride] = src[i];\
-}
-
-#define SCATTER_STORE3(OBJTYPE,STRIDETYPE)\
-void scatter_store3s5##OBJTYPE##STRIDETYPE\
- (OBJTYPE * restrict dst, OBJTYPE * restrict s1, OBJTYPE * restrict s2,\
- OBJTYPE * restrict s3, OBJTYPE * restrict s4, OBJTYPE * restrict s5,\
- STRIDETYPE count)\
-{\
- const STRIDETYPE STRIDE = 5;\
- for (STRIDETYPE i=0; i<count; i++)\
- {\
- dst[0 + (i * STRIDE)] = s1[i];\
- dst[4 + (i * STRIDE)] = s5[i];\
- dst[1 + (i * STRIDE)] = s2[i];\
- dst[2 + (i * STRIDE)] = s3[i];\
- dst[3 + (i * STRIDE)] = s4[i];\
- }\
-}
-
-#define SCATTER_STORE4(OBJTYPE,STRIDETYPE,STRIDE)\
-void scatter_store4##OBJTYPE##STRIDETYPE##STRIDE (OBJTYPE * restrict dst,\
- OBJTYPE * restrict src,\
- STRIDETYPE count)\
-{\
- for (STRIDETYPE i=0; i<count; i++)\
- {\
- *dst = *src;\
- dst += STRIDE;\
- src += 1;\
- }\
-}
-
-#define SCATTER_STORE5(OBJTYPE,STRIDETYPE)\
-void scatter_store5##OBJTYPE##STRIDETYPE (OBJTYPE * restrict dst,\
- OBJTYPE * restrict src,\
- STRIDETYPE stride,\
- STRIDETYPE count)\
-{\
- for (STRIDETYPE i=0; i<count; i++)\
- {\
- *dst = *src;\
- dst += stride;\
- src += 1;\
- }\
-}
-
-SCATTER_STORE1 (double, long, 5)
-SCATTER_STORE1 (double, long, 8)
-SCATTER_STORE1 (double, long, 21)
-SCATTER_STORE1 (double, long, 1009)
-
-SCATTER_STORE1 (float, int, 5)
-SCATTER_STORE1 (float, int, 8)
-SCATTER_STORE1 (float, int, 21)
-SCATTER_STORE1 (float, int, 1009)
-
-SCATTER_STORE2 (double, long)
-SCATTER_STORE2 (float, int)
-
-SCATTER_STORE3 (double, long)
-SCATTER_STORE3 (float, int)
-
-SCATTER_STORE4 (double, long, 5)
-/* NOTE: We can't vectorize SCATTER_STORE4 (float, int, 5) because we can't
- prove that the offsets used for the gather load won't overflow. */
-
-SCATTER_STORE5 (double, long)
-SCATTER_STORE5 (float, int)
-
-/* Widened forms. */
-SCATTER_STORE1 (double, int, 5)
-SCATTER_STORE1 (double, int, 8)
-SCATTER_STORE1 (double, short, 5)
-SCATTER_STORE1 (double, short, 8)
-
-SCATTER_STORE1 (float, short, 5)
-SCATTER_STORE1 (float, short, 8)
-
-SCATTER_STORE2 (double, int)
-SCATTER_STORE2 (float, short)
-
-SCATTER_STORE4 (double, int, 5)
-SCATTER_STORE4 (float, short, 5)
-
-SCATTER_STORE5 (double, int)
-
-/* { dg-final { scan-assembler-times "st1d\\tz\[0-9\]+.d, p\[0-9\]+, \\\[x\[0-9\]+, z\[0-9\]+.d\\\]" 19 } } */
-/* { dg-final { scan-assembler-times "st1w\\tz\[0-9\]+.s, p\[0-9\]+, \\\[x\[0-9\]+, z\[0-9\]+.s, sxtw 2\\\]" 12 } } */
-/* { dg-final { scan-assembler-times "st1w\\tz\[0-9\]+.s, p\[0-9\]+, \\\[x\[0-9\]+, z\[0-9\]+.s, sxtw\\\]" 3 } } */
+#include <stdint.h>
+
+#ifndef INDEX32
+#define INDEX32 int32_t
+#define INDEX64 int64_t
+#endif
+
+#define TEST_LOOP(DATA_TYPE, BITS) \
+ void __attribute__ ((noinline, noclone)) \
+ f_##DATA_TYPE (DATA_TYPE *restrict dest, DATA_TYPE *restrict src, \
+ INDEX##BITS *indices, int n) \
+ { \
+ for (int i = 9; i < n; ++i) \
+ dest[indices[i]] = src[i] + 1; \
+ }
+
+#define TEST_ALL(T) \
+ T (int32_t, 32) \
+ T (uint32_t, 32) \
+ T (float, 32) \
+ T (int64_t, 64) \
+ T (uint64_t, 64) \
+ T (double, 64)
+
+TEST_ALL (TEST_LOOP)
+
+/* { dg-final { scan-assembler-times {\tst1w\tz[0-9]+\.s, p[0-7], \[x[0-9]+, z[0-9]+.s, sxtw 2\]\n} 3 } } */
+/* { dg-final { scan-assembler-times {\tst1d\tz[0-9]+\.d, p[0-7], \[x[0-9]+, z[0-9]+.d, lsl 3\]\n} 3 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_scatter_store_1_run.c b/gcc/testsuite/gcc.target/aarch64/sve_scatter_store_1_run.c
deleted file mode 100644
index 4d8cddc510f..00000000000
--- a/gcc/testsuite/gcc.target/aarch64/sve_scatter_store_1_run.c
+++ /dev/null
@@ -1,155 +0,0 @@
-/* { dg-do run { target { aarch64_sve_hw } } } */
-/* { dg-options "-O2 -ftree-vectorize -fno-inline -march=armv8-a+sve" } */
-
-#include <unistd.h>
-
-extern void abort (void);
-extern void *memset(void *, int, size_t);
-
-#include "sve_scatter_store_1.c"
-
-#define NUM_SRC_ELEMS 13
-#define NUM_DST_ELEMS(STRIDE) (NUM_SRC_ELEMS * STRIDE)
-
-#define TEST_SCATTER_STORE_COMMON1(FUN,OBJTYPE,STRIDETYPE,STRIDE)\
-{\
- OBJTYPE real_src[1 + NUM_SRC_ELEMS]\
- __attribute__((aligned (32)));\
- OBJTYPE real_dst[1 + NUM_DST_ELEMS (STRIDE)]\
- __attribute__((aligned (32)));\
- memset (real_src, 0, (1 + NUM_SRC_ELEMS) * sizeof (OBJTYPE));\
- memset (real_dst, 0, (1 + NUM_DST_ELEMS (STRIDE)) * sizeof (OBJTYPE));\
- OBJTYPE *src = &real_src[1];\
- OBJTYPE *dst = &real_dst[1];\
- for (STRIDETYPE i = 0; i < NUM_SRC_ELEMS; i++)\
- src[i] = i;\
- FUN##OBJTYPE##STRIDETYPE##STRIDE (dst, src, NUM_SRC_ELEMS); \
- for (STRIDETYPE i = 0; i < NUM_SRC_ELEMS; i++)\
- if (dst[i * STRIDE] != i)\
- abort ();\
-}
-
-#define TEST_SCATTER_STORE_COMMON2(FUN,OBJTYPE,STRIDETYPE,STRIDE)\
-{\
- OBJTYPE real_src[1 + NUM_SRC_ELEMS]\
- __attribute__((aligned (32)));\
- OBJTYPE real_dst[1 + NUM_DST_ELEMS (STRIDE)]\
- __attribute__((aligned (32)));\
- memset (real_src, 0, (1 + NUM_SRC_ELEMS) * sizeof (OBJTYPE));\
- memset (real_dst, 0, (1 + NUM_DST_ELEMS (STRIDE)) * sizeof (OBJTYPE));\
- OBJTYPE *src = &real_src[1];\
- OBJTYPE *dst = &real_dst[1];\
- for (STRIDETYPE i = 0; i < NUM_SRC_ELEMS; i++)\
- src[i] = i;\
- FUN##OBJTYPE##STRIDETYPE (dst, src, STRIDE, NUM_SRC_ELEMS); \
- for (STRIDETYPE i = 0; i < NUM_SRC_ELEMS; i++)\
- if (dst[i * STRIDE] != i)\
- abort ();\
-}
-
-#define TEST_SCATTER_STORE1(OBJTYPE,STRIDETYPE,STRIDE) \
- TEST_SCATTER_STORE_COMMON1 (scatter_store1, OBJTYPE, STRIDETYPE, STRIDE)
-
-#define TEST_SCATTER_STORE2(OBJTYPE,STRIDETYPE,STRIDE) \
- TEST_SCATTER_STORE_COMMON2 (scatter_store2, OBJTYPE, STRIDETYPE, STRIDE)
-
-#define TEST_SCATTER_STORE3(OBJTYPE,STRIDETYPE)\
-{\
- OBJTYPE real_src1[1 + NUM_SRC_ELEMS]\
- __attribute__((aligned (32)));\
- OBJTYPE real_src2[1 + NUM_SRC_ELEMS]\
- __attribute__((aligned (32)));\
- OBJTYPE real_src3[1 + NUM_SRC_ELEMS]\
- __attribute__((aligned (32)));\
- OBJTYPE real_src4[1 + NUM_SRC_ELEMS]\
- __attribute__((aligned (32)));\
- OBJTYPE real_src5[1 + NUM_SRC_ELEMS]\
- __attribute__((aligned (32)));\
- OBJTYPE real_dst[1 + NUM_DST_ELEMS (5)]\
- __attribute__((aligned (32)));\
- memset (real_src1, 0, (1 + NUM_SRC_ELEMS) * sizeof (OBJTYPE));\
- memset (real_src2, 0, (1 + NUM_SRC_ELEMS) * sizeof (OBJTYPE));\
- memset (real_src3, 0, (1 + NUM_SRC_ELEMS) * sizeof (OBJTYPE));\
- memset (real_src4, 0, (1 + NUM_SRC_ELEMS) * sizeof (OBJTYPE));\
- memset (real_src5, 0, (1 + NUM_SRC_ELEMS) * sizeof (OBJTYPE));\
- memset (real_dst, 0, (1 + NUM_DST_ELEMS (5)) * sizeof (OBJTYPE));\
- OBJTYPE *src1 = &real_src1[1];\
- OBJTYPE *src2 = &real_src2[1];\
- OBJTYPE *src3 = &real_src3[1];\
- OBJTYPE *src4 = &real_src4[1];\
- OBJTYPE *src5 = &real_src5[1];\
- OBJTYPE *dst = &real_dst[1];\
- for (STRIDETYPE i = 0; i < NUM_SRC_ELEMS; i++)\
- {\
- STRIDETYPE base = i * 5;\
- src1[i] = base;\
- src2[i] = base + 1;\
- src3[i] = base + 2;\
- src4[i] = base + 3;\
- src5[i] = base + 4;\
- }\
- scatter_store3s5##OBJTYPE##STRIDETYPE \
- (dst, src1, src2, src3, src4, src5, NUM_SRC_ELEMS); \
- for (STRIDETYPE i = 0; i < NUM_DST_ELEMS (5); i++)\
- if (dst[i] != i)\
- abort ();\
-}
-
-#define TEST_SCATTER_STORE4(OBJTYPE,STRIDETYPE,STRIDE) \
- TEST_SCATTER_STORE_COMMON1 (scatter_store4, OBJTYPE, STRIDETYPE, STRIDE)
-
-#define TEST_SCATTER_STORE5(OBJTYPE,STRIDETYPE,STRIDE) \
- TEST_SCATTER_STORE_COMMON2 (scatter_store5, OBJTYPE, STRIDETYPE, STRIDE)
-
-int __attribute__ ((optimize (1)))
-main ()
-{
- TEST_SCATTER_STORE1 (double, long, 5);
- TEST_SCATTER_STORE1 (double, long, 8);
- TEST_SCATTER_STORE1 (double, long, 21);
-
- TEST_SCATTER_STORE1 (float, int, 5);
- TEST_SCATTER_STORE1 (float, int, 8);
- TEST_SCATTER_STORE1 (float, int, 21);
-
- TEST_SCATTER_STORE2 (double, long, 5);
- TEST_SCATTER_STORE2 (double, long, 8);
- TEST_SCATTER_STORE2 (double, long, 21);
-
- TEST_SCATTER_STORE2 (float, int, 5);
- TEST_SCATTER_STORE2 (float, int, 8);
- TEST_SCATTER_STORE2 (float, int, 21);
-
- TEST_SCATTER_STORE3 (double, long);
- TEST_SCATTER_STORE3 (float, int);
-
- TEST_SCATTER_STORE4 (double, long, 5);
-
- TEST_SCATTER_STORE5 (double, long, 5);
- TEST_SCATTER_STORE5 (float, int, 5);
-
- /* Widened forms. */
- TEST_SCATTER_STORE1 (double, int, 5)
- TEST_SCATTER_STORE1 (double, int, 8)
- TEST_SCATTER_STORE1 (double, short, 5)
- TEST_SCATTER_STORE1 (double, short, 8)
-
- TEST_SCATTER_STORE1 (float, short, 5)
- TEST_SCATTER_STORE1 (float, short, 8)
-
- TEST_SCATTER_STORE2 (double, int, 5);
- TEST_SCATTER_STORE2 (double, int, 8);
- TEST_SCATTER_STORE2 (double, int, 21);
-
- TEST_SCATTER_STORE2 (float, short, 5);
- TEST_SCATTER_STORE2 (float, short, 8);
- TEST_SCATTER_STORE2 (float, short, 21);
-
- TEST_SCATTER_STORE4 (double, int, 5);
- TEST_SCATTER_STORE4 (float, short, 5);
-
- TEST_SCATTER_STORE5 (double, int, 5);
-
- return 0;
-}
-
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_scatter_store_2.c b/gcc/testsuite/gcc.target/aarch64/sve_scatter_store_2.c
new file mode 100644
index 00000000000..dcc96f07fc5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve_scatter_store_2.c
@@ -0,0 +1,10 @@
+/* { dg-do assemble } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
+
+#define INDEX32 uint32_t
+#define INDEX64 uint64_t
+
+#include "sve_scatter_store_1.c"
+
+/* { dg-final { scan-assembler-times {\tst1w\tz[0-9]+\.s, p[0-7], \[x[0-9]+, z[0-9]+.s, uxtw 2\]\n} 3 } } */
+/* { dg-final { scan-assembler-times {\tst1d\tz[0-9]+\.d, p[0-7], \[x[0-9]+, z[0-9]+.d, lsl 3\]\n} 3 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_scatter_store_3.c b/gcc/testsuite/gcc.target/aarch64/sve_scatter_store_3.c
new file mode 100644
index 00000000000..d09c4015aa0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve_scatter_store_3.c
@@ -0,0 +1,32 @@
+/* { dg-do assemble } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
+
+#include <stdint.h>
+
+#ifndef INDEX32
+#define INDEX32 int32_t
+#define INDEX64 int64_t
+#endif
+
+/* Invoked 18 times for each data size. */
+#define TEST_LOOP(DATA_TYPE, BITS) \
+ void __attribute__ ((noinline, noclone)) \
+ f_##DATA_TYPE (DATA_TYPE *restrict dest, DATA_TYPE *restrict src, \
+ INDEX##BITS *indices, int n) \
+ { \
+ for (int i = 9; i < n; ++i) \
+ *(DATA_TYPE *) ((char *) dest + indices[i]) = src[i] + 1; \
+ }
+
+#define TEST_ALL(T) \
+ T (int32_t, 32) \
+ T (uint32_t, 32) \
+ T (float, 32) \
+ T (int64_t, 64) \
+ T (uint64_t, 64) \
+ T (double, 64)
+
+TEST_ALL (TEST_LOOP)
+
+/* { dg-final { scan-assembler-times {\tst1w\tz[0-9]+\.s, p[0-7], \[x[0-9]+, z[0-9]+.s, sxtw\]\n} 3 } } */
+/* { dg-final { scan-assembler-times {\tst1d\tz[0-9]+\.d, p[0-7], \[x[0-9]+, z[0-9]+.d\]\n} 3 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_scatter_store_4.c b/gcc/testsuite/gcc.target/aarch64/sve_scatter_store_4.c
new file mode 100644
index 00000000000..c4f2dae481b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve_scatter_store_4.c
@@ -0,0 +1,10 @@
+/* { dg-do assemble } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
+
+#define INDEX32 uint32_t
+#define INDEX64 uint64_t
+
+#include "sve_scatter_store_3.c"
+
+/* { dg-final { scan-assembler-times {\tst1w\tz[0-9]+\.s, p[0-7], \[x[0-9]+, z[0-9]+.s, uxtw\]\n} 3 } } */
+/* { dg-final { scan-assembler-times {\tst1d\tz[0-9]+\.d, p[0-7], \[x[0-9]+, z[0-9]+.d\]\n} 3 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_scatter_store_5.c b/gcc/testsuite/gcc.target/aarch64/sve_scatter_store_5.c
new file mode 100644
index 00000000000..7b117bc0b2b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve_scatter_store_5.c
@@ -0,0 +1,23 @@
+/* { dg-do assemble } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
+
+#include <stdint.h>
+
+/* Invoked 18 times for each data size. */
+#define TEST_LOOP(DATA_TYPE) \
+ void __attribute__ ((noinline, noclone)) \
+ f_##DATA_TYPE (DATA_TYPE *restrict *dest, DATA_TYPE *restrict src, \
+ int n) \
+ { \
+ for (int i = 9; i < n; ++i) \
+ *dest[i] = src[i] + 1; \
+ }
+
+#define TEST_ALL(T) \
+ T (int64_t) \
+ T (uint64_t) \
+ T (double)
+
+TEST_ALL (TEST_LOOP)
+
+/* { dg-final { scan-assembler-times {\tst1d\tz[0-9]+\.d, p[0-7], \[z[0-9]+.d\]\n} 3 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_scatter_store_6.c b/gcc/testsuite/gcc.target/aarch64/sve_scatter_store_6.c
new file mode 100644
index 00000000000..14e68267c9f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve_scatter_store_6.c
@@ -0,0 +1,36 @@
+/* { dg-do assemble } */
+/* { dg-options "-O2 -ftree-vectorize -fwrapv -march=armv8-a+sve --save-temps" } */
+
+#include <stdint.h>
+
+#ifndef INDEX32
+#define INDEX16 int16_t
+#define INDEX32 int32_t
+#endif
+
+/* Invoked 18 times for each data size. */
+#define TEST_LOOP(DATA_TYPE, BITS) \
+ void __attribute__ ((noinline, noclone)) \
+ f_##DATA_TYPE (DATA_TYPE *restrict dest, DATA_TYPE *restrict src, \
+ INDEX##BITS *indices, INDEX##BITS mask, int n) \
+ { \
+ for (int i = 9; i < n; ++i) \
+ dest[(INDEX##BITS) (indices[i] | mask)] = src[i] + 1; \
+ }
+
+#define TEST_ALL(T) \
+ T (int32_t, 16) \
+ T (uint32_t, 16) \
+ T (float, 16) \
+ T (int64_t, 32) \
+ T (uint64_t, 32) \
+ T (double, 32)
+
+TEST_ALL (TEST_LOOP)
+
+/* { dg-final { scan-assembler-times {\tsunpkhi\tz[0-9]+\.s, z[0-9]+\.h\n} 3 } } */
+/* { dg-final { scan-assembler-times {\tsunpklo\tz[0-9]+\.s, z[0-9]+\.h\n} 3 } } */
+/* { dg-final { scan-assembler-times {\tsunpkhi\tz[0-9]+\.d, z[0-9]+\.s\n} 3 } } */
+/* { dg-final { scan-assembler-times {\tsunpklo\tz[0-9]+\.d, z[0-9]+\.s\n} 3 } } */
+/* { dg-final { scan-assembler-times {\tst1w\tz[0-9]+\.s, p[0-7], \[x[0-9]+, z[0-9]+.s, sxtw 2\]\n} 6 } } */
+/* { dg-final { scan-assembler-times {\tst1d\tz[0-9]+\.d, p[0-7], \[x[0-9]+, z[0-9]+.d, lsl 3\]\n} 6 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_scatter_store_7.c b/gcc/testsuite/gcc.target/aarch64/sve_scatter_store_7.c
new file mode 100644
index 00000000000..89e2d305c29
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve_scatter_store_7.c
@@ -0,0 +1,15 @@
+/* { dg-do assemble } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
+
+#define INDEX16 uint16_t
+#define INDEX32 uint32_t
+
+#include "sve_scatter_store_6.c"
+
+/* { dg-final { scan-assembler-times {\tuunpkhi\tz[0-9]+\.s, z[0-9]+\.h\n} 3 } } */
+/* { dg-final { scan-assembler-times {\tuunpklo\tz[0-9]+\.s, z[0-9]+\.h\n} 3 } } */
+/* { dg-final { scan-assembler-times {\tuunpkhi\tz[0-9]+\.d, z[0-9]+\.s\n} 3 } } */
+/* { dg-final { scan-assembler-times {\tuunpklo\tz[0-9]+\.d, z[0-9]+\.s\n} 3 } } */
+/* Either extension type is OK here. */
+/* { dg-final { scan-assembler-times {\tst1w\tz[0-9]+\.s, p[0-7], \[x[0-9]+, z[0-9]+.s, [us]xtw 2\]\n} 6 } } */
+/* { dg-final { scan-assembler-times {\tst1d\tz[0-9]+\.d, p[0-7], \[x[0-9]+, z[0-9]+.d, lsl 3\]\n} 6 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_slp_1.c b/gcc/testsuite/gcc.target/aarch64/sve_slp_1.c
index 460359e4be3..23327a7a152 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_slp_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_slp_1.c
@@ -4,7 +4,7 @@
#include <stdint.h>
#define VEC_PERM(TYPE) \
-TYPE __attribute__ ((weak)) \
+TYPE __attribute__ ((noinline, noclone)) \
vec_slp_##TYPE (TYPE *restrict a, TYPE b, TYPE c, int n) \
{ \
for (int i = 0; i < n; ++i) \
@@ -23,15 +23,18 @@ vec_slp_##TYPE (TYPE *restrict a, TYPE b, TYPE c, int n) \
T (uint32_t) \
T (int64_t) \
T (uint64_t) \
+ T (_Float16) \
T (float) \
T (double)
TEST_ALL (VEC_PERM)
-/* We should use one DUP for each of the 8-, 16- and 32-bit types.
- We should use two DUPs for each of the three 64-bit types. */
+/* We should use one DUP for each of the 8-, 16- and 32-bit types,
+ although we currently use LD1RW for _Float16. We should use two
+ DUPs for each of the three 64-bit types. */
/* { dg-final { scan-assembler-times {\tmov\tz[0-9]+\.h, [hw]} 2 } } */
/* { dg-final { scan-assembler-times {\tmov\tz[0-9]+\.s, [sw]} 2 } } */
+/* { dg-final { scan-assembler-times {\tld1rw\tz[0-9]+\.s, } 1 } } */
/* { dg-final { scan-assembler-times {\tmov\tz[0-9]+\.d, [dx]} 9 } } */
/* { dg-final { scan-assembler-times {\tzip1\tz[0-9]+\.d, z[0-9]+\.d, z[0-9]+\.d\n} 3 } } */
/* { dg-final { scan-assembler-not {\tzip2\t} } } */
@@ -39,17 +42,18 @@ TEST_ALL (VEC_PERM)
/* The loop should be fully-masked. */
/* { dg-final { scan-assembler-times {\tld1b\t} 2 } } */
/* { dg-final { scan-assembler-times {\tst1b\t} 2 } } */
-/* { dg-final { scan-assembler-times {\tld1h\t} 2 } } */
-/* { dg-final { scan-assembler-times {\tst1h\t} 2 } } */
+/* { dg-final { scan-assembler-times {\tld1h\t} 3 } } */
+/* { dg-final { scan-assembler-times {\tst1h\t} 3 } } */
/* { dg-final { scan-assembler-times {\tld1w\t} 3 } } */
/* { dg-final { scan-assembler-times {\tst1w\t} 3 } } */
/* { dg-final { scan-assembler-times {\tld1d\t} 3 } } */
/* { dg-final { scan-assembler-times {\tst1d\t} 3 } } */
/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.b} 4 } } */
-/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.h} 4 } } */
+/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.h} 6 } } */
/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.s} 6 } } */
/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.d} 6 } } */
/* { dg-final { scan-assembler-not {\tldr} } } */
-/* { dg-final { scan-assembler-not {\tstr} } } */
+/* { dg-final { scan-assembler-times {\tstr} 2 } } */
+/* { dg-final { scan-assembler-times {\tstr\th[0-9]+} 2 } } */
/* { dg-final { scan-assembler-not {\tuqdec} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_slp_10.c b/gcc/testsuite/gcc.target/aarch64/sve_slp_10.c
index 7dd3640966a..0c10d934259 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_slp_10.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_slp_10.c
@@ -4,7 +4,7 @@
#include <stdint.h>
#define VEC_PERM(TYPE) \
-void __attribute__ ((weak)) \
+void __attribute__ ((noinline, noclone)) \
vec_slp_##TYPE (TYPE *restrict a, TYPE *restrict b, int n) \
{ \
for (int i = 0; i < n; ++i) \
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_slp_10_run.c b/gcc/testsuite/gcc.target/aarch64/sve_slp_10_run.c
index c1aeaf9b06e..08cad65ab63 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_slp_10_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_slp_10_run.c
@@ -47,7 +47,7 @@
} \
}
-int
+int __attribute__ ((optimize (1)))
main (void)
{
TEST_ALL (HARNESS)
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_slp_11.c b/gcc/testsuite/gcc.target/aarch64/sve_slp_11.c
index 3db5769deed..ce6060a52df 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_slp_11.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_slp_11.c
@@ -4,7 +4,7 @@
#include <stdint.h>
#define VEC_PERM(TYPE1, TYPE2) \
-void __attribute__ ((weak)) \
+void __attribute__ ((noinline, noclone)) \
vec_slp_##TYPE1##_##TYPE2 (TYPE1 *restrict a, \
TYPE2 *restrict b, int n) \
{ \
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_slp_11_run.c b/gcc/testsuite/gcc.target/aarch64/sve_slp_11_run.c
index c302ef6fb76..aa49952b470 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_slp_11_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_slp_11_run.c
@@ -38,7 +38,7 @@
} \
}
-int
+int __attribute__ ((optimize (1)))
main (void)
{
TEST_ALL (HARNESS)
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_slp_12.c b/gcc/testsuite/gcc.target/aarch64/sve_slp_12.c
index 9afe7e59ef2..77bf7b72454 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_slp_12.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_slp_12.c
@@ -6,7 +6,7 @@
#define N1 (19 * 2)
#define VEC_PERM(TYPE) \
-void __attribute__ ((weak)) \
+void __attribute__ ((noinline, noclone)) \
vec_slp_##TYPE (TYPE *restrict a, TYPE *restrict b) \
{ \
for (int i = 0; i < N1; ++i) \
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_slp_12_run.c b/gcc/testsuite/gcc.target/aarch64/sve_slp_12_run.c
index 8c854d4207c..e926de602bd 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_slp_12_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_slp_12_run.c
@@ -46,7 +46,7 @@
} \
}
-int
+int __attribute__ ((optimize (1)))
main (void)
{
TEST_ALL (HARNESS)
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_slp_13.c b/gcc/testsuite/gcc.target/aarch64/sve_slp_13.c
index f3ecbd7adbc..ff3046e127d 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_slp_13.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_slp_13.c
@@ -5,7 +5,7 @@
#include <stdint.h>
#define VEC_PERM(TYPE) \
-TYPE __attribute__ ((weak)) \
+TYPE __attribute__ ((noinline, noclone)) \
vec_slp_##TYPE (TYPE *restrict a, int n) \
{ \
TYPE res = 0; \
@@ -26,6 +26,7 @@ vec_slp_##TYPE (TYPE *restrict a, int n) \
T (uint32_t) \
T (int64_t) \
T (uint64_t) \
+ T (_Float16) \
T (float) \
T (double)
@@ -35,7 +36,7 @@ TEST_ALL (VEC_PERM)
/* ??? We don't treat the uint loops as SLP. */
/* The loop should be fully-masked. */
/* { dg-final { scan-assembler-times {\tld1b\t} 2 { xfail *-*-* } } } */
-/* { dg-final { scan-assembler-times {\tld1h\t} 2 { xfail *-*-* } } } */
+/* { dg-final { scan-assembler-times {\tld1h\t} 3 { xfail *-*-* } } } */
/* { dg-final { scan-assembler-times {\tld1w\t} 3 { xfail *-*-* } } } */
/* { dg-final { scan-assembler-times {\tld1w\t} 2 } } */
/* { dg-final { scan-assembler-times {\tld1d\t} 3 { xfail *-*-* } } } */
@@ -43,7 +44,7 @@ TEST_ALL (VEC_PERM)
/* { dg-final { scan-assembler-not {\tldr} { xfail *-*-* } } } */
/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.b} 4 { xfail *-*-* } } } */
-/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.h} 4 { xfail *-*-* } } } */
+/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.h} 6 { xfail *-*-* } } } */
/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.s} 6 } } */
/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.d} 6 } } */
@@ -51,6 +52,7 @@ TEST_ALL (VEC_PERM)
/* { dg-final { scan-assembler-times {\tuaddv\td[0-9]+, p[0-7], z[0-9]+\.h\n} 2 { xfail *-*-* } } } */
/* { dg-final { scan-assembler-times {\tuaddv\td[0-9]+, p[0-7], z[0-9]+\.s\n} 2 } } */
/* { dg-final { scan-assembler-times {\tuaddv\td[0-9]+, p[0-7], z[0-9]+\.d\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tfadda\th[0-9]+, p[0-7], h[0-9]+, z[0-9]+\.h\n} 1 } } */
/* { dg-final { scan-assembler-times {\tfadda\ts[0-9]+, p[0-7], s[0-9]+, z[0-9]+\.s\n} 1 } } */
/* { dg-final { scan-assembler-times {\tfadda\td[0-9]+, p[0-7], d[0-9]+, z[0-9]+\.d\n} 1 } } */
/* { dg-final { scan-assembler-not {\tfadd\n} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_slp_13_run.c b/gcc/testsuite/gcc.target/aarch64/sve_slp_13_run.c
index 282f1ae2310..2824073cf14 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_slp_13_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_slp_13_run.c
@@ -1,5 +1,5 @@
/* { dg-do run { target aarch64_sve_hw } } */
-/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve -ffast-math" } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
#include "sve_slp_13.c"
@@ -21,7 +21,7 @@
__builtin_abort (); \
}
-int
+int __attribute__ ((optimize (1)))
main (void)
{
TEST_ALL (HARNESS)
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_slp_1_run.c b/gcc/testsuite/gcc.target/aarch64/sve_slp_1_run.c
index 6c1b38277ec..3971acde999 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_slp_1_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_slp_1_run.c
@@ -9,7 +9,10 @@
{ \
TYPE a[N], b[2] = { 3, 11 }; \
for (unsigned int i = 0; i < N; ++i) \
- a[i] = i * 2 + i % 5; \
+ { \
+ a[i] = i * 2 + i % 5; \
+ asm volatile ("" ::: "memory"); \
+ } \
vec_slp_##TYPE (a, b[0], b[1], N / 2); \
for (unsigned int i = 0; i < N; ++i) \
{ \
@@ -20,7 +23,7 @@
} \
}
-int
+int __attribute__ ((optimize (1)))
main (void)
{
TEST_ALL (HARNESS)
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_slp_2.c b/gcc/testsuite/gcc.target/aarch64/sve_slp_2.c
index 3e71596021f..ba3506ab4e4 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_slp_2.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_slp_2.c
@@ -4,7 +4,7 @@
#include <stdint.h>
#define VEC_PERM(TYPE) \
-TYPE __attribute__ ((weak)) \
+TYPE __attribute__ ((noinline, noclone)) \
vec_slp_##TYPE (TYPE *restrict a, int n) \
{ \
for (int i = 0; i < n; ++i) \
@@ -23,13 +23,14 @@ vec_slp_##TYPE (TYPE *restrict a, int n) \
T (uint32_t) \
T (int64_t) \
T (uint64_t) \
+ T (_Float16) \
T (float) \
T (double)
TEST_ALL (VEC_PERM)
/* { dg-final { scan-assembler-times {\tld1rh\tz[0-9]+\.h, } 2 } } */
-/* { dg-final { scan-assembler-times {\tld1rw\tz[0-9]+\.s, } 2 } } */
+/* { dg-final { scan-assembler-times {\tld1rw\tz[0-9]+\.s, } 3 } } */
/* { dg-final { scan-assembler-times {\tld1rd\tz[0-9]+\.d, } 5 } } */
/* { dg-final { scan-assembler-times {\tmov\tz[0-9]+\.d, #10\n} 2 } } */
/* { dg-final { scan-assembler-times {\tmov\tz[0-9]+\.d, #17\n} 2 } } */
@@ -39,14 +40,14 @@ TEST_ALL (VEC_PERM)
/* The loop should be fully-masked. */
/* { dg-final { scan-assembler-times {\tld1b\t} 2 } } */
/* { dg-final { scan-assembler-times {\tst1b\t} 2 } } */
-/* { dg-final { scan-assembler-times {\tld1h\t} 2 } } */
-/* { dg-final { scan-assembler-times {\tst1h\t} 2 } } */
+/* { dg-final { scan-assembler-times {\tld1h\t} 3 } } */
+/* { dg-final { scan-assembler-times {\tst1h\t} 3 } } */
/* { dg-final { scan-assembler-times {\tld1w\t} 3 } } */
/* { dg-final { scan-assembler-times {\tst1w\t} 3 } } */
/* { dg-final { scan-assembler-times {\tld1d\t} 3 } } */
/* { dg-final { scan-assembler-times {\tst1d\t} 3 } } */
/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.b} 4 } } */
-/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.h} 4 } } */
+/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.h} 6 } } */
/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.s} 6 } } */
/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.d} 6 } } */
/* { dg-final { scan-assembler-not {\tldr} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_slp_2_run.c b/gcc/testsuite/gcc.target/aarch64/sve_slp_2_run.c
index 7d4d5e8ca3d..c0411459b94 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_slp_2_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_slp_2_run.c
@@ -9,7 +9,10 @@
{ \
TYPE a[N], b[2] = { 10, 17 }; \
for (unsigned int i = 0; i < N; ++i) \
- a[i] = i * 2 + i % 5; \
+ { \
+ a[i] = i * 2 + i % 5; \
+ asm volatile ("" ::: "memory"); \
+ } \
vec_slp_##TYPE (a, N / 2); \
for (unsigned int i = 0; i < N; ++i) \
{ \
@@ -20,7 +23,7 @@
} \
}
-int
+int __attribute__ ((optimize (1)))
main (void)
{
TEST_ALL (HARNESS)
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_slp_3.c b/gcc/testsuite/gcc.target/aarch64/sve_slp_3.c
index 3ac0eebf422..326630f421f 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_slp_3.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_slp_3.c
@@ -4,7 +4,7 @@
#include <stdint.h>
#define VEC_PERM(TYPE) \
-TYPE __attribute__ ((weak)) \
+TYPE __attribute__ ((noinline, noclone)) \
vec_slp_##TYPE (TYPE *restrict a, int n) \
{ \
for (int i = 0; i < n; ++i) \
@@ -25,6 +25,7 @@ vec_slp_##TYPE (TYPE *restrict a, int n) \
T (uint32_t) \
T (int64_t) \
T (uint64_t) \
+ T (_Float16) \
T (float) \
T (double)
@@ -33,7 +34,7 @@ TEST_ALL (VEC_PERM)
/* 1 for each 8-bit type. */
/* { dg-final { scan-assembler-times {\tld1rw\tz[0-9]+\.s, } 2 } } */
/* 1 for each 16-bit type, 2 for each 32-bit type, and 4 for double. */
-/* { dg-final { scan-assembler-times {\tld1rd\tz[0-9]+\.d, } 12 } } */
+/* { dg-final { scan-assembler-times {\tld1rd\tz[0-9]+\.d, } 13 } } */
/* { dg-final { scan-assembler-times {\tmov\tz[0-9]+\.d, #41\n} 2 } } */
/* { dg-final { scan-assembler-times {\tmov\tz[0-9]+\.d, #25\n} 2 } } */
/* { dg-final { scan-assembler-times {\tmov\tz[0-9]+\.d, #31\n} 2 } } */
@@ -49,14 +50,14 @@ TEST_ALL (VEC_PERM)
and stores each. */
/* { dg-final { scan-assembler-times {\tld1b\t} 2 } } */
/* { dg-final { scan-assembler-times {\tst1b\t} 2 } } */
-/* { dg-final { scan-assembler-times {\tld1h\t} 2 } } */
-/* { dg-final { scan-assembler-times {\tst1h\t} 2 } } */
+/* { dg-final { scan-assembler-times {\tld1h\t} 3 } } */
+/* { dg-final { scan-assembler-times {\tst1h\t} 3 } } */
/* { dg-final { scan-assembler-times {\tld1w\t} 3 } } */
/* { dg-final { scan-assembler-times {\tst1w\t} 3 } } */
/* { dg-final { scan-assembler-times {\tld1d\t} 6 } } */
/* { dg-final { scan-assembler-times {\tst1d\t} 6 } } */
/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.b} 4 } } */
-/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.h} 4 } } */
+/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.h} 6 } } */
/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.s} 6 } } */
/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.d} 12 } } */
/* { dg-final { scan-assembler-not {\tldr} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_slp_3_run.c b/gcc/testsuite/gcc.target/aarch64/sve_slp_3_run.c
index 7306355b873..de33f41c2c1 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_slp_3_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_slp_3_run.c
@@ -9,7 +9,10 @@
{ \
TYPE a[N], b[4] = { 41, 25, 31, 62 }; \
for (unsigned int i = 0; i < N; ++i) \
- a[i] = i * 2 + i % 5; \
+ { \
+ a[i] = i * 2 + i % 5; \
+ asm volatile ("" ::: "memory"); \
+ } \
vec_slp_##TYPE (a, N / 4); \
for (unsigned int i = 0; i < N; ++i) \
{ \
@@ -20,7 +23,7 @@
} \
}
-int
+int __attribute__ ((optimize (1)))
main (void)
{
TEST_ALL (HARNESS)
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_slp_4.c b/gcc/testsuite/gcc.target/aarch64/sve_slp_4.c
index b0890fd934b..32c14ebe4bf 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_slp_4.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_slp_4.c
@@ -4,7 +4,7 @@
#include <stdint.h>
#define VEC_PERM(TYPE) \
-TYPE __attribute__ ((weak)) \
+TYPE __attribute__ ((noinline, noclone)) \
vec_slp_##TYPE (TYPE *restrict a, int n) \
{ \
for (int i = 0; i < n; ++i) \
@@ -29,6 +29,7 @@ vec_slp_##TYPE (TYPE *restrict a, int n) \
T (uint32_t) \
T (int64_t) \
T (uint64_t) \
+ T (_Float16) \
T (float) \
T (double)
@@ -36,7 +37,7 @@ TEST_ALL (VEC_PERM)
/* 1 for each 8-bit type, 2 for each 16-bit type, 4 for each 32-bit type
and 8 for double. */
-/* { dg-final { scan-assembler-times {\tld1rd\tz[0-9]+\.d, } 26 } } */
+/* { dg-final { scan-assembler-times {\tld1rd\tz[0-9]+\.d, } 28 } } */
/* { dg-final { scan-assembler-times {\tmov\tz[0-9]+\.d, #99\n} 2 } } */
/* { dg-final { scan-assembler-times {\tmov\tz[0-9]+\.d, #11\n} 2 } } */
/* { dg-final { scan-assembler-times {\tmov\tz[0-9]+\.d, #17\n} 2 } } */
@@ -55,21 +56,21 @@ TEST_ALL (VEC_PERM)
ZIP1 ZIP1 ZIP1 ZIP1 (4 ZIP2s optimized away)
ZIP1 ZIP2 ZIP1 ZIP2
ZIP1 ZIP2 ZIP1 ZIP2. */
-/* { dg-final { scan-assembler-times {\tzip1\tz[0-9]+\.d, z[0-9]+\.d, z[0-9]+\.d\n} 35 } } */
+/* { dg-final { scan-assembler-times {\tzip1\tz[0-9]+\.d, z[0-9]+\.d, z[0-9]+\.d\n} 36 } } */
/* { dg-final { scan-assembler-times {\tzip2\tz[0-9]+\.d, z[0-9]+\.d, z[0-9]+\.d\n} 15 } } */
/* The loop should be fully-masked. The 32-bit types need two loads
and stores each and the 64-bit types need four. */
/* { dg-final { scan-assembler-times {\tld1b\t} 2 } } */
/* { dg-final { scan-assembler-times {\tst1b\t} 2 } } */
-/* { dg-final { scan-assembler-times {\tld1h\t} 2 } } */
-/* { dg-final { scan-assembler-times {\tst1h\t} 2 } } */
+/* { dg-final { scan-assembler-times {\tld1h\t} 3 } } */
+/* { dg-final { scan-assembler-times {\tst1h\t} 3 } } */
/* { dg-final { scan-assembler-times {\tld1w\t} 6 } } */
/* { dg-final { scan-assembler-times {\tst1w\t} 6 } } */
/* { dg-final { scan-assembler-times {\tld1d\t} 12 } } */
/* { dg-final { scan-assembler-times {\tst1d\t} 12 } } */
/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.b} 4 } } */
-/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.h} 4 } } */
+/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.h} 6 } } */
/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.s} 12 } } */
/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.d} 24 } } */
/* { dg-final { scan-assembler-not {\tldr} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_slp_4_run.c b/gcc/testsuite/gcc.target/aarch64/sve_slp_4_run.c
index 2eb2a5ff07e..e0fe656859d 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_slp_4_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_slp_4_run.c
@@ -9,7 +9,10 @@
{ \
TYPE a[N], b[8] = { 99, 11, 17, 80, 63, 37, 24, 81 }; \
for (unsigned int i = 0; i < N; ++i) \
- a[i] = i * 2 + i % 5; \
+ { \
+ a[i] = i * 2 + i % 5; \
+ asm volatile ("" ::: "memory"); \
+ } \
vec_slp_##TYPE (a, N / 8); \
for (unsigned int i = 0; i < N; ++i) \
{ \
@@ -20,7 +23,7 @@
} \
}
-int
+int __attribute__ ((optimize (1)))
main (void)
{
TEST_ALL (HARNESS)
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_slp_5.c b/gcc/testsuite/gcc.target/aarch64/sve_slp_5.c
index 0f8cf624e20..e0bacb0cad8 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_slp_5.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_slp_5.c
@@ -4,7 +4,7 @@
#include <stdint.h>
#define VEC_PERM(TYPE) \
-void __attribute__ ((weak)) \
+void __attribute__ ((noinline, noclone)) \
vec_slp_##TYPE (TYPE *restrict a, TYPE *restrict b, int n) \
{ \
TYPE x0 = b[0]; \
@@ -27,6 +27,7 @@ vec_slp_##TYPE (TYPE *restrict a, TYPE *restrict b, int n) \
T (uint32_t) \
T (int64_t) \
T (uint64_t) \
+ T (_Float16) \
T (float) \
T (double)
@@ -37,9 +38,9 @@ TEST_ALL (VEC_PERM)
/* ??? At present we don't treat the int8_t and int16_t loops as
reductions. */
/* { dg-final { scan-assembler-times {\tld1b\t} 2 { xfail *-*-* } } } */
-/* { dg-final { scan-assembler-times {\tld1h\t} 2 { xfail *-*-* } } } */
+/* { dg-final { scan-assembler-times {\tld1h\t} 3 { xfail *-*-* } } } */
/* { dg-final { scan-assembler-times {\tld1b\t} 1 } } */
-/* { dg-final { scan-assembler-times {\tld1h\t} 1 } } */
+/* { dg-final { scan-assembler-times {\tld1h\t} 2 } } */
/* { dg-final { scan-assembler-times {\tld1w\t} 3 } } */
/* { dg-final { scan-assembler-times {\tld1d\t} 3 { xfail *-*-* } } } */
/* { dg-final { scan-assembler-not {\tld2b\t} } } */
@@ -52,12 +53,14 @@ TEST_ALL (VEC_PERM)
/* { dg-final { scan-assembler-times {\tuaddv\td[0-9]+, p[0-7], z[0-9]+\.h} 2 } } */
/* { dg-final { scan-assembler-times {\tuaddv\td[0-9]+, p[0-7], z[0-9]+\.s} 4 } } */
/* { dg-final { scan-assembler-times {\tuaddv\td[0-9]+, p[0-7], z[0-9]+\.d} 4 } } */
+/* { dg-final { scan-assembler-times {\tfaddv\th[0-9]+, p[0-7], z[0-9]+\.h} 2 } } */
/* { dg-final { scan-assembler-times {\tfaddv\ts[0-9]+, p[0-7], z[0-9]+\.s} 2 } } */
/* { dg-final { scan-assembler-times {\tfaddv\td[0-9]+, p[0-7], z[0-9]+\.d} 2 } } */
-/* Should be 4, if we used reductions for int8_t and int16_t. */
+/* Should be 4 and 6 respectively, if we used reductions for int8_t and
+ int16_t. */
/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.b} 2 } } */
-/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.h} 2 } } */
+/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.h} 4 } } */
/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.s} 6 } } */
/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.d} 6 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_slp_5_run.c b/gcc/testsuite/gcc.target/aarch64/sve_slp_5_run.c
index 476b40cb0e9..bb5421700da 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_slp_5_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_slp_5_run.c
@@ -5,25 +5,30 @@
#define N (141 * 2)
-#define HARNESS(TYPE) \
- { \
- TYPE a[N], b[2] = { 40, 22 }; \
- for (unsigned int i = 0; i < N; ++i) \
- a[i] = i * 2 + i % 5; \
- vec_slp_##TYPE (a, b, N / 2); \
- TYPE x0 = 40; \
- TYPE x1 = 22; \
- for (unsigned int i = 0; i < N; i += 2) \
- { \
- x0 += a[i]; \
- x1 += a[i + 1]; \
- asm volatile (""); \
- } \
- if (x0 != b[0] || x1 != b[1]) \
- __builtin_abort (); \
+#define HARNESS(TYPE) \
+ { \
+ TYPE a[N], b[2] = { 40, 22 }; \
+ for (unsigned int i = 0; i < N; ++i) \
+ { \
+ a[i] = i * 2 + i % 5; \
+ asm volatile ("" ::: "memory"); \
+ } \
+ vec_slp_##TYPE (a, b, N / 2); \
+ TYPE x0 = 40; \
+ TYPE x1 = 22; \
+ for (unsigned int i = 0; i < N; i += 2) \
+ { \
+ x0 += a[i]; \
+ x1 += a[i + 1]; \
+ asm volatile ("" ::: "memory"); \
+ } \
+ /* _Float16 isn't precise enough for this. */ \
+ if ((TYPE) 0x1000 + 1 != (TYPE) 0x1000 \
+ && (x0 != b[0] || x1 != b[1])) \
+ __builtin_abort (); \
}
-int
+int __attribute__ ((optimize (1)))
main (void)
{
TEST_ALL (HARNESS)
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_slp_6.c b/gcc/testsuite/gcc.target/aarch64/sve_slp_6.c
index 8cdceb57dc6..b3bdb04e2ab 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_slp_6.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_slp_6.c
@@ -4,7 +4,7 @@
#include <stdint.h>
#define VEC_PERM(TYPE) \
-void __attribute__ ((weak)) \
+void __attribute__ ((noinline, noclone)) \
vec_slp_##TYPE (TYPE *restrict a, TYPE *restrict b, int n) \
{ \
TYPE x0 = b[0]; \
@@ -30,6 +30,7 @@ vec_slp_##TYPE (TYPE *restrict a, TYPE *restrict b, int n) \
T (uint32_t) \
T (int64_t) \
T (uint64_t) \
+ T (_Float16) \
T (float) \
T (double)
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_slp_6_run.c b/gcc/testsuite/gcc.target/aarch64/sve_slp_6_run.c
index a9ca327c907..e2ad116f91d 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_slp_6_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_slp_6_run.c
@@ -5,27 +5,32 @@
#define N (77 * 3)
-#define HARNESS(TYPE) \
- { \
- TYPE a[N], b[3] = { 40, 22, 75 }; \
- for (unsigned int i = 0; i < N; ++i) \
- a[i] = i * 2 + i % 5; \
- vec_slp_##TYPE (a, b, N / 3); \
- TYPE x0 = 40; \
- TYPE x1 = 22; \
- TYPE x2 = 75; \
- for (unsigned int i = 0; i < N; i += 3) \
- { \
- x0 += a[i]; \
- x1 += a[i + 1]; \
- x2 += a[i + 2]; \
- asm volatile (""); \
- } \
- if (x0 != b[0] || x1 != b[1] || x2 != b[2]) \
- __builtin_abort (); \
+#define HARNESS(TYPE) \
+ { \
+ TYPE a[N], b[3] = { 40, 22, 75 }; \
+ for (unsigned int i = 0; i < N; ++i) \
+ { \
+ a[i] = i * 2 + i % 5; \
+ asm volatile ("" ::: "memory"); \
+ } \
+ vec_slp_##TYPE (a, b, N / 3); \
+ TYPE x0 = 40; \
+ TYPE x1 = 22; \
+ TYPE x2 = 75; \
+ for (unsigned int i = 0; i < N; i += 3) \
+ { \
+ x0 += a[i]; \
+ x1 += a[i + 1]; \
+ x2 += a[i + 2]; \
+ asm volatile ("" ::: "memory"); \
+ } \
+ /* _Float16 isn't precise enough for this. */ \
+ if ((TYPE) 0x1000 + 1 != (TYPE) 0x1000 \
+ && (x0 != b[0] || x1 != b[1] || x2 != b[2])) \
+ __builtin_abort (); \
}
-int
+int __attribute__ ((optimize (1)))
main (void)
{
TEST_ALL (HARNESS)
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_slp_7.c b/gcc/testsuite/gcc.target/aarch64/sve_slp_7.c
index 4dc9fafcdde..372c7575cdb 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_slp_7.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_slp_7.c
@@ -4,7 +4,7 @@
#include <stdint.h>
#define VEC_PERM(TYPE) \
-void __attribute__ ((weak)) \
+void __attribute__ ((noinline, noclone)) \
vec_slp_##TYPE (TYPE *restrict a, TYPE *restrict b, int n) \
{ \
TYPE x0 = b[0]; \
@@ -33,6 +33,7 @@ vec_slp_##TYPE (TYPE *restrict a, TYPE *restrict b, int n) \
T (uint32_t) \
T (int64_t) \
T (uint64_t) \
+ T (_Float16) \
T (float) \
T (double)
@@ -45,9 +46,9 @@ TEST_ALL (VEC_PERM)
/* ??? At present we don't treat the int8_t and int16_t loops as
reductions. */
/* { dg-final { scan-assembler-times {\tld1b\t} 2 { xfail *-*-* } } } */
-/* { dg-final { scan-assembler-times {\tld1h\t} 2 { xfail *-*-* } } } */
+/* { dg-final { scan-assembler-times {\tld1h\t} 3 { xfail *-*-* } } } */
/* { dg-final { scan-assembler-times {\tld1b\t} 1 } } */
-/* { dg-final { scan-assembler-times {\tld1h\t} 1 } } */
+/* { dg-final { scan-assembler-times {\tld1h\t} 2 } } */
/* { dg-final { scan-assembler-times {\tld1w\t} 3 } } */
/* { dg-final { scan-assembler-times {\tld4d\t} 3 } } */
/* { dg-final { scan-assembler-not {\tld4b\t} } } */
@@ -60,12 +61,14 @@ TEST_ALL (VEC_PERM)
/* { dg-final { scan-assembler-times {\tuaddv\td[0-9]+, p[0-7], z[0-9]+\.h} 4 } } */
/* { dg-final { scan-assembler-times {\tuaddv\td[0-9]+, p[0-7], z[0-9]+\.s} 8 } } */
/* { dg-final { scan-assembler-times {\tuaddv\td[0-9]+, p[0-7], z[0-9]+\.d} 8 } } */
+/* { dg-final { scan-assembler-times {\tfaddv\th[0-9]+, p[0-7], z[0-9]+\.h} 4 } } */
/* { dg-final { scan-assembler-times {\tfaddv\ts[0-9]+, p[0-7], z[0-9]+\.s} 4 } } */
/* { dg-final { scan-assembler-times {\tfaddv\td[0-9]+, p[0-7], z[0-9]+\.d} 4 } } */
-/* Should be 4, if we used reductions for int8_t and int16_t. */
+/* Should be 4 and 6 respectively, if we used reductions for int8_t and
+ int16_t. */
/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.b} 2 } } */
-/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.h} 2 } } */
+/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.h} 4 } } */
/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.s} 6 } } */
/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.d} 6 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_slp_7_run.c b/gcc/testsuite/gcc.target/aarch64/sve_slp_7_run.c
index 12446972fde..5a8bf99bc5b 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_slp_7_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_slp_7_run.c
@@ -5,29 +5,34 @@
#define N (54 * 4)
-#define HARNESS(TYPE) \
- { \
- TYPE a[N], b[4] = { 40, 22, 75, 19 }; \
- for (unsigned int i = 0; i < N; ++i) \
- a[i] = i * 2 + i % 5; \
- vec_slp_##TYPE (a, b, N / 4); \
- TYPE x0 = 40; \
- TYPE x1 = 22; \
- TYPE x2 = 75; \
- TYPE x3 = 19; \
- for (unsigned int i = 0; i < N; i += 4) \
- { \
- x0 += a[i]; \
- x1 += a[i + 1]; \
- x2 += a[i + 2]; \
- x3 += a[i + 3]; \
- asm volatile (""); \
- } \
- if (x0 != b[0] || x1 != b[1] || x2 != b[2] || x3 != b[3]) \
- __builtin_abort (); \
+#define HARNESS(TYPE) \
+ { \
+ TYPE a[N], b[4] = { 40, 22, 75, 19 }; \
+ for (unsigned int i = 0; i < N; ++i) \
+ { \
+ a[i] = i * 2 + i % 5; \
+ asm volatile ("" ::: "memory"); \
+ } \
+ vec_slp_##TYPE (a, b, N / 4); \
+ TYPE x0 = 40; \
+ TYPE x1 = 22; \
+ TYPE x2 = 75; \
+ TYPE x3 = 19; \
+ for (unsigned int i = 0; i < N; i += 4) \
+ { \
+ x0 += a[i]; \
+ x1 += a[i + 1]; \
+ x2 += a[i + 2]; \
+ x3 += a[i + 3]; \
+ asm volatile ("" ::: "memory"); \
+ } \
+ /* _Float16 isn't precise enough for this. */ \
+ if ((TYPE) 0x1000 + 1 != (TYPE) 0x1000 \
+ && (x0 != b[0] || x1 != b[1] || x2 != b[2] || x3 != b[3])) \
+ __builtin_abort (); \
}
-int
+int __attribute__ ((optimize (1)))
main (void)
{
TEST_ALL (HARNESS)
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_slp_8.c b/gcc/testsuite/gcc.target/aarch64/sve_slp_8.c
index caae4528d82..d57457fbef0 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_slp_8.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_slp_8.c
@@ -4,7 +4,7 @@
#include <stdint.h>
#define VEC_PERM(TYPE) \
-void __attribute__ ((weak)) \
+void __attribute__ ((noinline, noclone)) \
vec_slp_##TYPE (TYPE *restrict a, TYPE *restrict b, int n) \
{ \
for (int i = 0; i < n; ++i) \
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_slp_8_run.c b/gcc/testsuite/gcc.target/aarch64/sve_slp_8_run.c
index 2717ca62de1..09a6d648c52 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_slp_8_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_slp_8_run.c
@@ -37,7 +37,7 @@
} \
}
-int
+int __attribute__ ((optimize (1)))
main (void)
{
TEST_ALL (HARNESS)
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_slp_9.c b/gcc/testsuite/gcc.target/aarch64/sve_slp_9.c
index af06270b6f2..65e1cb8f044 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_slp_9.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_slp_9.c
@@ -4,7 +4,7 @@
#include <stdint.h>
#define VEC_PERM(TYPE1, TYPE2) \
-void __attribute__ ((weak)) \
+void __attribute__ ((noinline, noclone)) \
vec_slp_##TYPE1##_##TYPE2 (TYPE1 *restrict a, \
TYPE2 *restrict b, int n) \
{ \
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_slp_9_run.c b/gcc/testsuite/gcc.target/aarch64/sve_slp_9_run.c
index 0bde3b6ea03..3e69a48580b 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_slp_9_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_slp_9_run.c
@@ -32,7 +32,7 @@
} \
}
-int
+int __attribute__ ((noinline, noclone))
main (void)
{
TEST_ALL (HARNESS)
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_speculative_3.c b/gcc/testsuite/gcc.target/aarch64/sve_speculative_3.c
index 25f3047444e..db35711a193 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_speculative_3.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_speculative_3.c
@@ -21,6 +21,6 @@ FPTYPE spec_fp_loop_##ARGTYPE##INDUCTYPE (ARGTYPE mask, ARGTYPE limit)\
SPEC_FP_LOOP (uint32_t, uint32_t, double)
-/* { dg-final { scan-tree-dump-times "Not vectorized: Multiple ncopies not supported" 1 "vect" } } */
+/* { dg-final { scan-tree-dump-times "not vectorized: ncopies is greater than 1" 1 "vect" } } */
/* { dg-final { scan-assembler-not "brka\tp\[0-9\]*.b, p\[0-9\]*\/z, p\[0-9\]*.b" } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_speculative_6.c b/gcc/testsuite/gcc.target/aarch64/sve_speculative_6.c
index 4765b22f014..1b71687a257 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_speculative_6.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_speculative_6.c
@@ -41,4 +41,4 @@ SPEC_LOOP (uint64_t, uint16_t)
SPEC_LOOP (uint64_t, uint32_t)
/* { dg-final { scan-tree-dump-not "LOOP VECTORIZED" "vect" } } */
-/* { dg-final { scan-tree-dump "Speculative loop mask load/stores not supported" "vect" } } */
+/* { dg-final { scan-tree-dump "speculative mask loads not supported" "vect" } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_store_scalar_offset_1.c b/gcc/testsuite/gcc.target/aarch64/sve_store_scalar_offset_1.c
index 3e7367cd9fa..1a48f7b6080 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_store_scalar_offset_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_store_scalar_offset_1.c
@@ -3,50 +3,50 @@
#include <stdint.h>
-typedef int64_t v4di __attribute__((vector_size(32)));
-typedef int32_t v8si __attribute__((vector_size(32)));
-typedef int16_t v16hi __attribute__((vector_size(32)));
-typedef int8_t v32qi __attribute__((vector_size(32)));
+typedef int64_t vnx2di __attribute__((vector_size(32)));
+typedef int32_t vnx4si __attribute__((vector_size(32)));
+typedef int16_t vnx8hi __attribute__((vector_size(32)));
+typedef int8_t vnx16qi __attribute__((vector_size(32)));
void sve_store_64_z_lsl (uint64_t *a, unsigned long i)
{
- asm volatile ("" : "=w" (*(v4di *) &a[i]));
+ asm volatile ("" : "=w" (*(vnx2di *) &a[i]));
}
void sve_store_64_s_lsl (int64_t *a, signed long i)
{
- asm volatile ("" : "=w" (*(v4di *) &a[i]));
+ asm volatile ("" : "=w" (*(vnx2di *) &a[i]));
}
void sve_store_32_z_lsl (uint32_t *a, unsigned long i)
{
- asm volatile ("" : "=w" (*(v8si *) &a[i]));
+ asm volatile ("" : "=w" (*(vnx4si *) &a[i]));
}
void sve_store_32_s_lsl (int32_t *a, signed long i)
{
- asm volatile ("" : "=w" (*(v8si *) &a[i]));
+ asm volatile ("" : "=w" (*(vnx4si *) &a[i]));
}
void sve_store_16_z_lsl (uint16_t *a, unsigned long i)
{
- asm volatile ("" : "=w" (*(v16hi *) &a[i]));
+ asm volatile ("" : "=w" (*(vnx8hi *) &a[i]));
}
void sve_store_16_s_lsl (int16_t *a, signed long i)
{
- asm volatile ("" : "=w" (*(v16hi *) &a[i]));
+ asm volatile ("" : "=w" (*(vnx8hi *) &a[i]));
}
/* ??? The other argument order leads to a redundant move. */
void sve_store_8_z (unsigned long i, uint8_t *a)
{
- asm volatile ("" : "=w" (*(v32qi *) &a[i]));
+ asm volatile ("" : "=w" (*(vnx16qi *) &a[i]));
}
void sve_store_8_s (signed long i, int8_t *a)
{
- asm volatile ("" : "=w" (*(v32qi *) &a[i]));
+ asm volatile ("" : "=w" (*(vnx16qi *) &a[i]));
}
/* { dg-final { scan-assembler-times {\tst1d\tz0\.d, p[0-7], \[x0, x1, lsl 3\]\n} 2 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_strided_load_1.c b/gcc/testsuite/gcc.target/aarch64/sve_strided_load_1.c
new file mode 100644
index 00000000000..b940ba9d4de
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve_strided_load_1.c
@@ -0,0 +1,40 @@
+/* { dg-do assemble } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
+
+#include <stdint.h>
+
+#ifndef INDEX8
+#define INDEX8 int8_t
+#define INDEX16 int16_t
+#define INDEX32 int32_t
+#define INDEX64 int64_t
+#endif
+
+#define TEST_LOOP(DATA_TYPE, BITS) \
+ void __attribute__ ((noinline, noclone)) \
+ f_##DATA_TYPE##_##BITS (DATA_TYPE *restrict dest, \
+ DATA_TYPE *restrict src, \
+ INDEX##BITS stride, INDEX##BITS n) \
+ { \
+ for (INDEX##BITS i = 0; i < n; ++i) \
+ dest[i] += src[i * stride]; \
+ }
+
+#define TEST_TYPE(T, DATA_TYPE) \
+ T (DATA_TYPE, 8) \
+ T (DATA_TYPE, 16) \
+ T (DATA_TYPE, 32) \
+ T (DATA_TYPE, 64)
+
+#define TEST_ALL(T) \
+ TEST_TYPE (T, int32_t) \
+ TEST_TYPE (T, uint32_t) \
+ TEST_TYPE (T, float) \
+ TEST_TYPE (T, int64_t) \
+ TEST_TYPE (T, uint64_t) \
+ TEST_TYPE (T, double)
+
+TEST_ALL (TEST_LOOP)
+
+/* { dg-final { scan-assembler-times {\tld1w\tz[0-9]+\.s, p[0-7]/z, \[x[0-9]+, z[0-9]+.s, sxtw 2\]\n} 9 } } */
+/* { dg-final { scan-assembler-times {\tld1d\tz[0-9]+\.d, p[0-7]/z, \[x[0-9]+, z[0-9]+.d, lsl 3\]\n} 12 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_strided_load_2.c b/gcc/testsuite/gcc.target/aarch64/sve_strided_load_2.c
new file mode 100644
index 00000000000..a834989091d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve_strided_load_2.c
@@ -0,0 +1,18 @@
+/* { dg-do assemble } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
+
+#define INDEX8 uint8_t
+#define INDEX16 uint16_t
+#define INDEX32 uint32_t
+#define INDEX64 uint64_t
+
+#include "sve_strided_load_1.c"
+
+/* 8 and 16 bits are signed because the multiplication promotes to int.
+ Using uxtw for all 9 would be OK. */
+/* { dg-final { scan-assembler-times {\tld1w\tz[0-9]+\.s, p[0-7]/z, \[x[0-9]+, z[0-9]+.s, sxtw 2\]\n} 6 } } */
+/* The 32-bit loop needs to honor the defined overflow in uint32_t,
+ so we vectorize the offset calculation. This means that the
+ 64-bit version needs two copies. */
+/* { dg-final { scan-assembler-times {\tld1w\tz[0-9]+\.s, p[0-7]/z, \[x[0-9]+, z[0-9]+.s, uxtw 2\]\n} 3 } } */
+/* { dg-final { scan-assembler-times {\tld1d\tz[0-9]+\.d, p[0-7]/z, \[x[0-9]+, z[0-9]+.d, lsl 3\]\n} 15 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_strided_load_3.c b/gcc/testsuite/gcc.target/aarch64/sve_strided_load_3.c
new file mode 100644
index 00000000000..8f0bfdd4bb8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve_strided_load_3.c
@@ -0,0 +1,32 @@
+/* { dg-do assemble } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
+
+#include <stdint.h>
+
+#define TEST_LOOP(DATA_TYPE, OTHER_TYPE) \
+ void __attribute__ ((noinline, noclone)) \
+ f_##DATA_TYPE##_##BITS (DATA_TYPE *restrict dest, \
+ DATA_TYPE *restrict src, \
+ OTHER_TYPE *restrict other, \
+ OTHER_TYPE mask, \
+ int stride, int n) \
+ { \
+ for (int i = 0; i < n; ++i) \
+ dest[i] = src[i * stride] + (OTHER_TYPE) (other[i] | mask); \
+ }
+
+#define TEST_ALL(T) \
+ T (int32_t, int16_t) \
+ T (uint32_t, int16_t) \
+ T (float, int16_t) \
+ T (int64_t, int32_t) \
+ T (uint64_t, int32_t) \
+ T (double, int32_t)
+
+TEST_ALL (TEST_LOOP)
+
+/* { dg-final { scan-assembler-times {\tld1h\tz[0-9]+\.h, p[0-7]/z, \[x[0-9]+, x[0-9]+, lsl 1\]\n} 3 } } */
+/* { dg-final { scan-assembler-times {\tld1w\tz[0-9]+\.s, p[0-7]/z, \[x[0-9]+, z[0-9]+.s, sxtw 2\]\n} 6 } } */
+
+/* { dg-final { scan-assembler-times {\tld1w\tz[0-9]+\.s, p[0-7]/z, \[x[0-9]+, x[0-9]+, lsl 2\]\n} 3 } } */
+/* { dg-final { scan-assembler-times {\tld1d\tz[0-9]+\.d, p[0-7]/z, \[x[0-9]+, z[0-9]+.d, lsl 3\]\n} 6 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_strided_load_4.c b/gcc/testsuite/gcc.target/aarch64/sve_strided_load_4.c
new file mode 100644
index 00000000000..b7dc12fb3c7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve_strided_load_4.c
@@ -0,0 +1,33 @@
+/* { dg-do assemble } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
+
+#include <stdint.h>
+
+#define TEST_LOOP(DATA_TYPE, NAME, SCALE) \
+ void __attribute__ ((noinline, noclone)) \
+ f_##DATA_TYPE##_##NAME (DATA_TYPE *restrict dest, \
+ DATA_TYPE *restrict src, int n) \
+ { \
+ for (int i = 0; i < n; ++i) \
+ dest[i] += src[i * SCALE]; \
+ }
+
+#define TEST_TYPE(T, DATA_TYPE) \
+ T (DATA_TYPE, 5, 5) \
+ T (DATA_TYPE, 7, 7) \
+ T (DATA_TYPE, 11, 11) \
+ T (DATA_TYPE, 200, 200) \
+ T (DATA_TYPE, m100, -100)
+
+#define TEST_ALL(T) \
+ TEST_TYPE (T, int32_t) \
+ TEST_TYPE (T, uint32_t) \
+ TEST_TYPE (T, float) \
+ TEST_TYPE (T, int64_t) \
+ TEST_TYPE (T, uint64_t) \
+ TEST_TYPE (T, double)
+
+TEST_ALL (TEST_LOOP)
+
+/* { dg-final { scan-assembler-times {\tld1w\tz[0-9]+\.s, p[0-7]/z, \[x[0-9]+, z[0-9]+.s, sxtw 2\]\n} 15 } } */
+/* { dg-final { scan-assembler-times {\tld1d\tz[0-9]+\.d, p[0-7]/z, \[x[0-9]+, z[0-9]+.d, lsl 3\]\n} 15 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_strided_load_5.c b/gcc/testsuite/gcc.target/aarch64/sve_strided_load_5.c
new file mode 100644
index 00000000000..6cbcc963595
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve_strided_load_5.c
@@ -0,0 +1,34 @@
+/* { dg-do assemble } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve -msve-vector-bits=256 --save-temps" } */
+
+#include <stdint.h>
+
+#define TEST_LOOP(DATA_TYPE, NAME, SCALE) \
+ void __attribute__ ((noinline, noclone)) \
+ f_##DATA_TYPE##_##NAME (DATA_TYPE *restrict dest, \
+ DATA_TYPE *restrict src, long n) \
+ { \
+ for (long i = 0; i < n; ++i) \
+ dest[i] += src[i * SCALE]; \
+ }
+
+#define TEST_TYPE(T, DATA_TYPE) \
+ T (DATA_TYPE, 5, 5) \
+ T (DATA_TYPE, 7, 7) \
+ T (DATA_TYPE, 11, 11) \
+ T (DATA_TYPE, 200, 200) \
+ T (DATA_TYPE, m100, -100)
+
+#define TEST_ALL(T) \
+ TEST_TYPE (T, int32_t) \
+ TEST_TYPE (T, uint32_t) \
+ TEST_TYPE (T, float) \
+ TEST_TYPE (T, int64_t) \
+ TEST_TYPE (T, uint64_t) \
+ TEST_TYPE (T, double)
+
+TEST_ALL (TEST_LOOP)
+
+/* { dg-final { scan-assembler-times {\tld1w\tz[0-9]+\.s, p[0-7]/z, \[x[0-9]+, z[0-9]+.s, uxtw\]\n} 12 } } */
+/* { dg-final { scan-assembler-times {\tld1w\tz[0-9]+\.s, p[0-7]/z, \[x[0-9]+, z[0-9]+.s, sxtw\]\n} 3 } } */
+/* { dg-final { scan-assembler-times {\tld1d\tz[0-9]+\.d, p[0-7]/z, \[x[0-9]+, z[0-9]+.d\]\n} 15 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_strided_load_6.c b/gcc/testsuite/gcc.target/aarch64/sve_strided_load_6.c
new file mode 100644
index 00000000000..aaf743b3d82
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve_strided_load_6.c
@@ -0,0 +1,7 @@
+/* { dg-do assemble } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve -msve-vector-bits=scalable --save-temps" } */
+
+#include "sve_strided_load_5.c"
+
+/* { dg-final { scan-assembler-not {\[x[0-9]+, z[0-9]+\.s} } } */
+/* { dg-final { scan-assembler-times {\tld1d\tz[0-9]+\.d, p[0-7]/z, \[x[0-9]+, z[0-9]+.d\]\n} 15 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_strided_load_7.c b/gcc/testsuite/gcc.target/aarch64/sve_strided_load_7.c
new file mode 100644
index 00000000000..ddf6667e8c1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve_strided_load_7.c
@@ -0,0 +1,34 @@
+/* { dg-do assemble } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
+
+#include <stdint.h>
+
+#define TEST_LOOP(DATA_TYPE, NAME, SCALE) \
+ void __attribute__ ((noinline, noclone)) \
+ f_##DATA_TYPE##_##NAME (DATA_TYPE *restrict dest, \
+ DATA_TYPE *restrict src) \
+ { \
+ for (long i = 0; i < 1000; ++i) \
+ dest[i] += src[i * SCALE]; \
+ }
+
+#define TEST_TYPE(T, DATA_TYPE) \
+ T (DATA_TYPE, 5, 5) \
+ T (DATA_TYPE, 7, 7) \
+ T (DATA_TYPE, 11, 11) \
+ T (DATA_TYPE, 200, 200) \
+ T (DATA_TYPE, m100, -100)
+
+#define TEST_ALL(T) \
+ TEST_TYPE (T, int32_t) \
+ TEST_TYPE (T, uint32_t) \
+ TEST_TYPE (T, float) \
+ TEST_TYPE (T, int64_t) \
+ TEST_TYPE (T, uint64_t) \
+ TEST_TYPE (T, double)
+
+TEST_ALL (TEST_LOOP)
+
+/* { dg-final { scan-assembler-times {\tld1w\tz[0-9]+\.s, p[0-7]/z, \[x[0-9]+, z[0-9]+.s, uxtw\]\n} 12 } } */
+/* { dg-final { scan-assembler-times {\tld1w\tz[0-9]+\.s, p[0-7]/z, \[x[0-9]+, z[0-9]+.s, sxtw\]\n} 3 } } */
+/* { dg-final { scan-assembler-times {\tld1d\tz[0-9]+\.d, p[0-7]/z, \[x[0-9]+, z[0-9]+.d\]\n} 15 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_strided_load_8.c b/gcc/testsuite/gcc.target/aarch64/sve_strided_load_8.c
new file mode 100644
index 00000000000..788aeb08df2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve_strided_load_8.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
+
+void
+foo (double *x, int m)
+{
+ for (int i = 0; i < 256; ++i)
+ x[i * m] += x[i * m];
+}
+
+/* { dg-final { scan-assembler-times {\tcbz\tw1,} 1 } } */
+/* { dg-final { scan-assembler-times {\tld1d\tz[0-9]+\.d, } 1 } } */
+/* { dg-final { scan-assembler-times {\tst1d\tz[0-9]+\.d, } 1 } } */
+/* { dg-final { scan-assembler-times {\tldr\t} 1 } } */
+/* { dg-final { scan-assembler-times {\tstr\t} 1 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_strided_store_1.c b/gcc/testsuite/gcc.target/aarch64/sve_strided_store_1.c
new file mode 100644
index 00000000000..4f84b3fdec5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve_strided_store_1.c
@@ -0,0 +1,40 @@
+/* { dg-do assemble } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
+
+#include <stdint.h>
+
+#ifndef INDEX8
+#define INDEX8 int8_t
+#define INDEX16 int16_t
+#define INDEX32 int32_t
+#define INDEX64 int64_t
+#endif
+
+#define TEST_LOOP(DATA_TYPE, BITS) \
+ void __attribute__ ((noinline, noclone)) \
+ f_##DATA_TYPE##_##BITS (DATA_TYPE *restrict dest, \
+ DATA_TYPE *restrict src, \
+ INDEX##BITS stride, INDEX##BITS n) \
+ { \
+ for (INDEX##BITS i = 0; i < n; ++i) \
+ dest[i * stride] = src[i] + 1; \
+ }
+
+#define TEST_TYPE(T, DATA_TYPE) \
+ T (DATA_TYPE, 8) \
+ T (DATA_TYPE, 16) \
+ T (DATA_TYPE, 32) \
+ T (DATA_TYPE, 64)
+
+#define TEST_ALL(T) \
+ TEST_TYPE (T, int32_t) \
+ TEST_TYPE (T, uint32_t) \
+ TEST_TYPE (T, float) \
+ TEST_TYPE (T, int64_t) \
+ TEST_TYPE (T, uint64_t) \
+ TEST_TYPE (T, double)
+
+TEST_ALL (TEST_LOOP)
+
+/* { dg-final { scan-assembler-times {\tst1w\tz[0-9]+\.s, p[0-7], \[x[0-9]+, z[0-9]+.s, sxtw 2\]\n} 9 } } */
+/* { dg-final { scan-assembler-times {\tst1d\tz[0-9]+\.d, p[0-7], \[x[0-9]+, z[0-9]+.d, lsl 3\]\n} 12 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_strided_store_2.c b/gcc/testsuite/gcc.target/aarch64/sve_strided_store_2.c
new file mode 100644
index 00000000000..1a8df604ead
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve_strided_store_2.c
@@ -0,0 +1,18 @@
+/* { dg-do assemble } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
+
+#define INDEX8 uint8_t
+#define INDEX16 uint16_t
+#define INDEX32 uint32_t
+#define INDEX64 uint64_t
+
+#include "sve_strided_store_1.c"
+
+/* 8 and 16 bits are signed because the multiplication promotes to int.
+ Using uxtw for all 9 would be OK. */
+/* { dg-final { scan-assembler-times {\tst1w\tz[0-9]+\.s, p[0-7], \[x[0-9]+, z[0-9]+.s, sxtw 2\]\n} 6 } } */
+/* The 32-bit loop needs to honor the defined overflow in uint32_t,
+ so we vectorize the offset calculation. This means that the
+ 64-bit version needs two copies. */
+/* { dg-final { scan-assembler-times {\tst1w\tz[0-9]+\.s, p[0-7], \[x[0-9]+, z[0-9]+.s, uxtw 2\]\n} 3 } } */
+/* { dg-final { scan-assembler-times {\tst1d\tz[0-9]+\.d, p[0-7], \[x[0-9]+, z[0-9]+.d, lsl 3\]\n} 15 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_strided_store_3.c b/gcc/testsuite/gcc.target/aarch64/sve_strided_store_3.c
new file mode 100644
index 00000000000..19454565f97
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve_strided_store_3.c
@@ -0,0 +1,33 @@
+/* { dg-do assemble } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
+
+#include <stdint.h>
+
+#define TEST_LOOP(DATA_TYPE, OTHER_TYPE) \
+ void __attribute__ ((noinline, noclone)) \
+ f_##DATA_TYPE##_##BITS (DATA_TYPE *restrict dest, \
+ DATA_TYPE *restrict src, \
+ OTHER_TYPE *restrict other, \
+ OTHER_TYPE mask, \
+ int stride, int n) \
+ { \
+ for (int i = 0; i < n; ++i) \
+ dest[i * stride] = src[i] + (OTHER_TYPE) (other[i] | mask); \
+ }
+
+#define TEST_ALL(T) \
+ T (int32_t, int16_t) \
+ T (uint32_t, int16_t) \
+ T (float, int16_t) \
+ T (int64_t, int32_t) \
+ T (uint64_t, int32_t) \
+ T (double, int32_t)
+
+TEST_ALL (TEST_LOOP)
+
+/* { dg-final { scan-assembler-times {\tld1h\tz[0-9]+\.h, p[0-7]/z, \[x[0-9]+, x[0-9]+, lsl 1\]\n} 3 } } */
+/* { dg-final { scan-assembler-times {\tld1w\tz[0-9]+\.s, p[0-7]/z, \[x[0-9]+, x[0-9]+, lsl 2\]\n} 9 } } */
+/* { dg-final { scan-assembler-times {\tst1w\tz[0-9]+\.s, p[0-7], \[x[0-9]+, z[0-9]+.s, sxtw 2\]\n} 6 } } */
+
+/* { dg-final { scan-assembler-times {\tld1d\tz[0-9]+\.d, p[0-7]/z, \[x[0-9]+, x[0-9]+, lsl 3\]\n} 6 } } */
+/* { dg-final { scan-assembler-times {\tst1d\tz[0-9]+\.d, p[0-7], \[x[0-9]+, z[0-9]+.d, lsl 3\]\n} 6 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_strided_store_4.c b/gcc/testsuite/gcc.target/aarch64/sve_strided_store_4.c
new file mode 100644
index 00000000000..23f1329c69b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve_strided_store_4.c
@@ -0,0 +1,33 @@
+/* { dg-do assemble } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
+
+#include <stdint.h>
+
+#define TEST_LOOP(DATA_TYPE, NAME, SCALE) \
+ void __attribute__ ((noinline, noclone)) \
+ f_##DATA_TYPE##_##NAME (DATA_TYPE *restrict dest, \
+ DATA_TYPE *restrict src, int n) \
+ { \
+ for (int i = 0; i < n; ++i) \
+ dest[i * SCALE] = src[i] + 1; \
+ }
+
+#define TEST_TYPE(T, DATA_TYPE) \
+ T (DATA_TYPE, 5, 5) \
+ T (DATA_TYPE, 7, 7) \
+ T (DATA_TYPE, 11, 11) \
+ T (DATA_TYPE, 200, 200) \
+ T (DATA_TYPE, m100, -100)
+
+#define TEST_ALL(T) \
+ TEST_TYPE (T, int32_t) \
+ TEST_TYPE (T, uint32_t) \
+ TEST_TYPE (T, float) \
+ TEST_TYPE (T, int64_t) \
+ TEST_TYPE (T, uint64_t) \
+ TEST_TYPE (T, double)
+
+TEST_ALL (TEST_LOOP)
+
+/* { dg-final { scan-assembler-times {\tst1w\tz[0-9]+\.s, p[0-7], \[x[0-9]+, z[0-9]+.s, sxtw 2\]\n} 15 } } */
+/* { dg-final { scan-assembler-times {\tst1d\tz[0-9]+\.d, p[0-7], \[x[0-9]+, z[0-9]+.d, lsl 3\]\n} 15 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_strided_store_5.c b/gcc/testsuite/gcc.target/aarch64/sve_strided_store_5.c
new file mode 100644
index 00000000000..68f2a539c27
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve_strided_store_5.c
@@ -0,0 +1,34 @@
+/* { dg-do assemble } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve -msve-vector-bits=256 --save-temps" } */
+
+#include <stdint.h>
+
+#define TEST_LOOP(DATA_TYPE, NAME, SCALE) \
+ void __attribute__ ((noinline, noclone)) \
+ f_##DATA_TYPE##_##NAME (DATA_TYPE *restrict dest, \
+ DATA_TYPE *restrict src, long n) \
+ { \
+ for (long i = 0; i < n; ++i) \
+ dest[i * SCALE] = src[i] + 1; \
+ }
+
+#define TEST_TYPE(T, DATA_TYPE) \
+ T (DATA_TYPE, 5, 5) \
+ T (DATA_TYPE, 7, 7) \
+ T (DATA_TYPE, 11, 11) \
+ T (DATA_TYPE, 200, 200) \
+ T (DATA_TYPE, m100, -100)
+
+#define TEST_ALL(T) \
+ TEST_TYPE (T, int32_t) \
+ TEST_TYPE (T, uint32_t) \
+ TEST_TYPE (T, float) \
+ TEST_TYPE (T, int64_t) \
+ TEST_TYPE (T, uint64_t) \
+ TEST_TYPE (T, double)
+
+TEST_ALL (TEST_LOOP)
+
+/* { dg-final { scan-assembler-times {\tst1w\tz[0-9]+\.s, p[0-7], \[x[0-9]+, z[0-9]+.s, uxtw\]\n} 12 } } */
+/* { dg-final { scan-assembler-times {\tst1w\tz[0-9]+\.s, p[0-7], \[x[0-9]+, z[0-9]+.s, sxtw\]\n} 3 } } */
+/* { dg-final { scan-assembler-times {\tst1d\tz[0-9]+\.d, p[0-7], \[x[0-9]+, z[0-9]+.d\]\n} 15 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_strided_store_6.c b/gcc/testsuite/gcc.target/aarch64/sve_strided_store_6.c
new file mode 100644
index 00000000000..da124b7348b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve_strided_store_6.c
@@ -0,0 +1,7 @@
+/* { dg-do assemble } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve -msve-vector-bits=scalable --save-temps" } */
+
+#include "sve_strided_store_5.c"
+
+/* { dg-final { scan-assembler-not {\[x[0-9]+, z[0-9]+\.s} } } */
+/* { dg-final { scan-assembler-times {\tst1d\tz[0-9]+\.d, p[0-7], \[x[0-9]+, z[0-9]+.d\]\n} 15 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_strided_store_7.c b/gcc/testsuite/gcc.target/aarch64/sve_strided_store_7.c
new file mode 100644
index 00000000000..a76ac359f01
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve_strided_store_7.c
@@ -0,0 +1,34 @@
+/* { dg-do assemble } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
+
+#include <stdint.h>
+
+#define TEST_LOOP(DATA_TYPE, NAME, SCALE) \
+ void __attribute__ ((noinline, noclone)) \
+ f_##DATA_TYPE##_##NAME (DATA_TYPE *restrict dest, \
+ DATA_TYPE *restrict src) \
+ { \
+ for (long i = 0; i < 1000; ++i) \
+ dest[i * SCALE] = src[i] + 1; \
+ }
+
+#define TEST_TYPE(T, DATA_TYPE) \
+ T (DATA_TYPE, 5, 5) \
+ T (DATA_TYPE, 7, 7) \
+ T (DATA_TYPE, 11, 11) \
+ T (DATA_TYPE, 200, 200) \
+ T (DATA_TYPE, m100, -100)
+
+#define TEST_ALL(T) \
+ TEST_TYPE (T, int32_t) \
+ TEST_TYPE (T, uint32_t) \
+ TEST_TYPE (T, float) \
+ TEST_TYPE (T, int64_t) \
+ TEST_TYPE (T, uint64_t) \
+ TEST_TYPE (T, double)
+
+TEST_ALL (TEST_LOOP)
+
+/* { dg-final { scan-assembler-times {\tst1w\tz[0-9]+\.s, p[0-7], \[x[0-9]+, z[0-9]+.s, uxtw\]\n} 12 } } */
+/* { dg-final { scan-assembler-times {\tst1w\tz[0-9]+\.s, p[0-7], \[x[0-9]+, z[0-9]+.s, sxtw\]\n} 3 } } */
+/* { dg-final { scan-assembler-times {\tst1d\tz[0-9]+\.d, p[0-7], \[x[0-9]+, z[0-9]+.d\]\n} 15 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_move_1.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_move_1.c
index bb23f9886c6..e9ac4790c7b 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_struct_move_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_move_1.c
@@ -1,32 +1,35 @@
-/* { dg-do compile } */
-/* { dg-options "-O -march=armv8-a+sve -msve-vector-bits=256" } */
+/* { dg-do assemble } */
+/* { dg-options "-O -march=armv8-a+sve -msve-vector-bits=256 -mbig-endian --save-temps" } */
-typedef char v32qi __attribute__((vector_size(32)));
-typedef struct { v32qi a[2]; } v64qi;
+typedef char vnx16qi __attribute__((vector_size(32)));
+typedef struct { vnx16qi a[2]; } vnx32qi;
-typedef short v16hi __attribute__((vector_size(32)));
-typedef struct { v16hi a[2]; } v32hi;
+typedef short vnx8hi __attribute__((vector_size(32)));
+typedef struct { vnx8hi a[2]; } vnx16hi;
-typedef int v8si __attribute__((vector_size(32)));
-typedef struct { v8si a[2]; } v16si;
+typedef int vnx4si __attribute__((vector_size(32)));
+typedef struct { vnx4si a[2]; } vnx8si;
-typedef long v4di __attribute__((vector_size(32)));
-typedef struct { v4di a[2]; } v8di;
+typedef long vnx2di __attribute__((vector_size(32)));
+typedef struct { vnx2di a[2]; } vnx4di;
-typedef float v8sf __attribute__((vector_size(32)));
-typedef struct { v8sf a[2]; } v16sf;
+typedef _Float16 vnx8hf __attribute__((vector_size(32)));
+typedef struct { vnx8hf a[2]; } vnx16hf;
-typedef double v4df __attribute__((vector_size(32)));
-typedef struct { v4df a[2]; } v8df;
+typedef float vnx4sf __attribute__((vector_size(32)));
+typedef struct { vnx4sf a[2]; } vnx8sf;
+
+typedef double vnx2df __attribute__((vector_size(32)));
+typedef struct { vnx2df a[2]; } vnx4df;
#define TEST_TYPE(TYPE, REG1, REG2) \
void \
f1_##TYPE (TYPE *a) \
{ \
register TYPE x asm (#REG1) = a[0]; \
- asm volatile ("# test " #TYPE " 1 %0" :: "w" (x)); \
+ asm volatile ("# test " #TYPE " 1 %S0" :: "w" (x)); \
register TYPE y asm (#REG2) = x; \
- asm volatile ("# test " #TYPE " 2 %0, %1, %2" \
+ asm volatile ("# test " #TYPE " 2 %S0, %S1, %S2" \
: "=&w" (x) : "0" (x), "w" (y)); \
a[1] = x; \
} \
@@ -54,63 +57,73 @@ typedef struct { v4df a[2]; } v8df;
asm volatile ("# %0" :: "w" (x)); \
}
-TEST_TYPE (v64qi, z0, z2)
-TEST_TYPE (v32hi, z5, z7)
-TEST_TYPE (v16si, z10, z12)
-TEST_TYPE (v8di, z15, z17)
-TEST_TYPE (v16sf, z20, z23)
-TEST_TYPE (v8df, z28, z30)
+TEST_TYPE (vnx32qi, z0, z2)
+TEST_TYPE (vnx16hi, z5, z7)
+TEST_TYPE (vnx8si, z10, z12)
+TEST_TYPE (vnx4di, z15, z17)
+TEST_TYPE (vnx16hf, z18, z20)
+TEST_TYPE (vnx8sf, z21, z23)
+TEST_TYPE (vnx4df, z28, z30)
/* { dg-final { scan-assembler {\tld1b\tz0.b, p[0-7]/z, \[x0\]\n} } } */
/* { dg-final { scan-assembler {\tld1b\tz1.b, p[0-7]/z, \[x0, #1, mul vl\]\n} } } */
-/* { dg-final { scan-assembler { test v64qi 1 z0\n} } } */
+/* { dg-final { scan-assembler { test vnx32qi 1 z0\n} } } */
/* { dg-final { scan-assembler {\tmov\tz2.d, z0.d\n} } } */
/* { dg-final { scan-assembler {\tmov\tz3.d, z1.d\n} } } */
-/* { dg-final { scan-assembler { test v64qi 2 z0, z0, z2\n} } } */
+/* { dg-final { scan-assembler { test vnx32qi 2 z0, z0, z2\n} } } */
/* { dg-final { scan-assembler {\tst1b\tz0.b, p[0-7], \[x0, #2, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tst1b\tz1.b, p[0-7], \[x0, #3, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tld1h\tz5.h, p[0-7]/z, \[x0\]\n} } } */
/* { dg-final { scan-assembler {\tld1h\tz6.h, p[0-7]/z, \[x0, #1, mul vl\]\n} } } */
-/* { dg-final { scan-assembler { test v32hi 1 z5\n} } } */
+/* { dg-final { scan-assembler { test vnx16hi 1 z5\n} } } */
/* { dg-final { scan-assembler {\tmov\tz7.d, z5.d\n} } } */
/* { dg-final { scan-assembler {\tmov\tz8.d, z6.d\n} } } */
-/* { dg-final { scan-assembler { test v32hi 2 z5, z5, z7\n} } } */
+/* { dg-final { scan-assembler { test vnx16hi 2 z5, z5, z7\n} } } */
/* { dg-final { scan-assembler {\tst1h\tz5.h, p[0-7], \[x0, #2, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tst1h\tz6.h, p[0-7], \[x0, #3, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tld1w\tz10.s, p[0-7]/z, \[x0\]\n} } } */
/* { dg-final { scan-assembler {\tld1w\tz11.s, p[0-7]/z, \[x0, #1, mul vl\]\n} } } */
-/* { dg-final { scan-assembler { test v16si 1 z10\n} } } */
+/* { dg-final { scan-assembler { test vnx8si 1 z10\n} } } */
/* { dg-final { scan-assembler {\tmov\tz12.d, z10.d\n} } } */
/* { dg-final { scan-assembler {\tmov\tz13.d, z11.d\n} } } */
-/* { dg-final { scan-assembler { test v16si 2 z10, z10, z12\n} } } */
+/* { dg-final { scan-assembler { test vnx8si 2 z10, z10, z12\n} } } */
/* { dg-final { scan-assembler {\tst1w\tz10.s, p[0-7], \[x0, #2, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tst1w\tz11.s, p[0-7], \[x0, #3, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tld1d\tz15.d, p[0-7]/z, \[x0\]\n} } } */
/* { dg-final { scan-assembler {\tld1d\tz16.d, p[0-7]/z, \[x0, #1, mul vl\]\n} } } */
-/* { dg-final { scan-assembler { test v8di 1 z15\n} } } */
+/* { dg-final { scan-assembler { test vnx4di 1 z15\n} } } */
/* { dg-final { scan-assembler {\tmov\tz17.d, z15.d\n} } } */
/* { dg-final { scan-assembler {\tmov\tz18.d, z16.d\n} } } */
-/* { dg-final { scan-assembler { test v8di 2 z15, z15, z17\n} } } */
+/* { dg-final { scan-assembler { test vnx4di 2 z15, z15, z17\n} } } */
/* { dg-final { scan-assembler {\tst1d\tz15.d, p[0-7], \[x0, #2, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tst1d\tz16.d, p[0-7], \[x0, #3, mul vl\]\n} } } */
-/* { dg-final { scan-assembler {\tld1w\tz20.s, p[0-7]/z, \[x0\]\n} } } */
-/* { dg-final { scan-assembler {\tld1w\tz21.s, p[0-7]/z, \[x0, #1, mul vl\]\n} } } */
-/* { dg-final { scan-assembler { test v16sf 1 z20\n} } } */
-/* { dg-final { scan-assembler {\tmov\tz23.d, z20.d\n} } } */
-/* { dg-final { scan-assembler {\tmov\tz24.d, z21.d\n} } } */
-/* { dg-final { scan-assembler { test v16sf 2 z20, z20, z23\n} } } */
-/* { dg-final { scan-assembler {\tst1w\tz20.s, p[0-7], \[x0, #2, mul vl\]\n} } } */
-/* { dg-final { scan-assembler {\tst1w\tz21.s, p[0-7], \[x0, #3, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tld1h\tz18.h, p[0-7]/z, \[x0\]\n} } } */
+/* { dg-final { scan-assembler {\tld1h\tz19.h, p[0-7]/z, \[x0, #1, mul vl\]\n} } } */
+/* { dg-final { scan-assembler { test vnx16hf 1 z18\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz20.d, z18.d\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz21.d, z19.d\n} } } */
+/* { dg-final { scan-assembler { test vnx16hf 2 z18, z18, z20\n} } } */
+/* { dg-final { scan-assembler {\tst1h\tz18.h, p[0-7], \[x0, #2, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tst1h\tz19.h, p[0-7], \[x0, #3, mul vl\]\n} } } */
+
+/* { dg-final { scan-assembler {\tld1w\tz21.s, p[0-7]/z, \[x0\]\n} } } */
+/* { dg-final { scan-assembler {\tld1w\tz22.s, p[0-7]/z, \[x0, #1, mul vl\]\n} } } */
+/* { dg-final { scan-assembler { test vnx8sf 1 z21\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz23.d, z21.d\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz24.d, z22.d\n} } } */
+/* { dg-final { scan-assembler { test vnx8sf 2 z21, z21, z23\n} } } */
+/* { dg-final { scan-assembler {\tst1w\tz21.s, p[0-7], \[x0, #2, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tst1w\tz22.s, p[0-7], \[x0, #3, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tld1d\tz28.d, p[0-7]/z, \[x0\]\n} } } */
/* { dg-final { scan-assembler {\tld1d\tz29.d, p[0-7]/z, \[x0, #1, mul vl\]\n} } } */
-/* { dg-final { scan-assembler { test v8df 1 z28\n} } } */
+/* { dg-final { scan-assembler { test vnx4df 1 z28\n} } } */
/* { dg-final { scan-assembler {\tmov\tz30.d, z28.d\n} } } */
/* { dg-final { scan-assembler {\tmov\tz31.d, z29.d\n} } } */
-/* { dg-final { scan-assembler { test v8df 2 z28, z28, z30\n} } } */
+/* { dg-final { scan-assembler { test vnx4df 2 z28, z28, z30\n} } } */
/* { dg-final { scan-assembler {\tst1d\tz28.d, p[0-7], \[x0, #2, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tst1d\tz29.d, p[0-7], \[x0, #3, mul vl\]\n} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_move_2.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_move_2.c
index d36aa75483a..faf503c35e1 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_struct_move_2.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_move_2.c
@@ -1,51 +1,55 @@
-/* { dg-do compile } */
-/* { dg-options "-O -march=armv8-a+sve -msve-vector-bits=256" } */
+/* { dg-do assemble } */
+/* { dg-options "-O -march=armv8-a+sve -msve-vector-bits=256 -mbig-endian --save-temps" } */
-typedef char v32qi __attribute__((vector_size(32)));
-typedef struct { v32qi a[3]; } v96qi;
+typedef char vnx16qi __attribute__((vector_size(32)));
+typedef struct { vnx16qi a[3]; } vnx48qi;
-typedef short v16hi __attribute__((vector_size(32)));
-typedef struct { v16hi a[3]; } v48hi;
+typedef short vnx8hi __attribute__((vector_size(32)));
+typedef struct { vnx8hi a[3]; } vnx24hi;
-typedef int v8si __attribute__((vector_size(32)));
-typedef struct { v8si a[3]; } v24si;
+typedef int vnx4si __attribute__((vector_size(32)));
+typedef struct { vnx4si a[3]; } vnx12si;
-typedef long v4di __attribute__((vector_size(32)));
-typedef struct { v4di a[3]; } v12di;
+typedef long vnx2di __attribute__((vector_size(32)));
+typedef struct { vnx2di a[3]; } vnx6di;
-typedef float v8sf __attribute__((vector_size(32)));
-typedef struct { v8sf a[3]; } v24sf;
+typedef _Float16 vnx8hf __attribute__((vector_size(32)));
+typedef struct { vnx8hf a[3]; } vnx24hf;
-typedef double v4df __attribute__((vector_size(32)));
-typedef struct { v4df a[3]; } v12df;
+typedef float vnx4sf __attribute__((vector_size(32)));
+typedef struct { vnx4sf a[3]; } vnx12sf;
+
+typedef double vnx2df __attribute__((vector_size(32)));
+typedef struct { vnx2df a[3]; } vnx6df;
#define TEST_TYPE(TYPE, REG1, REG2) \
void \
f_##TYPE (TYPE *a) \
{ \
register TYPE x asm (#REG1) = a[0]; \
- asm volatile ("# test " #TYPE " 1 %0" :: "w" (x)); \
+ asm volatile ("# test " #TYPE " 1 %S0" :: "w" (x)); \
register TYPE y asm (#REG2) = x; \
- asm volatile ("# test " #TYPE " 2 %0, %1, %2" \
+ asm volatile ("# test " #TYPE " 2 %S0, %S1, %S2" \
: "=&w" (x) : "0" (x), "w" (y)); \
a[1] = x; \
}
-TEST_TYPE (v96qi, z0, z3)
-TEST_TYPE (v48hi, z6, z2)
-TEST_TYPE (v24si, z12, z15)
-TEST_TYPE (v12di, z16, z13)
-TEST_TYPE (v24sf, z20, z23)
-TEST_TYPE (v12df, z26, z29)
+TEST_TYPE (vnx48qi, z0, z3)
+TEST_TYPE (vnx24hi, z6, z2)
+TEST_TYPE (vnx12si, z12, z15)
+TEST_TYPE (vnx6di, z16, z13)
+TEST_TYPE (vnx24hf, z18, z1)
+TEST_TYPE (vnx12sf, z20, z23)
+TEST_TYPE (vnx6df, z26, z29)
/* { dg-final { scan-assembler {\tld1b\tz0.b, p[0-7]/z, \[x0\]\n} } } */
/* { dg-final { scan-assembler {\tld1b\tz1.b, p[0-7]/z, \[x0, #1, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tld1b\tz2.b, p[0-7]/z, \[x0, #2, mul vl\]\n} } } */
-/* { dg-final { scan-assembler { test v96qi 1 z0\n} } } */
+/* { dg-final { scan-assembler { test vnx48qi 1 z0\n} } } */
/* { dg-final { scan-assembler {\tmov\tz3.d, z0.d\n} } } */
/* { dg-final { scan-assembler {\tmov\tz4.d, z1.d\n} } } */
/* { dg-final { scan-assembler {\tmov\tz5.d, z2.d\n} } } */
-/* { dg-final { scan-assembler { test v96qi 2 z0, z0, z3\n} } } */
+/* { dg-final { scan-assembler { test vnx48qi 2 z0, z0, z3\n} } } */
/* { dg-final { scan-assembler {\tst1b\tz0.b, p[0-7], \[x0, #3, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tst1b\tz1.b, p[0-7], \[x0, #4, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tst1b\tz2.b, p[0-7], \[x0, #5, mul vl\]\n} } } */
@@ -53,11 +57,11 @@ TEST_TYPE (v12df, z26, z29)
/* { dg-final { scan-assembler {\tld1h\tz6.h, p[0-7]/z, \[x0\]\n} } } */
/* { dg-final { scan-assembler {\tld1h\tz7.h, p[0-7]/z, \[x0, #1, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tld1h\tz8.h, p[0-7]/z, \[x0, #2, mul vl\]\n} } } */
-/* { dg-final { scan-assembler { test v48hi 1 z6\n} } } */
+/* { dg-final { scan-assembler { test vnx24hi 1 z6\n} } } */
/* { dg-final { scan-assembler {\tmov\tz2.d, z6.d\n} } } */
/* { dg-final { scan-assembler {\tmov\tz3.d, z7.d\n} } } */
/* { dg-final { scan-assembler {\tmov\tz4.d, z8.d\n} } } */
-/* { dg-final { scan-assembler { test v48hi 2 z6, z6, z2\n} } } */
+/* { dg-final { scan-assembler { test vnx24hi 2 z6, z6, z2\n} } } */
/* { dg-final { scan-assembler {\tst1h\tz6.h, p[0-7], \[x0, #3, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tst1h\tz7.h, p[0-7], \[x0, #4, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tst1h\tz8.h, p[0-7], \[x0, #5, mul vl\]\n} } } */
@@ -65,11 +69,11 @@ TEST_TYPE (v12df, z26, z29)
/* { dg-final { scan-assembler {\tld1w\tz12.s, p[0-7]/z, \[x0\]\n} } } */
/* { dg-final { scan-assembler {\tld1w\tz13.s, p[0-7]/z, \[x0, #1, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tld1w\tz14.s, p[0-7]/z, \[x0, #2, mul vl\]\n} } } */
-/* { dg-final { scan-assembler { test v24si 1 z12\n} } } */
+/* { dg-final { scan-assembler { test vnx12si 1 z12\n} } } */
/* { dg-final { scan-assembler {\tmov\tz15.d, z12.d\n} } } */
/* { dg-final { scan-assembler {\tmov\tz16.d, z13.d\n} } } */
/* { dg-final { scan-assembler {\tmov\tz17.d, z14.d\n} } } */
-/* { dg-final { scan-assembler { test v24si 2 z12, z12, z15\n} } } */
+/* { dg-final { scan-assembler { test vnx12si 2 z12, z12, z15\n} } } */
/* { dg-final { scan-assembler {\tst1w\tz12.s, p[0-7], \[x0, #3, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tst1w\tz13.s, p[0-7], \[x0, #4, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tst1w\tz14.s, p[0-7], \[x0, #5, mul vl\]\n} } } */
@@ -77,23 +81,35 @@ TEST_TYPE (v12df, z26, z29)
/* { dg-final { scan-assembler {\tld1d\tz16.d, p[0-7]/z, \[x0\]\n} } } */
/* { dg-final { scan-assembler {\tld1d\tz17.d, p[0-7]/z, \[x0, #1, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tld1d\tz18.d, p[0-7]/z, \[x0, #2, mul vl\]\n} } } */
-/* { dg-final { scan-assembler { test v12di 1 z16\n} } } */
+/* { dg-final { scan-assembler { test vnx6di 1 z16\n} } } */
/* { dg-final { scan-assembler {\tmov\tz13.d, z16.d\n} } } */
/* { dg-final { scan-assembler {\tmov\tz14.d, z17.d\n} } } */
/* { dg-final { scan-assembler {\tmov\tz15.d, z18.d\n} } } */
-/* { dg-final { scan-assembler { test v12di 2 z16, z16, z13\n} } } */
+/* { dg-final { scan-assembler { test vnx6di 2 z16, z16, z13\n} } } */
/* { dg-final { scan-assembler {\tst1d\tz16.d, p[0-7], \[x0, #3, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tst1d\tz17.d, p[0-7], \[x0, #4, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tst1d\tz18.d, p[0-7], \[x0, #5, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tld1h\tz18.h, p[0-7]/z, \[x0\]\n} } } */
+/* { dg-final { scan-assembler {\tld1h\tz19.h, p[0-7]/z, \[x0, #1, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tld1h\tz20.h, p[0-7]/z, \[x0, #2, mul vl\]\n} } } */
+/* { dg-final { scan-assembler { test vnx24hf 1 z18\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz1.d, z18.d\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz2.d, z19.d\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz3.d, z20.d\n} } } */
+/* { dg-final { scan-assembler { test vnx24hf 2 z18, z18, z1\n} } } */
+/* { dg-final { scan-assembler {\tst1h\tz18.h, p[0-7], \[x0, #3, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tst1h\tz19.h, p[0-7], \[x0, #4, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tst1h\tz20.h, p[0-7], \[x0, #5, mul vl\]\n} } } */
+
/* { dg-final { scan-assembler {\tld1w\tz20.s, p[0-7]/z, \[x0\]\n} } } */
/* { dg-final { scan-assembler {\tld1w\tz21.s, p[0-7]/z, \[x0, #1, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tld1w\tz22.s, p[0-7]/z, \[x0, #2, mul vl\]\n} } } */
-/* { dg-final { scan-assembler { test v24sf 1 z20\n} } } */
+/* { dg-final { scan-assembler { test vnx12sf 1 z20\n} } } */
/* { dg-final { scan-assembler {\tmov\tz23.d, z20.d\n} } } */
/* { dg-final { scan-assembler {\tmov\tz24.d, z21.d\n} } } */
/* { dg-final { scan-assembler {\tmov\tz25.d, z22.d\n} } } */
-/* { dg-final { scan-assembler { test v24sf 2 z20, z20, z23\n} } } */
+/* { dg-final { scan-assembler { test vnx12sf 2 z20, z20, z23\n} } } */
/* { dg-final { scan-assembler {\tst1w\tz20.s, p[0-7], \[x0, #3, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tst1w\tz21.s, p[0-7], \[x0, #4, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tst1w\tz22.s, p[0-7], \[x0, #5, mul vl\]\n} } } */
@@ -101,11 +117,11 @@ TEST_TYPE (v12df, z26, z29)
/* { dg-final { scan-assembler {\tld1d\tz26.d, p[0-7]/z, \[x0\]\n} } } */
/* { dg-final { scan-assembler {\tld1d\tz27.d, p[0-7]/z, \[x0, #1, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tld1d\tz28.d, p[0-7]/z, \[x0, #2, mul vl\]\n} } } */
-/* { dg-final { scan-assembler { test v12df 1 z26\n} } } */
+/* { dg-final { scan-assembler { test vnx6df 1 z26\n} } } */
/* { dg-final { scan-assembler {\tmov\tz29.d, z26.d\n} } } */
/* { dg-final { scan-assembler {\tmov\tz30.d, z27.d\n} } } */
/* { dg-final { scan-assembler {\tmov\tz31.d, z28.d\n} } } */
-/* { dg-final { scan-assembler { test v12df 2 z26, z26, z29\n} } } */
+/* { dg-final { scan-assembler { test vnx6df 2 z26, z26, z29\n} } } */
/* { dg-final { scan-assembler {\tst1d\tz26.d, p[0-7], \[x0, #3, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tst1d\tz27.d, p[0-7], \[x0, #4, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tst1d\tz28.d, p[0-7], \[x0, #5, mul vl\]\n} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_move_3.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_move_3.c
index d97d6973359..101a33701a5 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_struct_move_3.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_move_3.c
@@ -1,53 +1,57 @@
-/* { dg-do compile } */
-/* { dg-options "-O -march=armv8-a+sve -msve-vector-bits=256" } */
+/* { dg-do assemble } */
+/* { dg-options "-O -march=armv8-a+sve -msve-vector-bits=256 -mbig-endian --save-temps" } */
-typedef char v32qi __attribute__((vector_size(32)));
-typedef struct { v32qi a[4]; } v128qi;
+typedef char vnx16qi __attribute__((vector_size(32)));
+typedef struct { vnx16qi a[4]; } vnx64qi;
-typedef short v16hi __attribute__((vector_size(32)));
-typedef struct { v16hi a[4]; } v64hi;
+typedef short vnx8hi __attribute__((vector_size(32)));
+typedef struct { vnx8hi a[4]; } vnx32hi;
-typedef int v8si __attribute__((vector_size(32)));
-typedef struct { v8si a[4]; } v32si;
+typedef int vnx4si __attribute__((vector_size(32)));
+typedef struct { vnx4si a[4]; } vnx16si;
-typedef long v4di __attribute__((vector_size(32)));
-typedef struct { v4di a[4]; } v16di;
+typedef long vnx2di __attribute__((vector_size(32)));
+typedef struct { vnx2di a[4]; } vnx8di;
-typedef float v8sf __attribute__((vector_size(32)));
-typedef struct { v8sf a[4]; } v32sf;
+typedef _Float16 vnx8hf __attribute__((vector_size(32)));
+typedef struct { vnx8hf a[4]; } vnx32hf;
-typedef double v4df __attribute__((vector_size(32)));
-typedef struct { v4df a[4]; } v16df;
+typedef float vnx4sf __attribute__((vector_size(32)));
+typedef struct { vnx4sf a[4]; } vnx16sf;
+
+typedef double vnx2df __attribute__((vector_size(32)));
+typedef struct { vnx2df a[4]; } vnx8df;
#define TEST_TYPE(TYPE, REG1, REG2) \
void \
f_##TYPE (TYPE *a) \
{ \
register TYPE x asm (#REG1) = a[0]; \
- asm volatile ("# test " #TYPE " 1 %0" :: "w" (x)); \
+ asm volatile ("# test " #TYPE " 1 %S0" :: "w" (x)); \
register TYPE y asm (#REG2) = x; \
- asm volatile ("# test " #TYPE " 2 %0, %1, %2" \
+ asm volatile ("# test " #TYPE " 2 %S0, %S1, %S2" \
: "=&w" (x) : "0" (x), "w" (y)); \
a[1] = x; \
}
-TEST_TYPE (v128qi, z0, z4)
-TEST_TYPE (v64hi, z6, z2)
-TEST_TYPE (v32si, z12, z16)
-TEST_TYPE (v16di, z17, z13)
-TEST_TYPE (v32sf, z20, z16)
-TEST_TYPE (v16df, z24, z28)
+TEST_TYPE (vnx64qi, z0, z4)
+TEST_TYPE (vnx32hi, z6, z2)
+TEST_TYPE (vnx16si, z12, z16)
+TEST_TYPE (vnx8di, z17, z13)
+TEST_TYPE (vnx32hf, z18, z1)
+TEST_TYPE (vnx16sf, z20, z16)
+TEST_TYPE (vnx8df, z24, z28)
/* { dg-final { scan-assembler {\tld1b\tz0.b, p[0-7]/z, \[x0\]\n} } } */
/* { dg-final { scan-assembler {\tld1b\tz1.b, p[0-7]/z, \[x0, #1, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tld1b\tz2.b, p[0-7]/z, \[x0, #2, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tld1b\tz3.b, p[0-7]/z, \[x0, #3, mul vl\]\n} } } */
-/* { dg-final { scan-assembler { test v128qi 1 z0\n} } } */
+/* { dg-final { scan-assembler { test vnx64qi 1 z0\n} } } */
/* { dg-final { scan-assembler {\tmov\tz4.d, z0.d\n} } } */
/* { dg-final { scan-assembler {\tmov\tz5.d, z1.d\n} } } */
/* { dg-final { scan-assembler {\tmov\tz6.d, z2.d\n} } } */
/* { dg-final { scan-assembler {\tmov\tz7.d, z3.d\n} } } */
-/* { dg-final { scan-assembler { test v128qi 2 z0, z0, z4\n} } } */
+/* { dg-final { scan-assembler { test vnx64qi 2 z0, z0, z4\n} } } */
/* { dg-final { scan-assembler {\tst1b\tz0.b, p[0-7], \[x0, #4, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tst1b\tz1.b, p[0-7], \[x0, #5, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tst1b\tz2.b, p[0-7], \[x0, #6, mul vl\]\n} } } */
@@ -57,12 +61,12 @@ TEST_TYPE (v16df, z24, z28)
/* { dg-final { scan-assembler {\tld1h\tz7.h, p[0-7]/z, \[x0, #1, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tld1h\tz8.h, p[0-7]/z, \[x0, #2, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tld1h\tz9.h, p[0-7]/z, \[x0, #3, mul vl\]\n} } } */
-/* { dg-final { scan-assembler { test v64hi 1 z6\n} } } */
+/* { dg-final { scan-assembler { test vnx32hi 1 z6\n} } } */
/* { dg-final { scan-assembler {\tmov\tz2.d, z6.d\n} } } */
/* { dg-final { scan-assembler {\tmov\tz3.d, z7.d\n} } } */
/* { dg-final { scan-assembler {\tmov\tz4.d, z8.d\n} } } */
/* { dg-final { scan-assembler {\tmov\tz5.d, z9.d\n} } } */
-/* { dg-final { scan-assembler { test v64hi 2 z6, z6, z2\n} } } */
+/* { dg-final { scan-assembler { test vnx32hi 2 z6, z6, z2\n} } } */
/* { dg-final { scan-assembler {\tst1h\tz6.h, p[0-7], \[x0, #4, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tst1h\tz7.h, p[0-7], \[x0, #5, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tst1h\tz8.h, p[0-7], \[x0, #6, mul vl\]\n} } } */
@@ -72,12 +76,12 @@ TEST_TYPE (v16df, z24, z28)
/* { dg-final { scan-assembler {\tld1w\tz13.s, p[0-7]/z, \[x0, #1, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tld1w\tz14.s, p[0-7]/z, \[x0, #2, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tld1w\tz15.s, p[0-7]/z, \[x0, #3, mul vl\]\n} } } */
-/* { dg-final { scan-assembler { test v32si 1 z12\n} } } */
+/* { dg-final { scan-assembler { test vnx16si 1 z12\n} } } */
/* { dg-final { scan-assembler {\tmov\tz16.d, z12.d\n} } } */
/* { dg-final { scan-assembler {\tmov\tz17.d, z13.d\n} } } */
/* { dg-final { scan-assembler {\tmov\tz18.d, z14.d\n} } } */
/* { dg-final { scan-assembler {\tmov\tz19.d, z15.d\n} } } */
-/* { dg-final { scan-assembler { test v32si 2 z12, z12, z16\n} } } */
+/* { dg-final { scan-assembler { test vnx16si 2 z12, z12, z16\n} } } */
/* { dg-final { scan-assembler {\tst1w\tz12.s, p[0-7], \[x0, #4, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tst1w\tz13.s, p[0-7], \[x0, #5, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tst1w\tz14.s, p[0-7], \[x0, #6, mul vl\]\n} } } */
@@ -87,27 +91,42 @@ TEST_TYPE (v16df, z24, z28)
/* { dg-final { scan-assembler {\tld1d\tz18.d, p[0-7]/z, \[x0, #1, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tld1d\tz19.d, p[0-7]/z, \[x0, #2, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tld1d\tz20.d, p[0-7]/z, \[x0, #3, mul vl\]\n} } } */
-/* { dg-final { scan-assembler { test v16di 1 z17\n} } } */
+/* { dg-final { scan-assembler { test vnx8di 1 z17\n} } } */
/* { dg-final { scan-assembler {\tmov\tz13.d, z17.d\n} } } */
/* { dg-final { scan-assembler {\tmov\tz14.d, z18.d\n} } } */
/* { dg-final { scan-assembler {\tmov\tz15.d, z19.d\n} } } */
/* { dg-final { scan-assembler {\tmov\tz16.d, z20.d\n} } } */
-/* { dg-final { scan-assembler { test v16di 2 z17, z17, z13\n} } } */
+/* { dg-final { scan-assembler { test vnx8di 2 z17, z17, z13\n} } } */
/* { dg-final { scan-assembler {\tst1d\tz17.d, p[0-7], \[x0, #4, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tst1d\tz18.d, p[0-7], \[x0, #5, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tst1d\tz19.d, p[0-7], \[x0, #6, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tst1d\tz20.d, p[0-7], \[x0, #7, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tld1h\tz18.h, p[0-7]/z, \[x0\]\n} } } */
+/* { dg-final { scan-assembler {\tld1h\tz19.h, p[0-7]/z, \[x0, #1, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tld1h\tz20.h, p[0-7]/z, \[x0, #2, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tld1h\tz21.h, p[0-7]/z, \[x0, #3, mul vl\]\n} } } */
+/* { dg-final { scan-assembler { test vnx32hf 1 z18\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz1.d, z18.d\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz2.d, z19.d\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz3.d, z20.d\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz4.d, z21.d\n} } } */
+/* { dg-final { scan-assembler { test vnx32hf 2 z18, z18, z1\n} } } */
+/* { dg-final { scan-assembler {\tst1h\tz18.h, p[0-7], \[x0, #4, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tst1h\tz19.h, p[0-7], \[x0, #5, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tst1h\tz20.h, p[0-7], \[x0, #6, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tst1h\tz21.h, p[0-7], \[x0, #7, mul vl\]\n} } } */
+
/* { dg-final { scan-assembler {\tld1w\tz20.s, p[0-7]/z, \[x0\]\n} } } */
/* { dg-final { scan-assembler {\tld1w\tz21.s, p[0-7]/z, \[x0, #1, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tld1w\tz22.s, p[0-7]/z, \[x0, #2, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tld1w\tz23.s, p[0-7]/z, \[x0, #3, mul vl\]\n} } } */
-/* { dg-final { scan-assembler { test v32sf 1 z20\n} } } */
+/* { dg-final { scan-assembler { test vnx16sf 1 z20\n} } } */
/* { dg-final { scan-assembler {\tmov\tz16.d, z20.d\n} } } */
/* { dg-final { scan-assembler {\tmov\tz17.d, z21.d\n} } } */
/* { dg-final { scan-assembler {\tmov\tz18.d, z22.d\n} } } */
/* { dg-final { scan-assembler {\tmov\tz19.d, z23.d\n} } } */
-/* { dg-final { scan-assembler { test v32sf 2 z20, z20, z16\n} } } */
+/* { dg-final { scan-assembler { test vnx16sf 2 z20, z20, z16\n} } } */
/* { dg-final { scan-assembler {\tst1w\tz20.s, p[0-7], \[x0, #4, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tst1w\tz21.s, p[0-7], \[x0, #5, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tst1w\tz22.s, p[0-7], \[x0, #6, mul vl\]\n} } } */
@@ -117,12 +136,12 @@ TEST_TYPE (v16df, z24, z28)
/* { dg-final { scan-assembler {\tld1d\tz25.d, p[0-7]/z, \[x0, #1, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tld1d\tz26.d, p[0-7]/z, \[x0, #2, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tld1d\tz27.d, p[0-7]/z, \[x0, #3, mul vl\]\n} } } */
-/* { dg-final { scan-assembler { test v16df 1 z24\n} } } */
+/* { dg-final { scan-assembler { test vnx8df 1 z24\n} } } */
/* { dg-final { scan-assembler {\tmov\tz28.d, z24.d\n} } } */
/* { dg-final { scan-assembler {\tmov\tz29.d, z25.d\n} } } */
/* { dg-final { scan-assembler {\tmov\tz30.d, z26.d\n} } } */
/* { dg-final { scan-assembler {\tmov\tz31.d, z27.d\n} } } */
-/* { dg-final { scan-assembler { test v16df 2 z24, z24, z28\n} } } */
+/* { dg-final { scan-assembler { test vnx8df 2 z24, z24, z28\n} } } */
/* { dg-final { scan-assembler {\tst1d\tz24.d, p[0-7], \[x0, #4, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tst1d\tz25.d, p[0-7], \[x0, #5, mul vl\]\n} } } */
/* { dg-final { scan-assembler {\tst1d\tz26.d, p[0-7], \[x0, #6, mul vl\]\n} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_move_4.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_move_4.c
new file mode 100644
index 00000000000..40ec0481e84
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_move_4.c
@@ -0,0 +1,116 @@
+/* { dg-do assemble } */
+/* { dg-options "-O -march=armv8-a+sve -msve-vector-bits=256 -mlittle-endian --save-temps" } */
+
+typedef char vnx16qi __attribute__((vector_size(32)));
+typedef struct { vnx16qi a[2]; } vnx32qi;
+
+typedef short vnx8hi __attribute__((vector_size(32)));
+typedef struct { vnx8hi a[2]; } vnx16hi;
+
+typedef int vnx4si __attribute__((vector_size(32)));
+typedef struct { vnx4si a[2]; } vnx8si;
+
+typedef long vnx2di __attribute__((vector_size(32)));
+typedef struct { vnx2di a[2]; } vnx4di;
+
+typedef float vnx4sf __attribute__((vector_size(32)));
+typedef struct { vnx4sf a[2]; } vnx8sf;
+
+typedef double vnx2df __attribute__((vector_size(32)));
+typedef struct { vnx2df a[2]; } vnx4df;
+
+#define TEST_TYPE(TYPE, REG1, REG2) \
+ void \
+ f1_##TYPE (TYPE *a) \
+ { \
+ register TYPE x asm (#REG1) = a[0]; \
+ asm volatile ("# test " #TYPE " 1 %S0" :: "w" (x)); \
+ register TYPE y asm (#REG2) = x; \
+ asm volatile ("# test " #TYPE " 2 %S0, %S1, %S2" \
+ : "=&w" (x) : "0" (x), "w" (y)); \
+ a[1] = x; \
+ } \
+ /* This must compile, but we don't care how. */ \
+ void \
+ f2_##TYPE (TYPE *a) \
+ { \
+ TYPE x = a[0]; \
+ x.a[0][3] = 1; \
+ x.a[1][2] = 12; \
+ asm volatile ("# %0" :: "w" (x)); \
+ } \
+ void \
+ f3_##TYPE (TYPE *a, int i) \
+ { \
+ TYPE x = a[0]; \
+ x.a[0][i] = 1; \
+ asm volatile ("# %0" :: "w" (x)); \
+ } \
+ void \
+ f4_##TYPE (TYPE *a, int i, int j) \
+ { \
+ TYPE x = a[0]; \
+ x.a[i][j] = 44; \
+ asm volatile ("# %0" :: "w" (x)); \
+ }
+
+TEST_TYPE (vnx32qi, z0, z2)
+TEST_TYPE (vnx16hi, z5, z7)
+TEST_TYPE (vnx8si, z10, z12)
+TEST_TYPE (vnx4di, z15, z17)
+TEST_TYPE (vnx8sf, z20, z23)
+TEST_TYPE (vnx4df, z28, z30)
+
+/* { dg-final { scan-assembler {\tldr\tz0, \[x0\]\n} } } */
+/* { dg-final { scan-assembler {\tldr\tz1, \[x0, #1, mul vl\]\n} } } */
+/* { dg-final { scan-assembler { test vnx32qi 1 z0\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz2.d, z0.d\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz3.d, z1.d\n} } } */
+/* { dg-final { scan-assembler { test vnx32qi 2 z0, z0, z2\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz0, \[x0, #2, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz1, \[x0, #3, mul vl\]\n} } } */
+
+/* { dg-final { scan-assembler {\tldr\tz5, \[x0\]\n} } } */
+/* { dg-final { scan-assembler {\tldr\tz6, \[x0, #1, mul vl\]\n} } } */
+/* { dg-final { scan-assembler { test vnx16hi 1 z5\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz7.d, z5.d\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz8.d, z6.d\n} } } */
+/* { dg-final { scan-assembler { test vnx16hi 2 z5, z5, z7\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz5, \[x0, #2, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz6, \[x0, #3, mul vl\]\n} } } */
+
+/* { dg-final { scan-assembler {\tldr\tz10, \[x0\]\n} } } */
+/* { dg-final { scan-assembler {\tldr\tz11, \[x0, #1, mul vl\]\n} } } */
+/* { dg-final { scan-assembler { test vnx8si 1 z10\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz12.d, z10.d\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz13.d, z11.d\n} } } */
+/* { dg-final { scan-assembler { test vnx8si 2 z10, z10, z12\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz10, \[x0, #2, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz11, \[x0, #3, mul vl\]\n} } } */
+
+/* { dg-final { scan-assembler {\tldr\tz15, \[x0\]\n} } } */
+/* { dg-final { scan-assembler {\tldr\tz16, \[x0, #1, mul vl\]\n} } } */
+/* { dg-final { scan-assembler { test vnx4di 1 z15\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz17.d, z15.d\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz18.d, z16.d\n} } } */
+/* { dg-final { scan-assembler { test vnx4di 2 z15, z15, z17\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz15, \[x0, #2, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz16, \[x0, #3, mul vl\]\n} } } */
+
+/* { dg-final { scan-assembler {\tldr\tz20, \[x0\]\n} } } */
+/* { dg-final { scan-assembler {\tldr\tz21, \[x0, #1, mul vl\]\n} } } */
+/* { dg-final { scan-assembler { test vnx8sf 1 z20\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz23.d, z20.d\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz24.d, z21.d\n} } } */
+/* { dg-final { scan-assembler { test vnx8sf 2 z20, z20, z23\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz20, \[x0, #2, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz21, \[x0, #3, mul vl\]\n} } } */
+
+/* { dg-final { scan-assembler {\tldr\tz28, \[x0\]\n} } } */
+/* { dg-final { scan-assembler {\tldr\tz29, \[x0, #1, mul vl\]\n} } } */
+/* { dg-final { scan-assembler { test vnx4df 1 z28\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz30.d, z28.d\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz31.d, z29.d\n} } } */
+/* { dg-final { scan-assembler { test vnx4df 2 z28, z28, z30\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz28, \[x0, #2, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz29, \[x0, #3, mul vl\]\n} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_move_5.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_move_5.c
new file mode 100644
index 00000000000..ee04c3e0f23
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_move_5.c
@@ -0,0 +1,111 @@
+/* { dg-do assemble } */
+/* { dg-options "-O -march=armv8-a+sve -msve-vector-bits=256 -mlittle-endian --save-temps" } */
+
+typedef char vnx16qi __attribute__((vector_size(32)));
+typedef struct { vnx16qi a[3]; } vnx48qi;
+
+typedef short vnx8hi __attribute__((vector_size(32)));
+typedef struct { vnx8hi a[3]; } vnx24hi;
+
+typedef int vnx4si __attribute__((vector_size(32)));
+typedef struct { vnx4si a[3]; } vnx12si;
+
+typedef long vnx2di __attribute__((vector_size(32)));
+typedef struct { vnx2di a[3]; } vnx6di;
+
+typedef float vnx4sf __attribute__((vector_size(32)));
+typedef struct { vnx4sf a[3]; } vnx12sf;
+
+typedef double vnx2df __attribute__((vector_size(32)));
+typedef struct { vnx2df a[3]; } vnx6df;
+
+#define TEST_TYPE(TYPE, REG1, REG2) \
+ void \
+ f_##TYPE (TYPE *a) \
+ { \
+ register TYPE x asm (#REG1) = a[0]; \
+ asm volatile ("# test " #TYPE " 1 %S0" :: "w" (x)); \
+ register TYPE y asm (#REG2) = x; \
+ asm volatile ("# test " #TYPE " 2 %S0, %S1, %S2" \
+ : "=&w" (x) : "0" (x), "w" (y)); \
+ a[1] = x; \
+ }
+
+TEST_TYPE (vnx48qi, z0, z3)
+TEST_TYPE (vnx24hi, z6, z2)
+TEST_TYPE (vnx12si, z12, z15)
+TEST_TYPE (vnx6di, z16, z13)
+TEST_TYPE (vnx12sf, z20, z23)
+TEST_TYPE (vnx6df, z26, z29)
+
+/* { dg-final { scan-assembler {\tldr\tz0, \[x0\]\n} } } */
+/* { dg-final { scan-assembler {\tldr\tz1, \[x0, #1, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tldr\tz2, \[x0, #2, mul vl\]\n} } } */
+/* { dg-final { scan-assembler { test vnx48qi 1 z0\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz3.d, z0.d\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz4.d, z1.d\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz5.d, z2.d\n} } } */
+/* { dg-final { scan-assembler { test vnx48qi 2 z0, z0, z3\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz0, \[x0, #3, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz1, \[x0, #4, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz2, \[x0, #5, mul vl\]\n} } } */
+
+/* { dg-final { scan-assembler {\tldr\tz6, \[x0\]\n} } } */
+/* { dg-final { scan-assembler {\tldr\tz7, \[x0, #1, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tldr\tz8, \[x0, #2, mul vl\]\n} } } */
+/* { dg-final { scan-assembler { test vnx24hi 1 z6\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz2.d, z6.d\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz3.d, z7.d\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz4.d, z8.d\n} } } */
+/* { dg-final { scan-assembler { test vnx24hi 2 z6, z6, z2\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz6, \[x0, #3, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz7, \[x0, #4, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz8, \[x0, #5, mul vl\]\n} } } */
+
+/* { dg-final { scan-assembler {\tldr\tz12, \[x0\]\n} } } */
+/* { dg-final { scan-assembler {\tldr\tz13, \[x0, #1, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tldr\tz14, \[x0, #2, mul vl\]\n} } } */
+/* { dg-final { scan-assembler { test vnx12si 1 z12\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz15.d, z12.d\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz16.d, z13.d\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz17.d, z14.d\n} } } */
+/* { dg-final { scan-assembler { test vnx12si 2 z12, z12, z15\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz12, \[x0, #3, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz13, \[x0, #4, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz14, \[x0, #5, mul vl\]\n} } } */
+
+/* { dg-final { scan-assembler {\tldr\tz16, \[x0\]\n} } } */
+/* { dg-final { scan-assembler {\tldr\tz17, \[x0, #1, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tldr\tz18, \[x0, #2, mul vl\]\n} } } */
+/* { dg-final { scan-assembler { test vnx6di 1 z16\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz13.d, z16.d\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz14.d, z17.d\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz15.d, z18.d\n} } } */
+/* { dg-final { scan-assembler { test vnx6di 2 z16, z16, z13\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz16, \[x0, #3, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz17, \[x0, #4, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz18, \[x0, #5, mul vl\]\n} } } */
+
+/* { dg-final { scan-assembler {\tldr\tz20, \[x0\]\n} } } */
+/* { dg-final { scan-assembler {\tldr\tz21, \[x0, #1, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tldr\tz22, \[x0, #2, mul vl\]\n} } } */
+/* { dg-final { scan-assembler { test vnx12sf 1 z20\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz23.d, z20.d\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz24.d, z21.d\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz25.d, z22.d\n} } } */
+/* { dg-final { scan-assembler { test vnx12sf 2 z20, z20, z23\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz20, \[x0, #3, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz21, \[x0, #4, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz22, \[x0, #5, mul vl\]\n} } } */
+
+/* { dg-final { scan-assembler {\tldr\tz26, \[x0\]\n} } } */
+/* { dg-final { scan-assembler {\tldr\tz27, \[x0, #1, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tldr\tz28, \[x0, #2, mul vl\]\n} } } */
+/* { dg-final { scan-assembler { test vnx6df 1 z26\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz29.d, z26.d\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz30.d, z27.d\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz31.d, z28.d\n} } } */
+/* { dg-final { scan-assembler { test vnx6df 2 z26, z26, z29\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz26, \[x0, #3, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz27, \[x0, #4, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz28, \[x0, #5, mul vl\]\n} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_move_6.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_move_6.c
new file mode 100644
index 00000000000..8bfd9f6d1af
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_move_6.c
@@ -0,0 +1,129 @@
+/* { dg-do assemble } */
+/* { dg-options "-O -march=armv8-a+sve -msve-vector-bits=256 -mlittle-endian --save-temps" } */
+
+typedef char vnx16qi __attribute__((vector_size(32)));
+typedef struct { vnx16qi a[4]; } vnx64qi;
+
+typedef short vnx8hi __attribute__((vector_size(32)));
+typedef struct { vnx8hi a[4]; } vnx32hi;
+
+typedef int vnx4si __attribute__((vector_size(32)));
+typedef struct { vnx4si a[4]; } vnx16si;
+
+typedef long vnx2di __attribute__((vector_size(32)));
+typedef struct { vnx2di a[4]; } vnx8di;
+
+typedef float vnx4sf __attribute__((vector_size(32)));
+typedef struct { vnx4sf a[4]; } vnx16sf;
+
+typedef double vnx2df __attribute__((vector_size(32)));
+typedef struct { vnx2df a[4]; } vnx8df;
+
+#define TEST_TYPE(TYPE, REG1, REG2) \
+ void \
+ f_##TYPE (TYPE *a) \
+ { \
+ register TYPE x asm (#REG1) = a[0]; \
+ asm volatile ("# test " #TYPE " 1 %S0" :: "w" (x)); \
+ register TYPE y asm (#REG2) = x; \
+ asm volatile ("# test " #TYPE " 2 %S0, %S1, %S2" \
+ : "=&w" (x) : "0" (x), "w" (y)); \
+ a[1] = x; \
+ }
+
+TEST_TYPE (vnx64qi, z0, z4)
+TEST_TYPE (vnx32hi, z6, z2)
+TEST_TYPE (vnx16si, z12, z16)
+TEST_TYPE (vnx8di, z17, z13)
+TEST_TYPE (vnx16sf, z20, z16)
+TEST_TYPE (vnx8df, z24, z28)
+
+/* { dg-final { scan-assembler {\tldr\tz0, \[x0\]\n} } } */
+/* { dg-final { scan-assembler {\tldr\tz1, \[x0, #1, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tldr\tz2, \[x0, #2, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tldr\tz3, \[x0, #3, mul vl\]\n} } } */
+/* { dg-final { scan-assembler { test vnx64qi 1 z0\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz4.d, z0.d\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz5.d, z1.d\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz6.d, z2.d\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz7.d, z3.d\n} } } */
+/* { dg-final { scan-assembler { test vnx64qi 2 z0, z0, z4\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz0, \[x0, #4, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz1, \[x0, #5, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz2, \[x0, #6, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz3, \[x0, #7, mul vl\]\n} } } */
+
+/* { dg-final { scan-assembler {\tldr\tz6, \[x0\]\n} } } */
+/* { dg-final { scan-assembler {\tldr\tz7, \[x0, #1, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tldr\tz8, \[x0, #2, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tldr\tz9, \[x0, #3, mul vl\]\n} } } */
+/* { dg-final { scan-assembler { test vnx32hi 1 z6\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz2.d, z6.d\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz3.d, z7.d\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz4.d, z8.d\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz5.d, z9.d\n} } } */
+/* { dg-final { scan-assembler { test vnx32hi 2 z6, z6, z2\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz6, \[x0, #4, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz7, \[x0, #5, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz8, \[x0, #6, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz9, \[x0, #7, mul vl\]\n} } } */
+
+/* { dg-final { scan-assembler {\tldr\tz12, \[x0\]\n} } } */
+/* { dg-final { scan-assembler {\tldr\tz13, \[x0, #1, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tldr\tz14, \[x0, #2, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tldr\tz15, \[x0, #3, mul vl\]\n} } } */
+/* { dg-final { scan-assembler { test vnx16si 1 z12\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz16.d, z12.d\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz17.d, z13.d\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz18.d, z14.d\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz19.d, z15.d\n} } } */
+/* { dg-final { scan-assembler { test vnx16si 2 z12, z12, z16\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz12, \[x0, #4, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz13, \[x0, #5, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz14, \[x0, #6, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz15, \[x0, #7, mul vl\]\n} } } */
+
+/* { dg-final { scan-assembler {\tldr\tz17, \[x0\]\n} } } */
+/* { dg-final { scan-assembler {\tldr\tz18, \[x0, #1, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tldr\tz19, \[x0, #2, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tldr\tz20, \[x0, #3, mul vl\]\n} } } */
+/* { dg-final { scan-assembler { test vnx8di 1 z17\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz13.d, z17.d\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz14.d, z18.d\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz15.d, z19.d\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz16.d, z20.d\n} } } */
+/* { dg-final { scan-assembler { test vnx8di 2 z17, z17, z13\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz17, \[x0, #4, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz18, \[x0, #5, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz19, \[x0, #6, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz20, \[x0, #7, mul vl\]\n} } } */
+
+/* { dg-final { scan-assembler {\tldr\tz20, \[x0\]\n} } } */
+/* { dg-final { scan-assembler {\tldr\tz21, \[x0, #1, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tldr\tz22, \[x0, #2, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tldr\tz23, \[x0, #3, mul vl\]\n} } } */
+/* { dg-final { scan-assembler { test vnx16sf 1 z20\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz16.d, z20.d\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz17.d, z21.d\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz18.d, z22.d\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz19.d, z23.d\n} } } */
+/* { dg-final { scan-assembler { test vnx16sf 2 z20, z20, z16\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz20, \[x0, #4, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz21, \[x0, #5, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz22, \[x0, #6, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz23, \[x0, #7, mul vl\]\n} } } */
+
+/* { dg-final { scan-assembler {\tldr\tz24, \[x0\]\n} } } */
+/* { dg-final { scan-assembler {\tldr\tz25, \[x0, #1, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tldr\tz26, \[x0, #2, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tldr\tz27, \[x0, #3, mul vl\]\n} } } */
+/* { dg-final { scan-assembler { test vnx8df 1 z24\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz28.d, z24.d\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz29.d, z25.d\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz30.d, z26.d\n} } } */
+/* { dg-final { scan-assembler {\tmov\tz31.d, z27.d\n} } } */
+/* { dg-final { scan-assembler { test vnx8df 2 z24, z24, z28\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz24, \[x0, #4, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz25, \[x0, #5, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz26, \[x0, #6, mul vl\]\n} } } */
+/* { dg-final { scan-assembler {\tstr\tz27, \[x0, #7, mul vl\]\n} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_1.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_1.c
index 6d7b5fecbce..3405bd76eb1 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_1.c
@@ -1,5 +1,5 @@
-/* { dg-do compile } */
-/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
+/* { dg-do assemble } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
#ifndef TYPE
#define TYPE unsigned char
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_10.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_10.c
index 7ae718ada2c..dff9e963e06 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_10.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_10.c
@@ -1,5 +1,5 @@
-/* { dg-do compile } */
-/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
+/* { dg-do assemble } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
#define TYPE unsigned long
#define ITYPE long
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_10_run.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_10_run.c
index 5ab3ff68bda..611cbbda078 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_10_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_10_run.c
@@ -1,5 +1,5 @@
/* { dg-do run { target aarch64_sve_hw } } */
-/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
#define TYPE unsigned long
#define ITYPE long
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_11.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_11.c
index 6771938131b..80e69463e18 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_11.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_11.c
@@ -1,13 +1,13 @@
-/* { dg-do compile } */
-/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
+/* { dg-do assemble } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
-#define TYPE float
-#define ITYPE int
+#define TYPE _Float16
+#define ITYPE short
#include "sve_struct_vect_7.c"
-/* { dg-final { scan-assembler {\tld2w\t{z[0-9]+.s - z[0-9]+.s}, p[0-7]/z, \[x[0-9]+\]\n} } } */
-/* { dg-final { scan-assembler {\tld3w\t{z[0-9]+.s - z[0-9]+.s}, p[0-7]/z, \[x[0-9]+\]\n} } } */
-/* { dg-final { scan-assembler {\tld4w\t{z[0-9]+.s - z[0-9]+.s}, p[0-7]/z, \[x[0-9]+\]\n} } } */
-/* { dg-final { scan-assembler {\tst2w\t{z[0-9]+.s - z[0-9]+.s}, p[0-7], \[x[0-9]+\]\n} } } */
-/* { dg-final { scan-assembler {\tst3w\t{z[0-9]+.s - z[0-9]+.s}, p[0-7], \[x[0-9]+\]\n} } } */
-/* { dg-final { scan-assembler {\tst4w\t{z[0-9]+.s - z[0-9]+.s}, p[0-7], \[x[0-9]+\]\n} } } */
+/* { dg-final { scan-assembler {\tld2h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7]/z, \[x[0-9]+\]\n} } } */
+/* { dg-final { scan-assembler {\tld3h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7]/z, \[x[0-9]+\]\n} } } */
+/* { dg-final { scan-assembler {\tld4h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7]/z, \[x[0-9]+\]\n} } } */
+/* { dg-final { scan-assembler {\tst2h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7], \[x[0-9]+\]\n} } } */
+/* { dg-final { scan-assembler {\tst3h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7], \[x[0-9]+\]\n} } } */
+/* { dg-final { scan-assembler {\tst4h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7], \[x[0-9]+\]\n} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_11_run.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_11_run.c
index f9c129801fc..bfab53d9b6b 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_11_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_11_run.c
@@ -1,6 +1,6 @@
/* { dg-do run { target aarch64_sve_hw } } */
-/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
-#define TYPE float
-#define ITYPE int
+#define TYPE _Float16
+#define ITYPE short
#include "sve_struct_vect_7_run.c"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_12.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_12.c
index 37c11b3b29a..47279e0a80e 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_12.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_12.c
@@ -1,13 +1,13 @@
-/* { dg-do compile } */
-/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
+/* { dg-do assemble } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
-#define TYPE double
-#define ITYPE long
+#define TYPE float
+#define ITYPE int
#include "sve_struct_vect_7.c"
-/* { dg-final { scan-assembler {\tld2d\t{z[0-9]+.d - z[0-9]+.d}, p[0-7]/z, \[x[0-9]+\]\n} } } */
-/* { dg-final { scan-assembler {\tld3d\t{z[0-9]+.d - z[0-9]+.d}, p[0-7]/z, \[x[0-9]+\]\n} } } */
-/* { dg-final { scan-assembler {\tld4d\t{z[0-9]+.d - z[0-9]+.d}, p[0-7]/z, \[x[0-9]+\]\n} } } */
-/* { dg-final { scan-assembler {\tst2d\t{z[0-9]+.d - z[0-9]+.d}, p[0-7], \[x[0-9]+\]\n} } } */
-/* { dg-final { scan-assembler {\tst3d\t{z[0-9]+.d - z[0-9]+.d}, p[0-7], \[x[0-9]+\]\n} } } */
-/* { dg-final { scan-assembler {\tst4d\t{z[0-9]+.d - z[0-9]+.d}, p[0-7], \[x[0-9]+\]\n} } } */
+/* { dg-final { scan-assembler {\tld2w\t{z[0-9]+.s - z[0-9]+.s}, p[0-7]/z, \[x[0-9]+\]\n} } } */
+/* { dg-final { scan-assembler {\tld3w\t{z[0-9]+.s - z[0-9]+.s}, p[0-7]/z, \[x[0-9]+\]\n} } } */
+/* { dg-final { scan-assembler {\tld4w\t{z[0-9]+.s - z[0-9]+.s}, p[0-7]/z, \[x[0-9]+\]\n} } } */
+/* { dg-final { scan-assembler {\tst2w\t{z[0-9]+.s - z[0-9]+.s}, p[0-7], \[x[0-9]+\]\n} } } */
+/* { dg-final { scan-assembler {\tst3w\t{z[0-9]+.s - z[0-9]+.s}, p[0-7], \[x[0-9]+\]\n} } } */
+/* { dg-final { scan-assembler {\tst4w\t{z[0-9]+.s - z[0-9]+.s}, p[0-7], \[x[0-9]+\]\n} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_12_run.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_12_run.c
index c7ed3fe2806..74007a938b7 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_12_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_12_run.c
@@ -1,6 +1,6 @@
/* { dg-do run { target aarch64_sve_hw } } */
-/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
-#define TYPE double
-#define ITYPE long
+#define TYPE float
+#define ITYPE int
#include "sve_struct_vect_7_run.c"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_13.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_13.c
index 3e3b9d733e4..5ebf5d8ee38 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_13.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_13.c
@@ -1,66 +1,13 @@
-/* { dg-do compile } */
-/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve -msve-vector-bits=256" } */
-
-#define TYPE unsigned char
-#define NAME(X) qi_##X
-#include "sve_struct_vect_1.c"
-#undef NAME
-#undef TYPE
-
-#define TYPE unsigned short
-#define NAME(X) hi_##X
-#include "sve_struct_vect_1.c"
-#undef NAME
-#undef TYPE
-
-#define TYPE unsigned int
-#define NAME(X) si_##X
-#include "sve_struct_vect_1.c"
-#undef NAME
-#undef TYPE
-
-#define TYPE unsigned long
-#define NAME(X) di_##X
-#include "sve_struct_vect_1.c"
-#undef NAME
-#undef TYPE
-
-#define TYPE float
-#define NAME(X) sf_##X
-#include "sve_struct_vect_1.c"
-#undef NAME
-#undef TYPE
+/* { dg-do assemble } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
#define TYPE double
-#define NAME(X) df_##X
-#include "sve_struct_vect_1.c"
-#undef NAME
-#undef TYPE
-
-/* { dg-final { scan-assembler-times {\tld2b\t{z[0-9]+.b - z[0-9]+.b}, p[0-7]/z, \[x[0-9]+\]\n} 1 } } */
-/* { dg-final { scan-assembler-times {\tld3b\t{z[0-9]+.b - z[0-9]+.b}, p[0-7]/z, \[x[0-9]+\]\n} 1 } } */
-/* { dg-final { scan-assembler-times {\tld4b\t{z[0-9]+.b - z[0-9]+.b}, p[0-7]/z, \[x[0-9]+\]\n} 1 } } */
-/* { dg-final { scan-assembler-times {\tst2b\t{z[0-9]+.b - z[0-9]+.b}, p[0-7], \[x[0-9]+\]\n} 1 } } */
-/* { dg-final { scan-assembler-times {\tst3b\t{z[0-9]+.b - z[0-9]+.b}, p[0-7], \[x[0-9]+\]\n} 1 } } */
-/* { dg-final { scan-assembler-times {\tst4b\t{z[0-9]+.b - z[0-9]+.b}, p[0-7], \[x[0-9]+\]\n} 1 } } */
-
-/* { dg-final { scan-assembler-times {\tld2h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7]/z, \[x[0-9]+\]\n} 1 } } */
-/* { dg-final { scan-assembler-times {\tld3h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7]/z, \[x[0-9]+\]\n} 1 } } */
-/* { dg-final { scan-assembler-times {\tld4h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7]/z, \[x[0-9]+\]\n} 1 } } */
-/* { dg-final { scan-assembler-times {\tst2h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7], \[x[0-9]+\]\n} 1 } } */
-/* { dg-final { scan-assembler-times {\tst3h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7], \[x[0-9]+\]\n} 1 } } */
-/* { dg-final { scan-assembler-times {\tst4h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7], \[x[0-9]+\]\n} 1 } } */
-
-/* { dg-final { scan-assembler-times {\tld2w\t{z[0-9]+.s - z[0-9]+.s}, p[0-7]/z, \[x[0-9]+\]\n} 2 } } */
-/* { dg-final { scan-assembler-times {\tld3w\t{z[0-9]+.s - z[0-9]+.s}, p[0-7]/z, \[x[0-9]+\]\n} 2 } } */
-/* { dg-final { scan-assembler-times {\tld4w\t{z[0-9]+.s - z[0-9]+.s}, p[0-7]/z, \[x[0-9]+\]\n} 2 } } */
-/* { dg-final { scan-assembler-times {\tst2w\t{z[0-9]+.s - z[0-9]+.s}, p[0-7], \[x[0-9]+\]\n} 2 } } */
-/* { dg-final { scan-assembler-times {\tst3w\t{z[0-9]+.s - z[0-9]+.s}, p[0-7], \[x[0-9]+\]\n} 2 } } */
-/* { dg-final { scan-assembler-times {\tst4w\t{z[0-9]+.s - z[0-9]+.s}, p[0-7], \[x[0-9]+\]\n} 2 } } */
-
-/* { dg-final { scan-assembler-times {\tld2d\t{z[0-9]+.d - z[0-9]+.d}, p[0-7]/z, \[x[0-9]+\]\n} 2 } } */
-/* { dg-final { scan-assembler-times {\tld3d\t{z[0-9]+.d - z[0-9]+.d}, p[0-7]/z, \[x[0-9]+\]\n} 2 } } */
-/* { dg-final { scan-assembler-times {\tld4d\t{z[0-9]+.d - z[0-9]+.d}, p[0-7]/z, \[x[0-9]+\]\n} 2 } } */
-/* { dg-final { scan-assembler-times {\tst2d\t{z[0-9]+.d - z[0-9]+.d}, p[0-7], \[x[0-9]+\]\n} 2 } } */
-/* { dg-final { scan-assembler-times {\tst3d\t{z[0-9]+.d - z[0-9]+.d}, p[0-7], \[x[0-9]+\]\n} 2 } } */
-/* { dg-final { scan-assembler-times {\tst4d\t{z[0-9]+.d - z[0-9]+.d}, p[0-7], \[x[0-9]+\]\n} 2 } } */
+#define ITYPE long
+#include "sve_struct_vect_7.c"
+
+/* { dg-final { scan-assembler {\tld2d\t{z[0-9]+.d - z[0-9]+.d}, p[0-7]/z, \[x[0-9]+\]\n} } } */
+/* { dg-final { scan-assembler {\tld3d\t{z[0-9]+.d - z[0-9]+.d}, p[0-7]/z, \[x[0-9]+\]\n} } } */
+/* { dg-final { scan-assembler {\tld4d\t{z[0-9]+.d - z[0-9]+.d}, p[0-7]/z, \[x[0-9]+\]\n} } } */
+/* { dg-final { scan-assembler {\tst2d\t{z[0-9]+.d - z[0-9]+.d}, p[0-7], \[x[0-9]+\]\n} } } */
+/* { dg-final { scan-assembler {\tst3d\t{z[0-9]+.d - z[0-9]+.d}, p[0-7], \[x[0-9]+\]\n} } } */
+/* { dg-final { scan-assembler {\tst4d\t{z[0-9]+.d - z[0-9]+.d}, p[0-7], \[x[0-9]+\]\n} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_13_run.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_13_run.c
new file mode 100644
index 00000000000..6fb5329913b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_13_run.c
@@ -0,0 +1,6 @@
+/* { dg-do run { target aarch64_sve_hw } } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
+
+#define TYPE double
+#define ITYPE long
+#include "sve_struct_vect_7_run.c"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_14.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_14.c
index c3e81f500e0..46126e841dc 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_14.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_14.c
@@ -1,7 +1,47 @@
-/* { dg-do compile } */
-/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve -msve-vector-bits=512" } */
+/* { dg-do assemble } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve -msve-vector-bits=256 --save-temps" } */
-#include "sve_struct_vect_13.c"
+#define TYPE unsigned char
+#define NAME(X) qi_##X
+#include "sve_struct_vect_1.c"
+#undef NAME
+#undef TYPE
+
+#define TYPE unsigned short
+#define NAME(X) hi_##X
+#include "sve_struct_vect_1.c"
+#undef NAME
+#undef TYPE
+
+#define TYPE unsigned int
+#define NAME(X) si_##X
+#include "sve_struct_vect_1.c"
+#undef NAME
+#undef TYPE
+
+#define TYPE unsigned long
+#define NAME(X) di_##X
+#include "sve_struct_vect_1.c"
+#undef NAME
+#undef TYPE
+
+#define TYPE _Float16
+#define NAME(X) hf_##X
+#include "sve_struct_vect_1.c"
+#undef NAME
+#undef TYPE
+
+#define TYPE float
+#define NAME(X) sf_##X
+#include "sve_struct_vect_1.c"
+#undef NAME
+#undef TYPE
+
+#define TYPE double
+#define NAME(X) df_##X
+#include "sve_struct_vect_1.c"
+#undef NAME
+#undef TYPE
/* { dg-final { scan-assembler-times {\tld2b\t{z[0-9]+.b - z[0-9]+.b}, p[0-7]/z, \[x[0-9]+\]\n} 1 } } */
/* { dg-final { scan-assembler-times {\tld3b\t{z[0-9]+.b - z[0-9]+.b}, p[0-7]/z, \[x[0-9]+\]\n} 1 } } */
@@ -10,12 +50,12 @@
/* { dg-final { scan-assembler-times {\tst3b\t{z[0-9]+.b - z[0-9]+.b}, p[0-7], \[x[0-9]+\]\n} 1 } } */
/* { dg-final { scan-assembler-times {\tst4b\t{z[0-9]+.b - z[0-9]+.b}, p[0-7], \[x[0-9]+\]\n} 1 } } */
-/* { dg-final { scan-assembler-times {\tld2h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7]/z, \[x[0-9]+\]\n} 1 } } */
-/* { dg-final { scan-assembler-times {\tld3h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7]/z, \[x[0-9]+\]\n} 1 } } */
-/* { dg-final { scan-assembler-times {\tld4h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7]/z, \[x[0-9]+\]\n} 1 } } */
-/* { dg-final { scan-assembler-times {\tst2h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7], \[x[0-9]+\]\n} 1 } } */
-/* { dg-final { scan-assembler-times {\tst3h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7], \[x[0-9]+\]\n} 1 } } */
-/* { dg-final { scan-assembler-times {\tst4h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7], \[x[0-9]+\]\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tld2h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7]/z, \[x[0-9]+\]\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tld3h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7]/z, \[x[0-9]+\]\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tld4h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7]/z, \[x[0-9]+\]\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tst2h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7], \[x[0-9]+\]\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tst3h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7], \[x[0-9]+\]\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tst4h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7], \[x[0-9]+\]\n} 2 } } */
/* { dg-final { scan-assembler-times {\tld2w\t{z[0-9]+.s - z[0-9]+.s}, p[0-7]/z, \[x[0-9]+\]\n} 2 } } */
/* { dg-final { scan-assembler-times {\tld3w\t{z[0-9]+.s - z[0-9]+.s}, p[0-7]/z, \[x[0-9]+\]\n} 2 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_15.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_15.c
index 635910e11a0..c1ccf7f09bb 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_15.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_15.c
@@ -1,7 +1,7 @@
-/* { dg-do compile } */
-/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve -msve-vector-bits=1024" } */
+/* { dg-do assemble } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve -msve-vector-bits=512 --save-temps" } */
-#include "sve_struct_vect_13.c"
+#include "sve_struct_vect_14.c"
/* { dg-final { scan-assembler-times {\tld2b\t{z[0-9]+.b - z[0-9]+.b}, p[0-7]/z, \[x[0-9]+\]\n} 1 } } */
/* { dg-final { scan-assembler-times {\tld3b\t{z[0-9]+.b - z[0-9]+.b}, p[0-7]/z, \[x[0-9]+\]\n} 1 } } */
@@ -10,12 +10,12 @@
/* { dg-final { scan-assembler-times {\tst3b\t{z[0-9]+.b - z[0-9]+.b}, p[0-7], \[x[0-9]+\]\n} 1 } } */
/* { dg-final { scan-assembler-times {\tst4b\t{z[0-9]+.b - z[0-9]+.b}, p[0-7], \[x[0-9]+\]\n} 1 } } */
-/* { dg-final { scan-assembler-times {\tld2h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7]/z, \[x[0-9]+\]\n} 1 } } */
-/* { dg-final { scan-assembler-times {\tld3h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7]/z, \[x[0-9]+\]\n} 1 } } */
-/* { dg-final { scan-assembler-times {\tld4h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7]/z, \[x[0-9]+\]\n} 1 } } */
-/* { dg-final { scan-assembler-times {\tst2h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7], \[x[0-9]+\]\n} 1 } } */
-/* { dg-final { scan-assembler-times {\tst3h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7], \[x[0-9]+\]\n} 1 } } */
-/* { dg-final { scan-assembler-times {\tst4h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7], \[x[0-9]+\]\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tld2h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7]/z, \[x[0-9]+\]\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tld3h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7]/z, \[x[0-9]+\]\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tld4h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7]/z, \[x[0-9]+\]\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tst2h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7], \[x[0-9]+\]\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tst3h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7], \[x[0-9]+\]\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tst4h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7], \[x[0-9]+\]\n} 2 } } */
/* { dg-final { scan-assembler-times {\tld2w\t{z[0-9]+.s - z[0-9]+.s}, p[0-7]/z, \[x[0-9]+\]\n} 2 } } */
/* { dg-final { scan-assembler-times {\tld3w\t{z[0-9]+.s - z[0-9]+.s}, p[0-7]/z, \[x[0-9]+\]\n} 2 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_16.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_16.c
index 9afc0708fb1..61985f98974 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_16.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_16.c
@@ -1,7 +1,7 @@
-/* { dg-do compile } */
-/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve -msve-vector-bits=2048" } */
+/* { dg-do assemble } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve -msve-vector-bits=1024 --save-temps" } */
-#include "sve_struct_vect_13.c"
+#include "sve_struct_vect_14.c"
/* { dg-final { scan-assembler-times {\tld2b\t{z[0-9]+.b - z[0-9]+.b}, p[0-7]/z, \[x[0-9]+\]\n} 1 } } */
/* { dg-final { scan-assembler-times {\tld3b\t{z[0-9]+.b - z[0-9]+.b}, p[0-7]/z, \[x[0-9]+\]\n} 1 } } */
@@ -10,12 +10,12 @@
/* { dg-final { scan-assembler-times {\tst3b\t{z[0-9]+.b - z[0-9]+.b}, p[0-7], \[x[0-9]+\]\n} 1 } } */
/* { dg-final { scan-assembler-times {\tst4b\t{z[0-9]+.b - z[0-9]+.b}, p[0-7], \[x[0-9]+\]\n} 1 } } */
-/* { dg-final { scan-assembler-times {\tld2h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7]/z, \[x[0-9]+\]\n} 1 } } */
-/* { dg-final { scan-assembler-times {\tld3h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7]/z, \[x[0-9]+\]\n} 1 } } */
-/* { dg-final { scan-assembler-times {\tld4h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7]/z, \[x[0-9]+\]\n} 1 } } */
-/* { dg-final { scan-assembler-times {\tst2h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7], \[x[0-9]+\]\n} 1 } } */
-/* { dg-final { scan-assembler-times {\tst3h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7], \[x[0-9]+\]\n} 1 } } */
-/* { dg-final { scan-assembler-times {\tst4h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7], \[x[0-9]+\]\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tld2h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7]/z, \[x[0-9]+\]\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tld3h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7]/z, \[x[0-9]+\]\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tld4h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7]/z, \[x[0-9]+\]\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tst2h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7], \[x[0-9]+\]\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tst3h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7], \[x[0-9]+\]\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tst4h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7], \[x[0-9]+\]\n} 2 } } */
/* { dg-final { scan-assembler-times {\tld2w\t{z[0-9]+.s - z[0-9]+.s}, p[0-7]/z, \[x[0-9]+\]\n} 2 } } */
/* { dg-final { scan-assembler-times {\tld3w\t{z[0-9]+.s - z[0-9]+.s}, p[0-7]/z, \[x[0-9]+\]\n} 2 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_17.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_17.c
index 80c99961791..6dd2878c552 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_17.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_17.c
@@ -1,47 +1,32 @@
-/* { dg-do compile } */
-/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
+/* { dg-do assemble } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve -msve-vector-bits=2048 --save-temps" } */
-#define N 2000
+#include "sve_struct_vect_14.c"
-#define TEST_LOOP(NAME, TYPE) \
- void __attribute__((weak)) \
- NAME (TYPE *restrict dest, TYPE *restrict src) \
- { \
- for (int i = 0; i < N; ++i) \
- dest[i] += src[i * 2]; \
- }
+/* { dg-final { scan-assembler-times {\tld2b\t{z[0-9]+.b - z[0-9]+.b}, p[0-7]/z, \[x[0-9]+\]\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tld3b\t{z[0-9]+.b - z[0-9]+.b}, p[0-7]/z, \[x[0-9]+\]\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tld4b\t{z[0-9]+.b - z[0-9]+.b}, p[0-7]/z, \[x[0-9]+\]\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tst2b\t{z[0-9]+.b - z[0-9]+.b}, p[0-7], \[x[0-9]+\]\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tst3b\t{z[0-9]+.b - z[0-9]+.b}, p[0-7], \[x[0-9]+\]\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tst4b\t{z[0-9]+.b - z[0-9]+.b}, p[0-7], \[x[0-9]+\]\n} 1 } } */
-#define TEST(NAME) \
- TEST_LOOP (NAME##_i8, signed char) \
- TEST_LOOP (NAME##_i16, unsigned short) \
- TEST_LOOP (NAME##_f32, float) \
- TEST_LOOP (NAME##_f64, double)
+/* { dg-final { scan-assembler-times {\tld2h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7]/z, \[x[0-9]+\]\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tld3h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7]/z, \[x[0-9]+\]\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tld4h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7]/z, \[x[0-9]+\]\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tst2h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7], \[x[0-9]+\]\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tst3h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7], \[x[0-9]+\]\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tst4h\t{z[0-9]+.h - z[0-9]+.h}, p[0-7], \[x[0-9]+\]\n} 2 } } */
-TEST (test)
+/* { dg-final { scan-assembler-times {\tld2w\t{z[0-9]+.s - z[0-9]+.s}, p[0-7]/z, \[x[0-9]+\]\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tld3w\t{z[0-9]+.s - z[0-9]+.s}, p[0-7]/z, \[x[0-9]+\]\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tld4w\t{z[0-9]+.s - z[0-9]+.s}, p[0-7]/z, \[x[0-9]+\]\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tst2w\t{z[0-9]+.s - z[0-9]+.s}, p[0-7], \[x[0-9]+\]\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tst3w\t{z[0-9]+.s - z[0-9]+.s}, p[0-7], \[x[0-9]+\]\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tst4w\t{z[0-9]+.s - z[0-9]+.s}, p[0-7], \[x[0-9]+\]\n} 2 } } */
-/* Check the vectorized loop. */
-/* { dg-final { scan-assembler-times {\tld1b\t} 1 } } */
-/* { dg-final { scan-assembler-times {\tld2b\t} 1 } } */
-/* { dg-final { scan-assembler-times {\tst1b\t} 1 } } */
-/* { dg-final { scan-assembler-times {\tld1h\t} 1 } } */
-/* { dg-final { scan-assembler-times {\tld2h\t} 1 } } */
-/* { dg-final { scan-assembler-times {\tst1h\t} 1 } } */
-/* { dg-final { scan-assembler-times {\tld1w\t} 1 } } */
-/* { dg-final { scan-assembler-times {\tld2w\t} 1 } } */
-/* { dg-final { scan-assembler-times {\tst1w\t} 1 } } */
-/* { dg-final { scan-assembler-times {\tld1d\t} 1 } } */
-/* { dg-final { scan-assembler-times {\tld2d\t} 1 } } */
-/* { dg-final { scan-assembler-times {\tst1d\t} 1 } } */
-
-/* Check the scalar tail. */
-/* { dg-final { scan-assembler-times {\tldrb\tw} 2 } } */
-/* { dg-final { scan-assembler-times {\tstrb\tw} 1 } } */
-/* { dg-final { scan-assembler-times {\tldrh\tw} 2 } } */
-/* { dg-final { scan-assembler-times {\tstrh\tw} 1 } } */
-/* { dg-final { scan-assembler-times {\tldr\ts} 2 } } */
-/* { dg-final { scan-assembler-times {\tstr\ts} 1 } } */
-/* { dg-final { scan-assembler-times {\tldr\td} 2 } } */
-/* { dg-final { scan-assembler-times {\tstr\td} 1 } } */
-
-/* The only branches should be in the vectorized loop. */
-/* { dg-final { scan-assembler-times {\tb[a-z]+\t} 4 } } */
+/* { dg-final { scan-assembler-times {\tld2d\t{z[0-9]+.d - z[0-9]+.d}, p[0-7]/z, \[x[0-9]+\]\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tld3d\t{z[0-9]+.d - z[0-9]+.d}, p[0-7]/z, \[x[0-9]+\]\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tld4d\t{z[0-9]+.d - z[0-9]+.d}, p[0-7]/z, \[x[0-9]+\]\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tst2d\t{z[0-9]+.d - z[0-9]+.d}, p[0-7], \[x[0-9]+\]\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tst3d\t{z[0-9]+.d - z[0-9]+.d}, p[0-7], \[x[0-9]+\]\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tst4d\t{z[0-9]+.d - z[0-9]+.d}, p[0-7], \[x[0-9]+\]\n} 2 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_17_run.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_17_run.c
deleted file mode 100644
index 970c6de6f08..00000000000
--- a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_17_run.c
+++ /dev/null
@@ -1,32 +0,0 @@
-/* { dg-do run { target aarch64_sve_hw } } */
-/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
-
-#include "sve_struct_vect_17.c"
-
-volatile int x;
-
-#undef TEST_LOOP
-#define TEST_LOOP(NAME, TYPE) \
- { \
- TYPE out[N]; \
- TYPE in[N * 2]; \
- for (int i = 0; i < N; ++i) \
- out[i] = i * 7 / 2; \
- for (int i = 0; i < N * 2; ++i) \
- in[i] = i * 9 / 2; \
- NAME (out, in); \
- for (int i = 0; i < N; ++i) \
- { \
- TYPE expected = i * 7 / 2 + in[i * 2]; \
- if (out[i] != expected) \
- __builtin_abort (); \
- x += 1; \
- } \
- }
-
-int
-main (void)
-{
- TEST (test);
- return 0;
-}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_18.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_18.c
index 90e0b53c7df..fd0ce83ffac 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_18.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_18.c
@@ -4,11 +4,11 @@
#define N 2000
#define TEST_LOOP(NAME, TYPE) \
- void __attribute__((weak)) \
+ void __attribute__ ((noinline, noclone)) \
NAME (TYPE *restrict dest, TYPE *restrict src) \
{ \
for (int i = 0; i < N; ++i) \
- dest[i] += src[i * 4]; \
+ dest[i] += src[i * 3]; \
}
#define TEST(NAME) \
@@ -21,16 +21,16 @@ TEST (test)
/* Check the vectorized loop. */
/* { dg-final { scan-assembler-times {\tld1b\t} 1 } } */
-/* { dg-final { scan-assembler-times {\tld4b\t} 1 } } */
+/* { dg-final { scan-assembler-times {\tld3b\t} 1 } } */
/* { dg-final { scan-assembler-times {\tst1b\t} 1 } } */
/* { dg-final { scan-assembler-times {\tld1h\t} 1 } } */
-/* { dg-final { scan-assembler-times {\tld4h\t} 1 } } */
+/* { dg-final { scan-assembler-times {\tld3h\t} 1 } } */
/* { dg-final { scan-assembler-times {\tst1h\t} 1 } } */
/* { dg-final { scan-assembler-times {\tld1w\t} 1 } } */
-/* { dg-final { scan-assembler-times {\tld4w\t} 1 } } */
+/* { dg-final { scan-assembler-times {\tld3w\t} 1 } } */
/* { dg-final { scan-assembler-times {\tst1w\t} 1 } } */
/* { dg-final { scan-assembler-times {\tld1d\t} 1 } } */
-/* { dg-final { scan-assembler-times {\tld4d\t} 1 } } */
+/* { dg-final { scan-assembler-times {\tld3d\t} 1 } } */
/* { dg-final { scan-assembler-times {\tst1d\t} 1 } } */
/* Check the scalar tail. */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_18_run.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_18_run.c
index f7db5aea413..6467fa23b83 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_18_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_18_run.c
@@ -3,28 +3,32 @@
#include "sve_struct_vect_18.c"
-volatile int x;
-
#undef TEST_LOOP
#define TEST_LOOP(NAME, TYPE) \
{ \
TYPE out[N]; \
- TYPE in[N * 4]; \
+ TYPE in[N * 3]; \
for (int i = 0; i < N; ++i) \
- out[i] = i * 7 / 2; \
- for (int i = 0; i < N * 4; ++i) \
- in[i] = i * 9 / 2; \
+ { \
+ out[i] = i * 7 / 2; \
+ asm volatile ("" ::: "memory"); \
+ } \
+ for (int i = 0; i < N * 3; ++i) \
+ { \
+ in[i] = i * 9 / 2; \
+ asm volatile ("" ::: "memory"); \
+ } \
NAME (out, in); \
for (int i = 0; i < N; ++i) \
{ \
- TYPE expected = i * 7 / 2 + in[i * 4]; \
+ TYPE expected = i * 7 / 2 + in[i * 3]; \
if (out[i] != expected) \
__builtin_abort (); \
- x += 1; \
+ asm volatile ("" ::: "memory"); \
} \
}
-int
+int __attribute__ ((optimize (1)))
main (void)
{
TEST (test);
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_19.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_19.c
index 3430459a2f3..2a099d05d65 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_19.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_19.c
@@ -2,11 +2,11 @@
/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
#define TEST_LOOP(NAME, TYPE) \
- void __attribute__((weak)) \
+ void __attribute__ ((noinline, noclone)) \
NAME (TYPE *restrict dest, TYPE *restrict src, int n) \
{ \
for (int i = 0; i < n; ++i) \
- dest[i] += src[i * 2]; \
+ dest[i] += src[i * 3]; \
}
#define TEST(NAME) \
@@ -19,16 +19,16 @@ TEST (test)
/* Check the vectorized loop. */
/* { dg-final { scan-assembler-times {\tld1b\t} 1 } } */
-/* { dg-final { scan-assembler-times {\tld2b\t} 1 } } */
+/* { dg-final { scan-assembler-times {\tld3b\t} 1 } } */
/* { dg-final { scan-assembler-times {\tst1b\t} 1 } } */
/* { dg-final { scan-assembler-times {\tld1h\t} 1 } } */
-/* { dg-final { scan-assembler-times {\tld2h\t} 1 } } */
+/* { dg-final { scan-assembler-times {\tld3h\t} 1 } } */
/* { dg-final { scan-assembler-times {\tst1h\t} 1 } } */
/* { dg-final { scan-assembler-times {\tld1w\t} 1 } } */
-/* { dg-final { scan-assembler-times {\tld2w\t} 1 } } */
+/* { dg-final { scan-assembler-times {\tld3w\t} 1 } } */
/* { dg-final { scan-assembler-times {\tst1w\t} 1 } } */
/* { dg-final { scan-assembler-times {\tld1d\t} 1 } } */
-/* { dg-final { scan-assembler-times {\tld2d\t} 1 } } */
+/* { dg-final { scan-assembler-times {\tld3d\t} 1 } } */
/* { dg-final { scan-assembler-times {\tst1d\t} 1 } } */
/* Check the scalar tail. */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_19_run.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_19_run.c
index 94593cef684..f9bf095d3a5 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_19_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_19_run.c
@@ -3,37 +3,41 @@
#include "sve_struct_vect_19.c"
-volatile int x;
-
#define N 1000
#undef TEST_LOOP
-#define TEST_LOOP(NAME, TYPE) \
- { \
- TYPE out[N]; \
- TYPE in[N * 2]; \
- int counts[] = { 0, 1, N - 1 }; \
- for (int j = 0; j < 3; ++j) \
- { \
- int count = counts[j]; \
- for (int i = 0; i < N; ++i) \
- out[i] = i * 7 / 2; \
- for (int i = 0; i < N * 2; ++i) \
- in[i] = i * 9 / 2; \
- NAME (out, in, count); \
- for (int i = 0; i < N; ++i) \
- { \
- TYPE expected = i * 7 / 2; \
- if (i < count) \
- expected += in[i * 2]; \
- if (out[i] != expected) \
- __builtin_abort (); \
- x += 1; \
- } \
- } \
+#define TEST_LOOP(NAME, TYPE) \
+ { \
+ TYPE out[N]; \
+ TYPE in[N * 3]; \
+ int counts[] = { 0, 1, N - 1 }; \
+ for (int j = 0; j < 3; ++j) \
+ { \
+ int count = counts[j]; \
+ for (int i = 0; i < N; ++i) \
+ { \
+ out[i] = i * 7 / 2; \
+ asm volatile ("" ::: "memory"); \
+ } \
+ for (int i = 0; i < N * 3; ++i) \
+ { \
+ in[i] = i * 9 / 2; \
+ asm volatile ("" ::: "memory"); \
+ } \
+ NAME (out, in, count); \
+ for (int i = 0; i < N; ++i) \
+ { \
+ TYPE expected = i * 7 / 2; \
+ if (i < count) \
+ expected += in[i * 3]; \
+ if (out[i] != expected) \
+ __builtin_abort (); \
+ asm volatile ("" ::: "memory"); \
+ } \
+ } \
}
-int
+int __attribute__ ((optimize (1)))
main (void)
{
TEST (test);
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_1_run.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_1_run.c
index 1f99c676586..a94142f2c9e 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_1_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_1_run.c
@@ -1,10 +1,8 @@
/* { dg-do run { target aarch64_sve_hw } } */
-/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
#include "sve_struct_vect_1.c"
-extern void abort() __attribute__((noreturn));
-
TYPE a[N], b[N], c[N], d[N], e[N * 4];
void __attribute__ ((noinline, noclone))
@@ -19,10 +17,10 @@ check_array (TYPE *array, int n, TYPE base, TYPE step)
{
for (int i = 0; i < n; ++i)
if (array[i] != (TYPE) (base + step * i))
- abort ();
+ __builtin_abort ();
}
-int
+int __attribute__ ((optimize (1)))
main (void)
{
init_array (e, 2 * N, 11, 5);
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_2.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_2.c
index 8e5a96361f6..0d51808552e 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_2.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_2.c
@@ -1,5 +1,5 @@
-/* { dg-do compile } */
-/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
+/* { dg-do assemble } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
#define TYPE unsigned short
#include "sve_struct_vect_1.c"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_20.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_20.c
index aad0e104379..3a2907f4ad9 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_20.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_20.c
@@ -1,12 +1,14 @@
/* { dg-do compile } */
/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
+#define N 2000
+
#define TEST_LOOP(NAME, TYPE) \
- void __attribute__((weak)) \
- NAME (TYPE *restrict dest, TYPE *restrict src, int n) \
+ void __attribute__ ((noinline, noclone)) \
+ NAME (TYPE *restrict dest, TYPE *restrict src) \
{ \
- for (int i = 0; i < n; ++i) \
- dest[i] += src[i * 4]; \
+ for (int i = 0; i < N; ++i) \
+ dest[i] += src[i * 2]; \
}
#define TEST(NAME) \
@@ -19,16 +21,16 @@ TEST (test)
/* Check the vectorized loop. */
/* { dg-final { scan-assembler-times {\tld1b\t} 1 } } */
-/* { dg-final { scan-assembler-times {\tld4b\t} 1 } } */
+/* { dg-final { scan-assembler-times {\tld2b\t} 1 } } */
/* { dg-final { scan-assembler-times {\tst1b\t} 1 } } */
/* { dg-final { scan-assembler-times {\tld1h\t} 1 } } */
-/* { dg-final { scan-assembler-times {\tld4h\t} 1 } } */
+/* { dg-final { scan-assembler-times {\tld2h\t} 1 } } */
/* { dg-final { scan-assembler-times {\tst1h\t} 1 } } */
/* { dg-final { scan-assembler-times {\tld1w\t} 1 } } */
-/* { dg-final { scan-assembler-times {\tld4w\t} 1 } } */
+/* { dg-final { scan-assembler-times {\tld2w\t} 1 } } */
/* { dg-final { scan-assembler-times {\tst1w\t} 1 } } */
/* { dg-final { scan-assembler-times {\tld1d\t} 1 } } */
-/* { dg-final { scan-assembler-times {\tld4d\t} 1 } } */
+/* { dg-final { scan-assembler-times {\tld2d\t} 1 } } */
/* { dg-final { scan-assembler-times {\tst1d\t} 1 } } */
/* Check the scalar tail. */
@@ -41,7 +43,5 @@ TEST (test)
/* { dg-final { scan-assembler-times {\tldr\td} 2 } } */
/* { dg-final { scan-assembler-times {\tstr\td} 1 } } */
-/* Each function should have three branches: one directly to the exit
- (n <= 0), one to the single scalar epilogue iteration (n == 1),
- and one branch-back for the vectorized loop. */
-/* { dg-final { scan-assembler-times {\tb[a-z]+\t} 12 } } */
+/* The only branches should be in the vectorized loop. */
+/* { dg-final { scan-assembler-times {\tb[a-z]+\t} 4 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_20_run.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_20_run.c
index 3be63364455..de563c98c1f 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_20_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_20_run.c
@@ -3,37 +3,32 @@
#include "sve_struct_vect_20.c"
-volatile int x;
-
-#define N 1000
-
#undef TEST_LOOP
#define TEST_LOOP(NAME, TYPE) \
{ \
TYPE out[N]; \
- TYPE in[N * 4]; \
- int counts[] = { 0, 1, N - 1 }; \
- for (int j = 0; j < 3; ++j) \
+ TYPE in[N * 2]; \
+ for (int i = 0; i < N; ++i) \
+ { \
+ out[i] = i * 7 / 2; \
+ asm volatile ("" ::: "memory"); \
+ } \
+ for (int i = 0; i < N * 2; ++i) \
+ { \
+ in[i] = i * 9 / 2; \
+ asm volatile ("" ::: "memory"); \
+ } \
+ NAME (out, in); \
+ for (int i = 0; i < N; ++i) \
{ \
- int count = counts[j]; \
- for (int i = 0; i < N; ++i) \
- out[i] = i * 7 / 2; \
- for (int i = 0; i < N * 4; ++i) \
- in[i] = i * 9 / 2; \
- NAME (out, in, count); \
- for (int i = 0; i < N; ++i) \
- { \
- TYPE expected = i * 7 / 2; \
- if (i < count) \
- expected += in[i * 4]; \
- if (out[i] != expected) \
- __builtin_abort (); \
- x += 1; \
- } \
+ TYPE expected = i * 7 / 2 + in[i * 2]; \
+ if (out[i] != expected) \
+ __builtin_abort (); \
+ asm volatile ("" ::: "memory"); \
} \
}
-int
+int __attribute__ ((optimize (1)))
main (void)
{
TEST (test);
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_21.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_21.c
index ac3a7dd2383..bb29747b0c1 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_21.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_21.c
@@ -1,14 +1,12 @@
/* { dg-do compile } */
/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
-#define N 2000
-
#define TEST_LOOP(NAME, TYPE) \
- void __attribute__((weak)) \
- NAME (TYPE *restrict dest, TYPE *restrict src) \
+ void __attribute__ ((noinline, noclone)) \
+ NAME (TYPE *restrict dest, TYPE *restrict src, int n) \
{ \
- for (int i = 0; i < N; ++i) \
- dest[i] += src[i * 3]; \
+ for (int i = 0; i < n; ++i) \
+ dest[i] += src[i * 2]; \
}
#define TEST(NAME) \
@@ -21,16 +19,16 @@ TEST (test)
/* Check the vectorized loop. */
/* { dg-final { scan-assembler-times {\tld1b\t} 1 } } */
-/* { dg-final { scan-assembler-times {\tld3b\t} 1 } } */
+/* { dg-final { scan-assembler-times {\tld2b\t} 1 } } */
/* { dg-final { scan-assembler-times {\tst1b\t} 1 } } */
/* { dg-final { scan-assembler-times {\tld1h\t} 1 } } */
-/* { dg-final { scan-assembler-times {\tld3h\t} 1 } } */
+/* { dg-final { scan-assembler-times {\tld2h\t} 1 } } */
/* { dg-final { scan-assembler-times {\tst1h\t} 1 } } */
/* { dg-final { scan-assembler-times {\tld1w\t} 1 } } */
-/* { dg-final { scan-assembler-times {\tld3w\t} 1 } } */
+/* { dg-final { scan-assembler-times {\tld2w\t} 1 } } */
/* { dg-final { scan-assembler-times {\tst1w\t} 1 } } */
/* { dg-final { scan-assembler-times {\tld1d\t} 1 } } */
-/* { dg-final { scan-assembler-times {\tld3d\t} 1 } } */
+/* { dg-final { scan-assembler-times {\tld2d\t} 1 } } */
/* { dg-final { scan-assembler-times {\tst1d\t} 1 } } */
/* Check the scalar tail. */
@@ -43,5 +41,7 @@ TEST (test)
/* { dg-final { scan-assembler-times {\tldr\td} 2 } } */
/* { dg-final { scan-assembler-times {\tstr\td} 1 } } */
-/* The only branches should be in the vectorized loop. */
-/* { dg-final { scan-assembler-times {\tb[a-z]+\t} 4 } } */
+/* Each function should have three branches: one directly to the exit
+ (n <= 0), one to the single scalar epilogue iteration (n == 1),
+ and one branch-back for the vectorized loop. */
+/* { dg-final { scan-assembler-times {\tb[a-z]+\t} 12 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_21_run.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_21_run.c
index 94d72d1835a..6f9a4e3dc32 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_21_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_21_run.c
@@ -3,28 +3,41 @@
#include "sve_struct_vect_21.c"
-volatile int x;
+#define N 1000
#undef TEST_LOOP
-#define TEST_LOOP(NAME, TYPE) \
- { \
- TYPE out[N]; \
- TYPE in[N * 3]; \
- for (int i = 0; i < N; ++i) \
- out[i] = i * 7 / 2; \
- for (int i = 0; i < N * 3; ++i) \
- in[i] = i * 9 / 2; \
- NAME (out, in); \
- for (int i = 0; i < N; ++i) \
- { \
- TYPE expected = i * 7 / 2 + in[i * 3]; \
- if (out[i] != expected) \
- __builtin_abort (); \
- x += 1; \
- } \
+#define TEST_LOOP(NAME, TYPE) \
+ { \
+ TYPE out[N]; \
+ TYPE in[N * 2]; \
+ int counts[] = { 0, 1, N - 1 }; \
+ for (int j = 0; j < 3; ++j) \
+ { \
+ int count = counts[j]; \
+ for (int i = 0; i < N; ++i) \
+ { \
+ out[i] = i * 7 / 2; \
+ asm volatile ("" ::: "memory"); \
+ } \
+ for (int i = 0; i < N * 2; ++i) \
+ { \
+ in[i] = i * 9 / 2; \
+ asm volatile ("" ::: "memory"); \
+ } \
+ NAME (out, in, count); \
+ for (int i = 0; i < N; ++i) \
+ { \
+ TYPE expected = i * 7 / 2; \
+ if (i < count) \
+ expected += in[i * 2]; \
+ if (out[i] != expected) \
+ __builtin_abort (); \
+ asm volatile ("" ::: "memory"); \
+ } \
+ } \
}
-int
+int __attribute__ ((optimize (1)))
main (void)
{
TEST (test);
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_22.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_22.c
index c17766c7d23..8ee25a0e279 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_22.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_22.c
@@ -1,12 +1,14 @@
/* { dg-do compile } */
/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
+#define N 2000
+
#define TEST_LOOP(NAME, TYPE) \
- void __attribute__((weak)) \
- NAME (TYPE *restrict dest, TYPE *restrict src, int n) \
+ void __attribute__ ((noinline, noclone)) \
+ NAME (TYPE *restrict dest, TYPE *restrict src) \
{ \
- for (int i = 0; i < n; ++i) \
- dest[i] += src[i * 3]; \
+ for (int i = 0; i < N; ++i) \
+ dest[i] += src[i * 4]; \
}
#define TEST(NAME) \
@@ -19,16 +21,16 @@ TEST (test)
/* Check the vectorized loop. */
/* { dg-final { scan-assembler-times {\tld1b\t} 1 } } */
-/* { dg-final { scan-assembler-times {\tld3b\t} 1 } } */
+/* { dg-final { scan-assembler-times {\tld4b\t} 1 } } */
/* { dg-final { scan-assembler-times {\tst1b\t} 1 } } */
/* { dg-final { scan-assembler-times {\tld1h\t} 1 } } */
-/* { dg-final { scan-assembler-times {\tld3h\t} 1 } } */
+/* { dg-final { scan-assembler-times {\tld4h\t} 1 } } */
/* { dg-final { scan-assembler-times {\tst1h\t} 1 } } */
/* { dg-final { scan-assembler-times {\tld1w\t} 1 } } */
-/* { dg-final { scan-assembler-times {\tld3w\t} 1 } } */
+/* { dg-final { scan-assembler-times {\tld4w\t} 1 } } */
/* { dg-final { scan-assembler-times {\tst1w\t} 1 } } */
/* { dg-final { scan-assembler-times {\tld1d\t} 1 } } */
-/* { dg-final { scan-assembler-times {\tld3d\t} 1 } } */
+/* { dg-final { scan-assembler-times {\tld4d\t} 1 } } */
/* { dg-final { scan-assembler-times {\tst1d\t} 1 } } */
/* Check the scalar tail. */
@@ -41,7 +43,5 @@ TEST (test)
/* { dg-final { scan-assembler-times {\tldr\td} 2 } } */
/* { dg-final { scan-assembler-times {\tstr\td} 1 } } */
-/* Each function should have three branches: one directly to the exit
- (n <= 0), one to the single scalar epilogue iteration (n == 1),
- and one branch-back for the vectorized loop. */
-/* { dg-final { scan-assembler-times {\tb[a-z]+\t} 12 } } */
+/* The only branches should be in the vectorized loop. */
+/* { dg-final { scan-assembler-times {\tb[a-z]+\t} 4 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_22_run.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_22_run.c
index 550364b16d1..1c3699292c0 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_22_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_22_run.c
@@ -3,37 +3,32 @@
#include "sve_struct_vect_22.c"
-volatile int x;
-
-#define N 1000
-
#undef TEST_LOOP
#define TEST_LOOP(NAME, TYPE) \
{ \
TYPE out[N]; \
- TYPE in[N * 3]; \
- int counts[] = { 0, 1, N - 1 }; \
- for (int j = 0; j < 3; ++j) \
+ TYPE in[N * 4]; \
+ for (int i = 0; i < N; ++i) \
+ { \
+ out[i] = i * 7 / 2; \
+ asm volatile ("" ::: "memory"); \
+ } \
+ for (int i = 0; i < N * 4; ++i) \
+ { \
+ in[i] = i * 9 / 2; \
+ asm volatile ("" ::: "memory"); \
+ } \
+ NAME (out, in); \
+ for (int i = 0; i < N; ++i) \
{ \
- int count = counts[j]; \
- for (int i = 0; i < N; ++i) \
- out[i] = i * 7 / 2; \
- for (int i = 0; i < N * 3; ++i) \
- in[i] = i * 9 / 2; \
- NAME (out, in, count); \
- for (int i = 0; i < N; ++i) \
- { \
- TYPE expected = i * 7 / 2; \
- if (i < count) \
- expected += in[i * 3]; \
- if (out[i] != expected) \
- __builtin_abort (); \
- x += 1; \
- } \
+ TYPE expected = i * 7 / 2 + in[i * 4]; \
+ if (out[i] != expected) \
+ __builtin_abort (); \
+ asm volatile ("" ::: "memory"); \
} \
}
-int
+int __attribute__ ((optimize (1)))
main (void)
{
TEST (test);
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_23.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_23.c
new file mode 100644
index 00000000000..7542e531624
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_23.c
@@ -0,0 +1,47 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
+
+#define TEST_LOOP(NAME, TYPE) \
+ void __attribute__ ((noinline, noclone)) \
+ NAME (TYPE *restrict dest, TYPE *restrict src, int n) \
+ { \
+ for (int i = 0; i < n; ++i) \
+ dest[i] += src[i * 4]; \
+ }
+
+#define TEST(NAME) \
+ TEST_LOOP (NAME##_i8, signed char) \
+ TEST_LOOP (NAME##_i16, unsigned short) \
+ TEST_LOOP (NAME##_f32, float) \
+ TEST_LOOP (NAME##_f64, double)
+
+TEST (test)
+
+/* Check the vectorized loop. */
+/* { dg-final { scan-assembler-times {\tld1b\t} 1 } } */
+/* { dg-final { scan-assembler-times {\tld4b\t} 1 } } */
+/* { dg-final { scan-assembler-times {\tst1b\t} 1 } } */
+/* { dg-final { scan-assembler-times {\tld1h\t} 1 } } */
+/* { dg-final { scan-assembler-times {\tld4h\t} 1 } } */
+/* { dg-final { scan-assembler-times {\tst1h\t} 1 } } */
+/* { dg-final { scan-assembler-times {\tld1w\t} 1 } } */
+/* { dg-final { scan-assembler-times {\tld4w\t} 1 } } */
+/* { dg-final { scan-assembler-times {\tst1w\t} 1 } } */
+/* { dg-final { scan-assembler-times {\tld1d\t} 1 } } */
+/* { dg-final { scan-assembler-times {\tld4d\t} 1 } } */
+/* { dg-final { scan-assembler-times {\tst1d\t} 1 } } */
+
+/* Check the scalar tail. */
+/* { dg-final { scan-assembler-times {\tldrb\tw} 2 } } */
+/* { dg-final { scan-assembler-times {\tstrb\tw} 1 } } */
+/* { dg-final { scan-assembler-times {\tldrh\tw} 2 } } */
+/* { dg-final { scan-assembler-times {\tstrh\tw} 1 } } */
+/* { dg-final { scan-assembler-times {\tldr\ts} 2 } } */
+/* { dg-final { scan-assembler-times {\tstr\ts} 1 } } */
+/* { dg-final { scan-assembler-times {\tldr\td} 2 } } */
+/* { dg-final { scan-assembler-times {\tstr\td} 1 } } */
+
+/* Each function should have three branches: one directly to the exit
+ (n <= 0), one to the single scalar epilogue iteration (n == 1),
+ and one branch-back for the vectorized loop. */
+/* { dg-final { scan-assembler-times {\tb[a-z]+\t} 12 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_23_run.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_23_run.c
new file mode 100644
index 00000000000..83f13dd46cb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_23_run.c
@@ -0,0 +1,45 @@
+/* { dg-do run { target aarch64_sve_hw } } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
+
+#include "sve_struct_vect_23.c"
+
+#define N 1000
+
+#undef TEST_LOOP
+#define TEST_LOOP(NAME, TYPE) \
+ { \
+ TYPE out[N]; \
+ TYPE in[N * 4]; \
+ int counts[] = { 0, 1, N - 1 }; \
+ for (int j = 0; j < 3; ++j) \
+ { \
+ int count = counts[j]; \
+ for (int i = 0; i < N; ++i) \
+ { \
+ out[i] = i * 7 / 2; \
+ asm volatile ("" ::: "memory"); \
+ } \
+ for (int i = 0; i < N * 4; ++i) \
+ { \
+ in[i] = i * 9 / 2; \
+ asm volatile ("" ::: "memory"); \
+ } \
+ NAME (out, in, count); \
+ for (int i = 0; i < N; ++i) \
+ { \
+ TYPE expected = i * 7 / 2; \
+ if (i < count) \
+ expected += in[i * 4]; \
+ if (out[i] != expected) \
+ __builtin_abort (); \
+ asm volatile ("" ::: "memory"); \
+ } \
+ } \
+ }
+
+int __attribute__ ((optimize (1)))
+main (void)
+{
+ TEST (test);
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_2_run.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_2_run.c
index 6229b78b72e..0da23e144af 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_2_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_2_run.c
@@ -1,5 +1,5 @@
/* { dg-do run { target aarch64_sve_hw } } */
-/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
#define TYPE unsigned short
#include "sve_struct_vect_1_run.c"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_3.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_3.c
index 3a29ae16701..b1e37e536e5 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_3.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_3.c
@@ -1,5 +1,5 @@
-/* { dg-do compile } */
-/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
+/* { dg-do assemble } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
#define TYPE unsigned int
#include "sve_struct_vect_1.c"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_3_run.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_3_run.c
index 7703dc6c043..74a5bd3233b 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_3_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_3_run.c
@@ -1,5 +1,5 @@
/* { dg-do run { target aarch64_sve_hw } } */
-/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
#define TYPE unsigned int
#include "sve_struct_vect_1_run.c"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_4.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_4.c
index 0c526365829..af20d763bdd 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_4.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_4.c
@@ -1,5 +1,5 @@
-/* { dg-do compile } */
-/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
+/* { dg-do assemble } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
#define TYPE unsigned long
#include "sve_struct_vect_1.c"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_4_run.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_4_run.c
index 4ea2cff9dd0..a8aedd188c8 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_4_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_4_run.c
@@ -1,5 +1,5 @@
/* { dg-do run { target aarch64_sve_hw } } */
-/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
#define TYPE unsigned long
#include "sve_struct_vect_1_run.c"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_5.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_5.c
index efc1c9d2e2c..4b1f8cd341a 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_5.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_5.c
@@ -1,5 +1,5 @@
-/* { dg-do compile } */
-/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
+/* { dg-do assemble } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
#define TYPE float
#include "sve_struct_vect_1.c"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_5_run.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_5_run.c
index f0d56e87dcc..22ba35ff702 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_5_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_5_run.c
@@ -1,5 +1,5 @@
/* { dg-do run { target aarch64_sve_hw } } */
-/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
#define TYPE float
#include "sve_struct_vect_1_run.c"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_6.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_6.c
index ff445c1fbb0..981c9d31950 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_6.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_6.c
@@ -1,5 +1,5 @@
-/* { dg-do compile } */
-/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
+/* { dg-do assemble } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
#define TYPE double
#include "sve_struct_vect_1.c"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_6_run.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_6_run.c
index b0b685c0789..dbcbae8259f 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_6_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_6_run.c
@@ -1,5 +1,5 @@
/* { dg-do run { target aarch64_sve_hw } } */
-/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
#define TYPE double
#include "sve_struct_vect_1_run.c"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_7.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_7.c
index 9712f89d171..8067d5ed169 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_7.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_7.c
@@ -1,5 +1,5 @@
-/* { dg-do compile } */
-/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
+/* { dg-do assemble } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
#ifndef TYPE
#define TYPE unsigned char
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_7_run.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_7_run.c
index 5cfb7559a5c..8cc1993e997 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_7_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_7_run.c
@@ -1,12 +1,10 @@
/* { dg-do run { target aarch64_sve_hw } } */
-/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
#include "sve_struct_vect_7.c"
#define N 93
-extern void abort() __attribute__((noreturn));
-
TYPE a[N], b[N], c[N], d[N], e[N * 4];
void __attribute__ ((noinline, noclone))
@@ -21,10 +19,10 @@ check_array (TYPE *array, int n, TYPE base, TYPE step)
{
for (int i = 0; i < n; ++i)
if (array[i] != (TYPE) (base + step * i))
- abort ();
+ __builtin_abort ();
}
-int
+int __attribute__ ((optimize (1)))
main (void)
{
init_array (e, 2 * N, 11, 5);
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_8.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_8.c
index 57cb93de5d9..e807179a6a5 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_8.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_8.c
@@ -1,5 +1,5 @@
-/* { dg-do compile } */
-/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
+/* { dg-do assemble } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
#define TYPE unsigned short
#define ITYPE short
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_8_run.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_8_run.c
index 59005a2f05b..954043fa874 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_8_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_8_run.c
@@ -1,5 +1,5 @@
/* { dg-do run { target aarch64_sve_hw } } */
-/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
#define TYPE unsigned short
#define ITYPE short
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_9.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_9.c
index d897d556d05..a167a7b2caf 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_9.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_9.c
@@ -1,5 +1,5 @@
-/* { dg-do compile } */
-/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
+/* { dg-do assemble } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
#define TYPE unsigned int
#define ITYPE int
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_9_run.c b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_9_run.c
index ab694b4a971..4b94d383fec 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_9_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_struct_vect_9_run.c
@@ -1,5 +1,5 @@
/* { dg-do run { target aarch64_sve_hw } } */
-/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve --save-temps" } */
#define TYPE unsigned int
#define ITYPE int
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_trn1_1.c b/gcc/testsuite/gcc.target/aarch64/sve_trn1_1.c
index 0c7b887d232..754b188a206 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_trn1_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_trn1_1.c
@@ -7,13 +7,13 @@
#include <stdint.h>
-typedef int64_t v4di __attribute__((vector_size (32)));
-typedef int32_t v8si __attribute__((vector_size (32)));
-typedef int16_t v16hi __attribute__((vector_size (32)));
-typedef int8_t v32qi __attribute__((vector_size (32)));
-typedef double v4df __attribute__((vector_size (32)));
-typedef float v8sf __attribute__((vector_size (32)));
-typedef _Float16 v16hf __attribute__((vector_size (32)));
+typedef int64_t vnx2di __attribute__((vector_size (32)));
+typedef int32_t vnx4si __attribute__((vector_size (32)));
+typedef int16_t vnx8hi __attribute__((vector_size (32)));
+typedef int8_t vnx16qi __attribute__((vector_size (32)));
+typedef double vnx2df __attribute__((vector_size (32)));
+typedef float vnx4sf __attribute__((vector_size (32)));
+typedef _Float16 vnx8hf __attribute__((vector_size (32)));
#define MASK_2(X, Y) X, Y + X
#define MASK_4(X, Y) MASK_2 (X, Y), MASK_2 (X + 2, Y)
@@ -21,10 +21,10 @@ typedef _Float16 v16hf __attribute__((vector_size (32)));
#define MASK_16(X, Y) MASK_8 (X, Y), MASK_8 (X + 8, Y)
#define MASK_32(X, Y) MASK_16 (X, Y), MASK_16 (X + 16, Y)
-#define INDEX_4 v4di
-#define INDEX_8 v8si
-#define INDEX_16 v16hi
-#define INDEX_32 v32qi
+#define INDEX_4 vnx2di
+#define INDEX_8 vnx4si
+#define INDEX_16 vnx8hi
+#define INDEX_32 vnx16qi
#define PERMUTE(TYPE, NUNITS) \
TYPE permute_##TYPE (TYPE values1, TYPE values2) \
@@ -35,13 +35,13 @@ typedef _Float16 v16hf __attribute__((vector_size (32)));
}
#define TEST_ALL(T) \
- T (v4di, 4) \
- T (v8si, 8) \
- T (v16hi, 16) \
- T (v32qi, 32) \
- T (v4df, 4) \
- T (v8sf, 8) \
- T (v16hf, 16)
+ T (vnx2di, 4) \
+ T (vnx4si, 8) \
+ T (vnx8hi, 16) \
+ T (vnx16qi, 32) \
+ T (vnx2df, 4) \
+ T (vnx4sf, 8) \
+ T (vnx8hf, 16)
TEST_ALL (PERMUTE)
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_unpack_signed_1.c b/gcc/testsuite/gcc.target/aarch64/sve_unpack_signed_1.c
index 4d345cf81e9..303276a64cf 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_unpack_signed_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_unpack_signed_1.c
@@ -3,12 +3,12 @@
#include <stdint.h>
-#define UNPACK(TYPED, TYPES) \
-void __attribute__ ((noinline, noclone)) \
-unpack_##TYPED##_##TYPES (TYPED *d, TYPES *s, int size) \
-{ \
- for (int i = 0; i < size; i++) \
- d[i] = s[i] + 1; \
+#define UNPACK(TYPED, TYPES) \
+void __attribute__ ((noinline, noclone)) \
+unpack_##TYPED##_##TYPES (TYPED *d, TYPES *s, TYPES mask, int size) \
+{ \
+ for (int i = 0; i < size; i++) \
+ d[i] = (TYPES) (s[i] | mask); \
}
#define TEST_ALL(T) \
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_unpack_signed_1_run.c b/gcc/testsuite/gcc.target/aarch64/sve_unpack_signed_1_run.c
index d183408d124..da29eda1434 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_unpack_signed_1_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_unpack_signed_1_run.c
@@ -14,9 +14,9 @@
arrays[i] = (i - 10) * 3; \
asm volatile ("" ::: "memory"); \
} \
- unpack_##TYPED##_##TYPES (arrayd, arrays, ARRAY_SIZE); \
+ unpack_##TYPED##_##TYPES (arrayd, arrays, 7, ARRAY_SIZE); \
for (int i = 0; i < ARRAY_SIZE; i++) \
- if (arrayd[i] != (TYPED) ((TYPES) ((i - 10) * 3) + 1)) \
+ if (arrayd[i] != (TYPED) (TYPES) (((i - 10) * 3) | 7)) \
__builtin_abort (); \
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_unpack_unsigned_1.c b/gcc/testsuite/gcc.target/aarch64/sve_unpack_unsigned_1.c
index fa8de963264..8c927873340 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_unpack_unsigned_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_unpack_unsigned_1.c
@@ -8,7 +8,7 @@ void __attribute__ ((noinline, noclone)) \
unpack_##TYPED##_##TYPES (TYPED *d, TYPES *s, int size) \
{ \
for (int i = 0; i < size; i++) \
- d[i] = s[i] + 1; \
+ d[i] = (TYPES) (s[i] + 1); \
}
#define TEST_ALL(T) \
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_unpack_unsigned_1_run.c b/gcc/testsuite/gcc.target/aarch64/sve_unpack_unsigned_1_run.c
index 3fa66220f17..d2df061e88d 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_unpack_unsigned_1_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_unpack_unsigned_1_run.c
@@ -16,7 +16,7 @@
} \
unpack_##TYPED##_##TYPES (arrayd, arrays, ARRAY_SIZE); \
for (int i = 0; i < ARRAY_SIZE; i++) \
- if (arrayd[i] != (TYPED) ((TYPES) ((i - 10) * 3) + 1)) \
+ if (arrayd[i] != (TYPED) (TYPES) (((i - 10) * 3) + 1)) \
__builtin_abort (); \
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_uzp1_1.c b/gcc/testsuite/gcc.target/aarch64/sve_uzp1_1.c
index aaa4fdccbf0..36048f03f99 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_uzp1_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_uzp1_1.c
@@ -3,13 +3,13 @@
#include <stdint.h>
-typedef int64_t v4di __attribute__((vector_size (32)));
-typedef int32_t v8si __attribute__((vector_size (32)));
-typedef int16_t v16hi __attribute__((vector_size (32)));
-typedef int8_t v32qi __attribute__((vector_size (32)));
-typedef double v4df __attribute__((vector_size (32)));
-typedef float v8sf __attribute__((vector_size (32)));
-typedef _Float16 v16hf __attribute__((vector_size (32)));
+typedef int64_t vnx2di __attribute__((vector_size (32)));
+typedef int32_t vnx4si __attribute__((vector_size (32)));
+typedef int16_t vnx8hi __attribute__((vector_size (32)));
+typedef int8_t vnx16qi __attribute__((vector_size (32)));
+typedef double vnx2df __attribute__((vector_size (32)));
+typedef float vnx4sf __attribute__((vector_size (32)));
+typedef _Float16 vnx8hf __attribute__((vector_size (32)));
#define UZP1(TYPE, MASK) \
TYPE uzp1_##TYPE (TYPE values1, TYPE values2) \
@@ -18,18 +18,18 @@ TYPE uzp1_##TYPE (TYPE values1, TYPE values2) \
}
-UZP1 (v4di, ((v4di) { 0, 2, 4, 6 }));
-UZP1 (v8si, ((v8si) { 0, 2, 4, 6, 8, 10, 12, 14 }));
-UZP1 (v16hi, ((v16hi) { 0, 2, 4, 6, 8, 10, 12, 14,
- 16, 18, 20, 22, 24, 26, 28, 30 }));
-UZP1 (v32qi, ((v32qi) { 0, 2, 4, 6, 8, 10, 12, 14,
- 16, 18, 20, 22, 24, 26, 28, 30,
- 32, 34, 36, 38, 40, 42, 44, 46,
- 48, 50, 52, 54, 56, 58, 60, 62 }));
-UZP1 (v4df, ((v4di) { 0, 2, 4, 6 }));
-UZP1 (v8sf, ((v8si) { 0, 2, 4, 6, 8, 10, 12, 14 }));
-UZP1 (v16hf, ((v16hi) { 0, 2, 4, 6, 8, 10, 12, 14,
- 16, 18, 20, 22, 24, 26, 28, 30 }));
+UZP1 (vnx2di, ((vnx2di) { 0, 2, 4, 6 }));
+UZP1 (vnx4si, ((vnx4si) { 0, 2, 4, 6, 8, 10, 12, 14 }));
+UZP1 (vnx8hi, ((vnx8hi) { 0, 2, 4, 6, 8, 10, 12, 14,
+ 16, 18, 20, 22, 24, 26, 28, 30 }));
+UZP1 (vnx16qi, ((vnx16qi) { 0, 2, 4, 6, 8, 10, 12, 14,
+ 16, 18, 20, 22, 24, 26, 28, 30,
+ 32, 34, 36, 38, 40, 42, 44, 46,
+ 48, 50, 52, 54, 56, 58, 60, 62 }));
+UZP1 (vnx2df, ((vnx2di) { 0, 2, 4, 6 }));
+UZP1 (vnx4sf, ((vnx4si) { 0, 2, 4, 6, 8, 10, 12, 14 }));
+UZP1 (vnx8hf, ((vnx8hi) { 0, 2, 4, 6, 8, 10, 12, 14,
+ 16, 18, 20, 22, 24, 26, 28, 30 }));
/* { dg-final { scan-assembler-not {\ttbl\tz[0-9]+\.d, z[0-9]+\.d, z[0-9]+\.d\n} } } */
/* { dg-final { scan-assembler-not {\ttbl\tz[0-9]+\.s, z[0-9]+\.s, z[0-9]+\.s\n} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_uzp1_1_run.c b/gcc/testsuite/gcc.target/aarch64/sve_uzp1_1_run.c
index d35dad0ffca..622f0d10f5f 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_uzp1_1_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_uzp1_1_run.c
@@ -16,48 +16,48 @@
int main (void)
{
- TEST_UZP1 (v4di,
- ((v4di) { 4, 6, 12, 36 }),
- ((v4di) { 4, 5, 6, 7 }),
- ((v4di) { 12, 24, 36, 48 }));
- TEST_UZP1 (v8si,
- ((v8si) { 3, 5, 7, 9, 33, 35, 37, 39 }),
- ((v8si) { 3, 4, 5, 6, 7, 8, 9, 10 }),
- ((v8si) { 33, 34, 35, 36, 37, 38, 39, 40 }));
- TEST_UZP1 (v16hi,
- ((v16hi) { 3, 5, 7, 9, 11, 13, 15, 17,
- 33, 35, 37, 39, 41, 43, 45, 47 }),
- ((v16hi) { 3, 4, 5, 6, 7, 8, 9, 10,
- 11, 12, 13, 14, 15, 16, 17, 18 }),
- ((v16hi) { 33, 34, 35, 36, 37, 38, 39, 40,
- 41, 42, 43, 44, 45, 46, 47, 48 }));
- TEST_UZP1 (v32qi,
- ((v32qi) { 4, 6, 4, 6, 4, 6, 4, 6,
- 4, 6, 4, 6, 4, 6, 4, 6,
- 12, 36, 12, 36, 12, 36, 12, 36,
- 12, 36, 12, 36, 12, 36, 12, 36 }),
- ((v32qi) { 4, 5, 6, 7, 4, 5, 6, 7,
- 4, 5, 6, 7, 4, 5, 6, 7,
- 4, 5, 6, 7, 4, 5, 6, 7,
- 4, 5, 6, 7, 4, 5, 6, 7 }),
- ((v32qi) { 12, 24, 36, 48, 12, 24, 36, 48,
- 12, 24, 36, 48, 12, 24, 36, 48,
- 12, 24, 36, 48, 12, 24, 36, 48,
- 12, 24, 36, 48, 12, 24, 36, 48 }));
- TEST_UZP1 (v4df,
- ((v4df) { 4.0, 6.0, 12.0, 36.0 }),
- ((v4df) { 4.0, 5.0, 6.0, 7.0 }),
- ((v4df) { 12.0, 24.0, 36.0, 48.0 }));
- TEST_UZP1 (v8sf,
- ((v8sf) { 3.0, 5.0, 7.0, 9.0, 33.0, 35.0, 37.0, 39.0 }),
- ((v8sf) { 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0 }),
- ((v8sf) { 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 40.0 }));
- TEST_UZP1 (v16hf,
- ((v16hf) { 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0,
- 33.0, 35.0, 37.0, 39.0, 41.0, 43.0, 45.0, 47.0 }),
- ((v16hf) { 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0,
- 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0 }),
- ((v16hf) { 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 40.0,
- 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0 }));
+ TEST_UZP1 (vnx2di,
+ ((vnx2di) { 4, 6, 12, 36 }),
+ ((vnx2di) { 4, 5, 6, 7 }),
+ ((vnx2di) { 12, 24, 36, 48 }));
+ TEST_UZP1 (vnx4si,
+ ((vnx4si) { 3, 5, 7, 9, 33, 35, 37, 39 }),
+ ((vnx4si) { 3, 4, 5, 6, 7, 8, 9, 10 }),
+ ((vnx4si) { 33, 34, 35, 36, 37, 38, 39, 40 }));
+ TEST_UZP1 (vnx8hi,
+ ((vnx8hi) { 3, 5, 7, 9, 11, 13, 15, 17,
+ 33, 35, 37, 39, 41, 43, 45, 47 }),
+ ((vnx8hi) { 3, 4, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18 }),
+ ((vnx8hi) { 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48 }));
+ TEST_UZP1 (vnx16qi,
+ ((vnx16qi) { 4, 6, 4, 6, 4, 6, 4, 6,
+ 4, 6, 4, 6, 4, 6, 4, 6,
+ 12, 36, 12, 36, 12, 36, 12, 36,
+ 12, 36, 12, 36, 12, 36, 12, 36 }),
+ ((vnx16qi) { 4, 5, 6, 7, 4, 5, 6, 7,
+ 4, 5, 6, 7, 4, 5, 6, 7,
+ 4, 5, 6, 7, 4, 5, 6, 7,
+ 4, 5, 6, 7, 4, 5, 6, 7 }),
+ ((vnx16qi) { 12, 24, 36, 48, 12, 24, 36, 48,
+ 12, 24, 36, 48, 12, 24, 36, 48,
+ 12, 24, 36, 48, 12, 24, 36, 48,
+ 12, 24, 36, 48, 12, 24, 36, 48 }));
+ TEST_UZP1 (vnx2df,
+ ((vnx2df) { 4.0, 6.0, 12.0, 36.0 }),
+ ((vnx2df) { 4.0, 5.0, 6.0, 7.0 }),
+ ((vnx2df) { 12.0, 24.0, 36.0, 48.0 }));
+ TEST_UZP1 (vnx4sf,
+ ((vnx4sf) { 3.0, 5.0, 7.0, 9.0, 33.0, 35.0, 37.0, 39.0 }),
+ ((vnx4sf) { 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0 }),
+ ((vnx4sf) { 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 40.0 }));
+ TEST_UZP1 (vnx8hf,
+ ((vnx8hf) { 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0,
+ 33.0, 35.0, 37.0, 39.0, 41.0, 43.0, 45.0, 47.0 }),
+ ((vnx8hf) { 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0,
+ 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0 }),
+ ((vnx8hf) { 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 40.0,
+ 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0 }));
return 0;
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_uzp2_1.c b/gcc/testsuite/gcc.target/aarch64/sve_uzp2_1.c
index 1bb84d80eb0..a9e4a63fb4d 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_uzp2_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_uzp2_1.c
@@ -3,13 +3,13 @@
#include <stdint.h>
-typedef int64_t v4di __attribute__((vector_size (32)));
-typedef int32_t v8si __attribute__((vector_size (32)));
-typedef int16_t v16hi __attribute__((vector_size (32)));
-typedef int8_t v32qi __attribute__((vector_size (32)));
-typedef double v4df __attribute__((vector_size (32)));
-typedef float v8sf __attribute__((vector_size (32)));
-typedef _Float16 v16hf __attribute__((vector_size (32)));
+typedef int64_t vnx2di __attribute__((vector_size (32)));
+typedef int32_t vnx4si __attribute__((vector_size (32)));
+typedef int16_t vnx8hi __attribute__((vector_size (32)));
+typedef int8_t vnx16qi __attribute__((vector_size (32)));
+typedef double vnx2df __attribute__((vector_size (32)));
+typedef float vnx4sf __attribute__((vector_size (32)));
+typedef _Float16 vnx8hf __attribute__((vector_size (32)));
#define UZP2(TYPE, MASK) \
TYPE uzp2_##TYPE (TYPE values1, TYPE values2) \
@@ -17,18 +17,18 @@ TYPE uzp2_##TYPE (TYPE values1, TYPE values2) \
return __builtin_shuffle (values1, values2, MASK); \
}
-UZP2 (v4di, ((v4di) { 1, 3, 5, 7 }));
-UZP2 (v8si, ((v8si) { 1, 3, 5, 7, 9, 11, 13, 15 }));
-UZP2 (v16hi, ((v16hi) { 1, 3, 5, 7, 9, 11, 13, 15,
- 17, 19, 21, 23, 25, 27, 29, 31 }));
-UZP2 (v32qi, ((v32qi) { 1, 3, 5, 7, 9, 11, 13, 15,
- 17, 19, 21, 23, 25, 27, 29, 31,
- 33, 35, 37, 39, 41, 43, 45, 47,
- 49, 51, 53, 55, 57, 59, 61, 63 }));
-UZP2 (v4df, ((v4di) { 1, 3, 5, 7 }));
-UZP2 (v8sf, ((v8si) { 1, 3, 5, 7, 9, 11, 13, 15 }));
-UZP2 (v16hf, ((v16hi) { 1, 3, 5, 7, 9, 11, 13, 15,
- 17, 19, 21, 23, 25, 27, 29, 31 }));
+UZP2 (vnx2di, ((vnx2di) { 1, 3, 5, 7 }));
+UZP2 (vnx4si, ((vnx4si) { 1, 3, 5, 7, 9, 11, 13, 15 }));
+UZP2 (vnx8hi, ((vnx8hi) { 1, 3, 5, 7, 9, 11, 13, 15,
+ 17, 19, 21, 23, 25, 27, 29, 31 }));
+UZP2 (vnx16qi, ((vnx16qi) { 1, 3, 5, 7, 9, 11, 13, 15,
+ 17, 19, 21, 23, 25, 27, 29, 31,
+ 33, 35, 37, 39, 41, 43, 45, 47,
+ 49, 51, 53, 55, 57, 59, 61, 63 }));
+UZP2 (vnx2df, ((vnx2di) { 1, 3, 5, 7 }));
+UZP2 (vnx4sf, ((vnx4si) { 1, 3, 5, 7, 9, 11, 13, 15 }));
+UZP2 (vnx8hf, ((vnx8hi) { 1, 3, 5, 7, 9, 11, 13, 15,
+ 17, 19, 21, 23, 25, 27, 29, 31 }));
/* { dg-final { scan-assembler-not {\ttbl\tz[0-9]+\.d, z[0-9]+\.d, z[0-9]+\.d\n} } } */
/* { dg-final { scan-assembler-not {\ttbl\tz[0-9]+\.s, z[0-9]+\.s, z[0-9]+\.s\n} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_uzp2_1_run.c b/gcc/testsuite/gcc.target/aarch64/sve_uzp2_1_run.c
index d7a241c1258..05d82fe08c1 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_uzp2_1_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_uzp2_1_run.c
@@ -16,48 +16,48 @@
int main (void)
{
- TEST_UZP2 (v4di,
- ((v4di) { 5, 7, 24, 48 }),
- ((v4di) { 4, 5, 6, 7 }),
- ((v4di) { 12, 24, 36, 48 }));
- TEST_UZP2 (v8si,
- ((v8si) { 4, 6, 8, 10, 34, 36, 38, 40 }),
- ((v8si) { 3, 4, 5, 6, 7, 8, 9, 10 }),
- ((v8si) { 33, 34, 35, 36, 37, 38, 39, 40 }));
- TEST_UZP2 (v16hi,
- ((v16hi) { 4, 6, 8, 10, 12, 14, 16, 18,
- 34, 36, 38, 40, 42, 44, 46, 48 }),
- ((v16hi) { 3, 4, 5, 6, 7, 8, 9, 10,
- 11, 12, 13, 14, 15, 16, 17, 18 }),
- ((v16hi) { 33, 34, 35, 36, 37, 38, 39, 40,
- 41, 42, 43, 44, 45, 46, 47, 48 }));
- TEST_UZP2 (v32qi,
- ((v32qi) { 5, 7, 5, 7, 5, 7, 5, 7,
- 5, 7, 5, 7, 5, 7, 5, 7,
- 24, 48, 24, 48, 24, 48, 24, 48,
- 24, 48, 24, 48, 24, 48, 24, 48 }),
- ((v32qi) { 4, 5, 6, 7, 4, 5, 6, 7,
- 4, 5, 6, 7, 4, 5, 6, 7,
- 4, 5, 6, 7, 4, 5, 6, 7,
- 4, 5, 6, 7, 4, 5, 6, 7 }),
- ((v32qi) { 12, 24, 36, 48, 12, 24, 36, 48,
- 12, 24, 36, 48, 12, 24, 36, 48,
- 12, 24, 36, 48, 12, 24, 36, 48,
- 12, 24, 36, 48, 12, 24, 36, 48 }));
- TEST_UZP2 (v4df,
- ((v4df) { 5.0, 7.0, 24.0, 48.0 }),
- ((v4df) { 4.0, 5.0, 6.0, 7.0 }),
- ((v4df) { 12.0, 24.0, 36.0, 48.0 }));
- TEST_UZP2 (v8sf,
- ((v8sf) { 4.0, 6.0, 8.0, 10.0, 34.0, 36.0, 38.0, 40.0 }),
- ((v8sf) { 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0 }),
- ((v8sf) { 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 40.0 }));
- TEST_UZP2 (v16hf,
- ((v16hf) { 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0,
- 34.0, 36.0, 38.0, 40.0, 42.0, 44.0, 46.0, 48.0 }),
- ((v16hf) { 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0,
- 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0 }),
- ((v16hf) { 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 40.0,
- 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0 }));
+ TEST_UZP2 (vnx2di,
+ ((vnx2di) { 5, 7, 24, 48 }),
+ ((vnx2di) { 4, 5, 6, 7 }),
+ ((vnx2di) { 12, 24, 36, 48 }));
+ TEST_UZP2 (vnx4si,
+ ((vnx4si) { 4, 6, 8, 10, 34, 36, 38, 40 }),
+ ((vnx4si) { 3, 4, 5, 6, 7, 8, 9, 10 }),
+ ((vnx4si) { 33, 34, 35, 36, 37, 38, 39, 40 }));
+ TEST_UZP2 (vnx8hi,
+ ((vnx8hi) { 4, 6, 8, 10, 12, 14, 16, 18,
+ 34, 36, 38, 40, 42, 44, 46, 48 }),
+ ((vnx8hi) { 3, 4, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18 }),
+ ((vnx8hi) { 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48 }));
+ TEST_UZP2 (vnx16qi,
+ ((vnx16qi) { 5, 7, 5, 7, 5, 7, 5, 7,
+ 5, 7, 5, 7, 5, 7, 5, 7,
+ 24, 48, 24, 48, 24, 48, 24, 48,
+ 24, 48, 24, 48, 24, 48, 24, 48 }),
+ ((vnx16qi) { 4, 5, 6, 7, 4, 5, 6, 7,
+ 4, 5, 6, 7, 4, 5, 6, 7,
+ 4, 5, 6, 7, 4, 5, 6, 7,
+ 4, 5, 6, 7, 4, 5, 6, 7 }),
+ ((vnx16qi) { 12, 24, 36, 48, 12, 24, 36, 48,
+ 12, 24, 36, 48, 12, 24, 36, 48,
+ 12, 24, 36, 48, 12, 24, 36, 48,
+ 12, 24, 36, 48, 12, 24, 36, 48 }));
+ TEST_UZP2 (vnx2df,
+ ((vnx2df) { 5.0, 7.0, 24.0, 48.0 }),
+ ((vnx2df) { 4.0, 5.0, 6.0, 7.0 }),
+ ((vnx2df) { 12.0, 24.0, 36.0, 48.0 }));
+ TEST_UZP2 (vnx4sf,
+ ((vnx4sf) { 4.0, 6.0, 8.0, 10.0, 34.0, 36.0, 38.0, 40.0 }),
+ ((vnx4sf) { 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0 }),
+ ((vnx4sf) { 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 40.0 }));
+ TEST_UZP2 (vnx8hf,
+ ((vnx8hf) { 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0,
+ 34.0, 36.0, 38.0, 40.0, 42.0, 44.0, 46.0, 48.0 }),
+ ((vnx8hf) { 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0,
+ 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0 }),
+ ((vnx8hf) { 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 40.0,
+ 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0 }));
return 0;
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_var_stride_2.c b/gcc/testsuite/gcc.target/aarch64/sve_var_stride_2.c
index 958dce4262d..74acc7983b8 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_var_stride_2.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_var_stride_2.c
@@ -16,7 +16,7 @@ f (TYPE *x, TYPE *y, unsigned short n, unsigned short m)
/* { dg-final { scan-assembler {\tldr\tw[0-9]+} } } */
/* { dg-final { scan-assembler {\tstr\tw[0-9]+} } } */
/* Should multiply by (257-1)*4 rather than (VF-1)*4. */
-/* { dg-final { scan-assembler-times {\tubfiz\tx[0-9]+, x[0-9]+, 10, 16} 2 } } */
+/* { dg-final { scan-assembler-times {\tadd\tx[0-9]+, x[0-9]+, x[0-9]+, lsl 10\n} 2 } } */
/* { dg-final { scan-assembler-not {\tcmp\tx[0-9]+, 0} } } */
/* { dg-final { scan-assembler-not {\tcmp\tw[0-9]+, 0} } } */
/* { dg-final { scan-assembler-not {\tcsel\tx[0-9]+} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_var_stride_4.c b/gcc/testsuite/gcc.target/aarch64/sve_var_stride_4.c
index 54d592d8ef1..f915e90b12e 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_var_stride_4.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_var_stride_4.c
@@ -16,7 +16,7 @@ f (TYPE *x, TYPE *y, int n, int m)
/* { dg-final { scan-assembler {\tldr\tw[0-9]+} } } */
/* { dg-final { scan-assembler {\tstr\tw[0-9]+} } } */
/* Should multiply by (257-1)*4 rather than (VF-1)*4. */
-/* { dg-final { scan-assembler-times {\tsbfiz\tx[0-9]+, x[0-9]+, 10, 32} 2 } } */
+/* { dg-final { scan-assembler-times {\tlsl\tx[0-9]+, x[0-9]+, 10\n} 2 } } */
/* { dg-final { scan-assembler {\tcmp\tw2, 0} } } */
/* { dg-final { scan-assembler {\tcmp\tw3, 0} } } */
/* { dg-final { scan-assembler-times {\tcsel\tx[0-9]+} 4 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_vcond_1.C b/gcc/testsuite/gcc.target/aarch64/sve_vcond_1.C
index 9be09546c80..d0febc69533 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_vcond_1.C
+++ b/gcc/testsuite/gcc.target/aarch64/sve_vcond_1.C
@@ -3,10 +3,10 @@
#include <stdint.h>
-typedef int8_t v32qi __attribute__((vector_size(32)));
-typedef int16_t v16hi __attribute__((vector_size(32)));
-typedef int32_t v8si __attribute__((vector_size(32)));
-typedef int64_t v4di __attribute__((vector_size(32)));
+typedef int8_t vnx16qi __attribute__((vector_size(32)));
+typedef int16_t vnx8hi __attribute__((vector_size(32)));
+typedef int32_t vnx4si __attribute__((vector_size(32)));
+typedef int64_t vnx2di __attribute__((vector_size(32)));
typedef uint8_t v32qu __attribute__((vector_size(32)));
typedef uint16_t v16hu __attribute__((vector_size(32)));
@@ -30,10 +30,10 @@ TYPE vcond_imm_##TYPE##_##SUFFIX (TYPE x, TYPE y, TYPE a) \
}
#define TEST_COND_VAR_SIGNED_ALL(T, COND, SUFFIX) \
- T (v32qi, COND, SUFFIX) \
- T (v16hi, COND, SUFFIX) \
- T (v8si, COND, SUFFIX) \
- T (v4di, COND, SUFFIX)
+ T (vnx16qi, COND, SUFFIX) \
+ T (vnx8hi, COND, SUFFIX) \
+ T (vnx4si, COND, SUFFIX) \
+ T (vnx2di, COND, SUFFIX)
#define TEST_COND_VAR_UNSIGNED_ALL(T, COND, SUFFIX) \
T (v32qu, COND, SUFFIX) \
@@ -54,10 +54,10 @@ TYPE vcond_imm_##TYPE##_##SUFFIX (TYPE x, TYPE y, TYPE a) \
TEST_COND_VAR_ALL (T, !=, ne)
#define TEST_COND_IMM_SIGNED_ALL(T, COND, IMM, SUFFIX) \
- T (v32qi, COND, IMM, SUFFIX) \
- T (v16hi, COND, IMM, SUFFIX) \
- T (v8si, COND, IMM, SUFFIX) \
- T (v4di, COND, IMM, SUFFIX)
+ T (vnx16qi, COND, IMM, SUFFIX) \
+ T (vnx8hi, COND, IMM, SUFFIX) \
+ T (vnx4si, COND, IMM, SUFFIX) \
+ T (vnx2di, COND, IMM, SUFFIX)
#define TEST_COND_IMM_UNSIGNED_ALL(T, COND, IMM, SUFFIX) \
T (v32qu, COND, IMM, SUFFIX) \
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_vec_bool_cmp_1.c b/gcc/testsuite/gcc.target/aarch64/sve_vec_bool_cmp_1.c
index 3b7c3e75775..d94cbb37b6a 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_vec_bool_cmp_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_vec_bool_cmp_1.c
@@ -1,57 +1,41 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -ftree-vectorize -fno-inline -march=armv8-a+sve" } */
+/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve" } */
#include <stdint.h>
#include <stdbool.h>
-#define VEC_BOOL_CMPNE(VARTYPE, INDUCTYPE) \
-void \
-vec_bool_cmpne##VARTYPE##INDUCTYPE (VARTYPE *dst, VARTYPE *src, \
- INDUCTYPE start, INDUCTYPE n, \
- INDUCTYPE mask) \
+#define VEC_BOOL(NAME, OP, VARTYPE, INDUCTYPE) \
+void __attribute__ ((noinline, noclone)) \
+vec_bool_##NAME##_##VARTYPE##_##INDUCTYPE (VARTYPE *dst, VARTYPE *src, \
+ INDUCTYPE start, \
+ INDUCTYPE n, \
+ INDUCTYPE mask) \
{ \
- INDUCTYPE i; \
- for (i = 0; i < n; i++) \
+ for (INDUCTYPE i = 0; i < n; i++) \
{ \
bool lhs = i >= start; \
bool rhs = (i & mask) != 0x3D; \
- if (lhs != rhs) \
+ if (lhs OP rhs) \
dst[i] = src[i]; \
} \
}
-#define VEC_BOOL_CMPEQ(VARTYPE, INDUCTYPE) \
-void \
-vec_bool_cmpeq##VARTYPE##INDUCTYPE (VARTYPE *dst, VARTYPE *src, \
- INDUCTYPE start, INDUCTYPE n, \
- INDUCTYPE mask) \
-{ \
- INDUCTYPE i; \
- for (i = 0; i < n; i++) \
- { \
- bool lhs = i >= start; \
- bool rhs = (i & mask) != 0x3D; \
- if (lhs == rhs) \
- dst[i] = src[i]; \
- } \
-}
+#define TEST_OP(T, NAME, OP) \
+ T (NAME, OP, uint8_t, uint8_t) \
+ T (NAME, OP, uint16_t, uint16_t) \
+ T (NAME, OP, uint32_t, uint32_t) \
+ T (NAME, OP, uint64_t, uint64_t) \
+ T (NAME, OP, float, uint32_t) \
+ T (NAME, OP, double, uint64_t)
-VEC_BOOL_CMPNE (uint8_t, uint8_t)
-VEC_BOOL_CMPNE (uint16_t, uint16_t)
-VEC_BOOL_CMPNE (uint32_t, uint32_t)
-VEC_BOOL_CMPNE (uint64_t, uint64_t)
-VEC_BOOL_CMPNE (float, uint32_t)
-VEC_BOOL_CMPNE (double, uint64_t)
+#define TEST_ALL(T) \
+ TEST_OP (T, cmpeq, ==) \
+ TEST_OP (T, cmpne, !=)
-VEC_BOOL_CMPEQ (uint8_t, uint8_t)
-VEC_BOOL_CMPEQ (uint16_t, uint16_t)
-VEC_BOOL_CMPEQ (uint32_t, uint32_t)
-VEC_BOOL_CMPEQ (uint64_t, uint64_t)
-VEC_BOOL_CMPEQ (float, uint32_t)
-VEC_BOOL_CMPEQ (double, uint64_t)
+TEST_ALL (VEC_BOOL)
-/* Both CMPNE and CMPEQ loops will contain an exclusive predicate or. */
+/* Both cmpne and cmpeq loops will contain an exclusive predicate or. */
/* { dg-final { scan-assembler-times {\teors?\tp[0-9]*\.b, p[0-7]/z, p[0-9]*\.b, p[0-9]*\.b\n} 12 } } */
-/* CMPEQ will also contain a masked predicate not operation, which gets
+/* cmpeq will also contain a masked predicate not operation, which gets
folded to BIC. */
/* { dg-final { scan-assembler-times {\tbic\tp[0-9]+\.b, p[0-7]/z, p[0-9]+\.b, p[0-9]+\.b\n} 6 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_vec_bool_cmp_1_run.c b/gcc/testsuite/gcc.target/aarch64/sve_vec_bool_cmp_1_run.c
index 8c341c0e932..092aa386c60 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_vec_bool_cmp_1_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_vec_bool_cmp_1_run.c
@@ -3,32 +3,9 @@
#include "sve_vec_bool_cmp_1.c"
-extern void abort (void);
-
#define N 103
-#define TEST_VEC_BOOL_CMPNE(VARTYPE,INDUCTYPE) \
-{ \
- INDUCTYPE i; \
- VARTYPE src[N]; \
- VARTYPE dst[N]; \
- for (i = 0; i < N; i++) \
- { \
- src[i] = i; \
- dst[i] = i * 2; \
- } \
- vec_bool_cmpne##VARTYPE##INDUCTYPE (dst, src, 13, 97, 0xFF); \
- for (i = 0; i < 13; i++) \
- if (dst[i] != i) \
- abort (); \
- for (i = 13; i < N; i++) \
- if (i != 0x3D && dst[i] != (i * 2)) \
- abort (); \
- else if (i == 0x3D && dst[i] != 0x3D) \
- abort (); \
-}
-
-#define TEST_VEC_BOOL_CMPEQ(VARTYPE,INDUCTYPE) \
+#define TEST_VEC_BOOL(NAME, OP, VARTYPE, INDUCTYPE) \
{ \
INDUCTYPE i; \
VARTYPE src[N]; \
@@ -37,36 +14,24 @@ extern void abort (void);
{ \
src[i] = i; \
dst[i] = i * 2; \
+ asm volatile ("" ::: "memory"); \
} \
- vec_bool_cmpeq##VARTYPE##INDUCTYPE (dst, src, 13, 97, 0xFF); \
+ vec_bool_##NAME##_##VARTYPE##_##INDUCTYPE (dst, src, 13, \
+ 97, 0xFF); \
for (i = 0; i < 13; i++) \
- if (dst[i] != (i * 2)) \
- abort (); \
+ if (dst[i] != (VARTYPE) (0 OP 1 ? i : i * 2)) \
+ __builtin_abort (); \
for (i = 13; i < 97; i++) \
- if (i != 0x3D && dst[i] != i) \
- abort (); \
- else if (i == 0x3D && dst[i] != (0x3D) * 2) \
- abort (); \
+ if (dst[i] != (VARTYPE) (1 OP (i != 0x3D) ? i : i * 2)) \
+ __builtin_abort (); \
for (i = 97; i < N; i++) \
if (dst[i] != (i * 2)) \
- abort (); \
+ __builtin_abort (); \
}
-int main ()
+int __attribute__ ((optimize (1)))
+main ()
{
- TEST_VEC_BOOL_CMPNE (uint8_t, uint8_t);
- TEST_VEC_BOOL_CMPNE (uint16_t, uint16_t);
- TEST_VEC_BOOL_CMPNE (uint32_t, uint32_t);
- TEST_VEC_BOOL_CMPNE (uint64_t, uint64_t);
- TEST_VEC_BOOL_CMPNE (float, uint32_t);
- TEST_VEC_BOOL_CMPNE (double, uint64_t);
-
- TEST_VEC_BOOL_CMPEQ (uint8_t, uint8_t);
- TEST_VEC_BOOL_CMPEQ (uint16_t, uint16_t);
- TEST_VEC_BOOL_CMPEQ (uint32_t, uint32_t);
- TEST_VEC_BOOL_CMPEQ (uint64_t, uint64_t);
- TEST_VEC_BOOL_CMPEQ (float, uint32_t);
- TEST_VEC_BOOL_CMPEQ (double, uint64_t);
-
+ TEST_ALL (TEST_VEC_BOOL)
return 0;
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_vec_init_2.c b/gcc/testsuite/gcc.target/aarch64/sve_vec_init_2.c
index 3d5b584e9e5..95b278e58f5 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_vec_init_2.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_vec_init_2.c
@@ -1,10 +1,10 @@
/* { dg-do compile } */
/* { dg-options "-O2 -ftree-vectorize -march=armv8-a+sve -msve-vector-bits=256" } */
-typedef unsigned int v8si __attribute__ ((vector_size(32)));
+typedef unsigned int vnx4si __attribute__ ((vector_size(32)));
void
-f (v8si *ptr, int x)
+f (vnx4si *ptr, int x)
{
- *ptr += (v8si) { x, x, 1, 2, 3, x, x, 4 };
+ *ptr += (vnx4si) { x, x, 1, 2, 3, x, x, 4 };
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_1.c b/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_1.c
index ae8542f2c75..31283fcf424 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_1.c
@@ -3,13 +3,13 @@
#include <stdint.h>
-typedef int64_t v4di __attribute__((vector_size (32)));
-typedef int32_t v8si __attribute__((vector_size (32)));
-typedef int16_t v16hi __attribute__((vector_size (32)));
-typedef int8_t v32qi __attribute__((vector_size (32)));
-typedef double v4df __attribute__((vector_size (32)));
-typedef float v8sf __attribute__((vector_size (32)));
-typedef _Float16 v16hf __attribute__((vector_size (32)));
+typedef int64_t vnx2di __attribute__((vector_size (32)));
+typedef int32_t vnx4si __attribute__((vector_size (32)));
+typedef int16_t vnx8hi __attribute__((vector_size (32)));
+typedef int8_t vnx16qi __attribute__((vector_size (32)));
+typedef double vnx2df __attribute__((vector_size (32)));
+typedef float vnx4sf __attribute__((vector_size (32)));
+typedef _Float16 vnx8hf __attribute__((vector_size (32)));
#define VEC_PERM(TYPE, MASKTYPE) \
TYPE __attribute__ ((noinline, noclone)) \
@@ -18,13 +18,13 @@ vec_perm_##TYPE (TYPE values1, TYPE values2, MASKTYPE mask) \
return __builtin_shuffle (values1, values2, mask); \
}
-VEC_PERM (v4di, v4di);
-VEC_PERM (v8si, v8si);
-VEC_PERM (v16hi, v16hi);
-VEC_PERM (v32qi, v32qi);
-VEC_PERM (v4df, v4di);
-VEC_PERM (v8sf, v8si);
-VEC_PERM (v16hf, v16hi);
+VEC_PERM (vnx2di, vnx2di);
+VEC_PERM (vnx4si, vnx4si);
+VEC_PERM (vnx8hi, vnx8hi);
+VEC_PERM (vnx16qi, vnx16qi);
+VEC_PERM (vnx2df, vnx2di);
+VEC_PERM (vnx4sf, vnx4si);
+VEC_PERM (vnx8hf, vnx8hi);
/* { dg-final { scan-assembler-times {\ttbl\tz[0-9]+\.d, z[0-9]+\.d, z[0-9]+\.d\n} 4 } } */
/* { dg-final { scan-assembler-times {\ttbl\tz[0-9]+\.s, z[0-9]+\.s, z[0-9]+\.s\n} 4 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_1_overrange_run.c b/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_1_overrange_run.c
index 6ab82250d4c..1b98389d996 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_1_overrange_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_1_overrange_run.c
@@ -19,93 +19,93 @@
int main (void)
{
- TEST_VEC_PERM (v4di, v4di,
- ((v4di) { 5, 36, 7, 48 }),
- ((v4di) { 4, 5, 6, 7 }),
- ((v4di) { 12, 24, 36, 48 }),
- ((v4di) { 1 + (8 * 1), 6 + (8 * 3),
- 3 + (8 * 1), 7 + (8 * 5) }));
- TEST_VEC_PERM (v8si, v8si,
- ((v8si) { 34, 38, 40, 10, 9, 8, 7, 35 }),
- ((v8si) { 3, 4, 5, 6, 7, 8, 9, 10 }),
- ((v8si) { 33, 34, 35, 36, 37, 38, 39, 40 }),
- ((v8si) { 9 + (16 * 2), 13 + (16 * 5),
- 15 + (16 * 1), 7 + (16 * 0),
- 6 + (16 * 8), 5 + (16 * 2),
- 4 + (16 * 3), 10 + (16 * 2) }));
- TEST_VEC_PERM (v16hi, v16hi,
- ((v16hi) { 12, 16, 18, 10, 42, 43, 44, 34,
- 7, 48, 3, 35, 9, 8, 7, 13 }),
- ((v16hi) { 3, 4, 5, 6, 7, 8, 9, 10,
- 11, 12, 13, 14, 15, 16, 17, 18 }),
- ((v16hi) { 33, 34, 35, 36, 37, 38, 39, 40,
- 41, 42, 43, 44, 45, 46, 47, 48 }),
- ((v16hi) { 9 + (32 * 2), 13 + (32 * 2),
- 15 + (32 * 8), 7 + (32 * 9),
- 25 + (32 * 4), 26 + (32 * 3),
- 27 + (32 * 1), 17 + (32 * 2),
- 4 + (32 * 6), 31 + (32 * 7),
- 0 + (32 * 8), 18 + (32 * 9),
- 6 + (32 * 6), 5 + (32 * 7),
- 4 + (32 * 2), 10 + (32 * 2) }));
- TEST_VEC_PERM (v32qi, v32qi,
- ((v32qi) { 5, 6, 7, 4, 5, 6, 4, 5,
- 6, 7, 12, 24, 36, 48, 12, 24,
- 5, 6, 7, 4, 5, 6, 4, 5,
- 6, 7, 12, 24, 36, 48, 12, 24 }),
- ((v32qi) { 4, 5, 6, 7, 4, 5, 6, 7,
- 4, 5, 6, 7, 4, 5, 6, 7,
- 4, 5, 6, 7, 4, 5, 6, 7,
- 4, 5, 6, 7, 4, 5, 6, 7 }),
- ((v32qi) { 12, 24, 36, 48, 12, 24, 36, 48,
- 12, 24, 36, 48, 12, 24, 36, 48,
- 12, 24, 36, 48, 12, 24, 36, 48,
- 12, 24, 36, 48, 12, 24, 36, 48 }),
- ((v32qi) { 5 + (64 * 3), 6 + (64 * 1),
- 7 + (64 * 2), 8 + (64 * 1),
- 9 + (64 * 3), 10 + (64 * 1),
- 28 + (64 * 3), 29 + (64 * 3),
- 30 + (64 * 1), 31 + (64 * 1),
- 32 + (64 * 3), 33 + (64 * 2),
- 54 + (64 * 2), 55 + (64 * 2),
- 56 + (64 * 1), 61 + (64 * 2),
- 5 + (64 * 2), 6 + (64 * 1),
- 7 + (64 * 2), 8 + (64 * 2),
- 9 + (64 * 2), 10 + (64 * 1),
- 28 + (64 * 3), 29 + (64 * 1),
- 30 + (64 * 3), 31 + (64 * 3),
- 32 + (64 * 1), 33 + (64 * 1),
- 54 + (64 * 2), 55 + (64 * 2),
- 56 + (64 * 2), 61 + (64 * 2) }));
- TEST_VEC_PERM (v4df, v4di,
- ((v4df) { 5.1, 36.1, 7.1, 48.1 }),
- ((v4df) { 4.1, 5.1, 6.1, 7.1 }),
- ((v4df) { 12.1, 24.1, 36.1, 48.1 }),
- ((v4di) { 1 + (8 * 3), 6 + (8 * 10),
- 3 + (8 * 8), 7 + (8 * 2) }));
- TEST_VEC_PERM (v8sf, v8si,
- ((v8sf) { 34.2, 38.2, 40.2, 10.2, 9.2, 8.2, 7.2, 35.2 }),
- ((v8sf) { 3.2, 4.2, 5.2, 6.2, 7.2, 8.2, 9.2, 10.2 }),
- ((v8sf) { 33.2, 34.2, 35.2, 36.2,
- 37.2, 38.2, 39.2, 40.2 }),
- ((v8si) { 9 + (16 * 1), 13 + (16 * 5),
- 15 + (16 * 4), 7 + (16 * 4),
- 6 + (16 * 3), 5 + (16 * 2),
- 4 + (16 * 1), 10 + (16 * 0) }));
- TEST_VEC_PERM (v16hf, v16hi,
- ((v16hf) { 12.0, 16.0, 18.0, 10.0, 42.0, 43.0, 44.0, 34.0,
- 7.0, 48.0, 3.0, 35.0, 9.0, 8.0, 7.0, 13.0 }),
- ((v16hf) { 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0,
- 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0 }),
- ((v16hf) { 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 40.0,
- 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0 }),
- ((v16hi) { 9 + (32 * 2), 13 + (32 * 2),
- 15 + (32 * 8), 7 + (32 * 9),
- 25 + (32 * 4), 26 + (32 * 3),
- 27 + (32 * 1), 17 + (32 * 2),
- 4 + (32 * 6), 31 + (32 * 7),
- 0 + (32 * 8), 18 + (32 * 9),
- 6 + (32 * 6), 5 + (32 * 7),
- 4 + (32 * 2), 10 + (32 * 2) }));
+ TEST_VEC_PERM (vnx2di, vnx2di,
+ ((vnx2di) { 5, 36, 7, 48 }),
+ ((vnx2di) { 4, 5, 6, 7 }),
+ ((vnx2di) { 12, 24, 36, 48 }),
+ ((vnx2di) { 1 + (8 * 1), 6 + (8 * 3),
+ 3 + (8 * 1), 7 + (8 * 5) }));
+ TEST_VEC_PERM (vnx4si, vnx4si,
+ ((vnx4si) { 34, 38, 40, 10, 9, 8, 7, 35 }),
+ ((vnx4si) { 3, 4, 5, 6, 7, 8, 9, 10 }),
+ ((vnx4si) { 33, 34, 35, 36, 37, 38, 39, 40 }),
+ ((vnx4si) { 9 + (16 * 2), 13 + (16 * 5),
+ 15 + (16 * 1), 7 + (16 * 0),
+ 6 + (16 * 8), 5 + (16 * 2),
+ 4 + (16 * 3), 10 + (16 * 2) }));
+ TEST_VEC_PERM (vnx8hi, vnx8hi,
+ ((vnx8hi) { 12, 16, 18, 10, 42, 43, 44, 34,
+ 7, 48, 3, 35, 9, 8, 7, 13 }),
+ ((vnx8hi) { 3, 4, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18 }),
+ ((vnx8hi) { 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48 }),
+ ((vnx8hi) { 9 + (32 * 2), 13 + (32 * 2),
+ 15 + (32 * 8), 7 + (32 * 9),
+ 25 + (32 * 4), 26 + (32 * 3),
+ 27 + (32 * 1), 17 + (32 * 2),
+ 4 + (32 * 6), 31 + (32 * 7),
+ 0 + (32 * 8), 18 + (32 * 9),
+ 6 + (32 * 6), 5 + (32 * 7),
+ 4 + (32 * 2), 10 + (32 * 2) }));
+ TEST_VEC_PERM (vnx16qi, vnx16qi,
+ ((vnx16qi) { 5, 6, 7, 4, 5, 6, 4, 5,
+ 6, 7, 12, 24, 36, 48, 12, 24,
+ 5, 6, 7, 4, 5, 6, 4, 5,
+ 6, 7, 12, 24, 36, 48, 12, 24 }),
+ ((vnx16qi) { 4, 5, 6, 7, 4, 5, 6, 7,
+ 4, 5, 6, 7, 4, 5, 6, 7,
+ 4, 5, 6, 7, 4, 5, 6, 7,
+ 4, 5, 6, 7, 4, 5, 6, 7 }),
+ ((vnx16qi) { 12, 24, 36, 48, 12, 24, 36, 48,
+ 12, 24, 36, 48, 12, 24, 36, 48,
+ 12, 24, 36, 48, 12, 24, 36, 48,
+ 12, 24, 36, 48, 12, 24, 36, 48 }),
+ ((vnx16qi) { 5 + (64 * 3), 6 + (64 * 1),
+ 7 + (64 * 2), 8 + (64 * 1),
+ 9 + (64 * 3), 10 + (64 * 1),
+ 28 + (64 * 3), 29 + (64 * 3),
+ 30 + (64 * 1), 31 + (64 * 1),
+ 32 + (64 * 3), 33 + (64 * 2),
+ 54 + (64 * 2), 55 + (64 * 2),
+ 56 + (64 * 1), 61 + (64 * 2),
+ 5 + (64 * 2), 6 + (64 * 1),
+ 7 + (64 * 2), 8 + (64 * 2),
+ 9 + (64 * 2), 10 + (64 * 1),
+ 28 + (64 * 3), 29 + (64 * 1),
+ 30 + (64 * 3), 31 + (64 * 3),
+ 32 + (64 * 1), 33 + (64 * 1),
+ 54 + (64 * 2), 55 + (64 * 2),
+ 56 + (64 * 2), 61 + (64 * 2) }));
+ TEST_VEC_PERM (vnx2df, vnx2di,
+ ((vnx2df) { 5.1, 36.1, 7.1, 48.1 }),
+ ((vnx2df) { 4.1, 5.1, 6.1, 7.1 }),
+ ((vnx2df) { 12.1, 24.1, 36.1, 48.1 }),
+ ((vnx2di) { 1 + (8 * 3), 6 + (8 * 10),
+ 3 + (8 * 8), 7 + (8 * 2) }));
+ TEST_VEC_PERM (vnx4sf, vnx4si,
+ ((vnx4sf) { 34.2, 38.2, 40.2, 10.2, 9.2, 8.2, 7.2, 35.2 }),
+ ((vnx4sf) { 3.2, 4.2, 5.2, 6.2, 7.2, 8.2, 9.2, 10.2 }),
+ ((vnx4sf) { 33.2, 34.2, 35.2, 36.2,
+ 37.2, 38.2, 39.2, 40.2 }),
+ ((vnx4si) { 9 + (16 * 1), 13 + (16 * 5),
+ 15 + (16 * 4), 7 + (16 * 4),
+ 6 + (16 * 3), 5 + (16 * 2),
+ 4 + (16 * 1), 10 + (16 * 0) }));
+ TEST_VEC_PERM (vnx8hf, vnx8hi,
+ ((vnx8hf) { 12.0, 16.0, 18.0, 10.0, 42.0, 43.0, 44.0, 34.0,
+ 7.0, 48.0, 3.0, 35.0, 9.0, 8.0, 7.0, 13.0 }),
+ ((vnx8hf) { 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0,
+ 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0 }),
+ ((vnx8hf) { 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 40.0,
+ 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0 }),
+ ((vnx8hi) { 9 + (32 * 2), 13 + (32 * 2),
+ 15 + (32 * 8), 7 + (32 * 9),
+ 25 + (32 * 4), 26 + (32 * 3),
+ 27 + (32 * 1), 17 + (32 * 2),
+ 4 + (32 * 6), 31 + (32 * 7),
+ 0 + (32 * 8), 18 + (32 * 9),
+ 6 + (32 * 6), 5 + (32 * 7),
+ 4 + (32 * 2), 10 + (32 * 2) }));
return 0;
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_1_run.c b/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_1_run.c
index 4d46ff02192..a551ffa9b49 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_1_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_1_run.c
@@ -19,61 +19,61 @@
int main (void)
{
- TEST_VEC_PERM (v4di, v4di,
- ((v4di) { 5, 36, 7, 48 }),
- ((v4di) { 4, 5, 6, 7 }),
- ((v4di) { 12, 24, 36, 48 }),
- ((v4di) { 1, 6, 3, 7 }));
- TEST_VEC_PERM (v8si, v8si,
- ((v8si) { 34, 38, 40, 10, 9, 8, 7, 35 }),
- ((v8si) { 3, 4, 5, 6, 7, 8, 9, 10 }),
- ((v8si) { 33, 34, 35, 36, 37, 38, 39, 40 }),
- ((v8si) { 9, 13, 15, 7, 6, 5, 4, 10 }));
- TEST_VEC_PERM (v16hi, v16hi,
- ((v16hi) { 12, 16, 18, 10, 42, 43, 44, 34,
- 7, 48, 3, 35, 9, 8, 7, 13 }),
- ((v16hi) { 3, 4, 5, 6, 7, 8, 9, 10,
- 11, 12, 13, 14, 15, 16, 17, 18 }),
- ((v16hi) { 33, 34, 35, 36, 37, 38, 39, 40,
- 41, 42, 43, 44, 45, 46, 47, 48 }),
- ((v16hi) { 9, 13, 15, 7, 25, 26, 27, 17,
- 4, 31, 0, 18, 6, 5, 4, 10 }));
- TEST_VEC_PERM (v32qi, v32qi,
- ((v32qi) { 5, 6, 7, 4, 5, 6, 4, 5,
- 6, 7, 12, 24, 36, 48, 12, 24,
- 5, 6, 7, 4, 5, 6, 4, 5,
- 6, 7, 12, 24, 36, 48, 12, 24 }),
- ((v32qi) { 4, 5, 6, 7, 4, 5, 6, 7,
- 4, 5, 6, 7, 4, 5, 6, 7,
- 4, 5, 6, 7, 4, 5, 6, 7,
- 4, 5, 6, 7, 4, 5, 6, 7 }),
- ((v32qi) { 12, 24, 36, 48, 12, 24, 36, 48,
- 12, 24, 36, 48, 12, 24, 36, 48,
- 12, 24, 36, 48, 12, 24, 36, 48,
- 12, 24, 36, 48, 12, 24, 36, 48 }),
- ((v32qi) { 5, 6, 7, 8, 9, 10, 28, 29,
- 30, 31, 32, 33, 54, 55, 56, 61,
- 5, 6, 7, 8, 9, 10, 28, 29,
- 30, 31, 32, 33, 54, 55, 56, 61 }));
- TEST_VEC_PERM (v4df, v4di,
- ((v4df) { 5.1, 36.1, 7.1, 48.1 }),
- ((v4df) { 4.1, 5.1, 6.1, 7.1 }),
- ((v4df) { 12.1, 24.1, 36.1, 48.1 }),
- ((v4di) { 1, 6, 3, 7 }));
- TEST_VEC_PERM (v8sf, v8si,
- ((v8sf) { 34.2, 38.2, 40.2, 10.2, 9.2, 8.2, 7.2, 35.2 }),
- ((v8sf) { 3.2, 4.2, 5.2, 6.2, 7.2, 8.2, 9.2, 10.2 }),
- ((v8sf) { 33.2, 34.2, 35.2, 36.2,
- 37.2, 38.2, 39.2, 40.2 }),
- ((v8si) { 9, 13, 15, 7, 6, 5, 4, 10 }));
- TEST_VEC_PERM (v16hf, v16hi,
- ((v16hf) { 12.0, 16.0, 18.0, 10.0, 42.0, 43.0, 44.0, 34.0,
- 7.0, 48.0, 3.0, 35.0, 9.0, 8.0, 7.0, 13.0 }),
- ((v16hf) { 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0,
- 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0 }),
- ((v16hf) { 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 40.0,
- 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0 }),
- ((v16hi) { 9, 13, 15, 7, 25, 26, 27, 17,
- 4, 31, 0, 18, 6, 5, 4, 10 }));
+ TEST_VEC_PERM (vnx2di, vnx2di,
+ ((vnx2di) { 5, 36, 7, 48 }),
+ ((vnx2di) { 4, 5, 6, 7 }),
+ ((vnx2di) { 12, 24, 36, 48 }),
+ ((vnx2di) { 1, 6, 3, 7 }));
+ TEST_VEC_PERM (vnx4si, vnx4si,
+ ((vnx4si) { 34, 38, 40, 10, 9, 8, 7, 35 }),
+ ((vnx4si) { 3, 4, 5, 6, 7, 8, 9, 10 }),
+ ((vnx4si) { 33, 34, 35, 36, 37, 38, 39, 40 }),
+ ((vnx4si) { 9, 13, 15, 7, 6, 5, 4, 10 }));
+ TEST_VEC_PERM (vnx8hi, vnx8hi,
+ ((vnx8hi) { 12, 16, 18, 10, 42, 43, 44, 34,
+ 7, 48, 3, 35, 9, 8, 7, 13 }),
+ ((vnx8hi) { 3, 4, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18 }),
+ ((vnx8hi) { 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48 }),
+ ((vnx8hi) { 9, 13, 15, 7, 25, 26, 27, 17,
+ 4, 31, 0, 18, 6, 5, 4, 10 }));
+ TEST_VEC_PERM (vnx16qi, vnx16qi,
+ ((vnx16qi) { 5, 6, 7, 4, 5, 6, 4, 5,
+ 6, 7, 12, 24, 36, 48, 12, 24,
+ 5, 6, 7, 4, 5, 6, 4, 5,
+ 6, 7, 12, 24, 36, 48, 12, 24 }),
+ ((vnx16qi) { 4, 5, 6, 7, 4, 5, 6, 7,
+ 4, 5, 6, 7, 4, 5, 6, 7,
+ 4, 5, 6, 7, 4, 5, 6, 7,
+ 4, 5, 6, 7, 4, 5, 6, 7 }),
+ ((vnx16qi) { 12, 24, 36, 48, 12, 24, 36, 48,
+ 12, 24, 36, 48, 12, 24, 36, 48,
+ 12, 24, 36, 48, 12, 24, 36, 48,
+ 12, 24, 36, 48, 12, 24, 36, 48 }),
+ ((vnx16qi) { 5, 6, 7, 8, 9, 10, 28, 29,
+ 30, 31, 32, 33, 54, 55, 56, 61,
+ 5, 6, 7, 8, 9, 10, 28, 29,
+ 30, 31, 32, 33, 54, 55, 56, 61 }));
+ TEST_VEC_PERM (vnx2df, vnx2di,
+ ((vnx2df) { 5.1, 36.1, 7.1, 48.1 }),
+ ((vnx2df) { 4.1, 5.1, 6.1, 7.1 }),
+ ((vnx2df) { 12.1, 24.1, 36.1, 48.1 }),
+ ((vnx2di) { 1, 6, 3, 7 }));
+ TEST_VEC_PERM (vnx4sf, vnx4si,
+ ((vnx4sf) { 34.2, 38.2, 40.2, 10.2, 9.2, 8.2, 7.2, 35.2 }),
+ ((vnx4sf) { 3.2, 4.2, 5.2, 6.2, 7.2, 8.2, 9.2, 10.2 }),
+ ((vnx4sf) { 33.2, 34.2, 35.2, 36.2,
+ 37.2, 38.2, 39.2, 40.2 }),
+ ((vnx4si) { 9, 13, 15, 7, 6, 5, 4, 10 }));
+ TEST_VEC_PERM (vnx8hf, vnx8hi,
+ ((vnx8hf) { 12.0, 16.0, 18.0, 10.0, 42.0, 43.0, 44.0, 34.0,
+ 7.0, 48.0, 3.0, 35.0, 9.0, 8.0, 7.0, 13.0 }),
+ ((vnx8hf) { 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0,
+ 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0 }),
+ ((vnx8hf) { 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 40.0,
+ 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0 }),
+ ((vnx8hi) { 9, 13, 15, 7, 25, 26, 27, 17,
+ 4, 31, 0, 18, 6, 5, 4, 10 }));
return 0;
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_2.c b/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_2.c
index 31cff7ab113..4c3df975bab 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_2.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_2.c
@@ -4,7 +4,7 @@
#include <stdint.h>
#define VEC_PERM(TYPE) \
-TYPE __attribute__ ((weak)) \
+TYPE __attribute__ ((noinline, noclone)) \
vec_reverse_##TYPE (TYPE *restrict a, TYPE *restrict b, int n) \
{ \
for (int i = 0; i < n; ++i) \
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_2_run.c b/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_2_run.c
index 342b1ddb44d..9a9300509ab 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_2_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_2_run.c
@@ -9,7 +9,10 @@
{ \
TYPE a[N], b[N]; \
for (unsigned int i = 0; i < N; ++i) \
- b[i] = i * 2 + i % 5; \
+ { \
+ b[i] = i * 2 + i % 5; \
+ asm volatile ("" ::: "memory"); \
+ } \
vec_reverse_##TYPE (a, b, N); \
for (unsigned int i = 0; i < N; ++i) \
{ \
@@ -19,7 +22,7 @@
} \
}
-int
+int __attribute__ ((optimize (1)))
main (void)
{
TEST_ALL (HARNESS)
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_3.c b/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_3.c
index 4f70abd35e5..8b4901b1014 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_3.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_3.c
@@ -4,7 +4,7 @@
#include <stdint.h>
#define VEC_PERM(TYPE) \
-TYPE __attribute__ ((weak)) \
+TYPE __attribute__ ((noinline, noclone)) \
vec_zip_##TYPE (TYPE *restrict a, TYPE *restrict b, \
TYPE *restrict c, long n) \
{ \
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_3_run.c b/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_3_run.c
index 14d66f99383..c47b4050ae2 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_3_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_3_run.c
@@ -12,6 +12,7 @@
{ \
b[i] = i * 2 + i % 5; \
c[i] = i * 3; \
+ asm volatile ("" ::: "memory"); \
} \
vec_zip_##TYPE (a, b, c, N / 8); \
for (unsigned int i = 0; i < N / 2; ++i) \
@@ -23,7 +24,7 @@
} \
}
-int
+int __attribute__ ((optimize (1)))
main (void)
{
TEST_ALL (HARNESS)
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_4.c b/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_4.c
index 5fbd59f08bd..c08ad23868c 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_4.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_4.c
@@ -4,7 +4,7 @@
#include <stdint.h>
#define VEC_PERM(TYPE) \
-TYPE __attribute__ ((weak)) \
+TYPE __attribute__ ((noinline, noclone)) \
vec_uzp_##TYPE (TYPE *restrict a, TYPE *restrict b, \
TYPE *restrict c, long n) \
{ \
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_4_run.c b/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_4_run.c
index 404429208a0..a096b6c5353 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_4_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_4_run.c
@@ -9,7 +9,10 @@
{ \
TYPE a[N], b[N], c[N]; \
for (unsigned int i = 0; i < N; ++i) \
- c[i] = i * 2 + i % 5; \
+ { \
+ c[i] = i * 2 + i % 5; \
+ asm volatile ("" ::: "memory"); \
+ } \
vec_uzp_##TYPE (a, b, c, N / 8); \
for (unsigned int i = 0; i < N; ++i) \
{ \
@@ -19,7 +22,7 @@
} \
}
-int
+int __attribute__ ((optimize (1)))
main (void)
{
TEST_ALL (HARNESS)
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_const_1.c b/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_const_1.c
index e76b3bc5abb..7b470cb04e2 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_const_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_const_1.c
@@ -3,13 +3,13 @@
#include <stdint.h>
-typedef int64_t v4di __attribute__((vector_size (32)));
-typedef int32_t v8si __attribute__((vector_size (32)));
-typedef int16_t v16hi __attribute__((vector_size (32)));
-typedef int8_t v32qi __attribute__((vector_size (32)));
-typedef double v4df __attribute__((vector_size (32)));
-typedef float v8sf __attribute__((vector_size (32)));
-typedef _Float16 v16hf __attribute__((vector_size (32)));
+typedef int64_t vnx2di __attribute__((vector_size (32)));
+typedef int32_t vnx4si __attribute__((vector_size (32)));
+typedef int16_t vnx8hi __attribute__((vector_size (32)));
+typedef int8_t vnx16qi __attribute__((vector_size (32)));
+typedef double vnx2df __attribute__((vector_size (32)));
+typedef float vnx4sf __attribute__((vector_size (32)));
+typedef _Float16 vnx8hf __attribute__((vector_size (32)));
#define VEC_PERM_CONST(TYPE, MASK) \
TYPE __attribute__ ((noinline, noclone)) \
@@ -18,18 +18,18 @@ vec_perm_##TYPE (TYPE values1, TYPE values2) \
return __builtin_shuffle (values1, values2, MASK); \
}
-VEC_PERM_CONST (v4di, ((v4di) { 4, 3, 6, 1 }));
-VEC_PERM_CONST (v8si, ((v8si) { 3, 9, 11, 12, 2, 4, 4, 2 }));
-VEC_PERM_CONST (v16hi, ((v16hi) { 8, 27, 5, 4, 21, 12, 13, 0,
- 22, 1, 8, 9, 3, 24, 15, 1 }));
-VEC_PERM_CONST (v32qi, ((v32qi) { 13, 31, 11, 2, 48, 28, 3, 4,
- 54, 11, 30, 1, 0, 61, 2, 3,
- 4, 5, 11, 63, 24, 11, 42, 39,
- 2, 57, 22, 11, 6, 16, 18, 21 }));
-VEC_PERM_CONST (v4df, ((v4di) { 7, 3, 2, 1 }));
-VEC_PERM_CONST (v8sf, ((v8si) { 1, 9, 13, 11, 2, 5, 4, 2 }));
-VEC_PERM_CONST (v16hf, ((v16hi) { 8, 27, 5, 4, 21, 12, 13, 0,
- 22, 1, 8, 9, 3, 24, 15, 1 }));
+VEC_PERM_CONST (vnx2di, ((vnx2di) { 4, 3, 6, 1 }));
+VEC_PERM_CONST (vnx4si, ((vnx4si) { 3, 9, 11, 12, 2, 4, 4, 2 }));
+VEC_PERM_CONST (vnx8hi, ((vnx8hi) { 8, 27, 5, 4, 21, 12, 13, 0,
+ 22, 1, 8, 9, 3, 24, 15, 1 }));
+VEC_PERM_CONST (vnx16qi, ((vnx16qi) { 13, 31, 11, 2, 48, 28, 3, 4,
+ 54, 11, 30, 1, 0, 61, 2, 3,
+ 4, 5, 11, 63, 24, 11, 42, 39,
+ 2, 57, 22, 11, 6, 16, 18, 21 }));
+VEC_PERM_CONST (vnx2df, ((vnx2di) { 7, 3, 2, 1 }));
+VEC_PERM_CONST (vnx4sf, ((vnx4si) { 1, 9, 13, 11, 2, 5, 4, 2 }));
+VEC_PERM_CONST (vnx8hf, ((vnx8hi) { 8, 27, 5, 4, 21, 12, 13, 0,
+ 22, 1, 8, 9, 3, 24, 15, 1 }));
/* { dg-final { scan-assembler-times {\ttbl\tz[0-9]+\.d, z[0-9]+\.d, z[0-9]+\.d\n} 4 } } */
/* { dg-final { scan-assembler-times {\ttbl\tz[0-9]+\.s, z[0-9]+\.s, z[0-9]+\.s\n} 4 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_const_1_overrun.c b/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_const_1_overrun.c
index b4f82091f7c..d397c3d6670 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_const_1_overrun.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_const_1_overrun.c
@@ -3,13 +3,13 @@
#include <stdint.h>
-typedef int64_t v4di __attribute__((vector_size (32)));
-typedef int32_t v8si __attribute__((vector_size (32)));
-typedef int16_t v16hi __attribute__((vector_size (32)));
-typedef int8_t v32qi __attribute__((vector_size (32)));
-typedef double v4df __attribute__((vector_size (32)));
-typedef float v8sf __attribute__((vector_size (32)));
-typedef _Float16 v16hf __attribute__((vector_size (32)));
+typedef int64_t vnx2di __attribute__((vector_size (32)));
+typedef int32_t vnx4si __attribute__((vector_size (32)));
+typedef int16_t vnx8hi __attribute__((vector_size (32)));
+typedef int8_t vnx16qi __attribute__((vector_size (32)));
+typedef double vnx2df __attribute__((vector_size (32)));
+typedef float vnx4sf __attribute__((vector_size (32)));
+typedef _Float16 vnx8hf __attribute__((vector_size (32)));
#define VEC_PERM_CONST_OVERRUN(TYPE, MASK) \
TYPE vec_perm_overrun_##TYPE (TYPE values1, TYPE values2) \
@@ -17,50 +17,50 @@ TYPE vec_perm_overrun_##TYPE (TYPE values1, TYPE values2) \
return __builtin_shuffle (values1, values2, MASK); \
}
-VEC_PERM_CONST_OVERRUN (v4di, ((v4di) { 4 + (8 * 1), 3 + (8 * 1),
- 6 + (8 * 2), 1 + (8 * 3) }));
-VEC_PERM_CONST_OVERRUN (v8si, ((v8si) { 3 + (16 * 3), 9 + (16 * 4),
- 11 + (16 * 5), 12 + (16 * 3),
- 2 + (16 * 2), 4 + (16 * 1),
- 4 + (16 * 2), 2 + (16 * 1) }));
-VEC_PERM_CONST_OVERRUN (v16hi, ((v16hi) { 8 + (32 * 3), 27 + (32 * 1),
- 5 + (32 * 3), 4 + (32 * 3),
- 21 + (32 * 1), 12 + (32 * 3),
- 13 + (32 * 3), 0 + (32 * 1),
- 22 + (32 * 2), 1 + (32 * 2),
- 8 + (32 * 2), 9 + (32 * 1),
- 3 + (32 * 2), 24 + (32 * 2),
- 15 + (32 * 1), 1 + (32 * 1) }));
-VEC_PERM_CONST_OVERRUN (v32qi, ((v32qi) { 13 + (64 * 2), 31 + (64 * 2),
- 11 + (64 * 2), 2 + (64 * 1),
- 48 + (64 * 1), 28 + (64 * 2),
- 3 + (64 * 2), 4 + (64 * 3),
- 54 + (64 * 1), 11 + (64 * 2),
- 30 + (64 * 2), 1 + (64 * 1),
- 0 + (64 * 1), 61 + (64 * 2),
- 2 + (64 * 3), 3 + (64 * 2),
- 4 + (64 * 3), 5 + (64 * 3),
- 11 + (64 * 3), 63 + (64 * 1),
- 24 + (64 * 1), 11 + (64 * 3),
- 42 + (64 * 3), 39 + (64 * 2),
- 2 + (64 * 2), 57 + (64 * 3),
- 22 + (64 * 3), 11 + (64 * 2),
- 6 + (64 * 2), 16 + (64 * 2),
- 18 + (64 * 2), 21 + (64 * 3) }));
-VEC_PERM_CONST_OVERRUN (v4df, ((v4di) { 7 + (8 * 1), 3 + (8 * 3),
- 2 + (8 * 5), 1 + (8 * 3) }));
-VEC_PERM_CONST_OVERRUN (v8sf, ((v8si) { 1 + (16 * 1), 9 + (16 * 2),
- 13 + (16 * 2), 11 + (16 * 3),
- 2 + (16 * 2), 5 + (16 * 2),
- 4 + (16 * 4), 2 + (16 * 3) }));
-VEC_PERM_CONST_OVERRUN (v16hf, ((v16hi) { 8 + (32 * 3), 27 + (32 * 1),
- 5 + (32 * 3), 4 + (32 * 3),
- 21 + (32 * 1), 12 + (32 * 3),
- 13 + (32 * 3), 0 + (32 * 1),
- 22 + (32 * 2), 1 + (32 * 2),
- 8 + (32 * 2), 9 + (32 * 1),
- 3 + (32 * 2), 24 + (32 * 2),
- 15 + (32 * 1), 1 + (32 * 1) }));
+VEC_PERM_CONST_OVERRUN (vnx2di, ((vnx2di) { 4 + (8 * 1), 3 + (8 * 1),
+ 6 + (8 * 2), 1 + (8 * 3) }));
+VEC_PERM_CONST_OVERRUN (vnx4si, ((vnx4si) { 3 + (16 * 3), 9 + (16 * 4),
+ 11 + (16 * 5), 12 + (16 * 3),
+ 2 + (16 * 2), 4 + (16 * 1),
+ 4 + (16 * 2), 2 + (16 * 1) }));
+VEC_PERM_CONST_OVERRUN (vnx8hi, ((vnx8hi) { 8 + (32 * 3), 27 + (32 * 1),
+ 5 + (32 * 3), 4 + (32 * 3),
+ 21 + (32 * 1), 12 + (32 * 3),
+ 13 + (32 * 3), 0 + (32 * 1),
+ 22 + (32 * 2), 1 + (32 * 2),
+ 8 + (32 * 2), 9 + (32 * 1),
+ 3 + (32 * 2), 24 + (32 * 2),
+ 15 + (32 * 1), 1 + (32 * 1) }));
+VEC_PERM_CONST_OVERRUN (vnx16qi, ((vnx16qi) { 13 + (64 * 2), 31 + (64 * 2),
+ 11 + (64 * 2), 2 + (64 * 1),
+ 48 + (64 * 1), 28 + (64 * 2),
+ 3 + (64 * 2), 4 + (64 * 3),
+ 54 + (64 * 1), 11 + (64 * 2),
+ 30 + (64 * 2), 1 + (64 * 1),
+ 0 + (64 * 1), 61 + (64 * 2),
+ 2 + (64 * 3), 3 + (64 * 2),
+ 4 + (64 * 3), 5 + (64 * 3),
+ 11 + (64 * 3), 63 + (64 * 1),
+ 24 + (64 * 1), 11 + (64 * 3),
+ 42 + (64 * 3), 39 + (64 * 2),
+ 2 + (64 * 2), 57 + (64 * 3),
+ 22 + (64 * 3), 11 + (64 * 2),
+ 6 + (64 * 2), 16 + (64 * 2),
+ 18 + (64 * 2), 21 + (64 * 3) }));
+VEC_PERM_CONST_OVERRUN (vnx2df, ((vnx2di) { 7 + (8 * 1), 3 + (8 * 3),
+ 2 + (8 * 5), 1 + (8 * 3) }));
+VEC_PERM_CONST_OVERRUN (vnx4sf, ((vnx4si) { 1 + (16 * 1), 9 + (16 * 2),
+ 13 + (16 * 2), 11 + (16 * 3),
+ 2 + (16 * 2), 5 + (16 * 2),
+ 4 + (16 * 4), 2 + (16 * 3) }));
+VEC_PERM_CONST_OVERRUN (vnx8hf, ((vnx8hi) { 8 + (32 * 3), 27 + (32 * 1),
+ 5 + (32 * 3), 4 + (32 * 3),
+ 21 + (32 * 1), 12 + (32 * 3),
+ 13 + (32 * 3), 0 + (32 * 1),
+ 22 + (32 * 2), 1 + (32 * 2),
+ 8 + (32 * 2), 9 + (32 * 1),
+ 3 + (32 * 2), 24 + (32 * 2),
+ 15 + (32 * 1), 1 + (32 * 1) }));
/* { dg-final { scan-assembler-times {\ttbl\tz[0-9]+\.d, z[0-9]+\.d, z[0-9]+\.d\n} 4 } } */
/* { dg-final { scan-assembler-times {\ttbl\tz[0-9]+\.s, z[0-9]+\.s, z[0-9]+\.s\n} 4 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_const_1_run.c b/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_const_1_run.c
index 7324c1da0a4..a0214880dbe 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_const_1_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_const_1_run.c
@@ -22,49 +22,49 @@
int main (void)
{
- TEST_VEC_PERM (v4di,
- ((v4di) { 12, 7, 36, 5 }),
- ((v4di) { 4, 5, 6, 7 }),
- ((v4di) { 12, 24, 36, 48 }));
- TEST_VEC_PERM (v8si,
- ((v8si) { 6, 34, 36, 37, 5, 7, 7, 5 }),
- ((v8si) { 3, 4, 5, 6, 7, 8, 9, 10 }),
- ((v8si) { 33, 34, 35, 36, 37, 38, 39, 40 }));
- TEST_VEC_PERM (v16hi,
- ((v16hi) { 11, 44, 8, 7, 38, 15, 16, 3,
- 39, 4, 11, 12, 6, 41, 18, 4 }),
- ((v16hi) { 3, 4, 5, 6, 7, 8, 9, 10, 11,
- 12, 13, 14, 15, 16, 17, 18 }),
- ((v16hi) { 33, 34, 35, 36, 37, 38, 39, 40,
- 41, 42, 43, 44, 45, 46, 47, 48 }));
- TEST_VEC_PERM (v32qi,
- ((v32qi) { 5, 7, 7, 6, 12, 4, 7, 4,
- 36, 7, 6, 5, 4, 24, 6, 7,
- 4, 5, 7, 48, 4, 7, 36, 48,
- 6, 24, 6, 7, 6, 4, 6, 5 }),
- ((v32qi) { 4, 5, 6, 7, 4, 5, 6, 7,
- 4, 5, 6, 7, 4, 5, 6, 7,
- 4, 5, 6, 7, 4, 5, 6, 7,
- 4, 5, 6, 7, 4, 5, 6, 7 }),
- ((v32qi) { 12, 24, 36, 48, 12, 24, 36, 48,
- 12, 24, 36, 48, 12, 24, 36, 48,
- 12, 24, 36, 48, 12, 24, 36, 48,
- 12, 24, 36, 48, 12, 24, 36, 48 }));
- TEST_VEC_PERM (v4df,
- ((v4df) { 48.5, 7.5, 6.5, 5.5 }),
- ((v4df) { 4.5, 5.5, 6.5, 7.5 }),
- ((v4df) { 12.5, 24.5, 36.5, 48.5 }));
- TEST_VEC_PERM (v8sf,
- ((v8sf) { 4.5, 34.5, 38.5, 36.5, 5.5, 8.5, 7.5, 5.5 }),
- ((v8sf) { 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5, 10.5 }),
- ((v8sf) { 33.5, 34.5, 35.5, 36.5,
- 37.5, 38.5, 39.5, 40.5 }));
- TEST_VEC_PERM (v16hf,
- ((v16hf) { 11.0, 44.0, 8.0, 7.0, 38.0, 15.0, 16.0, 3.0,
- 39.0, 4.0, 11.0, 12.0, 6.0, 41.0, 18.0, 4.0 }),
- ((v16hf) { 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0,
- 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0 }),
- ((v16hf) { 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 40.0,
- 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0 }));
+ TEST_VEC_PERM (vnx2di,
+ ((vnx2di) { 12, 7, 36, 5 }),
+ ((vnx2di) { 4, 5, 6, 7 }),
+ ((vnx2di) { 12, 24, 36, 48 }));
+ TEST_VEC_PERM (vnx4si,
+ ((vnx4si) { 6, 34, 36, 37, 5, 7, 7, 5 }),
+ ((vnx4si) { 3, 4, 5, 6, 7, 8, 9, 10 }),
+ ((vnx4si) { 33, 34, 35, 36, 37, 38, 39, 40 }));
+ TEST_VEC_PERM (vnx8hi,
+ ((vnx8hi) { 11, 44, 8, 7, 38, 15, 16, 3,
+ 39, 4, 11, 12, 6, 41, 18, 4 }),
+ ((vnx8hi) { 3, 4, 5, 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15, 16, 17, 18 }),
+ ((vnx8hi) { 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48 }));
+ TEST_VEC_PERM (vnx16qi,
+ ((vnx16qi) { 5, 7, 7, 6, 12, 4, 7, 4,
+ 36, 7, 6, 5, 4, 24, 6, 7,
+ 4, 5, 7, 48, 4, 7, 36, 48,
+ 6, 24, 6, 7, 6, 4, 6, 5 }),
+ ((vnx16qi) { 4, 5, 6, 7, 4, 5, 6, 7,
+ 4, 5, 6, 7, 4, 5, 6, 7,
+ 4, 5, 6, 7, 4, 5, 6, 7,
+ 4, 5, 6, 7, 4, 5, 6, 7 }),
+ ((vnx16qi) { 12, 24, 36, 48, 12, 24, 36, 48,
+ 12, 24, 36, 48, 12, 24, 36, 48,
+ 12, 24, 36, 48, 12, 24, 36, 48,
+ 12, 24, 36, 48, 12, 24, 36, 48 }));
+ TEST_VEC_PERM (vnx2df,
+ ((vnx2df) { 48.5, 7.5, 6.5, 5.5 }),
+ ((vnx2df) { 4.5, 5.5, 6.5, 7.5 }),
+ ((vnx2df) { 12.5, 24.5, 36.5, 48.5 }));
+ TEST_VEC_PERM (vnx4sf,
+ ((vnx4sf) { 4.5, 34.5, 38.5, 36.5, 5.5, 8.5, 7.5, 5.5 }),
+ ((vnx4sf) { 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5, 10.5 }),
+ ((vnx4sf) { 33.5, 34.5, 35.5, 36.5,
+ 37.5, 38.5, 39.5, 40.5 }));
+ TEST_VEC_PERM (vnx8hf,
+ ((vnx8hf) { 11.0, 44.0, 8.0, 7.0, 38.0, 15.0, 16.0, 3.0,
+ 39.0, 4.0, 11.0, 12.0, 6.0, 41.0, 18.0, 4.0 }),
+ ((vnx8hf) { 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0,
+ 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0 }),
+ ((vnx8hf) { 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 40.0,
+ 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0 }));
return 0;
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_const_single_1.c b/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_const_single_1.c
index a4efb4fea79..beabf272f11 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_const_single_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_const_single_1.c
@@ -3,13 +3,13 @@
#include <stdint.h>
-typedef int64_t v4di __attribute__((vector_size (32)));
-typedef int32_t v8si __attribute__((vector_size (32)));
-typedef int16_t v16hi __attribute__((vector_size (32)));
-typedef int8_t v32qi __attribute__((vector_size (32)));
-typedef double v4df __attribute__((vector_size (32)));
-typedef float v8sf __attribute__((vector_size (32)));
-typedef _Float16 v16hf __attribute__((vector_size (32)));
+typedef int64_t vnx2di __attribute__((vector_size (32)));
+typedef int32_t vnx4si __attribute__((vector_size (32)));
+typedef int16_t vnx8hi __attribute__((vector_size (32)));
+typedef int8_t vnx16qi __attribute__((vector_size (32)));
+typedef double vnx2df __attribute__((vector_size (32)));
+typedef float vnx4sf __attribute__((vector_size (32)));
+typedef _Float16 vnx8hf __attribute__((vector_size (32)));
#define VEC_PERM_SINGLE(TYPE, MASK) \
TYPE vec_perm_##TYPE (TYPE values1, TYPE values2) \
@@ -17,18 +17,18 @@ TYPE vec_perm_##TYPE (TYPE values1, TYPE values2) \
return __builtin_shuffle (values1, values2, MASK); \
}
-VEC_PERM_SINGLE (v4di, ((v4di) { 0, 3, 2, 1 }));
-VEC_PERM_SINGLE (v8si, ((v8si) { 3, 7, 1, 0, 2, 4, 4, 2 }));
-VEC_PERM_SINGLE (v16hi, ((v16hi) { 8, 7, 5, 4, 11, 12, 13, 0,
- 1, 1, 8, 9, 3, 14, 15, 1 }));
-VEC_PERM_SINGLE (v32qi, ((v32qi) { 13, 21, 11, 2, 8, 28, 3, 4,
- 14, 11, 30, 1, 0, 31, 2, 3,
- 4, 5, 11, 23, 24, 11, 12, 9,
- 2, 7, 22, 11, 6, 16, 18, 21 }));
-VEC_PERM_SINGLE (v4df, ((v4di) { 3, 3, 1, 1 }));
-VEC_PERM_SINGLE (v8sf, ((v8si) { 4, 5, 6, 0, 2, 7, 4, 2 }));
-VEC_PERM_SINGLE (v16hf, ((v16hi) { 8, 7, 5, 4, 11, 12, 13, 0,
- 1, 1, 8, 9, 3, 14, 15, 1 }));
+VEC_PERM_SINGLE (vnx2di, ((vnx2di) { 0, 3, 2, 1 }));
+VEC_PERM_SINGLE (vnx4si, ((vnx4si) { 3, 7, 1, 0, 2, 4, 4, 2 }));
+VEC_PERM_SINGLE (vnx8hi, ((vnx8hi) { 8, 7, 5, 4, 11, 12, 13, 0,
+ 1, 1, 8, 9, 3, 14, 15, 1 }));
+VEC_PERM_SINGLE (vnx16qi, ((vnx16qi) { 13, 21, 11, 2, 8, 28, 3, 4,
+ 14, 11, 30, 1, 0, 31, 2, 3,
+ 4, 5, 11, 23, 24, 11, 12, 9,
+ 2, 7, 22, 11, 6, 16, 18, 21 }));
+VEC_PERM_SINGLE (vnx2df, ((vnx2di) { 3, 3, 1, 1 }));
+VEC_PERM_SINGLE (vnx4sf, ((vnx4si) { 4, 5, 6, 0, 2, 7, 4, 2 }));
+VEC_PERM_SINGLE (vnx8hf, ((vnx8hi) { 8, 7, 5, 4, 11, 12, 13, 0,
+ 1, 1, 8, 9, 3, 14, 15, 1 }));
/* { dg-final { scan-assembler-times {\ttbl\tz[0-9]+\.d, z[0-9]+\.d, z[0-9]+\.d\n} 2 } } */
/* { dg-final { scan-assembler-times {\ttbl\tz[0-9]+\.s, z[0-9]+\.s, z[0-9]+\.s\n} 2 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_const_single_1_run.c b/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_const_single_1_run.c
index fbae30c8d1c..aa443563182 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_const_single_1_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_const_single_1_run.c
@@ -17,49 +17,49 @@
int main (void)
{
- TEST_VEC_PERM (v4di,
- ((v4di) { 4, 7, 6, 5 }),
- ((v4di) { 4, 5, 6, 7 }),
- ((v4di) { 12, 24, 36, 48 }));
- TEST_VEC_PERM (v8si,
- ((v8si) { 6, 10, 4, 3, 5, 7, 7, 5 }),
- ((v8si) { 3, 4, 5, 6, 7, 8, 9, 10 }),
- ((v8si) { 33, 34, 35, 36, 37, 38, 39, 40 }));
- TEST_VEC_PERM (v16hi,
- ((v16hi) { 11, 10, 8, 7, 14, 15, 16, 3,
- 4, 4, 11, 12, 6, 17, 18, 4 }),
- ((v16hi) { 3, 4, 5, 6, 7, 8, 9, 10,
- 11, 12, 13, 14, 15, 16, 17, 18 }),
- ((v16hi) { 33, 34, 35, 36, 37, 38, 39, 40,
- 41, 42, 43, 44, 45, 46, 47, 48 }));
- TEST_VEC_PERM (v32qi,
- ((v32qi) { 5, 5, 7, 6, 4, 4, 7, 4,
- 6, 7, 6, 5, 4, 7, 6, 7,
- 4, 5, 7, 7, 4, 7, 4, 5,
- 6, 7, 6, 7, 6, 4, 6, 5 }),
- ((v32qi) { 4, 5, 6, 7, 4, 5, 6, 7,
- 4, 5, 6, 7, 4, 5, 6, 7,
- 4, 5, 6, 7, 4, 5, 6, 7,
- 4, 5, 6, 7, 4, 5, 6, 7 }),
- ((v32qi) { 12, 24, 36, 48, 12, 24, 36, 48,
- 12, 24, 36, 48, 12, 24, 36, 48,
- 12, 24, 36, 48, 12, 24, 36, 48,
- 12, 24, 36, 48, 12, 24, 36, 48 }));
- TEST_VEC_PERM (v4df,
- ((v4df) { 7.5, 7.5, 5.5, 5.5 }),
- ((v4df) { 4.5, 5.5, 6.5, 7.5 }),
- ((v4df) { 12.5, 24.5, 36.5, 48.5 }));
- TEST_VEC_PERM (v8sf,
- ((v8sf) { 7.5, 8.5, 9.5, 3.5, 5.5, 10.5, 7.5, 5.5 }),
- ((v8sf) { 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5, 10.5 }),
- ((v8sf) { 33.5, 34.5, 35.5, 36.5,
- 37.5, 38.5, 39.5, 40.5 }));
- TEST_VEC_PERM (v16hf,
- ((v16hf) { 11.0, 10.0, 8.0, 7.0, 14.0, 15.0, 16.0, 3.0,
- 4.0, 4.0, 11.0, 12.0, 6.0, 17.0, 18.0, 4.0 }),
- ((v16hf) { 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0,
- 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0 }),
- ((v16hf) { 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 40.0,
- 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0 }));
+ TEST_VEC_PERM (vnx2di,
+ ((vnx2di) { 4, 7, 6, 5 }),
+ ((vnx2di) { 4, 5, 6, 7 }),
+ ((vnx2di) { 12, 24, 36, 48 }));
+ TEST_VEC_PERM (vnx4si,
+ ((vnx4si) { 6, 10, 4, 3, 5, 7, 7, 5 }),
+ ((vnx4si) { 3, 4, 5, 6, 7, 8, 9, 10 }),
+ ((vnx4si) { 33, 34, 35, 36, 37, 38, 39, 40 }));
+ TEST_VEC_PERM (vnx8hi,
+ ((vnx8hi) { 11, 10, 8, 7, 14, 15, 16, 3,
+ 4, 4, 11, 12, 6, 17, 18, 4 }),
+ ((vnx8hi) { 3, 4, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18 }),
+ ((vnx8hi) { 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48 }));
+ TEST_VEC_PERM (vnx16qi,
+ ((vnx16qi) { 5, 5, 7, 6, 4, 4, 7, 4,
+ 6, 7, 6, 5, 4, 7, 6, 7,
+ 4, 5, 7, 7, 4, 7, 4, 5,
+ 6, 7, 6, 7, 6, 4, 6, 5 }),
+ ((vnx16qi) { 4, 5, 6, 7, 4, 5, 6, 7,
+ 4, 5, 6, 7, 4, 5, 6, 7,
+ 4, 5, 6, 7, 4, 5, 6, 7,
+ 4, 5, 6, 7, 4, 5, 6, 7 }),
+ ((vnx16qi) { 12, 24, 36, 48, 12, 24, 36, 48,
+ 12, 24, 36, 48, 12, 24, 36, 48,
+ 12, 24, 36, 48, 12, 24, 36, 48,
+ 12, 24, 36, 48, 12, 24, 36, 48 }));
+ TEST_VEC_PERM (vnx2df,
+ ((vnx2df) { 7.5, 7.5, 5.5, 5.5 }),
+ ((vnx2df) { 4.5, 5.5, 6.5, 7.5 }),
+ ((vnx2df) { 12.5, 24.5, 36.5, 48.5 }));
+ TEST_VEC_PERM (vnx4sf,
+ ((vnx4sf) { 7.5, 8.5, 9.5, 3.5, 5.5, 10.5, 7.5, 5.5 }),
+ ((vnx4sf) { 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5, 10.5 }),
+ ((vnx4sf) { 33.5, 34.5, 35.5, 36.5,
+ 37.5, 38.5, 39.5, 40.5 }));
+ TEST_VEC_PERM (vnx8hf,
+ ((vnx8hf) { 11.0, 10.0, 8.0, 7.0, 14.0, 15.0, 16.0, 3.0,
+ 4.0, 4.0, 11.0, 12.0, 6.0, 17.0, 18.0, 4.0 }),
+ ((vnx8hf) { 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0,
+ 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0 }),
+ ((vnx8hf) { 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 40.0,
+ 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0 }));
return 0;
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_single_1.c b/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_single_1.c
index a82b57dc378..c4abc2de551 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_single_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_single_1.c
@@ -3,13 +3,13 @@
#include <stdint.h>
-typedef int64_t v4di __attribute__((vector_size (32)));
-typedef int32_t v8si __attribute__((vector_size (32)));
-typedef int16_t v16hi __attribute__((vector_size (32)));
-typedef int8_t v32qi __attribute__((vector_size (32)));
-typedef double v4df __attribute__((vector_size (32)));
-typedef float v8sf __attribute__((vector_size (32)));
-typedef _Float16 v16hf __attribute__((vector_size (32)));
+typedef int64_t vnx2di __attribute__((vector_size (32)));
+typedef int32_t vnx4si __attribute__((vector_size (32)));
+typedef int16_t vnx8hi __attribute__((vector_size (32)));
+typedef int8_t vnx16qi __attribute__((vector_size (32)));
+typedef double vnx2df __attribute__((vector_size (32)));
+typedef float vnx4sf __attribute__((vector_size (32)));
+typedef _Float16 vnx8hf __attribute__((vector_size (32)));
#define VEC_PERM(TYPE, MASKTYPE) \
TYPE vec_perm_##TYPE (TYPE values, MASKTYPE mask) \
@@ -17,13 +17,13 @@ TYPE vec_perm_##TYPE (TYPE values, MASKTYPE mask) \
return __builtin_shuffle (values, mask); \
}
-VEC_PERM (v4di, v4di)
-VEC_PERM (v8si, v8si)
-VEC_PERM (v16hi, v16hi)
-VEC_PERM (v32qi, v32qi)
-VEC_PERM (v4df, v4di)
-VEC_PERM (v8sf, v8si)
-VEC_PERM (v16hf, v16hi)
+VEC_PERM (vnx2di, vnx2di)
+VEC_PERM (vnx4si, vnx4si)
+VEC_PERM (vnx8hi, vnx8hi)
+VEC_PERM (vnx16qi, vnx16qi)
+VEC_PERM (vnx2df, vnx2di)
+VEC_PERM (vnx4sf, vnx4si)
+VEC_PERM (vnx8hf, vnx8hi)
/* { dg-final { scan-assembler-times {\ttbl\tz[0-9]+\.d, z[0-9]+\.d, z[0-9]+\.d\n} 2 } } */
/* { dg-final { scan-assembler-times {\ttbl\tz[0-9]+\.s, z[0-9]+\.s, z[0-9]+\.s\n} 2 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_single_1_run.c b/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_single_1_run.c
index 539c99d4f61..fd73bc9652f 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_single_1_run.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_vec_perm_single_1_run.c
@@ -18,48 +18,48 @@ extern void abort (void);
int main (void)
{
- TEST_VEC_PERM (v4di, v4di,
- ((v4di) { 5, 6, 7, 5 }),
- ((v4di) { 4, 5, 6, 7 }),
- ((v4di) { 1, 6, 3, 5 }));
- TEST_VEC_PERM (v8si, v8si,
- ((v8si) { 4, 8, 10, 10, 9, 8, 7, 5 }),
- ((v8si) { 3, 4, 5, 6, 7, 8, 9, 10 }),
- ((v8si) { 9, 13, 15, 7, 6, 5, 4, 10 }));
- TEST_VEC_PERM (v16hi, v16hi,
- ((v16hi) { 12, 16, 18, 10, 12, 13, 14, 4,
- 7, 18, 3, 5, 9, 8, 7, 13 }),
- ((v16hi) { 3, 4, 5, 6, 7, 8, 9, 10,
- 11, 12, 13, 14, 15, 16, 17, 18 }),
- ((v16hi) { 9, 13, 15, 7, 25, 26, 27, 17,
- 4, 31, 0, 18, 6, 5, 4, 10 }));
- TEST_VEC_PERM (v32qi, v32qi,
- ((v32qi) { 5, 6, 7, 4, 5, 6, 4, 5,
- 6, 7, 4, 5, 6, 7, 4, 5,
- 5, 6, 7, 4, 5, 6, 4, 5,
- 6, 7, 4, 5, 6, 7, 4, 5 }),
- ((v32qi) { 4, 5, 6, 7, 4, 5, 6, 7,
- 4, 5, 6, 7, 4, 5, 6, 7,
- 4, 5, 6, 7, 4, 5, 6, 7,
- 4, 5, 6, 7, 4, 5, 6, 7 }),
- ((v32qi) { 5, 6, 7, 8, 9, 10, 28, 29,
- 30, 31, 32, 33, 54, 55, 56, 61,
- 5, 6, 7, 8, 9, 10, 28, 29,
- 30, 31, 32, 33, 54, 55, 56, 61 }));
- TEST_VEC_PERM (v4df, v4di,
- ((v4df) { 5.1, 6.1, 7.1, 5.1 }),
- ((v4df) { 4.1, 5.1, 6.1, 7.1 }),
- ((v4di) { 1, 6, 3, 5 }));
- TEST_VEC_PERM (v8sf, v8si,
- ((v8sf) { 4.2, 8.2, 10.2, 10.2, 9.2, 8.2, 7.2, 5.2 }),
- ((v8sf) { 3.2, 4.2, 5.2, 6.2, 7.2, 8.2, 9.2, 10.2 }),
- ((v8si) { 9, 13, 15, 7, 6, 5, 4, 10 }));
- TEST_VEC_PERM (v16hf, v16hi,
- ((v16hf) { 12.0, 16.0, 18.0, 10.0, 12.0, 13.0, 14.0, 4.0,
- 7.0, 18.0, 3.0, 5.0, 9.0, 8.0, 7.0, 13.0 }),
- ((v16hf) { 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0,
- 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0 }),
- ((v16hi) { 9, 13, 15, 7, 25, 26, 27, 17,
- 4, 31, 0, 18, 6, 5, 4, 10 }));
+ TEST_VEC_PERM (vnx2di, vnx2di,
+ ((vnx2di) { 5, 6, 7, 5 }),
+ ((vnx2di) { 4, 5, 6, 7 }),
+ ((vnx2di) { 1, 6, 3, 5 }));
+ TEST_VEC_PERM (vnx4si, vnx4si,
+ ((vnx4si) { 4, 8, 10, 10, 9, 8, 7, 5 }),
+ ((vnx4si) { 3, 4, 5, 6, 7, 8, 9, 10 }),
+ ((vnx4si) { 9, 13, 15, 7, 6, 5, 4, 10 }));
+ TEST_VEC_PERM (vnx8hi, vnx8hi,
+ ((vnx8hi) { 12, 16, 18, 10, 12, 13, 14, 4,
+ 7, 18, 3, 5, 9, 8, 7, 13 }),
+ ((vnx8hi) { 3, 4, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18 }),
+ ((vnx8hi) { 9, 13, 15, 7, 25, 26, 27, 17,
+ 4, 31, 0, 18, 6, 5, 4, 10 }));
+ TEST_VEC_PERM (vnx16qi, vnx16qi,
+ ((vnx16qi) { 5, 6, 7, 4, 5, 6, 4, 5,
+ 6, 7, 4, 5, 6, 7, 4, 5,
+ 5, 6, 7, 4, 5, 6, 4, 5,
+ 6, 7, 4, 5, 6, 7, 4, 5 }),
+ ((vnx16qi) { 4, 5, 6, 7, 4, 5, 6, 7,
+ 4, 5, 6, 7, 4, 5, 6, 7,
+ 4, 5, 6, 7, 4, 5, 6, 7,
+ 4, 5, 6, 7, 4, 5, 6, 7 }),
+ ((vnx16qi) { 5, 6, 7, 8, 9, 10, 28, 29,
+ 30, 31, 32, 33, 54, 55, 56, 61,
+ 5, 6, 7, 8, 9, 10, 28, 29,
+ 30, 31, 32, 33, 54, 55, 56, 61 }));
+ TEST_VEC_PERM (vnx2df, vnx2di,
+ ((vnx2df) { 5.1, 6.1, 7.1, 5.1 }),
+ ((vnx2df) { 4.1, 5.1, 6.1, 7.1 }),
+ ((vnx2di) { 1, 6, 3, 5 }));
+ TEST_VEC_PERM (vnx4sf, vnx4si,
+ ((vnx4sf) { 4.2, 8.2, 10.2, 10.2, 9.2, 8.2, 7.2, 5.2 }),
+ ((vnx4sf) { 3.2, 4.2, 5.2, 6.2, 7.2, 8.2, 9.2, 10.2 }),
+ ((vnx4si) { 9, 13, 15, 7, 6, 5, 4, 10 }));
+ TEST_VEC_PERM (vnx8hf, vnx8hi,
+ ((vnx8hf) { 12.0, 16.0, 18.0, 10.0, 12.0, 13.0, 14.0, 4.0,
+ 7.0, 18.0, 3.0, 5.0, 9.0, 8.0, 7.0, 13.0 }),
+ ((vnx8hf) { 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0,
+ 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0 }),
+ ((vnx8hi) { 9, 13, 15, 7, 25, 26, 27, 17,
+ 4, 31, 0, 18, 6, 5, 4, 10 }));
return 0;
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_while_1.c b/gcc/testsuite/gcc.target/aarch64/sve_while_1.c
index c54db87fa21..2a268a447e3 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_while_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_while_1.c
@@ -3,13 +3,13 @@
#include <stdint.h>
-#define VEC_PERM(TYPE) \
-TYPE __attribute__ ((weak)) \
-vec_while_##TYPE (TYPE *restrict a, int n) \
-{ \
- for (int i = 0; i < n; ++i) \
- a[i] += 1; \
-}
+#define ADD_LOOP(TYPE) \
+ void __attribute__ ((noinline, noclone)) \
+ vec_while_##TYPE (TYPE *restrict a, int n) \
+ { \
+ for (int i = 0; i < n; ++i) \
+ a[i] += 1; \
+ }
#define TEST_ALL(T) \
T (int8_t) \
@@ -23,7 +23,7 @@ vec_while_##TYPE (TYPE *restrict a, int n) \
T (float) \
T (double)
-TEST_ALL (VEC_PERM)
+TEST_ALL (ADD_LOOP)
/* { dg-final { scan-assembler-not {\tuqdec} } } */
/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.b, xzr,} 2 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_while_2.c b/gcc/testsuite/gcc.target/aarch64/sve_while_2.c
index 62f82cc43f4..2f0f0f49e12 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_while_2.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_while_2.c
@@ -3,13 +3,13 @@
#include <stdint.h>
-#define VEC_PERM(TYPE) \
-TYPE __attribute__ ((weak)) \
-vec_while_##TYPE (TYPE *restrict a, unsigned int n) \
-{ \
- for (unsigned int i = 0; i < n; ++i) \
- a[i] += 1; \
-}
+#define ADD_LOOP(TYPE) \
+ void __attribute__ ((noinline, noclone)) \
+ vec_while_##TYPE (TYPE *restrict a, unsigned int n) \
+ { \
+ for (unsigned int i = 0; i < n; ++i) \
+ a[i] += 1; \
+ }
#define TEST_ALL(T) \
T (int8_t) \
@@ -23,7 +23,7 @@ vec_while_##TYPE (TYPE *restrict a, unsigned int n) \
T (float) \
T (double)
-TEST_ALL (VEC_PERM)
+TEST_ALL (ADD_LOOP)
/* { dg-final { scan-assembler-not {\tuqdec} } } */
/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.b, xzr,} 2 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_while_3.c b/gcc/testsuite/gcc.target/aarch64/sve_while_3.c
index ace7ebc5a0f..026a8195238 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_while_3.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_while_3.c
@@ -3,13 +3,13 @@
#include <stdint.h>
-#define VEC_PERM(TYPE) \
-TYPE __attribute__ ((weak)) \
-vec_while_##TYPE (TYPE *restrict a, long n) \
-{ \
- for (long i = 0; i < n; ++i) \
- a[i] += 1; \
-}
+#define ADD_LOOP(TYPE) \
+ TYPE __attribute__ ((noinline, noclone)) \
+ vec_while_##TYPE (TYPE *restrict a, int64_t n) \
+ { \
+ for (int64_t i = 0; i < n; ++i) \
+ a[i] += 1; \
+ }
#define TEST_ALL(T) \
T (int8_t) \
@@ -23,7 +23,7 @@ vec_while_##TYPE (TYPE *restrict a, long n) \
T (float) \
T (double)
-TEST_ALL (VEC_PERM)
+TEST_ALL (ADD_LOOP)
/* { dg-final { scan-assembler-not {\tuqdec} } } */
/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.b, xzr,} 2 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_while_4.c b/gcc/testsuite/gcc.target/aarch64/sve_while_4.c
index 0717eac1ff6..d71b141b431 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_while_4.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_while_4.c
@@ -3,13 +3,13 @@
#include <stdint.h>
-#define VEC_PERM(TYPE) \
-TYPE __attribute__ ((weak)) \
-vec_while_##TYPE (TYPE *restrict a, unsigned long n) \
-{ \
- for (unsigned long i = 0; i < n; ++i) \
- a[i] += 1; \
-}
+#define ADD_LOOP(TYPE) \
+ TYPE __attribute__ ((noinline, noclone)) \
+ vec_while_##TYPE (TYPE *restrict a, uint64_t n) \
+ { \
+ for (uint64_t i = 0; i < n; ++i) \
+ a[i] += 1; \
+ }
#define TEST_ALL(T) \
T (int8_t) \
@@ -23,7 +23,7 @@ vec_while_##TYPE (TYPE *restrict a, unsigned long n) \
T (float) \
T (double)
-TEST_ALL (VEC_PERM)
+TEST_ALL (ADD_LOOP)
/* { dg-final { scan-assembler-times {\tuqdec} 2 } } */
/* { dg-final { scan-assembler-times {\tuqdecb\tx[0-9]+} 2 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_while_maxiter_1.c b/gcc/testsuite/gcc.target/aarch64/sve_while_maxiter_1.c
deleted file mode 100644
index ead821b43ca..00000000000
--- a/gcc/testsuite/gcc.target/aarch64/sve_while_maxiter_1.c
+++ /dev/null
@@ -1,16 +0,0 @@
-/* { dg-do assemble } */
-/* { dg-options "-O3 -march=armv8-a+sve --save-temps" } */
-
-int
-loop (short b)
-{
- int c = 0;
-l1:
- b++;
- c |= b;
- if (b)
- goto l1;
- return c;
-}
-
-/* { dg-final { scan-assembler-times {\tadd\tx[0-9], x[0-9], 1\n} 1 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_while_maxiter_2.c b/gcc/testsuite/gcc.target/aarch64/sve_while_maxiter_2.c
deleted file mode 100644
index 1a3502a0f94..00000000000
--- a/gcc/testsuite/gcc.target/aarch64/sve_while_maxiter_2.c
+++ /dev/null
@@ -1,16 +0,0 @@
-/* { dg-do assemble } */
-/* { dg-options "-O3 -march=armv8-a+sve --save-temps" } */
-
-int
-loop (short b)
-{
- int c = 0;
-l1:
- b++;
- c |= b;
- if (b < 32767)
- goto l1;
-return c;
-}
-
-/* { dg-final { scan-assembler-times {\tadd\tx[0-9], x[0-9], 1\n} 1 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_while_maxiter_3.c b/gcc/testsuite/gcc.target/aarch64/sve_while_maxiter_3.c
deleted file mode 100644
index 125fc31a464..00000000000
--- a/gcc/testsuite/gcc.target/aarch64/sve_while_maxiter_3.c
+++ /dev/null
@@ -1,18 +0,0 @@
-/* { dg-do assemble } */
-/* { dg-options "-O3 -march=armv8-a+sve --save-temps" } */
-
-int
-loop (short b)
-{
- int c = 0;
-l1:
- b++;
- c |= b;
- if (b < 32766)
- goto l1;
-return c;
-}
-
-/* { dg-final { scan-assembler-not {\tmov\tx[0-9], 65536\n} } } */
-/* { dg-final { scan-assembler-not {\tcmp\tx[0-9], 0\n} } } */
-/* { dg-final { scan-assembler-not {\tcsel\tx[0-9], x[0-9], x[0-9], ne\n} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve_zip1_1.c b/gcc/testsuite/gcc.target/aarch64/sve_zip1_1.c
index 918313f62bd..c84b88a2e70 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve_zip1_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve_zip1_1.c
@@ -7,13 +7,13 @@
#include <stdint.h>
-typedef int64_t v4di __attribute__((vector_size (32)));
-typedef int32_t v8si __attribute__((vector_size (32)));
-typedef int16_t v16hi __attribute__((vector_size (32)));
-typedef int8_t v32qi __attribute__((vector_size (32)));
-typedef double v4df __attribute__((vector_size (32)));
-typedef float v8sf __attribute__((vector_size (32)));
-typedef _Float16 v16hf __attribute__((vector_size (32)));
+typedef int64_t vnx2di __attribute__((vector_size (32)));
+typedef int32_t vnx4si __attribute__((vector_size (32)));
+typedef int16_t vnx8hi __attribute__((vector_size (32)));
+typedef int8_t vnx16qi __attribute__((vector_size (32)));
+typedef double vnx2df __attribute__((vector_size (32)));
+typedef float vnx4sf __attribute__((vector_size (32)));
+typedef _Float16 vnx8hf __attribute__((vector_size (32)));
#define MASK_2(X, Y) X, Y + X
#define MASK_4(X, Y) MASK_2 (X, Y), MASK_2 (X + 1, Y)
@@ -21,10 +21,10 @@ typedef _Float16 v16hf __attribute__((vector_size (32)));
#define MASK_16(X, Y) MASK_8 (X, Y), MASK_8 (X + 4, Y)
#define MASK_32(X, Y) MASK_16 (X, Y), MASK_16 (X + 8, Y)
-#define INDEX_4 v4di
-#define INDEX_8 v8si
-#define INDEX_16 v16hi
-#define INDEX_32 v32qi
+#define INDEX_4 vnx2di
+#define INDEX_8 vnx4si
+#define INDEX_16 vnx8hi
+#define INDEX_32 vnx16qi
#define PERMUTE(TYPE, NUNITS) \
TYPE permute_##TYPE (TYPE values1, TYPE values2) \
@@ -36,13 +36,13 @@ typedef _Float16 v16hf __attribute__((vector_size (32)));
}
#define TEST_ALL(T) \
- T (v4di, 4) \
- T (v8si, 8) \
- T (v16hi, 16) \
- T (v32qi, 32) \
- T (v4df, 4) \
- T (v8sf, 8) \
- T (v16hf, 16)
+ T (vnx2di, 4) \
+ T (vnx4si, 8) \
+ T (vnx8hi, 16) \
+ T (vnx16qi, 32) \
+ T (vnx2df, 4) \
+ T (vnx4sf, 8) \
+ T (vnx8hf, 16)
TEST_ALL (PERMUTE)
diff --git a/gcc/testsuite/gcc.target/aarch64/vector_initialization_nostack.c b/gcc/testsuite/gcc.target/aarch64/vector_initialization_nostack.c
index c7c15ee5c4a..aecf8262706 100644
--- a/gcc/testsuite/gcc.target/aarch64/vector_initialization_nostack.c
+++ b/gcc/testsuite/gcc.target/aarch64/vector_initialization_nostack.c
@@ -49,4 +49,6 @@ f12 (void)
return sum;
}
-/* { dg-final { scan-assembler-not "sp" } } */
+/* Fails for fixed-length SVE because we lack a vec_init pattern.
+ A later patch fixes this in generic code. */
+/* { dg-final { scan-assembler-not "sp" { xfail { aarch64_sve && { ! vect_variable_length } } } } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-4.c b/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-4.c
index a6c1386c06e..2911da3a72d 100644
--- a/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-4.c
+++ b/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-4.c
@@ -1,46 +1,7 @@
/* { dg-do compile } */
/* { dg-options "-mcmse" } */
-typedef struct
-{
- unsigned char a;
- unsigned int b:5;
- unsigned int c:11, :0, d:8;
- struct { unsigned int ee:2; } e;
-} test_st;
-
-typedef union
-{
- test_st st;
- struct
- {
- unsigned int v1;
- unsigned int v2;
- unsigned int v3;
- unsigned int v4;
- }values;
-} read_st;
-
-
-typedef void __attribute__ ((cmse_nonsecure_call)) (*foo_ns) (test_st);
-
-extern void foo (test_st st);
-
-int
-main (void)
-{
- read_st r;
- foo_ns f;
-
- f = (foo_ns) 0x200000;
- r.values.v1 = 0xFFFFFFFF;
- r.values.v2 = 0xFFFFFFFF;
- r.values.v3 = 0xFFFFFFFF;
- r.values.v4 = 0xFFFFFFFF;
-
- f (r.st);
- return 0;
-}
+#include "../bitfield-4.x"
/* { dg-final { scan-assembler "mov\tip, r4" } } */
/* { dg-final { scan-assembler "movw\tr4, #65535" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-5.c b/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-5.c
index d51ce2d42c0..376e92b23fa 100644
--- a/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-5.c
+++ b/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-5.c
@@ -1,42 +1,7 @@
/* { dg-do compile } */
/* { dg-options "-mcmse" } */
-typedef struct
-{
- unsigned char a;
- unsigned short b :5;
- unsigned char c;
- unsigned short d :11;
-} test_st;
-
-typedef union
-{
- test_st st;
- struct
- {
- unsigned int v1;
- unsigned int v2;
- unsigned int v3;
- unsigned int v4;
- }values;
-} read_st;
-
-
-typedef void __attribute__ ((cmse_nonsecure_call)) (*foo_ns) (test_st);
-
-int
-main (void)
-{
- read_st r;
- foo_ns f;
-
- f = (foo_ns) 0x200000;
- r.values.v1 = 0xFFFFFFFF;
- r.values.v2 = 0xFFFFFFFF;
-
- f (r.st);
- return 0;
-}
+#include "../bitfield-5.x"
/* { dg-final { scan-assembler "mov\tip, r4" } } */
/* { dg-final { scan-assembler "movw\tr4, #8191" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-6.c b/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-6.c
index 77e9104b546..9845b6054c1 100644
--- a/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-6.c
+++ b/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-6.c
@@ -1,51 +1,7 @@
/* { dg-do compile } */
/* { dg-options "-mcmse" } */
-typedef struct
-{
- unsigned char a;
- unsigned int b : 3;
- unsigned int c : 14;
- unsigned int d : 1;
- struct {
- unsigned int ee : 2;
- unsigned short ff : 15;
- } e;
- unsigned char g : 1;
- unsigned char : 4;
- unsigned char h : 3;
-} test_st;
-
-typedef union
-{
- test_st st;
- struct
- {
- unsigned int v1;
- unsigned int v2;
- unsigned int v3;
- unsigned int v4;
- }values;
-} read_st;
-
-
-typedef void __attribute__ ((cmse_nonsecure_call)) (*foo_ns) (test_st);
-
-int
-main (void)
-{
- read_st r;
- foo_ns f;
-
- f = (foo_ns) 0x200000;
- r.values.v1 = 0xFFFFFFFF;
- r.values.v2 = 0xFFFFFFFF;
- r.values.v3 = 0xFFFFFFFF;
- r.values.v4 = 0xFFFFFFFF;
-
- f (r.st);
- return 0;
-}
+#include "../bitfield-6.x"
/* { dg-final { scan-assembler "mov\tip, r4" } } */
/* { dg-final { scan-assembler "movw\tr4, #65535" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-7.c b/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-7.c
index 3d8941bbfee..2ea52dfe655 100644
--- a/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-7.c
+++ b/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-7.c
@@ -1,43 +1,7 @@
/* { dg-do compile } */
/* { dg-options "-mcmse" } */
-typedef struct
-{
- unsigned char a;
- unsigned short b :5;
- unsigned char c;
- unsigned short d :11;
-} test_st;
-
-typedef union
-{
- test_st st;
- struct
- {
- unsigned int v1;
- unsigned int v2;
- unsigned int v3;
- unsigned int v4;
- }values;
-} read_st;
-
-
-typedef void __attribute__ ((cmse_nonsecure_call)) (*foo_ns) (test_st);
-
-int
-main (void)
-{
- read_st r;
- foo_ns f;
-
- f = (foo_ns) 0x200000;
- r.values.v1 = 0xFFFFFFFF;
- r.values.v2 = 0xFFFFFFFF;
-
- f (r.st);
- return 0;
-}
-
+#include "../bitfield-7.x"
/* { dg-final { scan-assembler "mov\tip, r4" } } */
/* { dg-final { scan-assembler "movw\tr4, #8191" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-8.c b/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-8.c
index 9ffbb718d34..9bc32b83d74 100644
--- a/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-8.c
+++ b/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-8.c
@@ -1,45 +1,7 @@
/* { dg-do compile } */
/* { dg-options "-mcmse" } */
-typedef struct
-{
- unsigned char a;
- unsigned int :0;
- unsigned int b :1;
- unsigned short :0;
- unsigned short c;
- unsigned int :0;
- unsigned int d :21;
-} test_st;
-
-typedef union
-{
- test_st st;
- struct
- {
- unsigned int v1;
- unsigned int v2;
- unsigned int v3;
- unsigned int v4;
- }values;
-} read_st;
-
-typedef void __attribute__ ((cmse_nonsecure_call)) (*foo_ns) (test_st);
-
-int
-main (void)
-{
- read_st r;
- foo_ns f;
-
- f = (foo_ns) 0x200000;
- r.values.v1 = 0xFFFFFFFF;
- r.values.v2 = 0xFFFFFFFF;
- r.values.v3 = 0xFFFFFFFF;
-
- f (r.st);
- return 0;
-}
+#include "../bitfield-8.x"
/* { dg-final { scan-assembler "mov\tip, r4" } } */
/* { dg-final { scan-assembler "movs\tr4, #255" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-9.c b/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-9.c
index 8a614182923..f6c15338d00 100644
--- a/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-9.c
+++ b/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-9.c
@@ -1,48 +1,7 @@
/* { dg-do compile } */
/* { dg-options "-mcmse" } */
-typedef struct
-{
- char a:3;
-} test_st3;
-
-typedef struct
-{
- char a:3;
-} test_st2;
-
-typedef struct
-{
- test_st2 st2;
- test_st3 st3;
-} test_st;
-
-typedef union
-{
- test_st st;
- struct
- {
- unsigned int v1;
- unsigned int v2;
- unsigned int v3;
- unsigned int v4;
- }values;
-} read_st;
-
-typedef void __attribute__ ((cmse_nonsecure_call)) (*foo_ns) (test_st);
-
-int
-main (void)
-{
- read_st r;
- foo_ns f;
-
- f = (foo_ns) 0x200000;
- r.values.v1 = 0xFFFFFFFF;
-
- f (r.st);
- return 0;
-}
+#include "../bitfield-9.x"
/* { dg-final { scan-assembler "mov\tip, r4" } } */
/* { dg-final { scan-assembler "movw\tr4, #1799" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-and-union-1.c b/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-and-union-1.c
deleted file mode 100644
index 642f4e0346b..00000000000
--- a/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-and-union-1.c
+++ /dev/null
@@ -1,96 +0,0 @@
-/* { dg-do compile } */
-/* { dg-options "-mcmse" } */
-
-typedef struct
-{
- unsigned short a :11;
-} test_st_4;
-
-typedef union
-{
- char a;
- test_st_4 st4;
-}test_un_2;
-
-typedef struct
-{
- unsigned char a;
- unsigned int :0;
- unsigned int b :1;
- unsigned short :0;
- unsigned short c;
- unsigned int :0;
- unsigned int d :21;
-} test_st_3;
-
-typedef struct
-{
- unsigned char a :3;
- unsigned int b :13;
- test_un_2 un2;
-} test_st_2;
-
-typedef union
-{
- test_st_2 st2;
- test_st_3 st3;
-}test_un_1;
-
-typedef struct
-{
- unsigned char a :2;
- unsigned char :0;
- unsigned short b :5;
- unsigned char :0;
- unsigned char c :4;
- test_un_1 un1;
-} test_st_1;
-
-typedef union
-{
- test_st_1 st1;
- struct
- {
- unsigned int v1;
- unsigned int v2;
- unsigned int v3;
- unsigned int v4;
- }values;
-} read_st_1;
-
-
-typedef void __attribute__ ((cmse_nonsecure_call)) (*foo_ns) (test_st_1);
-
-int
-main (void)
-{
- read_st_1 r;
- foo_ns f;
-
- f = (foo_ns) 0x200000;
- r.values.v1 = 0xFFFFFFFF;
- r.values.v2 = 0xFFFFFFFF;
- r.values.v3 = 0xFFFFFFFF;
- r.values.v4 = 0xFFFFFFFF;
-
- f (r.st1);
- return 0;
-}
-
-/* { dg-final { scan-assembler "mov\tip, r4" } } */
-/* { dg-final { scan-assembler "movw\tr4, #7939" } } */
-/* { dg-final { scan-assembler "movt\tr4, 15" } } */
-/* { dg-final { scan-assembler "ands\tr0, r4" } } */
-/* { dg-final { scan-assembler "movw\tr4, #65535" } } */
-/* { dg-final { scan-assembler "movt\tr4, 2047" } } */
-/* { dg-final { scan-assembler "ands\tr1, r4" } } */
-/* { dg-final { scan-assembler "movs\tr4, #1" } } */
-/* { dg-final { scan-assembler "movt\tr4, 65535" } } */
-/* { dg-final { scan-assembler "ands\tr2, r4" } } */
-/* { dg-final { scan-assembler "movw\tr4, #65535" } } */
-/* { dg-final { scan-assembler "movt\tr4, 31" } } */
-/* { dg-final { scan-assembler "ands\tr3, r4" } } */
-/* { dg-final { scan-assembler "mov\tr4, ip" } } */
-/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
-/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
-/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-and-union.c b/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-and-union.c
new file mode 100644
index 00000000000..31249489e89
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-and-union.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-options "-mcmse" } */
+
+#include "../bitfield-and-union.x"
+
+/* { dg-final { scan-assembler "mov\tip, r4" } } */
+/* { dg-final { scan-assembler "movw\tr4, #7939" } } */
+/* { dg-final { scan-assembler "movt\tr4, 15" } } */
+/* { dg-final { scan-assembler "ands\tr0, r4" } } */
+/* { dg-final { scan-assembler "movw\tr4, #65535" } } */
+/* { dg-final { scan-assembler "movt\tr4, 2047" } } */
+/* { dg-final { scan-assembler "ands\tr1, r4" } } */
+/* { dg-final { scan-assembler "movs\tr4, #1" } } */
+/* { dg-final { scan-assembler "movt\tr4, 65535" } } */
+/* { dg-final { scan-assembler "ands\tr2, r4" } } */
+/* { dg-final { scan-assembler "movw\tr4, #65535" } } */
+/* { dg-final { scan-assembler "movt\tr4, 31" } } */
+/* { dg-final { scan-assembler "ands\tr3, r4" } } */
+/* { dg-final { scan-assembler "mov\tr4, ip" } } */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/baseline/cmse-11.c b/gcc/testsuite/gcc.target/arm/cmse/baseline/cmse-11.c
index 3007409ad88..795544fe11d 100644
--- a/gcc/testsuite/gcc.target/arm/cmse/baseline/cmse-11.c
+++ b/gcc/testsuite/gcc.target/arm/cmse/baseline/cmse-11.c
@@ -1,7 +1,7 @@
/* { dg-do compile } */
+/* { dg-options "-mcmse" } */
/* { dg-require-effective-target arm_arch_v8m_base_ok } */
/* { dg-add-options arm_arch_v8m_base } */
-/* { dg-options "-mcmse" } */
int __attribute__ ((cmse_nonsecure_call)) (*bar) (int);
diff --git a/gcc/testsuite/gcc.target/arm/cmse/baseline/cmse-13.c b/gcc/testsuite/gcc.target/arm/cmse/baseline/cmse-13.c
index f2b931be591..7208a2cedd2 100644
--- a/gcc/testsuite/gcc.target/arm/cmse/baseline/cmse-13.c
+++ b/gcc/testsuite/gcc.target/arm/cmse/baseline/cmse-13.c
@@ -1,15 +1,9 @@
/* { dg-do compile } */
+/* { dg-options "-mcmse" } */
/* { dg-require-effective-target arm_arch_v8m_base_ok } */
/* { dg-add-options arm_arch_v8m_base } */
-/* { dg-options "-mcmse" } */
-
-int __attribute__ ((cmse_nonsecure_call)) (*bar) (float, double);
-int
-foo (int a)
-{
- return bar (1.0f, 2.0) + a + 1;
-}
+#include "../cmse-13.x"
/* Checks for saving and clearing prior to function call. */
/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/baseline/cmse-2.c b/gcc/testsuite/gcc.target/arm/cmse/baseline/cmse-2.c
index 814502d4e5d..fec7dc10484 100644
--- a/gcc/testsuite/gcc.target/arm/cmse/baseline/cmse-2.c
+++ b/gcc/testsuite/gcc.target/arm/cmse/baseline/cmse-2.c
@@ -1,7 +1,7 @@
/* { dg-do compile } */
+/* { dg-options "-mcmse" } */
/* { dg-require-effective-target arm_arch_v8m_base_ok } */
/* { dg-add-options arm_arch_v8m_base } */
-/* { dg-options "-mcmse" } */
extern float bar (void);
diff --git a/gcc/testsuite/gcc.target/arm/cmse/baseline/cmse-6.c b/gcc/testsuite/gcc.target/arm/cmse/baseline/cmse-6.c
index 95da045690a..43d45e7a63e 100644
--- a/gcc/testsuite/gcc.target/arm/cmse/baseline/cmse-6.c
+++ b/gcc/testsuite/gcc.target/arm/cmse/baseline/cmse-6.c
@@ -1,7 +1,7 @@
/* { dg-do compile } */
+/* { dg-options "-mcmse" } */
/* { dg-require-effective-target arm_arch_v8m_base_ok } */
/* { dg-add-options arm_arch_v8m_base } */
-/* { dg-options "-mcmse" } */
int __attribute__ ((cmse_nonsecure_call)) (*bar) (double);
diff --git a/gcc/testsuite/gcc.target/arm/cmse/baseline/softfp.c b/gcc/testsuite/gcc.target/arm/cmse/baseline/softfp.c
index 0069fcdaebf..ca76e12cd92 100644
--- a/gcc/testsuite/gcc.target/arm/cmse/baseline/softfp.c
+++ b/gcc/testsuite/gcc.target/arm/cmse/baseline/softfp.c
@@ -1,7 +1,7 @@
/* { dg-do compile } */
+/* { dg-options "-mcmse -mfloat-abi=softfp" } */
/* { dg-require-effective-target arm_arch_v8m_base_ok } */
/* { dg-add-options arm_arch_v8m_base } */
-/* { dg-options "-mcmse -mfloat-abi=softfp" } */
double __attribute__ ((cmse_nonsecure_call)) (*bar) (float, double);
diff --git a/gcc/testsuite/gcc.target/arm/cmse/baseline/union-1.c b/gcc/testsuite/gcc.target/arm/cmse/baseline/union-1.c
index ff18e839b02..afd5b98509c 100644
--- a/gcc/testsuite/gcc.target/arm/cmse/baseline/union-1.c
+++ b/gcc/testsuite/gcc.target/arm/cmse/baseline/union-1.c
@@ -1,60 +1,7 @@
/* { dg-do compile } */
/* { dg-options "-mcmse" } */
-typedef struct
-{
- unsigned char a :2;
- unsigned char :0;
- unsigned short b :5;
- unsigned char :0;
- unsigned short c :3;
- unsigned char :0;
- unsigned int d :9;
-} test_st_1;
-
-typedef struct
-{
- unsigned short a :7;
- unsigned char :0;
- unsigned char b :1;
- unsigned char :0;
- unsigned short c :6;
-} test_st_2;
-
-typedef union
-{
- test_st_1 st_1;
- test_st_2 st_2;
-}test_un;
-
-typedef union
-{
- test_un un;
- struct
- {
- unsigned int v1;
- unsigned int v2;
- unsigned int v3;
- unsigned int v4;
- }values;
-} read_un;
-
-
-typedef void __attribute__ ((cmse_nonsecure_call)) (*foo_ns) (test_un);
-
-int
-main (void)
-{
- read_un r;
- foo_ns f;
-
- f = (foo_ns) 0x200000;
- r.values.v1 = 0xFFFFFFFF;
- r.values.v2 = 0xFFFFFFFF;
-
- f (r.un);
- return 0;
-}
+#include "../union-1.x"
/* { dg-final { scan-assembler "mov\tip, r4" } } */
/* { dg-final { scan-assembler "movw\tr4, #8063" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/baseline/union-2.c b/gcc/testsuite/gcc.target/arm/cmse/baseline/union-2.c
index b2e024b7f07..6e60f2a7628 100644
--- a/gcc/testsuite/gcc.target/arm/cmse/baseline/union-2.c
+++ b/gcc/testsuite/gcc.target/arm/cmse/baseline/union-2.c
@@ -1,73 +1,7 @@
/* { dg-do compile } */
/* { dg-options "-mcmse" } */
-typedef struct
-{
- unsigned char a :2;
- unsigned char :0;
- unsigned short b :5;
- unsigned char :0;
- unsigned short c :3;
- unsigned char :0;
- unsigned int d :9;
-} test_st_1;
-
-typedef struct
-{
- unsigned short a :7;
- unsigned char :0;
- unsigned char b :1;
- unsigned char :0;
- unsigned short c :6;
-} test_st_2;
-
-typedef struct
-{
- unsigned char a;
- unsigned int :0;
- unsigned int b :1;
- unsigned short :0;
- unsigned short c;
- unsigned int :0;
- unsigned int d :21;
-} test_st_3;
-
-typedef union
-{
- test_st_1 st_1;
- test_st_2 st_2;
- test_st_3 st_3;
-}test_un;
-
-typedef union
-{
- test_un un;
- struct
- {
- unsigned int v1;
- unsigned int v2;
- unsigned int v3;
- unsigned int v4;
- }values;
-} read_un;
-
-
-typedef void __attribute__ ((cmse_nonsecure_call)) (*foo_ns) (test_un);
-
-int
-main (void)
-{
- read_un r;
- foo_ns f;
-
- f = (foo_ns) 0x200000;
- r.values.v1 = 0xFFFFFFFF;
- r.values.v2 = 0xFFFFFFFF;
- r.values.v3 = 0xFFFFFFFF;
-
- f (r.un);
- return 0;
-}
+#include "../union-2.x"
/* { dg-final { scan-assembler "mov\tip, r4" } } */
/* { dg-final { scan-assembler "movw\tr4, #8191" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/bitfield-4.x b/gcc/testsuite/gcc.target/arm/cmse/bitfield-4.x
new file mode 100644
index 00000000000..62e35cc3cb8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/bitfield-4.x
@@ -0,0 +1,40 @@
+typedef struct
+{
+ unsigned char a;
+ unsigned int b:5;
+ unsigned int c:11, :0, d:8;
+ struct { unsigned int ee:2; } e;
+} test_st;
+
+typedef union
+{
+ test_st st;
+ struct
+ {
+ unsigned int v1;
+ unsigned int v2;
+ unsigned int v3;
+ unsigned int v4;
+ }values;
+} read_st;
+
+
+typedef void __attribute__ ((cmse_nonsecure_call)) (*foo_ns) (test_st);
+
+extern void foo (test_st st);
+
+int
+main (void)
+{
+ read_st r;
+ foo_ns f;
+
+ f = (foo_ns) 0x200000;
+ r.values.v1 = 0xFFFFFFFF;
+ r.values.v2 = 0xFFFFFFFF;
+ r.values.v3 = 0xFFFFFFFF;
+ r.values.v4 = 0xFFFFFFFF;
+
+ f (r.st);
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/arm/cmse/bitfield-5.x b/gcc/testsuite/gcc.target/arm/cmse/bitfield-5.x
new file mode 100644
index 00000000000..de5649dda6e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/bitfield-5.x
@@ -0,0 +1,36 @@
+typedef struct
+{
+ unsigned char a;
+ unsigned short b :5;
+ unsigned char c;
+ unsigned short d :11;
+} test_st;
+
+typedef union
+{
+ test_st st;
+ struct
+ {
+ unsigned int v1;
+ unsigned int v2;
+ unsigned int v3;
+ unsigned int v4;
+ }values;
+} read_st;
+
+
+typedef void __attribute__ ((cmse_nonsecure_call)) (*foo_ns) (test_st);
+
+int
+main (void)
+{
+ read_st r;
+ foo_ns f;
+
+ f = (foo_ns) 0x200000;
+ r.values.v1 = 0xFFFFFFFF;
+ r.values.v2 = 0xFFFFFFFF;
+
+ f (r.st);
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/arm/cmse/bitfield-6.x b/gcc/testsuite/gcc.target/arm/cmse/bitfield-6.x
new file mode 100644
index 00000000000..693a8ae0abb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/bitfield-6.x
@@ -0,0 +1,45 @@
+typedef struct
+{
+ unsigned char a;
+ unsigned int b : 3;
+ unsigned int c : 14;
+ unsigned int d : 1;
+ struct {
+ unsigned int ee : 2;
+ unsigned short ff : 15;
+ } e;
+ unsigned char g : 1;
+ unsigned char : 4;
+ unsigned char h : 3;
+} test_st;
+
+typedef union
+{
+ test_st st;
+ struct
+ {
+ unsigned int v1;
+ unsigned int v2;
+ unsigned int v3;
+ unsigned int v4;
+ }values;
+} read_st;
+
+
+typedef void __attribute__ ((cmse_nonsecure_call)) (*foo_ns) (test_st);
+
+int
+main (void)
+{
+ read_st r;
+ foo_ns f;
+
+ f = (foo_ns) 0x200000;
+ r.values.v1 = 0xFFFFFFFF;
+ r.values.v2 = 0xFFFFFFFF;
+ r.values.v3 = 0xFFFFFFFF;
+ r.values.v4 = 0xFFFFFFFF;
+
+ f (r.st);
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/arm/cmse/bitfield-7.x b/gcc/testsuite/gcc.target/arm/cmse/bitfield-7.x
new file mode 100644
index 00000000000..de5649dda6e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/bitfield-7.x
@@ -0,0 +1,36 @@
+typedef struct
+{
+ unsigned char a;
+ unsigned short b :5;
+ unsigned char c;
+ unsigned short d :11;
+} test_st;
+
+typedef union
+{
+ test_st st;
+ struct
+ {
+ unsigned int v1;
+ unsigned int v2;
+ unsigned int v3;
+ unsigned int v4;
+ }values;
+} read_st;
+
+
+typedef void __attribute__ ((cmse_nonsecure_call)) (*foo_ns) (test_st);
+
+int
+main (void)
+{
+ read_st r;
+ foo_ns f;
+
+ f = (foo_ns) 0x200000;
+ r.values.v1 = 0xFFFFFFFF;
+ r.values.v2 = 0xFFFFFFFF;
+
+ f (r.st);
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/arm/cmse/bitfield-8.x b/gcc/testsuite/gcc.target/arm/cmse/bitfield-8.x
new file mode 100644
index 00000000000..654b21e94b5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/bitfield-8.x
@@ -0,0 +1,39 @@
+typedef struct
+{
+ unsigned char a;
+ unsigned int :0;
+ unsigned int b :1;
+ unsigned short :0;
+ unsigned short c;
+ unsigned int :0;
+ unsigned int d :21;
+} test_st;
+
+typedef union
+{
+ test_st st;
+ struct
+ {
+ unsigned int v1;
+ unsigned int v2;
+ unsigned int v3;
+ unsigned int v4;
+ }values;
+} read_st;
+
+typedef void __attribute__ ((cmse_nonsecure_call)) (*foo_ns) (test_st);
+
+int
+main (void)
+{
+ read_st r;
+ foo_ns f;
+
+ f = (foo_ns) 0x200000;
+ r.values.v1 = 0xFFFFFFFF;
+ r.values.v2 = 0xFFFFFFFF;
+ r.values.v3 = 0xFFFFFFFF;
+
+ f (r.st);
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/arm/cmse/bitfield-9.x b/gcc/testsuite/gcc.target/arm/cmse/bitfield-9.x
new file mode 100644
index 00000000000..7543ac52696
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/bitfield-9.x
@@ -0,0 +1,42 @@
+typedef struct
+{
+ char a:3;
+} test_st3;
+
+typedef struct
+{
+ char a:3;
+} test_st2;
+
+typedef struct
+{
+ test_st2 st2;
+ test_st3 st3;
+} test_st;
+
+typedef union
+{
+ test_st st;
+ struct
+ {
+ unsigned int v1;
+ unsigned int v2;
+ unsigned int v3;
+ unsigned int v4;
+ }values;
+} read_st;
+
+typedef void __attribute__ ((cmse_nonsecure_call)) (*foo_ns) (test_st);
+
+int
+main (void)
+{
+ read_st r;
+ foo_ns f;
+
+ f = (foo_ns) 0x200000;
+ r.values.v1 = 0xFFFFFFFF;
+
+ f (r.st);
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-and-union-1.c b/gcc/testsuite/gcc.target/arm/cmse/bitfield-and-union.x
index e139ba61af5..0a6eb3dd816 100644
--- a/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-and-union-1.c
+++ b/gcc/testsuite/gcc.target/arm/cmse/bitfield-and-union.x
@@ -1,6 +1,3 @@
-/* { dg-do compile } */
-/* { dg-options "-mcmse" } */
-
typedef struct
{
unsigned short a :11;
@@ -76,19 +73,3 @@ main (void)
f (r.st1);
return 0;
}
-
-/* { dg-final { scan-assembler "movw\tip, #7939" } } */
-/* { dg-final { scan-assembler "movt\tip, 15" } } */
-/* { dg-final { scan-assembler "and\tr0, r0, ip" } } */
-/* { dg-final { scan-assembler "movw\tip, #65535" } } */
-/* { dg-final { scan-assembler "movt\tip, 2047" } } */
-/* { dg-final { scan-assembler "and\tr1, r1, ip" } } */
-/* { dg-final { scan-assembler "mov\tip, #1" } } */
-/* { dg-final { scan-assembler "movt\tip, 65535" } } */
-/* { dg-final { scan-assembler "and\tr2, r2, ip" } } */
-/* { dg-final { scan-assembler "movw\tip, #65535" } } */
-/* { dg-final { scan-assembler "movt\tip, 31" } } */
-/* { dg-final { scan-assembler "and\tr3, r3, ip" } } */
-/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
-/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
-/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/cmse-13.x b/gcc/testsuite/gcc.target/arm/cmse/cmse-13.x
new file mode 100644
index 00000000000..cdcd5ba6cf6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/cmse-13.x
@@ -0,0 +1,7 @@
+int __attribute__ ((cmse_nonsecure_call)) (*bar) (float, double);
+
+int
+foo (int a)
+{
+ return bar (3.0f, 2.0) + a + 1;
+}
diff --git a/gcc/testsuite/gcc.target/arm/cmse/cmse-5.x b/gcc/testsuite/gcc.target/arm/cmse/cmse-5.x
new file mode 100644
index 00000000000..7b03819d6b7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/cmse-5.x
@@ -0,0 +1,7 @@
+extern float bar (void);
+
+float __attribute__ ((cmse_nonsecure_entry))
+foo (void)
+{
+ return bar ();
+}
diff --git a/gcc/testsuite/gcc.target/arm/cmse/cmse-7.x b/gcc/testsuite/gcc.target/arm/cmse/cmse-7.x
new file mode 100644
index 00000000000..3fa372af237
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/cmse-7.x
@@ -0,0 +1,7 @@
+int __attribute__ ((cmse_nonsecure_call)) (*bar) (void);
+
+int
+foo (int a)
+{
+ return bar () + a + 1;
+}
diff --git a/gcc/testsuite/gcc.target/arm/cmse/cmse-8.x b/gcc/testsuite/gcc.target/arm/cmse/cmse-8.x
new file mode 100644
index 00000000000..7e1479542ed
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/cmse-8.x
@@ -0,0 +1,7 @@
+int __attribute__ ((cmse_nonsecure_call)) (*bar) (double);
+
+int
+foo (int a)
+{
+ return bar (2.0) + a + 1;
+}
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-4.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-4.c
index c3b1396d52e..55da2a0c622 100644
--- a/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-4.c
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-4.c
@@ -1,46 +1,7 @@
/* { dg-do compile } */
/* { dg-options "-mcmse" } */
-typedef struct
-{
- unsigned char a;
- unsigned int b:5;
- unsigned int c:11, :0, d:8;
- struct { unsigned int ee:2; } e;
-} test_st;
-
-typedef union
-{
- test_st st;
- struct
- {
- unsigned int v1;
- unsigned int v2;
- unsigned int v3;
- unsigned int v4;
- }values;
-} read_st;
-
-
-typedef void __attribute__ ((cmse_nonsecure_call)) (*foo_ns) (test_st);
-
-extern void foo (test_st st);
-
-int
-main (void)
-{
- read_st r;
- foo_ns f;
-
- f = (foo_ns) 0x200000;
- r.values.v1 = 0xFFFFFFFF;
- r.values.v2 = 0xFFFFFFFF;
- r.values.v3 = 0xFFFFFFFF;
- r.values.v4 = 0xFFFFFFFF;
-
- f (r.st);
- return 0;
-}
+#include "../bitfield-4.x"
/* { dg-final { scan-assembler "movw\tip, #65535" } } */
/* { dg-final { scan-assembler "movt\tip, 255" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-5.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-5.c
index 0d029044aa9..383363233e6 100644
--- a/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-5.c
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-5.c
@@ -1,42 +1,7 @@
/* { dg-do compile } */
/* { dg-options "-mcmse" } */
-typedef struct
-{
- unsigned char a;
- unsigned short b :5;
- unsigned char c;
- unsigned short d :11;
-} test_st;
-
-typedef union
-{
- test_st st;
- struct
- {
- unsigned int v1;
- unsigned int v2;
- unsigned int v3;
- unsigned int v4;
- }values;
-} read_st;
-
-
-typedef void __attribute__ ((cmse_nonsecure_call)) (*foo_ns) (test_st);
-
-int
-main (void)
-{
- read_st r;
- foo_ns f;
-
- f = (foo_ns) 0x200000;
- r.values.v1 = 0xFFFFFFFF;
- r.values.v2 = 0xFFFFFFFF;
-
- f (r.st);
- return 0;
-}
+#include "../bitfield-5.x"
/* { dg-final { scan-assembler "movw\tip, #8191" } } */
/* { dg-final { scan-assembler "movt\tip, 255" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-6.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-6.c
index 005515ab9cb..03c294ea323 100644
--- a/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-6.c
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-6.c
@@ -1,51 +1,7 @@
/* { dg-do compile } */
/* { dg-options "-mcmse" } */
-typedef struct
-{
- unsigned char a;
- unsigned int b : 3;
- unsigned int c : 14;
- unsigned int d : 1;
- struct {
- unsigned int ee : 2;
- unsigned short ff : 15;
- } e;
- unsigned char g : 1;
- unsigned char : 4;
- unsigned char h : 3;
-} test_st;
-
-typedef union
-{
- test_st st;
- struct
- {
- unsigned int v1;
- unsigned int v2;
- unsigned int v3;
- unsigned int v4;
- }values;
-} read_st;
-
-
-typedef void __attribute__ ((cmse_nonsecure_call)) (*foo_ns) (test_st);
-
-int
-main (void)
-{
- read_st r;
- foo_ns f;
-
- f = (foo_ns) 0x200000;
- r.values.v1 = 0xFFFFFFFF;
- r.values.v2 = 0xFFFFFFFF;
- r.values.v3 = 0xFFFFFFFF;
- r.values.v4 = 0xFFFFFFFF;
-
- f (r.st);
- return 0;
-}
+#include "../bitfield-6.x"
/* { dg-final { scan-assembler "movw\tip, #65535" } } */
/* { dg-final { scan-assembler "movt\tip, 1023" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-7.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-7.c
index 6dd218e62fd..7692a69b159 100644
--- a/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-7.c
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-7.c
@@ -1,43 +1,7 @@
/* { dg-do compile } */
/* { dg-options "-mcmse" } */
-typedef struct
-{
- unsigned char a;
- unsigned short b :5;
- unsigned char c;
- unsigned short d :11;
-} test_st;
-
-typedef union
-{
- test_st st;
- struct
- {
- unsigned int v1;
- unsigned int v2;
- unsigned int v3;
- unsigned int v4;
- }values;
-} read_st;
-
-
-typedef void __attribute__ ((cmse_nonsecure_call)) (*foo_ns) (test_st);
-
-int
-main (void)
-{
- read_st r;
- foo_ns f;
-
- f = (foo_ns) 0x200000;
- r.values.v1 = 0xFFFFFFFF;
- r.values.v2 = 0xFFFFFFFF;
-
- f (r.st);
- return 0;
-}
-
+#include "../bitfield-7.x"
/* { dg-final { scan-assembler "movw\tip, #8191" } } */
/* { dg-final { scan-assembler "movt\tip, 255" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-8.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-8.c
index c833bcb0ae9..a0a488775fe 100644
--- a/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-8.c
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-8.c
@@ -1,45 +1,7 @@
/* { dg-do compile } */
/* { dg-options "-mcmse" } */
-typedef struct
-{
- unsigned char a;
- unsigned int :0;
- unsigned int b :1;
- unsigned short :0;
- unsigned short c;
- unsigned int :0;
- unsigned int d :21;
-} test_st;
-
-typedef union
-{
- test_st st;
- struct
- {
- unsigned int v1;
- unsigned int v2;
- unsigned int v3;
- unsigned int v4;
- }values;
-} read_st;
-
-typedef void __attribute__ ((cmse_nonsecure_call)) (*foo_ns) (test_st);
-
-int
-main (void)
-{
- read_st r;
- foo_ns f;
-
- f = (foo_ns) 0x200000;
- r.values.v1 = 0xFFFFFFFF;
- r.values.v2 = 0xFFFFFFFF;
- r.values.v3 = 0xFFFFFFFF;
-
- f (r.st);
- return 0;
-}
+#include "../bitfield-8.x"
/* { dg-final { scan-assembler "mov\tip, #255" } } */
/* { dg-final { scan-assembler "and\tr0, r0, ip" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-9.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-9.c
index d6e4cdb8c44..8bfeeb0bbf6 100644
--- a/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-9.c
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-9.c
@@ -1,48 +1,7 @@
/* { dg-do compile } */
/* { dg-options "-mcmse" } */
-typedef struct
-{
- char a:3;
-} test_st3;
-
-typedef struct
-{
- char a:3;
-} test_st2;
-
-typedef struct
-{
- test_st2 st2;
- test_st3 st3;
-} test_st;
-
-typedef union
-{
- test_st st;
- struct
- {
- unsigned int v1;
- unsigned int v2;
- unsigned int v3;
- unsigned int v4;
- }values;
-} read_st;
-
-typedef void __attribute__ ((cmse_nonsecure_call)) (*foo_ns) (test_st);
-
-int
-main (void)
-{
- read_st r;
- foo_ns f;
-
- f = (foo_ns) 0x200000;
- r.values.v1 = 0xFFFFFFFF;
-
- f (r.st);
- return 0;
-}
+#include "../bitfield-9.x"
/* { dg-final { scan-assembler "movw\tip, #1799" } } */
/* { dg-final { scan-assembler "and\tr0, r0, ip" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-and-union.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-and-union.c
new file mode 100644
index 00000000000..aac5ae1a052
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-and-union.c
@@ -0,0 +1,20 @@
+/* { dg-do compile } */
+/* { dg-options "-mcmse" } */
+
+#include "../bitfield-and-union.x"
+
+/* { dg-final { scan-assembler "movw\tip, #7939" } } */
+/* { dg-final { scan-assembler "movt\tip, 15" } } */
+/* { dg-final { scan-assembler "and\tr0, r0, ip" } } */
+/* { dg-final { scan-assembler "movw\tip, #65535" } } */
+/* { dg-final { scan-assembler "movt\tip, 2047" } } */
+/* { dg-final { scan-assembler "and\tr1, r1, ip" } } */
+/* { dg-final { scan-assembler "mov\tip, #1" } } */
+/* { dg-final { scan-assembler "movt\tip, 65535" } } */
+/* { dg-final { scan-assembler "and\tr2, r2, ip" } } */
+/* { dg-final { scan-assembler "movw\tip, #65535" } } */
+/* { dg-final { scan-assembler "movt\tip, 31" } } */
+/* { dg-final { scan-assembler "and\tr3, r3, ip" } } */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/hard-sp/cmse-13.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/hard-sp/cmse-13.c
index d90ad811fc1..6f4d6b4b755 100644
--- a/gcc/testsuite/gcc.target/arm/cmse/mainline/hard-sp/cmse-13.c
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/hard-sp/cmse-13.c
@@ -1,18 +1,11 @@
/* { dg-do compile } */
+/* { dg-options "-mcmse -mfloat-abi=hard -mfpu=fpv5-sp-d16" } */
/* { dg-require-effective-target arm_arch_v8m_main_ok } */
/* { dg-add-options arm_arch_v8m_main } */
/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" -mfloat-abi=softfp } {""} } */
/* { dg-skip-if "Skip these if testing double precision" {*-*-*} {"-mfpu=fpv[4-5]-d16"} {""} } */
-/* { dg-options "-mcmse -mfloat-abi=hard -mfpu=fpv5-sp-d16" } */
-
-
-int __attribute__ ((cmse_nonsecure_call)) (*bar) (float, double);
-int
-foo (int a)
-{
- return bar (3.0f, 2.0) + a + 1;
-}
+#include "../../cmse-13.x"
/* Checks for saving and clearing prior to function call. */
/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/hard-sp/cmse-5.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/hard-sp/cmse-5.c
index 88dec276281..0ae2a51990b 100644
--- a/gcc/testsuite/gcc.target/arm/cmse/mainline/hard-sp/cmse-5.c
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/hard-sp/cmse-5.c
@@ -1,17 +1,12 @@
/* { dg-do compile } */
+/* { dg-options "-mcmse -mfloat-abi=hard -mfpu=fpv5-sp-d16" } */
/* { dg-require-effective-target arm_arch_v8m_main_ok } */
/* { dg-add-options arm_arch_v8m_main } */
/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" -mfloat-abi=softfp } {""} } */
/* { dg-skip-if "Skip these if testing double precision" {*-*-*} {"-mfpu=fpv[4-5]-d16"} {""} } */
-/* { dg-options "-mcmse -mfloat-abi=hard -mfpu=fpv5-sp-d16" } */
-extern float bar (void);
+#include "../../cmse-5.x"
-float __attribute__ ((cmse_nonsecure_entry))
-foo (void)
-{
- return bar ();
-}
/* { dg-final { scan-assembler "mov\tr0, lr" } } */
/* { dg-final { scan-assembler "mov\tr1, lr" } } */
/* { dg-final { scan-assembler "mov\tr2, lr" } } */
@@ -32,8 +27,8 @@ foo (void)
/* { dg-final { scan-assembler "vmov\.f32\ts13, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts14, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts15, #1\.0" } } */
-/* { dg-final { scan-assembler "msr\tAPSR_nzcvq, lr" { target { arm_arch_v8m_main_ok && { ! arm_dsp } } } } } */
-/* { dg-final { scan-assembler "msr\tAPSR_nzcvqg, lr" { target { arm_arch_v8m_main_ok && arm_dsp } } } } */
+/* { dg-final { scan-assembler "msr\tAPSR_nzcvq, lr" { target { ! arm_dsp } } } } */
+/* { dg-final { scan-assembler "msr\tAPSR_nzcvqg, lr" { target arm_dsp } } } */
/* { dg-final { scan-assembler "push\t{r4}" } } */
/* { dg-final { scan-assembler "vmrs\tip, fpscr" } } */
/* { dg-final { scan-assembler "movw\tr4, #65376" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/hard-sp/cmse-7.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/hard-sp/cmse-7.c
index c047cd51c94..141ba73484c 100644
--- a/gcc/testsuite/gcc.target/arm/cmse/mainline/hard-sp/cmse-7.c
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/hard-sp/cmse-7.c
@@ -1,17 +1,11 @@
/* { dg-do compile } */
+/* { dg-options "-mcmse -mfloat-abi=hard -mfpu=fpv5-sp-d16" } */
/* { dg-require-effective-target arm_arch_v8m_main_ok } */
/* { dg-add-options arm_arch_v8m_main } */
/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" -mfloat-abi=softfp } {""} } */
/* { dg-skip-if "Skip these if testing double precision" {*-*-*} {"-mfpu=fpv[4-5]-d16"} {""} } */
-/* { dg-options "-mcmse -mfloat-abi=hard -mfpu=fpv5-sp-d16" } */
-
-int __attribute__ ((cmse_nonsecure_call)) (*bar) (void);
-int
-foo (int a)
-{
- return bar () + a + 1;
-}
+#include "../../cmse-7.x"
/* Checks for saving and clearing prior to function call. */
/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/hard-sp/cmse-8.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/hard-sp/cmse-8.c
index 20d2d4a8fb1..6c5e688f220 100644
--- a/gcc/testsuite/gcc.target/arm/cmse/mainline/hard-sp/cmse-8.c
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/hard-sp/cmse-8.c
@@ -1,17 +1,11 @@
/* { dg-do compile } */
+/* { dg-options "-mcmse -mfloat-abi=hard -mfpu=fpv5-sp-d16" } */
/* { dg-require-effective-target arm_arch_v8m_main_ok } */
/* { dg-add-options arm_arch_v8m_main } */
/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" -mfloat-abi=softfp } {""} } */
/* { dg-skip-if "Skip these if testing double precision" {*-*-*} {"-mfpu=fpv[4-5]-d16"} {""} } */
-/* { dg-options "-mcmse -mfloat-abi=hard -mfpu=fpv5-sp-d16" } */
-
-int __attribute__ ((cmse_nonsecure_call)) (*bar) (double);
-int
-foo (int a)
-{
- return bar (2.0) + a + 1;
-}
+#include "../../cmse-8.x"
/* Checks for saving and clearing prior to function call. */
/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/hard/cmse-13.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/hard/cmse-13.c
index 0af586a7fd1..d35321bfda8 100644
--- a/gcc/testsuite/gcc.target/arm/cmse/mainline/hard/cmse-13.c
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/hard/cmse-13.c
@@ -1,18 +1,11 @@
/* { dg-do compile } */
+/* { dg-options "-mcmse -mfloat-abi=hard -mfpu=fpv5-d16" } */
/* { dg-require-effective-target arm_arch_v8m_main_ok } */
/* { dg-add-options arm_arch_v8m_main } */
/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" -mfloat-abi=softfp } {""} } */
/* { dg-skip-if "Skip these if testing single precision" {*-*-*} {"-mfpu=*-sp-*"} {""} } */
-/* { dg-options "-mcmse -mfloat-abi=hard -mfpu=fpv5-d16" } */
-
-
-int __attribute__ ((cmse_nonsecure_call)) (*bar) (float, double);
-int
-foo (int a)
-{
- return bar (3.0f, 2.0) + a + 1;
-}
+#include "../../cmse-13.x"
/* Checks for saving and clearing prior to function call. */
/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/hard/cmse-5.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/hard/cmse-5.c
index 29f60baf521..955f749cb72 100644
--- a/gcc/testsuite/gcc.target/arm/cmse/mainline/hard/cmse-5.c
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/hard/cmse-5.c
@@ -1,17 +1,12 @@
/* { dg-do compile } */
+/* { dg-options "-mcmse -mfloat-abi=hard -mfpu=fpv5-d16" } */
/* { dg-require-effective-target arm_arch_v8m_main_ok } */
/* { dg-add-options arm_arch_v8m_main } */
/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" -mfloat-abi=softfp } {""} } */
/* { dg-skip-if "Skip these if testing single precision" {*-*-*} {"-mfpu=*-sp-*"} {""} } */
-/* { dg-options "-mcmse -mfloat-abi=hard -mfpu=fpv5-d16" } */
-extern float bar (void);
+#include "../../cmse-5.x"
-float __attribute__ ((cmse_nonsecure_entry))
-foo (void)
-{
- return bar ();
-}
/* { dg-final { scan-assembler "mov\tr0, lr" } } */
/* { dg-final { scan-assembler "mov\tr1, lr" } } */
/* { dg-final { scan-assembler "mov\tr2, lr" } } */
@@ -25,8 +20,8 @@ foo (void)
/* { dg-final { scan-assembler "vmov\.f64\td5, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td6, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td7, #1\.0" } } */
-/* { dg-final { scan-assembler "msr\tAPSR_nzcvq, lr" { target { arm_arch_v8m_main_ok && { ! arm_dsp } } } } } */
-/* { dg-final { scan-assembler "msr\tAPSR_nzcvqg, lr" { target { arm_arch_v8m_main_ok && arm_dsp } } } } */
+/* { dg-final { scan-assembler "msr\tAPSR_nzcvq, lr" { target { ! arm_dsp } } } } */
+/* { dg-final { scan-assembler "msr\tAPSR_nzcvqg, lr" { target arm_dsp } } } */
/* { dg-final { scan-assembler "push\t{r4}" } } */
/* { dg-final { scan-assembler "vmrs\tip, fpscr" } } */
/* { dg-final { scan-assembler "movw\tr4, #65376" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/hard/cmse-7.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/hard/cmse-7.c
index a5c64fb06ed..858555b8d89 100644
--- a/gcc/testsuite/gcc.target/arm/cmse/mainline/hard/cmse-7.c
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/hard/cmse-7.c
@@ -1,17 +1,11 @@
/* { dg-do compile } */
+/* { dg-options "-mcmse -mfloat-abi=hard -mfpu=fpv5-d16" } */
/* { dg-require-effective-target arm_arch_v8m_main_ok } */
/* { dg-add-options arm_arch_v8m_main } */
/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" -mfloat-abi=softfp } {""} } */
/* { dg-skip-if "Skip these if testing single precision" {*-*-*} {"-mfpu=*-sp-*"} {""} } */
-/* { dg-options "-mcmse -mfloat-abi=hard -mfpu=fpv5-d16" } */
-
-int __attribute__ ((cmse_nonsecure_call)) (*bar) (void);
-int
-foo (int a)
-{
- return bar () + a + 1;
-}
+#include "../../cmse-7.x"
/* Checks for saving and clearing prior to function call. */
/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/hard/cmse-8.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/hard/cmse-8.c
index 5e041b17b0e..f85d68a3eff 100644
--- a/gcc/testsuite/gcc.target/arm/cmse/mainline/hard/cmse-8.c
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/hard/cmse-8.c
@@ -1,17 +1,11 @@
/* { dg-do compile } */
+/* { dg-options "-mcmse -mfloat-abi=hard -mfpu=fpv5-d16" } */
/* { dg-require-effective-target arm_arch_v8m_main_ok } */
/* { dg-add-options arm_arch_v8m_main } */
/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" -mfloat-abi=softfp } {""} } */
/* { dg-skip-if "Skip these if testing single precision" {*-*-*} {"-mfpu=*-sp-*"} {""} } */
-/* { dg-options "-mcmse -mfloat-abi=hard -mfpu=fpv5-d16" } */
-
-int __attribute__ ((cmse_nonsecure_call)) (*bar) (double);
-int
-foo (int a)
-{
- return bar (2.0) + a + 1;
-}
+#include "../../cmse-8.x"
/* Checks for saving and clearing prior to function call. */
/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/soft/cmse-13.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/soft/cmse-13.c
index dbbd262c890..11d44550de9 100644
--- a/gcc/testsuite/gcc.target/arm/cmse/mainline/soft/cmse-13.c
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/soft/cmse-13.c
@@ -1,16 +1,10 @@
/* { dg-do compile } */
+/* { dg-options "-mcmse -mfloat-abi=soft" } */
/* { dg-require-effective-target arm_arch_v8m_main_ok } */
/* { dg-add-options arm_arch_v8m_main } */
/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=hard" -mfloat-abi=softfp } {""} } */
-/* { dg-options "-mcmse -mfloat-abi=soft" } */
-
-int __attribute__ ((cmse_nonsecure_call)) (*bar) (float, double);
-int
-foo (int a)
-{
- return bar (1.0f, 2.0) + a + 1;
-}
+#include "../../cmse-13.x"
/* Checks for saving and clearing prior to function call. */
/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/soft/cmse-5.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/soft/cmse-5.c
index a7229ea8eb2..dfd2fe6323a 100644
--- a/gcc/testsuite/gcc.target/arm/cmse/mainline/soft/cmse-5.c
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/soft/cmse-5.c
@@ -1,16 +1,10 @@
/* { dg-do compile } */
+/* { dg-options "-mcmse -mfloat-abi=soft" } */
/* { dg-require-effective-target arm_arch_v8m_main_ok } */
/* { dg-add-options arm_arch_v8m_main } */
/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=hard" -mfloat-abi=softfp } {""} } */
-/* { dg-options "-mcmse -mfloat-abi=soft" } */
-
-extern float bar (void);
-float __attribute__ ((cmse_nonsecure_entry))
-foo (void)
-{
- return bar ();
-}
+#include "../../cmse-5.x"
/* { dg-final { scan-assembler "mov\tr1, lr" } } */
/* { dg-final { scan-assembler "mov\tr2, lr" } } */
@@ -18,7 +12,7 @@ foo (void)
/* { dg-final { scan-assembler "mov\tip, lr" } } */
/* { dg-final { scan-assembler-not "vmov" } } */
/* { dg-final { scan-assembler-not "vmsr" } } */
-/* { dg-final { scan-assembler "msr\tAPSR_nzcvq, lr" { target { arm_arch_v8m_main_ok && { ! arm_dsp } } } } } */
-/* { dg-final { scan-assembler "msr\tAPSR_nzcvqg, lr" { target { arm_arch_v8m_main_ok && arm_dsp } } } } */
+/* { dg-final { scan-assembler "msr\tAPSR_nzcvq, lr" { target { ! arm_dsp } } } } */
+/* { dg-final { scan-assembler "msr\tAPSR_nzcvqg, lr" { target arm_dsp } } } */
/* { dg-final { scan-assembler "bxns" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/soft/cmse-7.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/soft/cmse-7.c
index e33568400ef..76ca271278e 100644
--- a/gcc/testsuite/gcc.target/arm/cmse/mainline/soft/cmse-7.c
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/soft/cmse-7.c
@@ -1,16 +1,10 @@
/* { dg-do compile } */
+/* { dg-options "-mcmse -mfloat-abi=soft" } */
/* { dg-require-effective-target arm_arch_v8m_main_ok } */
/* { dg-add-options arm_arch_v8m_main } */
/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=hard" -mfloat-abi=softfp } {""} } */
-/* { dg-options "-mcmse -mfloat-abi=soft" } */
-
-int __attribute__ ((cmse_nonsecure_call)) (*bar) (void);
-int
-foo (int a)
-{
- return bar () + a + 1;
-}
+#include "../../cmse-7.x"
/* Checks for saving and clearing prior to function call. */
/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/soft/cmse-8.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/soft/cmse-8.c
index 024a12e0a41..a917aa7778a 100644
--- a/gcc/testsuite/gcc.target/arm/cmse/mainline/soft/cmse-8.c
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/soft/cmse-8.c
@@ -1,16 +1,10 @@
/* { dg-do compile } */
+/* { dg-options "-mcmse -mfloat-abi=soft" } */
/* { dg-require-effective-target arm_arch_v8m_main_ok } */
/* { dg-add-options arm_arch_v8m_main } */
/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=hard" -mfloat-abi=softfp } {""} } */
-/* { dg-options "-mcmse -mfloat-abi=soft" } */
-
-int __attribute__ ((cmse_nonsecure_call)) (*bar) (double);
-int
-foo (int a)
-{
- return bar (2.0) + a + 1;
-}
+#include "../../cmse-8.x"
/* Checks for saving and clearing prior to function call. */
/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp-sp/cmse-5.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp-sp/cmse-5.c
index 7734d77dc38..01e5d659fe2 100644
--- a/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp-sp/cmse-5.c
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp-sp/cmse-5.c
@@ -1,17 +1,12 @@
/* { dg-do compile } */
+/* { dg-options "-mcmse -mfloat-abi=softfp -mfpu=fpv5-sp-d16" } */
/* { dg-require-effective-target arm_arch_v8m_main_ok } */
/* { dg-add-options arm_arch_v8m_main } */
/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" -mfloat-abi=hard } {""} } */
/* { dg-skip-if "Skip these if testing double precision" {*-*-*} {"-mfpu=fpv[4-5]-d16"} {""} } */
-/* { dg-options "-mcmse -mfloat-abi=softfp -mfpu=fpv5-sp-d16" } */
-extern float bar (void);
+#include "../../cmse-5.x"
-float __attribute__ ((cmse_nonsecure_entry))
-foo (void)
-{
- return bar ();
-}
/* { dg-final { scan-assembler "__acle_se_foo:" } } */
/* { dg-final { scan-assembler-not "mov\tr0, lr" } } */
/* { dg-final { scan-assembler "mov\tr1, lr" } } */
@@ -33,8 +28,8 @@ foo (void)
/* { dg-final { scan-assembler "vmov\.f32\ts13, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts14, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts15, #1\.0" } } */
-/* { dg-final { scan-assembler "msr\tAPSR_nzcvq, lr" { target { arm_arch_v8m_main_ok && { ! arm_dsp } } } } } */
-/* { dg-final { scan-assembler "msr\tAPSR_nzcvqg, lr" { target { arm_arch_v8m_main_ok && arm_dsp } } } } */
+/* { dg-final { scan-assembler "msr\tAPSR_nzcvq, lr" { target { ! arm_dsp } } } } */
+/* { dg-final { scan-assembler "msr\tAPSR_nzcvqg, lr" { target arm_dsp } } } */
/* { dg-final { scan-assembler "push\t{r4}" } } */
/* { dg-final { scan-assembler "vmrs\tip, fpscr" } } */
/* { dg-final { scan-assembler "movw\tr4, #65376" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp-sp/cmse-7.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp-sp/cmse-7.c
index fb195eb58d5..5d904786e41 100644
--- a/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp-sp/cmse-7.c
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp-sp/cmse-7.c
@@ -1,17 +1,11 @@
/* { dg-do compile } */
+/* { dg-options "-mcmse -mfloat-abi=softfp -mfpu=fpv5-sp-d16" } */
/* { dg-require-effective-target arm_arch_v8m_main_ok } */
/* { dg-add-options arm_arch_v8m_main } */
/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" -mfloat-abi=hard } {""} } */
/* { dg-skip-if "Skip these if testing double precision" {*-*-*} {"-mfpu=fpv[4-5]-d16"} {""} } */
-/* { dg-options "-mcmse -mfloat-abi=softfp -mfpu=fpv5-sp-d16" } */
-
-int __attribute__ ((cmse_nonsecure_call)) (*bar) (void);
-int
-foo (int a)
-{
- return bar () + a + 1;
-}
+#include "../../cmse-7.x"
/* Checks for saving and clearing prior to function call. */
/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp-sp/cmse-8.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp-sp/cmse-8.c
index 22ed3f8af88..3feee43c423 100644
--- a/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp-sp/cmse-8.c
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp-sp/cmse-8.c
@@ -1,17 +1,11 @@
/* { dg-do compile } */
+/* { dg-options "-mcmse -mfloat-abi=softfp -mfpu=fpv5-sp-d16" } */
/* { dg-require-effective-target arm_arch_v8m_main_ok } */
/* { dg-add-options arm_arch_v8m_main } */
/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" -mfloat-abi=hard } {""} } */
/* { dg-skip-if "Skip these if testing double precision" {*-*-*} {"-mfpu=fpv[4-5]-d16"} {""} } */
-/* { dg-options "-mcmse -mfloat-abi=softfp -mfpu=fpv5-sp-d16" } */
-
-int __attribute__ ((cmse_nonsecure_call)) (*bar) (double);
-int
-foo (int a)
-{
- return bar (2.0) + a + 1;
-}
+#include "../../cmse-8.x"
/* Checks for saving and clearing prior to function call. */
/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp/cmse-13.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp/cmse-13.c
index 9634065e7cb..4eb984f4479 100644
--- a/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp/cmse-13.c
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp/cmse-13.c
@@ -1,17 +1,11 @@
/* { dg-do compile } */
+/* { dg-options "-mcmse -mfloat-abi=softfp -mfpu=fpv5-d16" } */
/* { dg-require-effective-target arm_arch_v8m_main_ok } */
/* { dg-add-options arm_arch_v8m_main } */
/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" -mfloat-abi=hard } {""} } */
/* { dg-skip-if "Skip these if testing single precision" {*-*-*} {"-mfpu=*-sp-*"} {""} } */
-/* { dg-options "-mcmse -mfloat-abi=softfp -mfpu=fpv5-d16" } */
-
-int __attribute__ ((cmse_nonsecure_call)) (*bar) (float, double);
-int
-foo (int a)
-{
- return bar (1.0f, 2.0) + a + 1;
-}
+#include "../../cmse-13.x"
/* Checks for saving and clearing prior to function call. */
/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp/cmse-5.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp/cmse-5.c
index 6addaa1a4ed..4815a480f66 100644
--- a/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp/cmse-5.c
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp/cmse-5.c
@@ -1,17 +1,12 @@
/* { dg-do compile } */
+/* { dg-options "-mcmse -mfloat-abi=softfp -mfpu=fpv5-d16" } */
/* { dg-require-effective-target arm_arch_v8m_main_ok } */
/* { dg-add-options arm_arch_v8m_main } */
/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" -mfloat-abi=hard } {""} } */
/* { dg-skip-if "Skip these if testing single precision" {*-*-*} {"-mfpu=*-sp-*"} {""} } */
-/* { dg-options "-mcmse -mfloat-abi=softfp -mfpu=fpv5-d16" } */
-extern float bar (void);
+#include "../../cmse-5.x"
-float __attribute__ ((cmse_nonsecure_entry))
-foo (void)
-{
- return bar ();
-}
/* { dg-final { scan-assembler "__acle_se_foo:" } } */
/* { dg-final { scan-assembler-not "mov\tr0, lr" } } */
/* { dg-final { scan-assembler "mov\tr1, lr" } } */
@@ -25,8 +20,8 @@ foo (void)
/* { dg-final { scan-assembler "vmov\.f64\td5, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td6, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td7, #1\.0" } } */
-/* { dg-final { scan-assembler "msr\tAPSR_nzcvq, lr" { target { arm_arch_v8m_main_ok && { ! arm_dsp } } } } } */
-/* { dg-final { scan-assembler "msr\tAPSR_nzcvqg, lr" { target { arm_arch_v8m_main_ok && arm_dsp } } } } */
+/* { dg-final { scan-assembler "msr\tAPSR_nzcvq, lr" { target { ! arm_dsp } } } } */
+/* { dg-final { scan-assembler "msr\tAPSR_nzcvqg, lr" { target arm_dsp } } } */
/* { dg-final { scan-assembler "push\t{r4}" } } */
/* { dg-final { scan-assembler "vmrs\tip, fpscr" } } */
/* { dg-final { scan-assembler "movw\tr4, #65376" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp/cmse-7.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp/cmse-7.c
index 04f8466cc11..5535c5514b1 100644
--- a/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp/cmse-7.c
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp/cmse-7.c
@@ -1,17 +1,11 @@
/* { dg-do compile } */
+/* { dg-options "-mcmse -mfloat-abi=softfp -mfpu=fpv5-d16" } */
/* { dg-require-effective-target arm_arch_v8m_main_ok } */
/* { dg-add-options arm_arch_v8m_main } */
/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" -mfloat-abi=hard } {""} } */
/* { dg-skip-if "Skip these if testing single precision" {*-*-*} {"-mfpu=*-sp-*"} {""} } */
-/* { dg-options "-mcmse -mfloat-abi=softfp -mfpu=fpv5-d16" } */
-
-int __attribute__ ((cmse_nonsecure_call)) (*bar) (void);
-int
-foo (int a)
-{
- return bar () + a + 1;
-}
+#include "../../cmse-7.x"
/* Checks for saving and clearing prior to function call. */
/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp/cmse-8.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp/cmse-8.c
index ffe94de8541..6663fc43f5f 100644
--- a/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp/cmse-8.c
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp/cmse-8.c
@@ -1,17 +1,11 @@
/* { dg-do compile } */
+/* { dg-options "-mcmse -mfloat-abi=softfp -mfpu=fpv5-d16" } */
/* { dg-require-effective-target arm_arch_v8m_main_ok } */
/* { dg-add-options arm_arch_v8m_main } */
/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" -mfloat-abi=hard } {""} } */
/* { dg-skip-if "Skip these if testing single precision" {*-*-*} {"-mfpu=*-sp-*"} {""} } */
-/* { dg-options "-mcmse -mfloat-abi=softfp -mfpu=fpv5-d16" } */
-
-int __attribute__ ((cmse_nonsecure_call)) (*bar) (double);
-int
-foo (int a)
-{
- return bar (2.0) + a + 1;
-}
+#include "../../cmse-8.x"
/* Checks for saving and clearing prior to function call. */
/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/union-1.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/union-1.c
index 1fc846cd7a5..071955f206c 100644
--- a/gcc/testsuite/gcc.target/arm/cmse/mainline/union-1.c
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/union-1.c
@@ -1,60 +1,7 @@
/* { dg-do compile } */
/* { dg-options "-mcmse" } */
-typedef struct
-{
- unsigned char a :2;
- unsigned char :0;
- unsigned short b :5;
- unsigned char :0;
- unsigned short c :3;
- unsigned char :0;
- unsigned int d :9;
-} test_st_1;
-
-typedef struct
-{
- unsigned short a :7;
- unsigned char :0;
- unsigned char b :1;
- unsigned char :0;
- unsigned short c :6;
-} test_st_2;
-
-typedef union
-{
- test_st_1 st_1;
- test_st_2 st_2;
-}test_un;
-
-typedef union
-{
- test_un un;
- struct
- {
- unsigned int v1;
- unsigned int v2;
- unsigned int v3;
- unsigned int v4;
- }values;
-} read_un;
-
-
-typedef void __attribute__ ((cmse_nonsecure_call)) (*foo_ns) (test_un);
-
-int
-main (void)
-{
- read_un r;
- foo_ns f;
-
- f = (foo_ns) 0x200000;
- r.values.v1 = 0xFFFFFFFF;
- r.values.v2 = 0xFFFFFFFF;
-
- f (r.un);
- return 0;
-}
+#include "../union-1.x"
/* { dg-final { scan-assembler "movw\tip, #8063" } } */
/* { dg-final { scan-assembler "movt\tip, 63" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/union-2.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/union-2.c
index 420d0f136ef..c7431930ff9 100644
--- a/gcc/testsuite/gcc.target/arm/cmse/mainline/union-2.c
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/union-2.c
@@ -1,73 +1,7 @@
/* { dg-do compile } */
/* { dg-options "-mcmse" } */
-typedef struct
-{
- unsigned char a :2;
- unsigned char :0;
- unsigned short b :5;
- unsigned char :0;
- unsigned short c :3;
- unsigned char :0;
- unsigned int d :9;
-} test_st_1;
-
-typedef struct
-{
- unsigned short a :7;
- unsigned char :0;
- unsigned char b :1;
- unsigned char :0;
- unsigned short c :6;
-} test_st_2;
-
-typedef struct
-{
- unsigned char a;
- unsigned int :0;
- unsigned int b :1;
- unsigned short :0;
- unsigned short c;
- unsigned int :0;
- unsigned int d :21;
-} test_st_3;
-
-typedef union
-{
- test_st_1 st_1;
- test_st_2 st_2;
- test_st_3 st_3;
-}test_un;
-
-typedef union
-{
- test_un un;
- struct
- {
- unsigned int v1;
- unsigned int v2;
- unsigned int v3;
- unsigned int v4;
- }values;
-} read_un;
-
-
-typedef void __attribute__ ((cmse_nonsecure_call)) (*foo_ns) (test_un);
-
-int
-main (void)
-{
- read_un r;
- foo_ns f;
-
- f = (foo_ns) 0x200000;
- r.values.v1 = 0xFFFFFFFF;
- r.values.v2 = 0xFFFFFFFF;
- r.values.v3 = 0xFFFFFFFF;
-
- f (r.un);
- return 0;
-}
+#include "../union-2.x"
/* { dg-final { scan-assembler "movw\tip, #8191" } } */
/* { dg-final { scan-assembler "movt\tip, 63" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/union-1.x b/gcc/testsuite/gcc.target/arm/cmse/union-1.x
new file mode 100644
index 00000000000..8fe95351495
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/union-1.x
@@ -0,0 +1,54 @@
+typedef struct
+{
+ unsigned char a :2;
+ unsigned char :0;
+ unsigned short b :5;
+ unsigned char :0;
+ unsigned short c :3;
+ unsigned char :0;
+ unsigned int d :9;
+} test_st_1;
+
+typedef struct
+{
+ unsigned short a :7;
+ unsigned char :0;
+ unsigned char b :1;
+ unsigned char :0;
+ unsigned short c :6;
+} test_st_2;
+
+typedef union
+{
+ test_st_1 st_1;
+ test_st_2 st_2;
+}test_un;
+
+typedef union
+{
+ test_un un;
+ struct
+ {
+ unsigned int v1;
+ unsigned int v2;
+ unsigned int v3;
+ unsigned int v4;
+ }values;
+} read_un;
+
+
+typedef void __attribute__ ((cmse_nonsecure_call)) (*foo_ns) (test_un);
+
+int
+main (void)
+{
+ read_un r;
+ foo_ns f;
+
+ f = (foo_ns) 0x200000;
+ r.values.v1 = 0xFFFFFFFF;
+ r.values.v2 = 0xFFFFFFFF;
+
+ f (r.un);
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/arm/cmse/union-2.x b/gcc/testsuite/gcc.target/arm/cmse/union-2.x
new file mode 100644
index 00000000000..8a880e7cb5f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/union-2.x
@@ -0,0 +1,67 @@
+typedef struct
+{
+ unsigned char a :2;
+ unsigned char :0;
+ unsigned short b :5;
+ unsigned char :0;
+ unsigned short c :3;
+ unsigned char :0;
+ unsigned int d :9;
+} test_st_1;
+
+typedef struct
+{
+ unsigned short a :7;
+ unsigned char :0;
+ unsigned char b :1;
+ unsigned char :0;
+ unsigned short c :6;
+} test_st_2;
+
+typedef struct
+{
+ unsigned char a;
+ unsigned int :0;
+ unsigned int b :1;
+ unsigned short :0;
+ unsigned short c;
+ unsigned int :0;
+ unsigned int d :21;
+} test_st_3;
+
+typedef union
+{
+ test_st_1 st_1;
+ test_st_2 st_2;
+ test_st_3 st_3;
+}test_un;
+
+typedef union
+{
+ test_un un;
+ struct
+ {
+ unsigned int v1;
+ unsigned int v2;
+ unsigned int v3;
+ unsigned int v4;
+ }values;
+} read_un;
+
+
+typedef void __attribute__ ((cmse_nonsecure_call)) (*foo_ns) (test_un);
+
+int
+main (void)
+{
+ read_un r;
+ foo_ns f;
+
+ f = (foo_ns) 0x200000;
+ r.values.v1 = 0xFFFFFFFF;
+ r.values.v2 = 0xFFFFFFFF;
+ r.values.v3 = 0xFFFFFFFF;
+
+ f (r.un);
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/arm/copysign_softfloat_1.c b/gcc/testsuite/gcc.target/arm/copysign_softfloat_1.c
index 1260a6f8eeb..d79d014e27c 100644
--- a/gcc/testsuite/gcc.target/arm/copysign_softfloat_1.c
+++ b/gcc/testsuite/gcc.target/arm/copysign_softfloat_1.c
@@ -1,5 +1,6 @@
/* { dg-do run } */
/* { dg-require-effective-target arm_thumb2_ok } */
+/* { dg-require-effective-target arm_soft_ok } */
/* { dg-skip-if "skip override" { *-*-* } { "-mfloat-abi=softfp" "-mfloat-abi=hard" } { "" } } */
/* { dg-options "-O2 -mfloat-abi=soft --save-temps" } */
extern void abort (void);
diff --git a/gcc/testsuite/gcc.target/arm/lp1189445.c b/gcc/testsuite/gcc.target/arm/lp1189445.c
index 766748e5509..4866953558a 100644
--- a/gcc/testsuite/gcc.target/arm/lp1189445.c
+++ b/gcc/testsuite/gcc.target/arm/lp1189445.c
@@ -1,7 +1,7 @@
/* { dg-do compile } */
+/* { dg-options "-O3" } */
/* { dg-require-effective-target arm_neon } */
/* { dg-add-options arm_neon } */
-/* { dg-options "-O3" } */
int id;
int
diff --git a/gcc/testsuite/gcc.target/arm/pr54300.C b/gcc/testsuite/gcc.target/arm/pr54300.C
index eb1a74e36cf..9105e279b33 100644
--- a/gcc/testsuite/gcc.target/arm/pr54300.C
+++ b/gcc/testsuite/gcc.target/arm/pr54300.C
@@ -51,6 +51,7 @@ test(unsigned short *_Inp, int32_t *_Out,
vst1q_s32( _Out, c );
}
+int
main()
{
unsigned short a[4] = {1, 2, 3, 4};
@@ -58,4 +59,5 @@ main()
test(a, b, 1, 1, ~0);
if (b[0] != 1 || b[1] != 2 || b[2] != 3 || b[3] != 4)
abort();
+ return 0;
}
diff --git a/gcc/testsuite/gcc.target/arm/pr67989.C b/gcc/testsuite/gcc.target/arm/pr67989.C
index 0006924e24f..89d2530f3a6 100644
--- a/gcc/testsuite/gcc.target/arm/pr67989.C
+++ b/gcc/testsuite/gcc.target/arm/pr67989.C
@@ -2,7 +2,8 @@
/* { dg-options "-std=c++11 -O2" } */
/* { dg-require-effective-target arm_arch_v4t_ok } */
/* { dg-add-options arm_arch_v4t } */
-/* { dg-additional-options "-marm" } */
+/* { dg-additional-options "-marm -Wno-return-type" } */
+
/* Duplicate version of the test in g++.dg to be able to run this test only if
ARMv4t in ARM execution state can be targetted. Newer architecture don't
diff --git a/gcc/testsuite/gcc.target/i386/avx-1.c b/gcc/testsuite/gcc.target/i386/avx-1.c
index 46238265ae6..97a899cfb5c 100644
--- a/gcc/testsuite/gcc.target/i386/avx-1.c
+++ b/gcc/testsuite/gcc.target/i386/avx-1.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -Werror-implicit-function-declaration -march=k8 -m3dnow -mavx -mavx2 -maes -mpclmul -mgfni" } */
+/* { dg-options "-O2 -Werror-implicit-function-declaration -march=k8 -m3dnow -mavx -mavx2 -maes -mpclmul -mgfni -mavx512bw" } */
/* { dg-add-options bind_pic_locally } */
#include <mm_malloc.h>
@@ -610,8 +610,12 @@
#define __builtin_ia32_vgf2p8affineinvqb_v16qi_mask(A, B, C, D, E) __builtin_ia32_vgf2p8affineinvqb_v16qi_mask(A, B, 1, D, E)
#define __builtin_ia32_vgf2p8affineinvqb_v32qi_mask(A, B, C, D, E) __builtin_ia32_vgf2p8affineinvqb_v32qi_mask(A, B, 1, D, E)
#define __builtin_ia32_vgf2p8affineinvqb_v64qi_mask(A, B, C, D, E) __builtin_ia32_vgf2p8affineinvqb_v64qi_mask(A, B, 1, D, E)
-
-
+#define __builtin_ia32_vgf2p8affineqb_v16qi(A, B, C) __builtin_ia32_vgf2p8affineqb_v16qi(A, B, 1)
+#define __builtin_ia32_vgf2p8affineqb_v32qi(A, B, C) __builtin_ia32_vgf2p8affineqb_v32qi(A, B, 1)
+#define __builtin_ia32_vgf2p8affineqb_v64qi(A, B, C) __builtin_ia32_vgf2p8affineqb_v64qi(A, B, 1)
+#define __builtin_ia32_vgf2p8affineqb_v16qi_mask(A, B, C, D, E) __builtin_ia32_vgf2p8affineqb_v16qi_mask(A, B, 1, D, E)
+#define __builtin_ia32_vgf2p8affineqb_v32qi_mask(A, B, C, D, E) __builtin_ia32_vgf2p8affineqb_v32qi_mask(A, B, 1, D, E)
+#define __builtin_ia32_vgf2p8affineqb_v64qi_mask(A, B, C, D, E) __builtin_ia32_vgf2p8affineqb_v64qi_mask(A, B, 1, D, E)
#include <wmmintrin.h>
#include <immintrin.h>
diff --git a/gcc/testsuite/gcc.target/i386/avx-2.c b/gcc/testsuite/gcc.target/i386/avx-2.c
index 0061d9cdd22..986fbd819e4 100644
--- a/gcc/testsuite/gcc.target/i386/avx-2.c
+++ b/gcc/testsuite/gcc.target/i386/avx-2.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O0 -Werror-implicit-function-declaration -march=k8 -m3dnow -mavx -mavx2 -msse4a -maes -mpclmul" } */
+/* { dg-options "-O0 -Werror-implicit-function-declaration -march=k8 -m3dnow -mavx -mavx2 -msse4a -maes -mpclmul -mavx512bw" } */
/* { dg-add-options bind_pic_locally } */
#include <mm_malloc.h>
diff --git a/gcc/testsuite/gcc.target/i386/avx512dq-pr82855.c b/gcc/testsuite/gcc.target/i386/avx512dq-pr82855.c
new file mode 100644
index 00000000000..563454c3578
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/avx512dq-pr82855.c
@@ -0,0 +1,14 @@
+/* PR target/82855 */
+/* { dg-do compile } */
+/* { dg-options "-O2 -mavx512vl -mavx512dq" } */
+/* { dg-final { scan-assembler {\mktestb\M} } } */
+
+#include <immintrin.h>
+
+int
+foo (const __m256i *ptr)
+{
+ __m256i v = _mm256_loadu_si256 (ptr);
+ __mmask8 m = _mm256_cmpeq_epi32_mask (v, _mm256_setzero_si256 ());
+ return 0 == m;
+}
diff --git a/gcc/testsuite/gcc.target/i386/avx512f-gf2p8affineqb-2.c b/gcc/testsuite/gcc.target/i386/avx512f-gf2p8affineqb-2.c
new file mode 100644
index 00000000000..807da2c972f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/avx512f-gf2p8affineqb-2.c
@@ -0,0 +1,74 @@
+/* { dg-do run } */
+/* { dg-options "-O2 -mavx512f -mgfni -mavx512bw" } */
+/* { dg-require-effective-target avx512f } */
+/* { dg-require-effective-target gfni } */
+
+#define AVX512F
+
+#define GFNI
+#include "avx512f-helper.h"
+
+#define SIZE (AVX512F_LEN / 8)
+
+#include "avx512f-mask-type.h"
+#include <x86intrin.h>
+
+static void
+CALC (unsigned char *r, unsigned char *s1, unsigned char *s2, unsigned char imm)
+{
+ for (int a = 0; a < SIZE/8; a++)
+ {
+ for (int val = 0; val < 8; val++)
+ {
+ unsigned char result = 0;
+ for (int bit = 0; bit < 8; bit++)
+ {
+ unsigned char temp = s1[a*8 + val] & s2[a*8 + bit];
+ unsigned char parity = __popcntd(temp);
+ if (parity % 2)
+ result |= (1 << (8 - bit - 1));
+ }
+ r[a*8 + val] = result ^ imm;
+ }
+ }
+}
+
+void
+TEST (void)
+{
+ int i;
+ UNION_TYPE (AVX512F_LEN, i_b) res1, res2, res3, src1, src2;
+ MASK_TYPE mask = MASK_VALUE;
+ char res_ref[SIZE];
+ unsigned char imm = 0;
+
+ for (i = 0; i < SIZE; i++)
+ {
+ src1.a[i] = 1 + i;
+ src2.a[i] = 1;
+ }
+
+ for (i = 0; i < SIZE; i++)
+ {
+ res1.a[i] = DEFAULT_VALUE;
+ res2.a[i] = DEFAULT_VALUE;
+ res3.a[i] = DEFAULT_VALUE;
+ }
+
+ CALC (res_ref, src1.a, src2.a, imm);
+
+ res1.x = INTRINSIC (_gf2p8affine_epi64_epi8) (src1.x, src2.x, imm);
+ res2.x = INTRINSIC (_mask_gf2p8affine_epi64_epi8) (res2.x, mask, src1.x, src2.x, imm);
+ res3.x = INTRINSIC (_maskz_gf2p8affine_epi64_epi8) (mask, src1.x, src2.x, imm);
+ if (UNION_CHECK (AVX512F_LEN, i_b) (res1, res_ref))
+ abort ();
+
+ MASK_MERGE (i_b) (res_ref, mask, SIZE);
+ if (UNION_CHECK (AVX512F_LEN, i_b) (res2, res_ref))
+ abort ();
+
+ MASK_ZERO (i_b) (res_ref, mask, SIZE);
+ if (UNION_CHECK (AVX512F_LEN, i_b) (res3, res_ref))
+ abort ();
+
+}
diff --git a/gcc/testsuite/gcc.target/i386/avx512f-gf2p8mulb-2.c b/gcc/testsuite/gcc.target/i386/avx512f-gf2p8mulb-2.c
new file mode 100644
index 00000000000..08fc5b7b7b6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/avx512f-gf2p8mulb-2.c
@@ -0,0 +1,76 @@
+/* { dg-do run } */
+/* { dg-options "-O2 -mavx512f -mgfni -mavx512bw" } */
+/* { dg-require-effective-target avx512f } */
+/* { dg-require-effective-target gfni } */
+
+#define AVX512F
+
+#define GFNI
+#include "avx512f-helper.h"
+
+#define SIZE (AVX512F_LEN / 8)
+
+#include "avx512f-mask-type.h"
+
+static void
+CALC (unsigned char *r, unsigned char *s1, unsigned char *s2)
+{
+ for (int i = 0; i < SIZE; i++)
+ {
+ unsigned short result = 0;
+ for (int bit = 0; bit < 8; bit++)
+ {
+ if ((s1[i] >> bit) & 1)
+ {
+ result ^= s2[i] << bit;
+ }
+ }
+ // Reduce result by x^8 + x^4 + x^3 + x + 1
+ for (int bit = 14; bit > 7; bit--)
+ {
+ unsigned short p = 0x11B << (bit - 8);
+ if ((result >> bit) & 1)
+ result ^= p;
+ }
+ r[i] = result;
+ }
+}
+
+void
+TEST (void)
+{
+ int i;
+ UNION_TYPE (AVX512F_LEN, i_b) res1, res2, res3, src1, src2;
+ MASK_TYPE mask = MASK_VALUE;
+ unsigned char res_ref[SIZE];
+
+ for (i = 0; i < SIZE; i++)
+ {
+ src1.a[i] = 1 + i;
+ src2.a[i] = 2 + 2*i;
+ }
+
+ for (i = 0; i < SIZE; i++)
+ {
+ res1.a[i] = DEFAULT_VALUE;
+ res2.a[i] = DEFAULT_VALUE;
+ res3.a[i] = DEFAULT_VALUE;
+ }
+
+ CALC (res_ref, src1.a, src2.a);
+
+ res1.x = INTRINSIC (_gf2p8mul_epi8) (src1.x, src2.x);
+ res2.x = INTRINSIC (_mask_gf2p8mul_epi8) (res2.x, mask, src1.x, src2.x);
+ res3.x = INTRINSIC (_maskz_gf2p8mul_epi8) (mask, src1.x, src2.x);
+
+ if (UNION_CHECK (AVX512F_LEN, i_b) (res1, res_ref))
+ abort ();
+
+ MASK_MERGE (i_b) (res_ref, mask, SIZE);
+ if (UNION_CHECK (AVX512F_LEN, i_b) (res2, res_ref))
+ abort ();
+
+ MASK_ZERO (i_b) (res_ref, mask, SIZE);
+ if (UNION_CHECK (AVX512F_LEN, i_b) (res3, res_ref))
+ abort ();
+}
diff --git a/gcc/testsuite/gcc.target/i386/avx512vl-gf2p8affineqb-2.c b/gcc/testsuite/gcc.target/i386/avx512vl-gf2p8affineqb-2.c
new file mode 100644
index 00000000000..1b650d07539
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/avx512vl-gf2p8affineqb-2.c
@@ -0,0 +1,17 @@
+/* { dg-do run } */
+/* { dg-options "-O2 -mavx512bw -mavx512vl -mgfni" } */
+/* { dg-require-effective-target avx512vl } */
+/* { dg-require-effective-target avx512bw } */
+/* { dg-require-effective-target gfni } */
+
+#define AVX512VL
+#define AVX512F_LEN 256
+#define AVX512F_LEN_HALF 128
+#include "avx512f-gf2p8affineqb-2.c"
+
+#undef AVX512F_LEN
+#undef AVX512F_LEN_HALF
+
+#define AVX512F_LEN 128
+#define AVX512F_LEN_HALF 128
+#include "avx512f-gf2p8affineqb-2.c"
diff --git a/gcc/testsuite/gcc.target/i386/avx512vl-gf2p8mulb-2.c b/gcc/testsuite/gcc.target/i386/avx512vl-gf2p8mulb-2.c
new file mode 100644
index 00000000000..8215247a714
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/avx512vl-gf2p8mulb-2.c
@@ -0,0 +1,17 @@
+/* { dg-do run } */
+/* { dg-options "-O2 -mavx512bw -mavx512vl -mgfni" } */
+/* { dg-require-effective-target avx512vl } */
+/* { dg-require-effective-target avx512bw } */
+/* { dg-require-effective-target gfni } */
+
+#define AVX512VL
+#define AVX512F_LEN 256
+#define AVX512F_LEN_HALF 128
+#include "avx512f-gf2p8mulb-2.c"
+
+#undef AVX512F_LEN
+#undef AVX512F_LEN_HALF
+
+#define AVX512F_LEN 128
+#define AVX512F_LEN_HALF 128
+#include "avx512f-gf2p8mulb-2.c"
diff --git a/gcc/testsuite/gcc.target/i386/force-indirect-call-1.c b/gcc/testsuite/gcc.target/i386/force-indirect-call-1.c
new file mode 100644
index 00000000000..6ecf598708a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/force-indirect-call-1.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -mforce-indirect-call" } */
+/* { dg-final { scan-assembler-times "(?:call|jmp)\[ \\t\]+\\*%" 3 } } */
+
+int x;
+int y;
+
+void __attribute__((noinline)) f1(void)
+{
+ x++;
+}
+
+static __attribute__((noinline)) void f3(void)
+{
+ y++;
+}
+
+void f2()
+{
+ f1();
+ f3();
+ f1();
+}
diff --git a/gcc/testsuite/gcc.target/i386/force-indirect-call-2.c b/gcc/testsuite/gcc.target/i386/force-indirect-call-2.c
new file mode 100644
index 00000000000..2f702363041
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/force-indirect-call-2.c
@@ -0,0 +1,6 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -mforce-indirect-call -fPIC" } */
+/* { dg-require-effective-target fpic } */
+/* { dg-final { scan-assembler-times "(?:call|jmp)\[ \\t\]+\\*%" 3 } } */
+
+#include "force-indirect-call-1.c"
diff --git a/gcc/testsuite/gcc.target/i386/force-indirect-call-3.c b/gcc/testsuite/gcc.target/i386/force-indirect-call-3.c
new file mode 100644
index 00000000000..37bc01bf5c5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/force-indirect-call-3.c
@@ -0,0 +1,6 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -mforce-indirect-call -mcmodel=medium" } */
+/* { dg-require-effective-target lp64 } */
+/* { dg-final { scan-assembler-times "(?:call|jmp)\[ \\t\]+\\*%" 3 } } */
+
+#include "force-indirect-call-1.c"
diff --git a/gcc/testsuite/gcc.target/i386/gfni-1.c b/gcc/testsuite/gcc.target/i386/gfni-1.c
index 5e22c9eae92..bf72ad041a2 100644
--- a/gcc/testsuite/gcc.target/i386/gfni-1.c
+++ b/gcc/testsuite/gcc.target/i386/gfni-1.c
@@ -3,6 +3,12 @@
/* { dg-final { scan-assembler-times "vgf2p8affineinvqb\[ \\t\]+\[^\{\n\]*\\\$3\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vgf2p8affineinvqb\[ \\t\]+\[^\{\n\]*\\\$3\[^\n\r]*%zmm\[0-9\]+\[^\\n\\r]*%zmm\[0-9\]+\[^\\n\\r\]*%zmm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vgf2p8affineinvqb\[ \\t\]+\[^\{\n\]*\\\$3\[^\n\r]*%zmm\[0-9\]+\[^\\n\\r]*%zmm\[0-9\]+\[^\\n\\r\]*%zmm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vgf2p8affineqb\[ \\t\]+\[^\{\n\]*\\\$3\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vgf2p8affineqb\[ \\t\]+\[^\{\n\]*\\\$3\[^\n\r]*%zmm\[0-9\]+\[^\\n\\r]*%zmm\[0-9\]+\[^\\n\\r\]*%zmm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vgf2p8affineqb\[ \\t\]+\[^\{\n\]*\\\$3\[^\n\r]*%zmm\[0-9\]+\[^\\n\\r]*%zmm\[0-9\]+\[^\\n\\r\]*%zmm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vgf2p8mulb\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vgf2p8mulb\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\\n\\r]*%zmm\[0-9\]+\[^\\n\\r\]*%zmm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vgf2p8mulb\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\\n\\r]*%zmm\[0-9\]+\[^\\n\\r\]*%zmm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
#include <x86intrin.h>
@@ -15,4 +21,10 @@ avx512vl_test (void)
x1 = _mm512_gf2p8affineinv_epi64_epi8(x1, x2, 3);
x1 = _mm512_mask_gf2p8affineinv_epi64_epi8(x1, m64, x2, x1, 3);
x1 = _mm512_maskz_gf2p8affineinv_epi64_epi8(m64, x1, x2, 3);
+ x1 = _mm512_gf2p8affine_epi64_epi8(x1, x2, 3);
+ x1 = _mm512_mask_gf2p8affine_epi64_epi8(x1, m64, x2, x1, 3);
+ x1 = _mm512_maskz_gf2p8affine_epi64_epi8(m64, x1, x2, 3);
+ x1 = _mm512_gf2p8mul_epi8(x1, x2);
+ x1 = _mm512_mask_gf2p8mul_epi8(x1, m64, x2, x1);
+ x1 = _mm512_maskz_gf2p8mul_epi8(m64, x1, x2);
}
diff --git a/gcc/testsuite/gcc.target/i386/gfni-2.c b/gcc/testsuite/gcc.target/i386/gfni-2.c
index 4d1f151aa40..413cb64c6b2 100644
--- a/gcc/testsuite/gcc.target/i386/gfni-2.c
+++ b/gcc/testsuite/gcc.target/i386/gfni-2.c
@@ -6,6 +6,18 @@
/* { dg-final { scan-assembler-times "vgf2p8affineinvqb\[ \\t\]+\[^\{\n\]*\\\$3\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vgf2p8affineinvqb\[ \\t\]+\[^\{\n\]*\\\$3\[^\n\r]*%xmm\[0-9\]+\[^\\n\\r]*%xmm\[0-9\]+\[^\\n\\r\]*%xmm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vgf2p8affineinvqb\[ \\t\]+\[^\{\n\]*\\\$3\[^\n\r]*%xmm\[0-9\]+\[^\\n\\r]*%xmm\[0-9\]+\[^\\n\\r\]*%xmm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vgf2p8affineqb\[ \\t\]+\[^\{\n\]*\\\$3\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vgf2p8affineqb\[ \\t\]+\[^\{\n\]*\\\$3\[^\n\r]*%ymm\[0-9\]+\[^\\n\\r]*%ymm\[0-9\]+\[^\\n\\r\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vgf2p8affineqb\[ \\t\]+\[^\{\n\]*\\\$3\[^\n\r]*%ymm\[0-9\]+\[^\\n\\r]*%ymm\[0-9\]+\[^\\n\\r\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vgf2p8affineqb\[ \\t\]+\[^\{\n\]*\\\$3\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vgf2p8affineqb\[ \\t\]+\[^\{\n\]*\\\$3\[^\n\r]*%xmm\[0-9\]+\[^\\n\\r]*%xmm\[0-9\]+\[^\\n\\r\]*%xmm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vgf2p8affineqb\[ \\t\]+\[^\{\n\]*\\\$3\[^\n\r]*%xmm\[0-9\]+\[^\\n\\r]*%xmm\[0-9\]+\[^\\n\\r\]*%xmm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vgf2p8mulb\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vgf2p8mulb\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\\n\\r]*%ymm\[0-9\]+\[^\\n\\r\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vgf2p8mulb\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\\n\\r]*%ymm\[0-9\]+\[^\\n\\r\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vgf2p8mulb\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vgf2p8mulb\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\\n\\r]*%xmm\[0-9\]+\[^\\n\\r\]*%xmm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vgf2p8mulb\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\\n\\r]*%xmm\[0-9\]+\[^\\n\\r\]*%xmm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
#include <x86intrin.h>
@@ -24,4 +36,16 @@ avx512vl_test (void)
x5 = _mm_gf2p8affineinv_epi64_epi8(x5, x6, 3);
x5 = _mm_mask_gf2p8affineinv_epi64_epi8(x5, m16, x6, x5, 3);
x5 = _mm_maskz_gf2p8affineinv_epi64_epi8(m16, x5, x6, 3);
+ x3 = _mm256_gf2p8affine_epi64_epi8(x3, x4, 3);
+ x3 = _mm256_mask_gf2p8affine_epi64_epi8(x3, m32, x4, x3, 3);
+ x3 = _mm256_maskz_gf2p8affine_epi64_epi8(m32, x3, x4, 3);
+ x5 = _mm_gf2p8affine_epi64_epi8(x5, x6, 3);
+ x5 = _mm_mask_gf2p8affine_epi64_epi8(x5, m16, x6, x5, 3);
+ x5 = _mm_maskz_gf2p8affine_epi64_epi8(m16, x5, x6, 3);
+ x3 = _mm256_gf2p8mul_epi8(x3, x4);
+ x3 = _mm256_mask_gf2p8mul_epi8(x3, m32, x4, x3);
+ x3 = _mm256_maskz_gf2p8mul_epi8(m32, x3, x4);
+ x5 = _mm_gf2p8mul_epi8(x5, x6);
+ x5 = _mm_mask_gf2p8mul_epi8(x5, m16, x6, x5);
+ x5 = _mm_maskz_gf2p8mul_epi8(m16, x5, x6);
}
diff --git a/gcc/testsuite/gcc.target/i386/gfni-3.c b/gcc/testsuite/gcc.target/i386/gfni-3.c
index de5f80b1124..2beedc8abb3 100644
--- a/gcc/testsuite/gcc.target/i386/gfni-3.c
+++ b/gcc/testsuite/gcc.target/i386/gfni-3.c
@@ -2,6 +2,10 @@
/* { dg-options "-mgfni -mavx -O2" } */
/* { dg-final { scan-assembler-times "vgf2p8affineinvqb\[ \\t\]+\[^\{\n\]*\\\$3\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vgf2p8affineinvqb\[ \\t\]+\[^\{\n\]*\\\$3\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vgf2p8affineqb\[ \\t\]+\[^\{\n\]*\\\$3\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vgf2p8affineqb\[ \\t\]+\[^\{\n\]*\\\$3\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vgf2p8mulb\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vgf2p8mulb\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
#include <x86intrin.h>
@@ -14,4 +18,8 @@ avx512vl_test (void)
{
x3 = _mm256_gf2p8affineinv_epi64_epi8(x3, x4, 3);
x5 = _mm_gf2p8affineinv_epi64_epi8(x5, x6, 3);
+ x3 = _mm256_gf2p8affine_epi64_epi8(x3, x4, 3);
+ x5 = _mm_gf2p8affine_epi64_epi8(x5, x6, 3);
+ x3 = _mm256_gf2p8mul_epi8(x3, x4);
+ x5 = _mm_gf2p8mul_epi8(x5, x6);
}
diff --git a/gcc/testsuite/gcc.target/i386/gfni-4.c b/gcc/testsuite/gcc.target/i386/gfni-4.c
index 1532716191e..e0750054b82 100644
--- a/gcc/testsuite/gcc.target/i386/gfni-4.c
+++ b/gcc/testsuite/gcc.target/i386/gfni-4.c
@@ -1,6 +1,8 @@
/* { dg-do compile } */
-/* { dg-options "-mgfni -O2" } */
+/* { dg-options "-mgfni -O2 -msse" } */
/* { dg-final { scan-assembler-times "gf2p8affineinvqb\[ \\t\]+\[^\{\n\]*\\\$3\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "gf2p8affineqb\[ \\t\]+\[^\{\n\]*\\\$3\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "gf2p8mulb\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
#include <x86intrin.h>
@@ -11,4 +13,6 @@ void extern
avx512vl_test (void)
{
x5 = _mm_gf2p8affineinv_epi64_epi8(x5, x6, 3);
+ x5 = _mm_gf2p8affine_epi64_epi8(x5, x6, 3);
+ x5 = _mm_gf2p8mul_epi8(x5, x6);
}
diff --git a/gcc/testsuite/gcc.target/i386/pr80425-3.c b/gcc/testsuite/gcc.target/i386/pr80425-3.c
new file mode 100644
index 00000000000..1bf80b17b1c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr80425-3.c
@@ -0,0 +1,14 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -mavx512f" } */
+
+#include <x86intrin.h>
+
+extern int a;
+
+__m512i
+f1 (__m512i x)
+{
+ return _mm512_srai_epi32 (x, a);
+}
+
+/* { dg-final { scan-assembler-times "movd\[ \\t\]+\[^\n\]*%xmm" 1 } } */
diff --git a/gcc/testsuite/gcc.target/i386/pr81706.c b/gcc/testsuite/gcc.target/i386/pr81706.c
index 333fd159770..b4b7c781b70 100644
--- a/gcc/testsuite/gcc.target/i386/pr81706.c
+++ b/gcc/testsuite/gcc.target/i386/pr81706.c
@@ -1,8 +1,8 @@
/* PR libstdc++/81706 */
/* { dg-do compile } */
/* { dg-options "-O3 -mavx2 -mno-avx512f" } */
-/* { dg-final { scan-assembler "call\[^\n\r]_ZGVdN4v_cos" } } */
-/* { dg-final { scan-assembler "call\[^\n\r]_ZGVdN4v_sin" } } */
+/* { dg-final { scan-assembler "call\[^\n\r]__?ZGVdN4v_cos" } } */
+/* { dg-final { scan-assembler "call\[^\n\r]__?ZGVdN4v_sin" } } */
#ifdef __cplusplus
extern "C" {
diff --git a/gcc/testsuite/gcc.target/i386/pr82002-2a.c b/gcc/testsuite/gcc.target/i386/pr82002-2a.c
index bc85080ba8e..c31440debe2 100644
--- a/gcc/testsuite/gcc.target/i386/pr82002-2a.c
+++ b/gcc/testsuite/gcc.target/i386/pr82002-2a.c
@@ -1,7 +1,5 @@
/* { dg-do compile { target lp64 } } */
/* { dg-options "-Ofast -mstackrealign -mabi=ms" } */
-/* { dg-xfail-if "" { *-*-* } } */
-/* { dg-xfail-run-if "" { *-*-* } } */
void __attribute__((sysv_abi)) a (char *);
void
diff --git a/gcc/testsuite/gcc.target/i386/pr82002-2b.c b/gcc/testsuite/gcc.target/i386/pr82002-2b.c
index 10e44cd7b1d..939e069517d 100644
--- a/gcc/testsuite/gcc.target/i386/pr82002-2b.c
+++ b/gcc/testsuite/gcc.target/i386/pr82002-2b.c
@@ -1,7 +1,5 @@
/* { dg-do compile { target lp64 } } */
/* { dg-options "-Ofast -mstackrealign -mabi=ms -mcall-ms2sysv-xlogues" } */
-/* { dg-xfail-if "" { *-*-* } } */
-/* { dg-xfail-run-if "" { *-*-* } } */
void __attribute__((sysv_abi)) a (char *);
void
diff --git a/gcc/testsuite/gcc.target/i386/pr82941-1.c b/gcc/testsuite/gcc.target/i386/pr82941-1.c
new file mode 100644
index 00000000000..d7e530d5116
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr82941-1.c
@@ -0,0 +1,14 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -march=skylake-avx512" } */
+
+#include <immintrin.h>
+
+extern __m512d y, z;
+
+void
+pr82941 ()
+{
+ z = y;
+}
+
+/* { dg-final { scan-assembler-times "vzeroupper" 1 } } */
diff --git a/gcc/testsuite/gcc.target/i386/pr82941-2.c b/gcc/testsuite/gcc.target/i386/pr82941-2.c
new file mode 100644
index 00000000000..db2f8589ab6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr82941-2.c
@@ -0,0 +1,6 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -march=knl" } */
+
+#include "pr82941-1.c"
+
+/* { dg-final { scan-assembler-not "vzeroupper" } } */
diff --git a/gcc/testsuite/gcc.target/i386/pr82942-1.c b/gcc/testsuite/gcc.target/i386/pr82942-1.c
new file mode 100644
index 00000000000..9cdf81a9d60
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr82942-1.c
@@ -0,0 +1,6 @@
+/* { dg-do compile } */
+/* { dg-options "-mavx512f -mno-avx512er -O2" } */
+
+#include "pr82941-1.c"
+
+/* { dg-final { scan-assembler-times "vzeroupper" 1 } } */
diff --git a/gcc/testsuite/gcc.target/i386/pr82942-2.c b/gcc/testsuite/gcc.target/i386/pr82942-2.c
new file mode 100644
index 00000000000..ddb4e689659
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr82942-2.c
@@ -0,0 +1,6 @@
+/* { dg-do compile } */
+/* { dg-options "-mavx512f -mavx512er -mtune=knl -O2" } */
+
+#include "pr82941-1.c"
+
+/* { dg-final { scan-assembler-not "vzeroupper" } } */
diff --git a/gcc/testsuite/gcc.target/i386/pr82990-1.c b/gcc/testsuite/gcc.target/i386/pr82990-1.c
new file mode 100644
index 00000000000..ff1d6d40eb2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr82990-1.c
@@ -0,0 +1,14 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -march=knl -mvzeroupper" } */
+
+#include <immintrin.h>
+
+extern __m512d y, z;
+
+void
+pr82941 ()
+{
+ z = y;
+}
+
+/* { dg-final { scan-assembler-times "vzeroupper" 1 } } */
diff --git a/gcc/testsuite/gcc.target/i386/pr82990-2.c b/gcc/testsuite/gcc.target/i386/pr82990-2.c
new file mode 100644
index 00000000000..0d3cb2333dd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr82990-2.c
@@ -0,0 +1,6 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -march=skylake-avx512 -mno-vzeroupper" } */
+
+#include "pr82941-1.c"
+
+/* { dg-final { scan-assembler-not "vzeroupper" } } */
diff --git a/gcc/testsuite/gcc.target/i386/pr82990-3.c b/gcc/testsuite/gcc.target/i386/pr82990-3.c
new file mode 100644
index 00000000000..201fa98d8d4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr82990-3.c
@@ -0,0 +1,6 @@
+/* { dg-do compile } */
+/* { dg-options "-mavx512f -mavx512er -mvzeroupper -O2" } */
+
+#include "pr82941-1.c"
+
+/* { dg-final { scan-assembler-times "vzeroupper" 1 } } */
diff --git a/gcc/testsuite/gcc.target/i386/pr82990-4.c b/gcc/testsuite/gcc.target/i386/pr82990-4.c
new file mode 100644
index 00000000000..09f161c7291
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr82990-4.c
@@ -0,0 +1,6 @@
+/* { dg-do compile } */
+/* { dg-options "-mavx512f -mno-avx512er -mno-vzeroupper -O2" } */
+
+#include "pr82941-1.c"
+
+/* { dg-final { scan-assembler-not "vzeroupper" } } */
diff --git a/gcc/testsuite/gcc.target/i386/pr82990-5.c b/gcc/testsuite/gcc.target/i386/pr82990-5.c
new file mode 100644
index 00000000000..9932bdc5375
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr82990-5.c
@@ -0,0 +1,14 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -mavx512f -mtune=generic" } */
+
+#include <immintrin.h>
+
+extern __m512d y, z;
+
+void
+pr82941 ()
+{
+ z = y;
+}
+
+/* { dg-final { scan-assembler-times "vzeroupper" 1 } } */
diff --git a/gcc/testsuite/gcc.target/i386/pr82990-6.c b/gcc/testsuite/gcc.target/i386/pr82990-6.c
new file mode 100644
index 00000000000..063a61c111d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr82990-6.c
@@ -0,0 +1,6 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -march=skylake-avx512 -mtune=knl" } */
+
+#include "pr82941-1.c"
+
+/* { dg-final { scan-assembler-not "vzeroupper" } } */
diff --git a/gcc/testsuite/gcc.target/i386/pr82990-7.c b/gcc/testsuite/gcc.target/i386/pr82990-7.c
new file mode 100644
index 00000000000..dedde8b854b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr82990-7.c
@@ -0,0 +1,6 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -march=skylake-avx512 -mtune=generic -mtune-ctrl=^emit_vzeroupper" } */
+
+#include "pr82941-1.c"
+
+/* { dg-final { scan-assembler-not "vzeroupper" } } */
diff --git a/gcc/testsuite/gcc.target/i386/sse-13.c b/gcc/testsuite/gcc.target/i386/sse-13.c
index c35ec9a47cb..9bdc73f0c57 100644
--- a/gcc/testsuite/gcc.target/i386/sse-13.c
+++ b/gcc/testsuite/gcc.target/i386/sse-13.c
@@ -627,5 +627,12 @@
#define __builtin_ia32_vgf2p8affineinvqb_v16qi_mask(A, B, C, D, E) __builtin_ia32_vgf2p8affineinvqb_v16qi_mask(A, B, 1, D, E)
#define __builtin_ia32_vgf2p8affineinvqb_v32qi_mask(A, B, C, D, E) __builtin_ia32_vgf2p8affineinvqb_v32qi_mask(A, B, 1, D, E)
#define __builtin_ia32_vgf2p8affineinvqb_v64qi_mask(A, B, C, D, E) __builtin_ia32_vgf2p8affineinvqb_v64qi_mask(A, B, 1, D, E)
+#define __builtin_ia32_vgf2p8affineqb_v16qi(A, B, C) __builtin_ia32_vgf2p8affineqb_v16qi(A, B, 1)
+#define __builtin_ia32_vgf2p8affineqb_v32qi(A, B, C) __builtin_ia32_vgf2p8affineqb_v32qi(A, B, 1)
+#define __builtin_ia32_vgf2p8affineqb_v64qi(A, B, C) __builtin_ia32_vgf2p8affineqb_v64qi(A, B, 1)
+#define __builtin_ia32_vgf2p8affineqb_v16qi_mask(A, B, C, D, E) __builtin_ia32_vgf2p8affineqb_v16qi_mask(A, B, 1, D, E)
+#define __builtin_ia32_vgf2p8affineqb_v32qi_mask(A, B, C, D, E) __builtin_ia32_vgf2p8affineqb_v32qi_mask(A, B, 1, D, E)
+#define __builtin_ia32_vgf2p8affineqb_v64qi_mask(A, B, C, D, E) __builtin_ia32_vgf2p8affineqb_v64qi_mask(A, B, 1, D, E)
+
#include <x86intrin.h>
diff --git a/gcc/testsuite/gcc.target/i386/sse-14.c b/gcc/testsuite/gcc.target/i386/sse-14.c
index 388026f927a..fb2c35ab909 100644
--- a/gcc/testsuite/gcc.target/i386/sse-14.c
+++ b/gcc/testsuite/gcc.target/i386/sse-14.c
@@ -689,3 +689,6 @@ test_1 ( __bextri_u64, unsigned long long, unsigned long long, 1)
test_2 (_mm_gf2p8affineinv_epi64_epi8, __m128i, __m128i, __m128i, 1)
test_2 (_mm256_gf2p8affineinv_epi64_epi8, __m256i, __m256i, __m256i, 1)
test_2 (_mm512_gf2p8affineinv_epi64_epi8, __m512i, __m512i, __m512i, 1)
+test_2 (_mm_gf2p8affine_epi64_epi8, __m128i, __m128i, __m128i, 1)
+test_2 (_mm256_gf2p8affine_epi64_epi8, __m256i, __m256i, __m256i, 1)
+test_2 (_mm512_gf2p8affine_epi64_epi8, __m512i, __m512i, __m512i, 1)
diff --git a/gcc/testsuite/gcc.target/i386/sse-23.c b/gcc/testsuite/gcc.target/i386/sse-23.c
index 911258fa042..66c25c74add 100644
--- a/gcc/testsuite/gcc.target/i386/sse-23.c
+++ b/gcc/testsuite/gcc.target/i386/sse-23.c
@@ -626,6 +626,12 @@
#define __builtin_ia32_vgf2p8affineinvqb_v16qi_mask(A, B, C, D, E) __builtin_ia32_vgf2p8affineinvqb_v16qi_mask(A, B, 1, D, E)
#define __builtin_ia32_vgf2p8affineinvqb_v32qi_mask(A, B, C, D, E) __builtin_ia32_vgf2p8affineinvqb_v32qi_mask(A, B, 1, D, E)
#define __builtin_ia32_vgf2p8affineinvqb_v64qi_mask(A, B, C, D, E) __builtin_ia32_vgf2p8affineinvqb_v64qi_mask(A, B, 1, D, E)
+#define __builtin_ia32_vgf2p8affineqb_v16qi(A, B, C) __builtin_ia32_vgf2p8affineqb_v16qi(A, B, 1)
+#define __builtin_ia32_vgf2p8affineqb_v32qi(A, B, C) __builtin_ia32_vgf2p8affineqb_v32qi(A, B, 1)
+#define __builtin_ia32_vgf2p8affineqb_v64qi(A, B, C) __builtin_ia32_vgf2p8affineqb_v64qi(A, B, 1)
+#define __builtin_ia32_vgf2p8affineqb_v16qi_mask(A, B, C, D, E) __builtin_ia32_vgf2p8affineqb_v16qi_mask(A, B, 1, D, E)
+#define __builtin_ia32_vgf2p8affineqb_v32qi_mask(A, B, C, D, E) __builtin_ia32_vgf2p8affineqb_v32qi_mask(A, B, 1, D, E)
+#define __builtin_ia32_vgf2p8affineqb_v64qi_mask(A, B, C, D, E) __builtin_ia32_vgf2p8affineqb_v64qi_mask(A, B, 1, D, E)
#pragma GCC target ("sse4a,3dnow,avx,avx2,fma4,xop,aes,pclmul,popcnt,abm,lzcnt,bmi,bmi2,tbm,lwp,fsgsbase,rdrnd,f16c,fma,rtm,rdseed,prfchw,adx,fxsr,xsaveopt,avx512f,avx512er,avx512cd,avx512pf,sha,prefetchwt1,xsavec,xsaves,clflushopt,avx512bw,avx512dq,avx512vl,avx512vbmi,avx512ifma,avx5124fmaps,avx5124vnniw,avx512vpopcntdq,clwb,mwaitx,clzero,pku,sgx,rdpid,gfni")
diff --git a/gcc/testsuite/gcc.target/i386/stack-check-12.c b/gcc/testsuite/gcc.target/i386/stack-check-12.c
index cb69bb08086..980416946df 100644
--- a/gcc/testsuite/gcc.target/i386/stack-check-12.c
+++ b/gcc/testsuite/gcc.target/i386/stack-check-12.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -fstack-clash-protection -mtune=generic" } */
+/* { dg-options "-O2 -fstack-clash-protection -mtune=generic -fomit-frame-pointer" } */
/* { dg-require-effective-target supports_stack_clash_protection } */
__attribute__ ((noreturn)) void exit (int);
diff --git a/gcc/testsuite/gcc.target/mips/pr82981.c b/gcc/testsuite/gcc.target/mips/pr82981.c
new file mode 100644
index 00000000000..677e4cc01e9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/mips/pr82981.c
@@ -0,0 +1,13 @@
+/* PR target/82981 */
+/* { dg-do compile } */
+/* { dg-skip-if "" { *-*-* } { "*" } { "-O2" } } */
+/* { dg-options "-march=mips64r6 -mabi=64 -mexplicit-relocs" } */
+
+unsigned long
+func (unsigned long a, unsigned long b)
+{
+ return a > (~0UL) / b;
+}
+
+/* { dg-final { scan-assembler-not "__multi3" } } */
+/* { dg-final { scan-assembler "\tdmuhu" } } */
diff --git a/gcc/testsuite/gcc.target/powerpc/builtin-vec-sums-be-int.c b/gcc/testsuite/gcc.target/powerpc/builtin-vec-sums-be-int.c
new file mode 100644
index 00000000000..b4dfd0637e4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/powerpc/builtin-vec-sums-be-int.c
@@ -0,0 +1,16 @@
+/* Test for the __builtin_altivec_vsumsws_be() builtin.
+ It produces just the instruction vsumsws in LE and BE modes. */
+
+/* { dg-do compile } */
+/* { dg-require-effective-target powerpc_altivec_ok } */
+/* { dg-options "-maltivec -O2" } */
+
+#include <altivec.h>
+
+vector signed int
+test_vec_sums (vector signed int vsi2, vector signed int vsi3)
+{
+ return __builtin_altivec_vsumsws_be (vsi2, vsi3);
+}
+
+/* { dg-final { scan-assembler-times "vsumsws" 1 } } */
diff --git a/gcc/testsuite/gcc.target/powerpc/builtins-3-p9.c b/gcc/testsuite/gcc.target/powerpc/builtins-3-p9.c
index 46a31aeecf5..9dc53da58ad 100644
--- a/gcc/testsuite/gcc.target/powerpc/builtins-3-p9.c
+++ b/gcc/testsuite/gcc.target/powerpc/builtins-3-p9.c
@@ -1,6 +1,6 @@
/* { dg-do compile } */
/* { dg-require-effective-target powerpc_p9vector_ok } */
-/* { dg-options "-mcpu=power9" } */
+/* { dg-options "-mcpu=power9 -O1" } */
#include <altivec.h>
@@ -53,19 +53,20 @@ test_vull_bperm_vull_vuc (vector unsigned long long x,
test_ne_short 1 vcmpneh
test_ne_int 1 vcmpnew
test_ne_long 1 vcmpequd, 1 xxlnor inst
- test_nabs_long_long 1 xxspltib, 1 vsubudm, 1 vminsd
test_neg_long_long 1 vnegd
test_vull_bperm_vull_vuc 1 vbpermd
-
+ test_nabs_long_long (-O0) 1 xxspltib, 1 vsubudm, 1 vminsd
+ test_nabs_long_long (-O1) 1 vnegd, vminsd
+*/
/* { dg-final { scan-assembler-times "vcmpneb" 1 } } */
/* { dg-final { scan-assembler-times "vcmpneh" 1 } } */
/* { dg-final { scan-assembler-times "vcmpnew" 1 } } */
/* { dg-final { scan-assembler-times "vcmpequd" 1 } } */
/* { dg-final { scan-assembler-times "xxlnor" 1 } } */
-/* { dg-final { scan-assembler-times "xxspltib" 1 } } */
-/* { dg-final { scan-assembler-times "vsubudm" 1 } } */
+/* { dg-final { scan-assembler-times "xxspltib" 0 } } */
+/* { dg-final { scan-assembler-times "vsubudm" 0 } } */
/* { dg-final { scan-assembler-times "vminsd" 1 } } */
-/* { dg-final { scan-assembler-times "vnegd" 1 } } */
+/* { dg-final { scan-assembler-times "vnegd" 2 } } */
/* { dg-final { scan-assembler-times "vbpermd" 1 } } */
diff --git a/gcc/testsuite/gcc.target/powerpc/builtins-6-p9-runnable.c b/gcc/testsuite/gcc.target/powerpc/builtins-6-p9-runnable.c
new file mode 100644
index 00000000000..9319a372748
--- /dev/null
+++ b/gcc/testsuite/gcc.target/powerpc/builtins-6-p9-runnable.c
@@ -0,0 +1,1046 @@
+/* { dg-do run { target { powerpc*-*-* && p9vector_hw } } } */
+/* { dg-skip-if "do not override -mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power9" } } */
+/* { dg-options "-mcpu=power9 -O2" } */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <inttypes.h>
+#include <altivec.h> // vector
+
+#ifdef DEBUG
+#include <stdio.h>
+#endif
+
+void abort (void);
+
+int main() {
+ vector signed char char_src1, char_src2;
+ vector unsigned char uchar_src1, uchar_src2;
+ vector signed short int short_src1, short_src2;
+ vector unsigned short int ushort_src1, ushort_src2;
+ vector signed int int_src1, int_src2;
+ vector unsigned int uint_src1, uint_src2;
+ unsigned int result, expected_result;
+
+ /* Tests for: vec_first_match_index() */
+ /* char */
+ char_src1 = (vector signed char) {-1, 2, 3, 4, -5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16};
+ char_src2 = (vector signed char) {-1, 2, 3, 20, -5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16};
+ expected_result = 0;
+
+ result = vec_first_match_index (char_src1, char_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: char first match result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ char_src1 = (vector signed char) {1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16};
+ char_src2 = (vector signed char) {-1, -2, -3, -4, -5, -6, -7, -8,
+ -9, -10, -11, -12, -13, -14, -15, -16};
+ expected_result = 16;
+
+ result = vec_first_match_index (char_src1, char_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: char first match result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ uchar_src1 = (vector unsigned char) {0, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16};
+ uchar_src2 = (vector unsigned char) {1, 0, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16};
+ expected_result = 2;
+
+ result = vec_first_match_index (uchar_src1, uchar_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: uchar first match result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ uchar_src1 = (vector unsigned char) {2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17};
+ uchar_src2 = (vector unsigned char) {3, 4, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18};
+ expected_result = 16;
+
+ result = vec_first_match_index (uchar_src1, uchar_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: uchar first match result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ /* short int */
+ short_src1 = (vector short int) {10, -20, -30, 40, 50, 60, 70, 80};
+ short_src2 = (vector short int) {-10, 20, 30, 40, 50, 60, 70, 80};
+
+ expected_result = 3;
+
+ result = vec_first_match_index (short_src1, short_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: short int first match result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+
+ short_src1 = (vector short int) {10, 20, 30, 40, 50, 60, 70, 80};
+ short_src2 = (vector short int) {0, 0, 0, 0, 0, 0, 0, 0};
+
+ expected_result = 8;
+
+ result = vec_first_match_index (short_src1, short_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: short int first match result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ ushort_src1 = (vector short unsigned int) {0, 0, 0, 0, 0, 60, 70, 0};
+ ushort_src2 = (vector short unsigned int) {10, 20, 30, 40, 50, 60, 70, 80};
+
+ expected_result = 5;
+
+ result = vec_first_match_index (ushort_src1, ushort_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: ushort int first match result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ ushort_src1 = (vector short unsigned int) {-20, 30, -40, 50,
+ 60, -70, 80, -90};
+ ushort_src2 = (vector short unsigned int) {20, -30, 40, -50,
+ -60, 70, -80, 90};
+
+ expected_result = 8;
+
+ result = vec_first_match_index (ushort_src1, ushort_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: ushort int first match result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ /* int */
+ int_src1 = (vector int) {1, 2, 3, 4};
+ int_src2 = (vector int) {10, 20, 30, 4};
+
+ expected_result = 3;
+
+ result = vec_first_match_index (int_src1, int_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: int first match result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ int_src1 = (vector int) {1, 2, 3, 4};
+ int_src2 = (vector int) {4, 3, 2, 1};
+
+ expected_result = 4;
+
+ result = vec_first_match_index (int_src1, int_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: int first match result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ uint_src1 = (vector unsigned int) {1, 2, 3, 4};
+ uint_src2 = (vector unsigned int) {11, 2, 33, 4};
+
+ expected_result = 1;
+
+ result = vec_first_match_index (uint_src1, uint_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: uint first match result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ uint_src1 = (vector unsigned int) {1, 2, 3, 4};
+ uint_src2 = (vector unsigned int) {2, 3, 4, 5};
+
+ expected_result = 4;
+
+ result = vec_first_match_index (uint_src1, uint_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: uint first match result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ /* Tests for: vec_first_mismatch_index() */
+ /* char */
+ char_src1 = (vector signed char) {-1, 2, 3, 4, -5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16};
+ char_src2 = (vector signed char) {-1, 2, 3, 20, -5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16};
+ expected_result = 3;
+
+ result = vec_first_mismatch_index (char_src1, char_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: char first mismatch result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ char_src1 = (vector signed char) {1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16};
+ char_src2 = (vector signed char) {1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16};
+ expected_result = 16;
+
+ result = vec_first_mismatch_index (char_src1, char_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: char first mismatch result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ uchar_src1 = (vector unsigned char) {1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16};
+ uchar_src2 = (vector unsigned char) {1, 0, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16};
+ expected_result = 1;
+
+ result = vec_first_mismatch_index (uchar_src1, uchar_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: uchar first mismatch result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ uchar_src1 = (vector unsigned char) {2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16};
+ uchar_src2 = (vector unsigned char) {2, 3, 4, 5, 6, 7, 8, 9,
+ 0, 11, 12, 13, 14, 15, 16};
+ expected_result = 8;
+
+ result = vec_first_mismatch_index (uchar_src1, uchar_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: uchar first mismatch result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ uchar_src1 = (vector unsigned char) {2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16};
+ uchar_src2 = (vector unsigned char) {2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16};
+ expected_result = 16;
+
+ result = vec_first_mismatch_index (uchar_src1, uchar_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: uchar first mismatch result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ /* short int */
+ short_src1 = (vector short int) {-10, -20, 30, 40, 50, 60, 70, 80};
+ short_src2 = (vector short int) {-10, 20, 30, 40, 50, 60, 70, 80};
+
+ expected_result = 1;
+
+ result = vec_first_mismatch_index (short_src1, short_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: short int first mismatch result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ short_src1 = (vector short int) {10, 20, 30, 40, 50, 60, 70, 80};
+ short_src2 = (vector short int) {10, 20, 30, 40, 50, 60, 70, 80};
+
+ expected_result = 8;
+
+ result = vec_first_mismatch_index (short_src1, short_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: short int first mismatch result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ ushort_src1 = (vector short unsigned int) {10, 20, 30, 40, 50, 60, 70, 0};
+ ushort_src2 = (vector short unsigned int) {10, 20, 30, 40, 50, 60, 70, 80};
+
+ expected_result = 7;
+
+ result = vec_first_mismatch_index (ushort_src1, ushort_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: ushort int first mismatch result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ ushort_src1 = (vector short unsigned int) {20, 30, 40, 50, 60, 70, 80, 90};
+ ushort_src2 = (vector short unsigned int) {20, 30, 40, 50, 60, 70, 80, 90};
+
+ expected_result = 8;
+
+ result = vec_first_mismatch_index (ushort_src1, ushort_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: ushort int first mismatch result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ /* int */
+ int_src1 = (vector int) {1, 2, 3, 4};
+ int_src2 = (vector int) {1, 20, 3, 4};
+
+ expected_result = 1;
+
+ result = vec_first_mismatch_index (int_src1, int_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: int first mismatch result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ int_src1 = (vector int) {1, 2, 3, 4};
+ int_src2 = (vector int) {1, 2, 3, 4};
+
+ expected_result = 4;
+
+ result = vec_first_mismatch_index (int_src1, int_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: int first mismatch result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ int_src1 = (vector int) {1, 0, 3, 4};
+ int_src2 = (vector int) {1, 2, 3, 4};
+
+ expected_result = 1;
+
+ result = vec_first_mismatch_index (int_src1, int_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: int first mismatch result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ uint_src1 = (vector unsigned int) {1, 2, 3, 4};
+ uint_src2 = (vector unsigned int) {11, 2, 33, 4};
+
+ expected_result = 0;
+
+ result = vec_first_mismatch_index (uint_src1, uint_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: uint first mismatch result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ uint_src1 = (vector unsigned int) {1, 2, 3, 4};
+ uint_src2 = (vector unsigned int) {1, 2, 3, 4};
+
+ expected_result = 4;
+
+ result = vec_first_mismatch_index (uint_src1, uint_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: uint first mismatch result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ /* Tests for: vec_first_match_or_eos_index() */
+ /* char */
+ char_src1 = (vector signed char) {-1, 2, 3, 4, -5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16};
+ char_src2 = (vector signed char) {-1, 2, 3, 20, -5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16};
+ expected_result = 0;
+
+ result = vec_first_match_or_eos_index (char_src1, char_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: char first match result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ char_src1 = (vector signed char) {-1, 2, 3, 0, -5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16};
+ char_src2 = (vector signed char) {2, 3, 20, 0, -5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16};
+ expected_result = 3;
+
+ result = vec_first_match_or_eos_index (char_src1, char_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: char first match or EOS result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ char_src1 = (vector signed char) {1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16};
+ char_src2 = (vector signed char) {-1, -2, -3, -4, -5, -6, -7, -8,
+ -9, -10, -11, -12, -13, -14, -15, -16};
+ expected_result = 16;
+
+ result = vec_first_match_or_eos_index (char_src1, char_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: char first match or EOS result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+#endif
+
+ uchar_src1 = (vector unsigned char) {1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16};
+ uchar_src2 = (vector unsigned char) {-1, 0, -3, -4, -5, -6, -7, -8,
+ 9, 10, 11, 12, 13, 14, 15, 16};
+ expected_result = 1;
+
+ result = vec_first_match_or_eos_index (uchar_src1, uchar_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: uchar first match or EOS result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ uchar_src1 = (vector unsigned char) {2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17};
+ uchar_src2 = (vector unsigned char) {3, 4, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18};
+ expected_result = 16;
+
+ result = vec_first_match_or_eos_index (uchar_src1, uchar_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: uchar first match or EOS result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ /* short int */
+ short_src1 = (vector short int) {10, -20, -30, 40, 50, 60, 70, 80};
+ short_src2 = (vector short int) {-10, 20, 30, 40, 50, 60, 70, 80};
+
+ expected_result = 3;
+
+ result = vec_first_match_or_eos_index (short_src1, short_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: short int first match or EOS result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ short_src1 = (vector short int) {1, 20, 30, 40, 50, 60, 70, 80};
+
+ short_src2 = (vector short int) {10, 0, 30, 40, 50, 60, 70, 80};
+
+ expected_result = 1;
+
+ result = vec_first_match_or_eos_index (short_src1, short_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: short int first match or EOS result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ short_src1 = (vector short int) {-10, -20, -30, -40, -50, -60, -70, -80};
+
+ short_src2 = (vector short int) {10, 20, 30, 40, 50, 0, 70, 80};
+
+ expected_result = 5;
+
+ result = vec_first_match_or_eos_index (short_src1, short_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: short int first match or EOS result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ short_src1 = (vector short int) {10, 20, 30, 40, 50, 60, 70, 80};
+ short_src2 = (vector short int) {0, 0, 0, 0, 0, 0, 0, 0};
+
+ expected_result = 0;
+
+ result = vec_first_match_or_eos_index (short_src1, short_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: short int first match or EOS result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ ushort_src1 = (vector short unsigned int) {1, 2, 0, 0, 60, 70, 0};
+ ushort_src2 = (vector short unsigned int) {10, 20, 30, 40, 50, 60, 70, 80};
+
+ expected_result = 2;
+
+ result = vec_first_match_or_eos_index (ushort_src1, ushort_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: ushort int first match or EOS result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ ushort_src1 = (vector short unsigned int) {-20, 30, -40, 50,
+ 60, -70, 80, -90};
+ ushort_src2 = (vector short unsigned int) {20, -30, 40, -50,
+ -60, 70, -80, 90};
+
+ expected_result = 8;
+
+ result = vec_first_match_or_eos_index (ushort_src1, ushort_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: ushort int first match or EOS result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+ ushort_src1 = (vector short unsigned int) {-20, 30, -40, 50,
+ 60, -70, 80, 0};
+
+ ushort_src2 = (vector short unsigned int) {20, -30, 40, -50,
+ -60, 70, -80, 90};
+
+ expected_result = 7;
+
+ result = vec_first_match_or_eos_index (ushort_src1, ushort_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: ushort int first match or EOS result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ /* int */
+ int_src1 = (vector int) {1, 2, 3, 4};
+ int_src2 = (vector int) {10, 20, 30, 4};
+
+ expected_result = 3;
+
+ result = vec_first_match_or_eos_index (int_src1, int_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: int first match or EOS result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ int_src1 = (vector int) {0, 2, 3, 4};
+ int_src2 = (vector int) {4, 3, 2, 1};
+
+ expected_result = 0;
+
+ result = vec_first_match_or_eos_index (int_src1, int_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: int first match or EOS result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+
+ int_src1 = (vector int) {1, 2, 3, 4};
+ int_src2 = (vector int) {4, 3, 2, 1};
+
+ expected_result = 4;
+
+ result = vec_first_match_or_eos_index (int_src1, int_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: int first match or EOS result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ uint_src1 = (vector unsigned int) {1, 2, 3, 4};
+ uint_src2 = (vector unsigned int) {11, 2, 33, 4};
+
+ expected_result = 1;
+
+ result = vec_first_match_or_eos_index (uint_src1, uint_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: uint first match or EOS result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ uint_src1 = (vector unsigned int) {1, 2, 0, 4};
+ uint_src2 = (vector unsigned int) {2, 3, 4, 5};
+
+ expected_result = 2;
+
+ result = vec_first_match_or_eos_index (uint_src1, uint_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: uint first match or EOS result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ uint_src1 = (vector unsigned int) {1, 2, 3, 4};
+ uint_src2 = (vector unsigned int) {2, 3, 4, 5};
+
+ expected_result = 4;
+
+ result = vec_first_match_or_eos_index (uint_src1, uint_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: uint first match or EOS result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ /* Tests for: vec_first_mismatch_or_eos_index() */
+ /* char */
+ char_src1 = (vector signed char) {-1, 2, 3, 4, -5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16};
+ char_src2 = (vector signed char) {-1, 2, 3, 20, -5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16};
+ expected_result = 3;
+
+ result = vec_first_mismatch_or_eos_index (char_src1, char_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: char first mismatch or EOS result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ char_src1 = (vector signed char) {1, 2, 0, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16};
+ char_src2 = (vector signed char) {1, 2, 0, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16};
+ expected_result = 2;
+
+ result = vec_first_mismatch_or_eos_index (char_src1, char_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: char first mismatch or EOS result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ char_src1 = (vector signed char) {1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16};
+ char_src2 = (vector signed char) {1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16};
+ expected_result = 16;
+
+ result = vec_first_mismatch_or_eos_index (char_src1, char_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: char first mismatch or EOS result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ uchar_src1 = (vector unsigned char) {1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16};
+ uchar_src2 = (vector unsigned char) {1, 0, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16};
+ expected_result = 1;
+
+ result = vec_first_mismatch_or_eos_index (uchar_src1, uchar_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: uchar first mismatch or EOS result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ uchar_src1 = (vector unsigned char) {2, 3, 4, 5, 6, 7, 8, 9,
+ 0, 11, 12, 13, 14, 15, 16};
+ uchar_src2 = (vector unsigned char) {2, 3, 4, 5, 6, 7, 8, 9,
+ 0, 11, 12, 13, 14, 15, 16};
+ expected_result = 8;
+
+ result = vec_first_mismatch_or_eos_index (uchar_src1, uchar_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: uchar first mismatch or EOS result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ uchar_src1 = (vector unsigned char) {2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17};
+ uchar_src2 = (vector unsigned char) {2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 0, 16, 17};
+ expected_result = 13;
+
+ result = vec_first_mismatch_or_eos_index (uchar_src1, uchar_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: uchar first mismatch or EOS result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ uchar_src1 = (vector unsigned char) {2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17};
+ uchar_src2 = (vector unsigned char) {2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17};
+ expected_result = 16;
+
+ result = vec_first_mismatch_or_eos_index (uchar_src1, uchar_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: uchar first mismatch or EOS result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ /* short int */
+ short_src1 = (vector short int) {-10, -20, 30, 40, 50, 60, 70, 80};
+ short_src2 = (vector short int) {-10, 20, 30, 40, 50, 60, 70, 80};
+
+ expected_result = 1;
+
+ result = vec_first_mismatch_or_eos_index (short_src1, short_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: short int first mismatch or EOS result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ short_src1 = (vector short int) {0, 20, 30, 40, 50, 60, 70, 80};
+ short_src2 = (vector short int) {0, 20, 30, 40, 50, 60, 70, 80};
+
+ expected_result = 0;
+
+ result = vec_first_mismatch_or_eos_index (short_src1, short_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: short int first mismatch or EOS result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ short_src1 = (vector short int) {10, 20, 30, 40, 50, 60, 70, 80};
+ short_src2 = (vector short int) {10, 20, 30, 40, 50, 60, 70, 80};
+
+ expected_result = 8;
+
+ result = vec_first_mismatch_or_eos_index (short_src1, short_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: short int first mismatch or EOS result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ short_src1 = (vector short int) {10, 0, 30, 40, 50, 60, 70, 80};
+ short_src2 = (vector short int) {10, 0, 30, 40, 50, 60, 70, 80};
+
+ expected_result = 1;
+
+ result = vec_first_mismatch_or_eos_index (short_src1, short_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: short int first mismatch or EOS result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ ushort_src1 = (vector short unsigned int) {10, 20, 30, 40, 50, 60, 70, 0};
+ ushort_src2 = (vector short unsigned int) {10, 20, 30, 40, 50, 60, 70, 80};
+
+ expected_result = 7;
+
+ result = vec_first_mismatch_or_eos_index (ushort_src1, ushort_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: ushort int first mismatch or EOS result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ ushort_src1 = (vector short unsigned int) {20, 0, 40, 50, 60, 70, 80, 90};
+ ushort_src2 = (vector short unsigned int) {20, 0, 40, 50, 60, 70, 80, 90};
+
+ expected_result = 1;
+
+ result = vec_first_mismatch_or_eos_index (ushort_src1, ushort_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: ushort int first mismatch or EOS result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ ushort_src1 = (vector short unsigned int) {20, 30, 40, 50, 60, 70, 80, 90};
+ ushort_src2 = (vector short unsigned int) {20, 30, 40, 50, 60, 70, 80, 90};
+
+ expected_result = 8;
+
+ result = vec_first_mismatch_or_eos_index (ushort_src1, ushort_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: ushort int first mismatch or EOS result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ /* int */
+ int_src1 = (vector int) {1, 2, 3, 4};
+ int_src2 = (vector int) {1, 20, 3, 4};
+
+ expected_result = 1;
+
+ result = vec_first_mismatch_or_eos_index (int_src1, int_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: int first mismatch or EOS result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ int_src1 = (vector int) {1, 2, 3, 4};
+ int_src2 = (vector int) {1, 2, 3, 4};
+
+ expected_result = 4;
+
+ result = vec_first_mismatch_or_eos_index (int_src1, int_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: int first mismatch result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ int_src1 = (vector int) {1, 2, 0, 4};
+ int_src2 = (vector int) {1, 2, 0, 4};
+
+ expected_result = 2;
+
+ result = vec_first_mismatch_or_eos_index (int_src1, int_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: int first mismatch result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ int_src1 = (vector int) {1, 0, 3, 4};
+ int_src2 = (vector int) {1, 2, 3, 4};
+
+ expected_result = 1;
+
+ result = vec_first_mismatch_or_eos_index (int_src1, int_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: int first mismatch result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ uint_src1 = (vector unsigned int) {1, 2, 3, 4};
+ uint_src2 = (vector unsigned int) {11, 2, 33, 4};
+
+ expected_result = 0;
+
+ result = vec_first_mismatch_or_eos_index (uint_src1, uint_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: uint first mismatch result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ uint_src1 = (vector unsigned int) {1, 2, 3, 0};
+ uint_src2 = (vector unsigned int) {1, 2, 3, 0};
+
+ expected_result = 3;
+
+ result = vec_first_mismatch_or_eos_index (uint_src1, uint_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: uint first mismatch result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+ uint_src1 = (vector unsigned int) {1, 2, 3, 4};
+ uint_src2 = (vector unsigned int) {1, 2, 3, 4};
+
+ expected_result = 4;
+
+ result = vec_first_mismatch_or_eos_index (uint_src1, uint_src2);
+
+ if (result != expected_result)
+#ifdef DEBUG
+ printf("Error: uint first mismatch result (%d) does not match expected result (%d)\n",
+ result, expected_result);
+#else
+ abort();
+#endif
+
+}
diff --git a/gcc/testsuite/gcc.target/powerpc/builtins-revb-runnable.c b/gcc/testsuite/gcc.target/powerpc/builtins-revb-runnable.c
new file mode 100644
index 00000000000..b6ffa238221
--- /dev/null
+++ b/gcc/testsuite/gcc.target/powerpc/builtins-revb-runnable.c
@@ -0,0 +1,342 @@
+/* { dg-do run { target { powerpc*-*-* && { lp64 && p8vector_hw } } } } */
+/* { dg-skip-if "do not override -mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power8" } } */
+/* { dg-options "-mcpu=power8 -O3" } */
+
+#include <altivec.h>
+
+#ifdef DEBUG
+#include <stdio.h>
+#endif
+
+void abort (void);
+
+/* Verify vec_revb builtins */
+
+int
+main()
+{
+ int i;
+ vector bool char arg_bc, result_bc, expected_bc;
+ vector unsigned char arg_uc, result_uc, expected_uc;
+ vector signed char arg_sc, result_sc, expected_sc;
+
+ vector bool short int arg_bsi, result_bsi, expected_bsi;
+ vector unsigned short int arg_usi, result_usi, expected_usi;
+ vector short int arg_si, result_si, expected_si;
+
+ vector bool int arg_bi, result_bi, expected_bi;
+ vector unsigned int arg_ui, result_ui, expected_ui;
+ vector int arg_int, result_int, expected_int;
+
+ vector bool long long int arg_blli, result_blli, expected_blli;
+ vector unsigned long long int arg_ulli, result_ulli, expected_ulli;
+ vector long long int arg_lli, result_lli, expected_lli;
+
+ vector __uint128_t arg_uint128, result_uint128, expected_uint128;
+ vector __int128_t arg_int128, result_int128, expected_int128;
+
+ vector float arg_f, result_f, expected_f;
+ vector double arg_d, result_d, expected_d;
+
+ /* 8-bit ints */
+ /* The element is a byte. Reversing the byte in each byte element
+ gives the same value. */
+ arg_bc = (vector bool char) {0x01, 0x23, 0x45, 0x67,
+ 0x7E, 0x7C, 0x7A, 0x78,
+ 0x02, 0x46, 0x7A, 0x7E,
+ 0x13, 0x57, 0x7B, 0x7F};
+ expected_bc = arg_bc;
+
+ result_bc = vec_revb (arg_bc);
+
+ for (i = 0; i < 16; i++) {
+ if (result_bc[i] != expected_bc[i])
+#ifdef DEBUG
+ printf("arg_bc[%d] = 0x%x, result_bc[%d] = 0x%x, expected_bc[%d] = 0x%x\n",
+ i, arg_bc[i], i, result_bc[i], i, expected_bc[i]);
+#else
+ abort();
+#endif
+ }
+
+ arg_uc = (vector unsigned char) {0x01, 0x23, 0x45, 0x67,
+ 0x7E, 0x7C, 0x7A, 0x78,
+ 0x02, 0x46, 0x7A, 0x7E,
+ 0x13, 0x57, 0x7B, 0x7F};
+ expected_uc = arg_uc;
+
+ result_uc = vec_revb (arg_uc);
+
+ for (i = 0; i < 16; i++) {
+ if (result_uc[i] != expected_uc[i])
+#ifdef DEBUG
+ printf("arg_uc[%d] = 0x%x, result_uc[%d] = 0x%x, expected_uc[%d] = 0x%x\n",
+ i, arg_uc[i], i, result_uc[i], i, expected_uc[i]);
+#else
+ abort();
+#endif
+ }
+
+ arg_sc = (vector signed char) {0x01, 0x23, 0x45, 0x67,
+ 0x7E, 0x7C, 0x7A, 0x78,
+ 0x02, 0x46, 0x7A, 0x7E,
+ 0x13, 0x57, 0x7B, 0x7F};
+ expected_sc = arg_sc;
+
+ result_sc = vec_revb (arg_sc);
+
+ for (i = 0; i < 16; i++) {
+ if (result_sc[i] != expected_sc[i])
+#ifdef DEBUG
+ printf("arg_sc[%d] = 0x%x, result_sc[%d] = 0x%x, expected_sc[%d] = 0x%x\n",
+ i, arg_sc[i], i, result_sc[i], i, expected_sc[i]);
+#else
+ abort();
+#endif
+ }
+
+ /* 16-bit ints */
+ arg_bsi = (vector bool short int) {0x0123, 0x4567, 0xFEDC, 0xBA98, 0x0246,
+ 0x8ACE, 0x1357, 0x9BDF};
+ expected_bsi = (vector bool short int) {0x2301, 0x6745, 0xDCFE, 0x98BA,
+ 0x4602, 0xCE8A, 0x5713, 0xDF9B};
+
+ result_bsi = vec_revb (arg_bsi);
+
+ for (i = 0; i < 8; i++) {
+ if (result_bsi[i] != expected_bsi[i])
+#ifdef DEBUG
+ printf("arg_bsi[%d] = 0x%x, result_bsi[%d] = 0x%x, expected_bsi[%d] = 0x%x\n",
+ i, arg_bsi[i], i, result_bsi[i], i, expected_bsi[i]);
+#else
+ abort();
+#endif
+ }
+
+ arg_usi = (vector unsigned short int) {0x0123, 0x4567, 0xFEDC, 0xBA98,
+ 0x0246, 0x8ACE, 0x1357, 0x9BDF};
+ expected_usi = (vector unsigned short int) {0x2301, 0x6745, 0xDCFE, 0x98BA,
+ 0x4602, 0xCE8A, 0x5713, 0xDF9B};
+
+ result_usi = vec_revb (arg_usi);
+
+ for (i = 0; i < 8; i++) {
+ if (result_usi[i] != expected_usi[i])
+#ifdef DEBUG
+ printf("arg_usi[%d] = 0x%x, result_usi[%d] = 0x%x, expected_usi[%d] = 0x%x\n",
+ i, arg_usi[i], i, result_usi[i], i, expected_usi[i]);
+#else
+ abort();
+#endif
+ }
+
+ arg_si = (vector short int) {0x0123, 0x4567, 0xFEDC, 0xBA98, 0x0246, 0x8ACE,
+ 0x1357, 0x9BDF};
+ expected_si = (vector short int) {0x2301, 0x6745, 0xDCFE, 0x98BA, 0x4602,
+ 0xCE8A, 0x5713, 0xDF9B};
+
+ result_si = vec_revb (arg_si);
+
+ for (i = 0; i < 8; i++) {
+ if (result_si[i] != expected_si[i])
+#ifdef DEBUG
+ printf("arg_si[%d] = 0x%x, result_si[%d] = 0x%x, expected_si[%d] = 0x%x\n",
+ i, arg_si[i], i, result_si[i], i, expected_si[i]);
+#else
+ abort();
+#endif
+ }
+
+ /* 32-bit ints */
+ arg_bi = (vector bool int) {0x01234567, 0xFEDCBA98, 0x02468ACE, 0x13579BDF};
+ expected_bi = (vector bool int) {0x67452301, 0x98BADCFE, 0xCE8A4602,
+ 0xDF9B5713};
+
+ result_bi = vec_revb (arg_bi);
+
+ for (i = 0; i < 4; i++) {
+ if (result_bi[i] != expected_bi[i])
+#ifdef DEBUG
+ printf("arg_bi[%d] = 0x%x, result_bi[%d] = 0x%x, expected_bi[%d] = 0x%x\n",
+ i, arg_bi[i], i, result_bi[i], i, expected_bi[i]);
+#else
+ abort();
+#endif
+ }
+
+ arg_ui = (vector unsigned int) {0x01234567, 0xFEDCBA98, 0x02468ACE,
+ 0x13579BDF};
+ expected_ui = (vector unsigned int) {0x67452301, 0x98BADCFE, 0xCE8A4602,
+ 0xDF9B5713};
+
+ result_ui = vec_revb (arg_ui);
+
+ for (i = 0; i < 4; i++) {
+ if (result_ui[i] != expected_ui[i])
+#ifdef DEBUG
+ printf("arg_ui[%d] = 0x%x, result_ui[%d] = 0x%x, expected_ui[%d] = 0x%x\n",
+ i, arg_ui[i], i, result_ui[i], i, expected_ui[i]);
+#else
+ abort();
+#endif
+ }
+
+ arg_int = (vector int) {0x01234567, 0xFEDCBA98, 0x02468ACE, 0x13579BDF};
+ expected_int = (vector int) {0x67452301, 0x98BADCFE, 0xCE8A4602, 0xDF9B5713};
+
+ result_int = vec_revb (arg_int);
+
+ for (i = 0; i < 4; i++) {
+ if (result_int[i] != expected_int[i])
+#ifdef DEBUG
+ printf("arg_int[%d] = 0x%x, result_int[%d] = 0x%x, expected_int[%d] = 0x%x\n",
+ i, arg_int[i], i, result_int[i], i, expected_int[i]);
+#else
+ abort();
+#endif
+ }
+
+ /* 64-bit ints */
+ arg_blli = (vector bool long long int) {0x01234567FEDCBA98,
+ 0x02468ACE13579BDF};
+ expected_blli = (vector bool long long int) {0x98BADCFE67452301,
+ 0xDF9B5713CE8A4602};
+
+ result_blli = vec_revb (arg_blli);
+
+ for (i = 0; i < 2; i++) {
+ if (result_blli[i] != expected_blli[i])
+#ifdef DEBUG
+ printf("arg_blli[%d] = 0x%x, result_blli[%d] = 0x%llx, expected_blli[%d] = 0x%llx\n",
+ i, arg_blli[i], i, result_blli[i], i, expected_blli[i]);
+#else
+ abort();
+#endif
+ }
+
+ arg_ulli = (vector unsigned long long int) {0x01234567FEDCBA98,
+ 0x02468ACE13579BDF};
+ expected_ulli = (vector unsigned long long int) {0x98BADCFE67452301,
+ 0xDF9B5713CE8A4602};
+
+ result_ulli = vec_revb (arg_ulli);
+
+ for (i = 0; i < 2; i++) {
+ if (result_ulli[i] != expected_ulli[i])
+#ifdef DEBUG
+ printf("arg_ulli[%d] = 0x%x, result_ulli[%d] = 0x%llx, expected_ulli[%d] = 0x%llx\n",
+ i, arg_ulli[i], i, result_ulli[i], i, expected_ulli[i]);
+#else
+ abort();
+#endif
+ }
+
+ arg_lli = (vector long long int) {0x01234567FEDCBA98, 0x02468ACE13579BDF};
+ expected_lli = (vector long long int) {0x98BADCFE67452301,
+ 0xDF9B5713CE8A4602};
+
+ result_lli = vec_revb (arg_lli);
+
+ for (i = 0; i < 2; i++) {
+ if (result_lli[i] != expected_lli[i])
+#ifdef DEBUG
+ printf("arg_lli[%d] = 0x%x, result_lli[%d] = 0x%llx, expected_lli[%d] = 0x%llx\n",
+ i, arg_lli[i], i, result_lli[i], i, expected_lli[i]);
+#else
+ abort();
+#endif
+ }
+
+ /* 128-bit ints */
+ arg_uint128[0] = 0x1627384950617243;
+ arg_uint128[0] = arg_uint128[0] << 64;
+ arg_uint128[0] |= 0x9405182930415263;
+ expected_uint128[0] = 0x6352413029180594;
+ expected_uint128[0] = expected_uint128[0] << 64;
+ expected_uint128[0] |= 0x4372615049382716;
+
+ result_uint128 = vec_revb (arg_uint128);
+
+ if (result_uint128[0] != expected_uint128[0])
+ {
+#ifdef DEBUG
+ printf("result_uint128[0] doesn't match expected_u128[0]\n");
+ printf("arg_uint128[0] = %llx ", arg_uint128[0] >> 64);
+ printf(" %llx\n", arg_uint128[0] & 0xFFFFFFFFFFFFFFFF);
+
+ printf("result_uint128[0] = %llx ", result_uint128[0] >> 64);
+ printf(" %llx\n", result_uint128[0] & 0xFFFFFFFFFFFFFFFF);
+
+ printf("expected_uint128[0] = %llx ", expected_uint128[0] >> 64);
+ printf(" %llx\n", expected_uint128[0] & 0xFFFFFFFFFFFFFFFF);
+#else
+ abort();
+#endif
+ }
+
+ arg_int128[0] = 0x1627384950617283;
+ arg_int128[0] = arg_int128[0] << 64;
+ arg_int128[0] |= 0x9405182930415263;
+ expected_int128[0] = 0x6352413029180594;
+ expected_int128[0] = expected_int128[0] << 64;
+ expected_int128[0] |= 0x8372615049382716;;
+
+ result_int128 = vec_revb (arg_int128);
+
+ if (result_int128[0] != expected_int128[0])
+ {
+#ifdef DEBUG
+ printf("result_int128[0] doesn't match expected128[0]\n");
+ printf("arg_int128[0] = %llx ", arg_int128[0] >> 64);
+ printf(" %llx\n", arg_int128[0] & 0xFFFFFFFFFFFFFFFF);
+
+ printf("result_int128[0] = %llx ", result_int128[0] >> 64);
+ printf(" %llx\n", result_int128[0] & 0xFFFFFFFFFFFFFFFF);
+
+ printf("expected_int128[0] = %llx ", expected_int128[0] >> 64);
+ printf(" %llx\n", expected_int128[0] & 0xFFFFFFFFFFFFFFFF);
+#else
+ abort();
+#endif
+ }
+
+ /* 32-bit floats */
+ /* 0x42f7224e, 0x43e471ec, 0x49712062, 0x4a0f2b38 */
+ arg_f = (vector float) {123.567, 456.89, 987654.123456, 2345678.0};
+ /* 0x4e22F742, 0xec71e443, 0x62207149, 0x382b0f4a */
+ expected_f = (vector float) {683528320.0,
+ -1169716232068291395011477504.0,
+ 739910526898278498304.0,
+ 0.0000407838160754181444644927978515625};
+
+ result_f = vec_revb (arg_f);
+
+ for (i = 0; i < 4; i++) {
+ if (result_f[i] != expected_f[i])
+#ifdef DEBUG
+ printf(" arg_f[%d] = %f, result_f[%d] = %f, expected_f[%d] = %f\n",
+ i, arg_f[i], i, result_f[i], i, expected_f[i]);
+#else
+ abort();
+#endif
+ }
+
+ /* 64-bit floats */
+ /* 0x419D6F34547E6B75 0x4194E5FEC781948B */
+ arg_d = (vector double) {123456789.123456789, 87654321.87654321};
+ /* 0x756B7E54346F9D41 0x8B9481C7FEE59441 */
+ expected_d = (vector double) {4.12815412905659550518671402044E257,
+ -6.99269992046390236552018719554E-253};
+
+ result_d = vec_revb (arg_d);
+
+ for (i = 0; i < 2; i++) {
+ if (result_d[i] != expected_d[i])
+#ifdef DEBUG
+ printf("arg_d[%d] = %f, result_d[%d] = %f, expected_d[%d] = %f\n",
+ i, arg_d[i], i, result_d[i], i, expected_d[i]);
+#else
+ abort();
+#endif
+ }
+}
diff --git a/gcc/testsuite/gcc.target/powerpc/float128-hw4.c b/gcc/testsuite/gcc.target/powerpc/float128-hw4.c
new file mode 100644
index 00000000000..be5d0d6eef4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/powerpc/float128-hw4.c
@@ -0,0 +1,135 @@
+/* { dg-do compile { target { powerpc*-*-* && lp64 } } } */
+/* { dg-require-effective-target powerpc_p9vector_ok } */
+/* { dg-options "-mpower9-vector -O2 -mabi=ieeelongdouble -Wno-psabi" } */
+
+/* Insure that the ISA 3.0 IEEE 128-bit floating point built-in functions can
+ be used with long double when the default is IEEE 128-bit. */
+
+#ifndef TYPE
+#define TYPE long double
+#endif
+
+unsigned int
+get_double_exponent (double a)
+{
+ return __builtin_vec_scalar_extract_exp (a);
+}
+
+unsigned int
+get_float128_exponent (TYPE a)
+{
+ return __builtin_vec_scalar_extract_exp (a);
+}
+
+unsigned long
+get_double_mantissa (double a)
+{
+ return __builtin_vec_scalar_extract_sig (a);
+}
+
+__uint128_t
+get_float128_mantissa (TYPE a)
+{
+ return __builtin_vec_scalar_extract_sig (a);
+}
+
+double
+set_double_exponent_ulong (unsigned long a, unsigned long e)
+{
+ return __builtin_vec_scalar_insert_exp (a, e);
+}
+
+TYPE
+set_float128_exponent_uint128 (__uint128_t a, unsigned long e)
+{
+ return __builtin_vec_scalar_insert_exp (a, e);
+}
+
+double
+set_double_exponent_double (double a, unsigned long e)
+{
+ return __builtin_vec_scalar_insert_exp (a, e);
+}
+
+TYPE
+set_float128_exponent_float128 (TYPE a, __uint128_t e)
+{
+ return __builtin_vec_scalar_insert_exp (a, e);
+}
+
+TYPE
+sqrt_odd (TYPE a)
+{
+ return __builtin_sqrtf128_round_to_odd (a);
+}
+
+double
+trunc_odd (TYPE a)
+{
+ return __builtin_truncf128_round_to_odd (a);
+}
+
+TYPE
+add_odd (TYPE a, TYPE b)
+{
+ return __builtin_addf128_round_to_odd (a, b);
+}
+
+TYPE
+sub_odd (TYPE a, TYPE b)
+{
+ return __builtin_subf128_round_to_odd (a, b);
+}
+
+TYPE
+mul_odd (TYPE a, TYPE b)
+{
+ return __builtin_mulf128_round_to_odd (a, b);
+}
+
+TYPE
+div_odd (TYPE a, TYPE b)
+{
+ return __builtin_divf128_round_to_odd (a, b);
+}
+
+TYPE
+fma_odd (TYPE a, TYPE b, TYPE c)
+{
+ return __builtin_fmaf128_round_to_odd (a, b, c);
+}
+
+TYPE
+fms_odd (TYPE a, TYPE b, TYPE c)
+{
+ return __builtin_fmaf128_round_to_odd (a, b, -c);
+}
+
+TYPE
+nfma_odd (TYPE a, TYPE b, TYPE c)
+{
+ return -__builtin_fmaf128_round_to_odd (a, b, c);
+}
+
+TYPE
+nfms_odd (TYPE a, TYPE b, TYPE c)
+{
+ return -__builtin_fmaf128_round_to_odd (a, b, -c);
+}
+
+/* { dg-final { scan-assembler {\mxsiexpdp\M} } } */
+/* { dg-final { scan-assembler {\mxsiexpqp\M} } } */
+/* { dg-final { scan-assembler {\mxsxexpdp\M} } } */
+/* { dg-final { scan-assembler {\mxsxexpqp\M} } } */
+/* { dg-final { scan-assembler {\mxsxsigdp\M} } } */
+/* { dg-final { scan-assembler {\mxsxsigqp\M} } } */
+/* { dg-final { scan-assembler {\mxsaddqpo\M} } } */
+/* { dg-final { scan-assembler {\mxsdivqpo\M} } } */
+/* { dg-final { scan-assembler {\mxsmaddqpo\M} } } */
+/* { dg-final { scan-assembler {\mxsmsubqpo\M} } } */
+/* { dg-final { scan-assembler {\mxsmulqpo\M} } } */
+/* { dg-final { scan-assembler {\mxsnmaddqpo\M} } } */
+/* { dg-final { scan-assembler {\mxsnmsubqpo\M} } } */
+/* { dg-final { scan-assembler {\mxssqrtqpo\M} } } */
+/* { dg-final { scan-assembler {\mxssubqpo\M} } } */
+/* { dg-final { scan-assembler-not {\mbl\M} } } */
diff --git a/gcc/testsuite/gcc.target/powerpc/float128-minmax.c b/gcc/testsuite/gcc.target/powerpc/float128-minmax.c
new file mode 100644
index 00000000000..f8b025d66fe
--- /dev/null
+++ b/gcc/testsuite/gcc.target/powerpc/float128-minmax.c
@@ -0,0 +1,15 @@
+/* { dg-do compile { target { powerpc*-*-* && lp64 } } } */
+/* { dg-require-effective-target powerpc_p9vector_ok } */
+/* { dg-options "-mpower9-vector -O2 -ffast-math" } */
+
+#ifndef TYPE
+#define TYPE _Float128
+#endif
+
+/* Test that the fminf128/fmaxf128 functions generate if/then/else and not a
+ call. */
+TYPE f128_min (TYPE a, TYPE b) { return __builtin_fminf128 (a, b); }
+TYPE f128_max (TYPE a, TYPE b) { return __builtin_fmaxf128 (a, b); }
+
+/* { dg-final { scan-assembler {\mxscmpuqp\M} } } */
+/* { dg-final { scan-assembler-not {\mbl\M} } } */
diff --git a/gcc/testsuite/gcc.target/powerpc/p9-xxbr-1.c b/gcc/testsuite/gcc.target/powerpc/p9-xxbr-1.c
index 164f11f6ea3..7a07d0f8f2a 100644
--- a/gcc/testsuite/gcc.target/powerpc/p9-xxbr-1.c
+++ b/gcc/testsuite/gcc.target/powerpc/p9-xxbr-1.c
@@ -1,4 +1,4 @@
-/* { dg-do compile { target { powerpc64*-*-* } } } */
+/* { dg-do compile { target { powerpc*-*-* && { lp64 && p9vector_hw } } } } */
/* { dg-skip-if "do not override -mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power9" } } */
/* { dg-require-effective-target powerpc_p9vector_ok } */
/* { dg-options "-mcpu=power9 -O3" } */
@@ -10,25 +10,25 @@
vector char
rev_char (vector char a)
{
- return vec_revb (a); /* XXBRQ. */
+ return vec_revb (a); /* Is a NOP, maps to move inst */
}
vector bool char
rev_bool_char (vector bool char a)
{
- return vec_revb (a); /* XXBRQ. */
+ return vec_revb (a); /* Is a NOP, maps to move inst */
}
vector signed char
rev_schar (vector signed char a)
{
- return vec_revb (a); /* XXBRQ. */
+ return vec_revb (a); /* Is a NOP, maps to move inst */
}
vector unsigned char
rev_uchar (vector unsigned char a)
{
- return vec_revb (a); /* XXBRQ. */
+ return vec_revb (a); /* Is a NOP, maps to move inst */
}
vector short
@@ -81,5 +81,4 @@ rev_double (vector double a)
/* { dg-final { scan-assembler-times "xxbrd" 1 } } */
/* { dg-final { scan-assembler-times "xxbrh" 3 } } */
-/* { dg-final { scan-assembler-times "xxbrq" 4 } } */
/* { dg-final { scan-assembler-times "xxbrw" 4 } } */
diff --git a/gcc/testsuite/gcc.target/powerpc/p9-xxbr-3.c b/gcc/testsuite/gcc.target/powerpc/p9-xxbr-3.c
new file mode 100644
index 00000000000..98ad7ebfd87
--- /dev/null
+++ b/gcc/testsuite/gcc.target/powerpc/p9-xxbr-3.c
@@ -0,0 +1,99 @@
+/* { dg-do compile { target { powerpc*-*-* && lp64 } } } */
+/* { dg-require-effective-target powerpc_p9vector_ok } */
+/* { dg-options "-mpower9-vector -O2" } */
+
+/* Verify that the XXBR{H,W} instructions are generated if the value is
+ forced to be in a vector register, and XXBRD is generated all of the
+ time for register bswap64's. */
+
+unsigned short
+do_bswap16_mem (unsigned short *p)
+{
+ return __builtin_bswap16 (*p); /* LHBRX. */
+}
+
+unsigned short
+do_bswap16_reg (unsigned short a)
+{
+ return __builtin_bswap16 (a); /* gpr sequences. */
+}
+
+void
+do_bswap16_store (unsigned short *p, unsigned short a)
+{
+ *p = __builtin_bswap16 (a); /* STHBRX. */
+}
+
+unsigned short
+do_bswap16_vect (unsigned short a)
+{
+ __asm__ (" # %x0" : "+v" (a));
+ return __builtin_bswap16 (a); /* XXBRW. */
+}
+
+unsigned int
+do_bswap32_mem (unsigned int *p)
+{
+ return __builtin_bswap32 (*p); /* LWBRX. */
+}
+
+unsigned int
+do_bswap32_reg (unsigned int a)
+{
+ return __builtin_bswap32 (a); /* gpr sequences. */
+}
+
+void
+do_bswap32_store (unsigned int *p, unsigned int a)
+{
+ *p = __builtin_bswap32 (a); /* STWBRX. */
+}
+
+unsigned int
+do_bswap32_vect (unsigned int a)
+{
+ __asm__ (" # %x0" : "+v" (a));
+ return __builtin_bswap32 (a); /* XXBRW. */
+}
+
+unsigned long
+do_bswap64_mem (unsigned long *p)
+{
+ return __builtin_bswap64 (*p); /* LDBRX. */
+}
+
+unsigned long
+do_bswap64_reg (unsigned long a)
+{
+ return __builtin_bswap64 (a); /* gpr sequences. */
+}
+
+void
+do_bswap64_store (unsigned long *p, unsigned int a)
+{
+ *p = __builtin_bswap64 (a); /* STDBRX. */
+}
+
+double
+do_bswap64_double (unsigned long a)
+{
+ return (double) __builtin_bswap64 (a); /* XXBRD. */
+}
+
+unsigned long
+do_bswap64_vect (unsigned long a)
+{
+ __asm__ (" # %x0" : "+v" (a)); /* XXBRD. */
+ return __builtin_bswap64 (a);
+}
+
+/* Make sure XXBR{H,W,D} is not generated by default. */
+/* { dg-final { scan-assembler-times "xxbrd" 3 } } */
+/* { dg-final { scan-assembler-times "xxbrh" 1 } } */
+/* { dg-final { scan-assembler-times "xxbrw" 1 } } */
+/* { dg-final { scan-assembler-times "ldbrx" 1 } } */
+/* { dg-final { scan-assembler-times "lhbrx" 1 } } */
+/* { dg-final { scan-assembler-times "lwbrx" 1 } } */
+/* { dg-final { scan-assembler-times "stdbrx" 1 } } */
+/* { dg-final { scan-assembler-times "sthbrx" 1 } } */
+/* { dg-final { scan-assembler-times "stwbrx" 1 } } */
diff --git a/gcc/testsuite/gcc.target/powerpc/pr82748-1.c b/gcc/testsuite/gcc.target/powerpc/pr82748-1.c
new file mode 100644
index 00000000000..15a746bcf63
--- /dev/null
+++ b/gcc/testsuite/gcc.target/powerpc/pr82748-1.c
@@ -0,0 +1,82 @@
+/* { dg-do compile { target { powerpc*-*-* && lp64 } } } */
+/* { dg-require-effective-target powerpc_p9vector_ok } */
+/* { dg-options "-mpower9-vector -O2 -mabi=ieeelongdouble -Wno-psabi" } */
+
+/* Make sure the old 'q' builtin functions work correctly when the long double
+ default has been changed to be IEEE 128-bit floating point. */
+
+_Float128
+do_fabs_f (_Float128 a)
+{
+ return __builtin_fabsq (a);
+}
+
+_Float128
+do_copysign_f (_Float128 a, _Float128 b)
+{
+ return __builtin_copysignq (a, b);
+}
+
+_Float128
+do_inf_f (void)
+{
+ return __builtin_infq ();
+}
+
+_Float128
+do_nan_f (void)
+{
+ return __builtin_nanq ("");
+}
+
+_Float128
+do_nans_f (void)
+{
+ return __builtin_nansq ("");
+}
+
+_Float128
+do_huge_val_f (void)
+{
+ return __builtin_huge_valq ();
+}
+
+long double
+do_fabs_ld (long double a)
+{
+ return __builtin_fabsq (a);
+}
+
+long double
+do_copysign_ld (long double a, long double b)
+{
+ return __builtin_copysignq (a, b);
+}
+
+long double
+do_inf_ld (void)
+{
+ return __builtin_infq ();
+}
+
+long double
+do_nan_ld (void)
+{
+ return __builtin_nanq ("");
+}
+
+long double
+do_nans_ld (void)
+{
+ return __builtin_nansq ("");
+}
+
+long double
+do_huge_val_ld (void)
+{
+ return __builtin_huge_valq ();
+}
+
+/* { dg-final { scan-assembler {\mxsabsqp\M} } } */
+/* { dg-final { scan-assembler {\mxscpsgnqp\M} } } */
+/* { dg-final { scan-assembler-not {\mbl\M} } } */
diff --git a/gcc/testsuite/gcc.target/powerpc/pr82748-2.c b/gcc/testsuite/gcc.target/powerpc/pr82748-2.c
new file mode 100644
index 00000000000..0079394b101
--- /dev/null
+++ b/gcc/testsuite/gcc.target/powerpc/pr82748-2.c
@@ -0,0 +1,46 @@
+/* { dg-do compile { target { powerpc*-*-* && lp64 } } } */
+/* { dg-require-effective-target powerpc_p9vector_ok } */
+/* { dg-options "-mpower9-vector -O2 -mabi=ibmlongdouble -Wno-psabi" } */
+
+/* Make sure the old 'q' builtin functions work correctly when the long double
+ default uses the IBM double-double format. */
+
+_Float128
+do_fabs (_Float128 a)
+{
+ return __builtin_fabsq (a);
+}
+
+_Float128
+do_copysign (_Float128 a, _Float128 b)
+{
+ return __builtin_copysignq (a, b);
+}
+
+_Float128
+do_inf (void)
+{
+ return __builtin_infq ();
+}
+
+_Float128
+do_nan (void)
+{
+ return __builtin_nanq ("");
+}
+
+_Float128
+do_nans (void)
+{
+ return __builtin_nansq ("");
+}
+
+_Float128
+do_huge_val (void)
+{
+ return __builtin_huge_valq ();
+}
+
+/* { dg-final { scan-assembler {\mxsabsqp\M} } } */
+/* { dg-final { scan-assembler {\mxscpsgnqp\M} } } */
+/* { dg-final { scan-assembler-not {\mbl\M} } } */
diff --git a/gcc/testsuite/gcc.target/powerpc/sad-vectorize-1.c b/gcc/testsuite/gcc.target/powerpc/sad-vectorize-1.c
new file mode 100644
index 00000000000..b122bf5ce3e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/powerpc/sad-vectorize-1.c
@@ -0,0 +1,36 @@
+/* { dg-do compile { target { powerpc*-*-* } } } */
+/* { dg-skip-if "do not override -mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power9" } } */
+/* { dg-require-effective-target powerpc_p9vector_ok } */
+/* { dg-skip-if "" { powerpc*-*-aix* } } */
+/* { dg-options "-O3 -mcpu=power9" } */
+
+/* Verify that we vectorize this SAD loop using vabsdub. */
+
+extern int abs (int __x) __attribute__ ((__nothrow__, __leaf__)) __attribute__ ((__const__));
+
+static int
+foo (unsigned char *w, int i, unsigned char *x, int j)
+{
+ int tot = 0;
+ for (int a = 0; a < 16; a++)
+ {
+ for (int b = 0; b < 16; b++)
+ tot += abs (w[b] - x[b]);
+ w += i;
+ x += j;
+ }
+ return tot;
+}
+
+void
+bar (unsigned char *w, unsigned char *x, int i, int *result)
+{
+ *result = foo (w, 16, x, i);
+}
+
+/* { dg-final { scan-assembler-times "vabsdub" 16 } } */
+/* { dg-final { scan-assembler-times "vsum4ubs" 16 } } */
+/* { dg-final { scan-assembler-times "vadduwm" 17 } } */
+
+/* Note: One of the 16 adds is optimized out (add with zero),
+ leaving 15. The extra two adds are for the final reduction. */
diff --git a/gcc/testsuite/gcc.target/powerpc/sad-vectorize-2.c b/gcc/testsuite/gcc.target/powerpc/sad-vectorize-2.c
new file mode 100644
index 00000000000..b1b6de9ddea
--- /dev/null
+++ b/gcc/testsuite/gcc.target/powerpc/sad-vectorize-2.c
@@ -0,0 +1,36 @@
+/* { dg-do compile { target { powerpc*-*-* } } } */
+/* { dg-skip-if "do not override -mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power9" } } */
+/* { dg-require-effective-target powerpc_p9vector_ok } */
+/* { dg-skip-if "" { powerpc*-*-aix* } } */
+/* { dg-options "-O3 -mcpu=power9" } */
+
+/* Verify that we vectorize this SAD loop using vabsduh. */
+
+extern int abs (int __x) __attribute__ ((__nothrow__, __leaf__)) __attribute__ ((__const__));
+
+static int
+foo (unsigned short *w, int i, unsigned short *x, int j)
+{
+ int tot = 0;
+ for (int a = 0; a < 16; a++)
+ {
+ for (int b = 0; b < 8; b++)
+ tot += abs (w[b] - x[b]);
+ w += i;
+ x += j;
+ }
+ return tot;
+}
+
+void
+bar (unsigned short *w, unsigned short *x, int i, int *result)
+{
+ *result = foo (w, 8, x, i);
+}
+
+/* { dg-final { scan-assembler-times "vabsduh" 16 } } */
+/* { dg-final { scan-assembler-times "vsum4shs" 16 } } */
+/* { dg-final { scan-assembler-times "vadduwm" 17 } } */
+
+/* Note: One of the 16 adds is optimized out (add with zero),
+ leaving 15. The extra two adds are for the final reduction. */
diff --git a/gcc/testsuite/gcc.target/powerpc/sad-vectorize-3.c b/gcc/testsuite/gcc.target/powerpc/sad-vectorize-3.c
new file mode 100644
index 00000000000..0513a507484
--- /dev/null
+++ b/gcc/testsuite/gcc.target/powerpc/sad-vectorize-3.c
@@ -0,0 +1,57 @@
+/* { dg-do run { target { powerpc*-*-linux* && { lp64 && p9vector_hw } } } } */
+/* { dg-require-effective-target powerpc_p9vector_ok } */
+/* { dg-options "-O3 -mcpu=power9" } */
+/* { dg-skip-if "do not override -mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power9" } } */
+
+/* Verify that we get correct code when we vectorize this SAD loop using
+ vabsdub. */
+
+extern void abort ();
+extern int abs (int __x) __attribute__ ((__nothrow__, __leaf__)) __attribute__ ((__const__));
+
+static int
+foo (unsigned char *w, int i, unsigned char *x, int j)
+{
+ int tot = 0;
+ for (int a = 0; a < 16; a++)
+ {
+ for (int b = 0; b < 16; b++)
+ tot += abs (w[b] - x[b]);
+ w += i;
+ x += j;
+ }
+ return tot;
+}
+
+void
+bar (unsigned char *w, unsigned char *x, int i, int *result)
+{
+ *result = foo (w, 16, x, i);
+}
+
+int
+main ()
+{
+ unsigned char m[256];
+ unsigned char n[256];
+ int sum, i;
+
+ for (i = 0; i < 256; ++i)
+ if (i % 2 == 0)
+ {
+ m[i] = (i % 8) * 2 + 1;
+ n[i] = -(i % 8);
+ }
+ else
+ {
+ m[i] = -((i % 8) * 2 + 2);
+ n[i] = -((i % 8) >> 1);
+ }
+
+ bar (m, n, 16, &sum);
+
+ if (sum != 32384)
+ abort ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/powerpc/sad-vectorize-4.c b/gcc/testsuite/gcc.target/powerpc/sad-vectorize-4.c
new file mode 100644
index 00000000000..2db016563a5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/powerpc/sad-vectorize-4.c
@@ -0,0 +1,57 @@
+/* { dg-do run { target { powerpc*-*-linux* && { lp64 && p9vector_hw } } } } */
+/* { dg-require-effective-target powerpc_p9vector_ok } */
+/* { dg-options "-O3 -mcpu=power9" } */
+/* { dg-skip-if "do not override -mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power9" } } */
+
+/* Verify that we get correct code when we vectorize this SAD loop using
+ vabsduh. */
+
+extern void abort ();
+extern int abs (int __x) __attribute__ ((__nothrow__, __leaf__)) __attribute__ ((__const__));
+
+static int
+foo (unsigned short *w, int i, unsigned short *x, int j)
+{
+ int tot = 0;
+ for (int a = 0; a < 16; a++)
+ {
+ for (int b = 0; b < 8; b++)
+ tot += abs (w[b] - x[b]);
+ w += i;
+ x += j;
+ }
+ return tot;
+}
+
+void
+bar (unsigned short *w, unsigned short *x, int i, int *result)
+{
+ *result = foo (w, 8, x, i);
+}
+
+int
+main ()
+{
+ unsigned short m[128];
+ unsigned short n[128];
+ int sum, i;
+
+ for (i = 0; i < 128; ++i)
+ if (i % 2 == 0)
+ {
+ m[i] = (i % 8) * 2 + 1;
+ n[i] = i % 8;
+ }
+ else
+ {
+ m[i] = (i % 8) * 4 - 3;
+ n[i] = (i % 8) >> 1;
+ }
+
+ bar (m, n, 8, &sum);
+
+ if (sum != 992)
+ abort ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/powerpc/swaps-p8-26.c b/gcc/testsuite/gcc.target/powerpc/swaps-p8-26.c
index d01d86b94eb..28ce1cd39e4 100644
--- a/gcc/testsuite/gcc.target/powerpc/swaps-p8-26.c
+++ b/gcc/testsuite/gcc.target/powerpc/swaps-p8-26.c
@@ -1,11 +1,11 @@
/* { dg-do compile { target { powerpc64le-*-* } } } */
/* { dg-skip-if "do not override -mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power8" } } */
/* { dg-options "-mcpu=power8 -O3 " } */
-/* { dg-final { scan-assembler-times "lxvw4x" 2 } } */
-/* { dg-final { scan-assembler "stxvw4x" } } */
+/* { dg-final { scan-assembler-times "lxvd2x" 2 } } */
+/* { dg-final { scan-assembler "stxvd2x" } } */
/* { dg-final { scan-assembler-not "xxpermdi" } } */
-/* Verify that swap optimization does not interfere with element-reversing
+/* Verify that swap optimization does not interfere with unaligned
loads and stores. */
/* Test case to resolve PR79044. */
diff --git a/gcc/testsuite/gcc.target/powerpc/vec-cmp-sel.c b/gcc/testsuite/gcc.target/powerpc/vec-cmp-sel.c
index 6f3c0937ba4..f74a117ace4 100644
--- a/gcc/testsuite/gcc.target/powerpc/vec-cmp-sel.c
+++ b/gcc/testsuite/gcc.target/powerpc/vec-cmp-sel.c
@@ -12,9 +12,10 @@
#include <altivec.h>
+volatile vector signed long long x = { 25399, -12900 };
+volatile vector signed long long y = { 12178, -9987 };
+
vector signed long long foo () {
- vector signed long long x = { 25399, -12900 };
- vector signed long long y = { 12178, -9987 };
vector bool long long b = vec_cmpge (x, y);
vector signed long long z = vec_sel (y, x, b);
return z;
diff --git a/gcc/testsuite/gcc.target/powerpc/vsu/vec-cmpne-0.c b/gcc/testsuite/gcc.target/powerpc/vsu/vec-cmpne-0.c
index 8e036e3e2c9..5c09c70ae28 100644
--- a/gcc/testsuite/gcc.target/powerpc/vsu/vec-cmpne-0.c
+++ b/gcc/testsuite/gcc.target/powerpc/vsu/vec-cmpne-0.c
@@ -1,7 +1,7 @@
/* { dg-do compile { target { powerpc*-*-* } } } */
/* { dg-skip-if "do not override -mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power9" } } */
/* { dg-require-effective-target powerpc_p9vector_ok } */
-/* { dg-options "-mcpu=power9" } */
+/* { dg-options "-mcpu=power9 -O1" } */
#include <altivec.h>
diff --git a/gcc/testsuite/gcc.target/powerpc/vsu/vec-cmpne-1.c b/gcc/testsuite/gcc.target/powerpc/vsu/vec-cmpne-1.c
index e510a448a81..a74f7398543 100644
--- a/gcc/testsuite/gcc.target/powerpc/vsu/vec-cmpne-1.c
+++ b/gcc/testsuite/gcc.target/powerpc/vsu/vec-cmpne-1.c
@@ -1,7 +1,7 @@
/* { dg-do compile { target { powerpc*-*-* } } } */
/* { dg-skip-if "do not override -mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power9" } } */
/* { dg-require-effective-target powerpc_p9vector_ok } */
-/* { dg-options "-mcpu=power9" } */
+/* { dg-options "-mcpu=power9 -O1" } */
#include <altivec.h>
diff --git a/gcc/testsuite/gcc.target/powerpc/vsu/vec-cmpne-2.c b/gcc/testsuite/gcc.target/powerpc/vsu/vec-cmpne-2.c
index 0ea5aa79dc6..f7f1e0d7fb2 100644
--- a/gcc/testsuite/gcc.target/powerpc/vsu/vec-cmpne-2.c
+++ b/gcc/testsuite/gcc.target/powerpc/vsu/vec-cmpne-2.c
@@ -1,7 +1,7 @@
/* { dg-do compile { target { powerpc*-*-* } } } */
/* { dg-skip-if "do not override -mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power9" } } */
/* { dg-require-effective-target powerpc_p9vector_ok } */
-/* { dg-options "-mcpu=power9" } */
+/* { dg-options "-mcpu=power9 -O1" } */
#include <altivec.h>
diff --git a/gcc/testsuite/gcc.target/powerpc/vsu/vec-cmpne-3.c b/gcc/testsuite/gcc.target/powerpc/vsu/vec-cmpne-3.c
index 6bb5ebe24e4..8ec94bd4a50 100644
--- a/gcc/testsuite/gcc.target/powerpc/vsu/vec-cmpne-3.c
+++ b/gcc/testsuite/gcc.target/powerpc/vsu/vec-cmpne-3.c
@@ -1,7 +1,7 @@
/* { dg-do compile { target { powerpc*-*-* } } } */
/* { dg-skip-if "do not override -mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power9" } } */
/* { dg-require-effective-target powerpc_p9vector_ok } */
-/* { dg-options "-mcpu=power9" } */
+/* { dg-options "-mcpu=power9 -O1" } */
#include <altivec.h>
diff --git a/gcc/testsuite/gcc.target/powerpc/vsu/vec-cmpne-4.c b/gcc/testsuite/gcc.target/powerpc/vsu/vec-cmpne-4.c
index a8d3f175378..2f47697d384 100644
--- a/gcc/testsuite/gcc.target/powerpc/vsu/vec-cmpne-4.c
+++ b/gcc/testsuite/gcc.target/powerpc/vsu/vec-cmpne-4.c
@@ -1,7 +1,7 @@
/* { dg-do compile { target { powerpc*-*-* } } } */
/* { dg-skip-if "do not override -mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power9" } } */
/* { dg-require-effective-target powerpc_p9vector_ok } */
-/* { dg-options "-mcpu=power9" } */
+/* { dg-options "-mcpu=power9 -O1" } */
#include <altivec.h>
diff --git a/gcc/testsuite/gcc.target/powerpc/vsu/vec-cmpne-5.c b/gcc/testsuite/gcc.target/powerpc/vsu/vec-cmpne-5.c
index dae3e2291e2..11670859996 100644
--- a/gcc/testsuite/gcc.target/powerpc/vsu/vec-cmpne-5.c
+++ b/gcc/testsuite/gcc.target/powerpc/vsu/vec-cmpne-5.c
@@ -1,7 +1,7 @@
/* { dg-do compile { target { powerpc*-*-* } } } */
/* { dg-skip-if "do not override -mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power9" } } */
/* { dg-require-effective-target powerpc_p9vector_ok } */
-/* { dg-options "-mcpu=power9" } */
+/* { dg-options "-mcpu=power9 -O1" } */
#include <altivec.h>
diff --git a/gcc/testsuite/gcc.target/powerpc/vsu/vec-cmpne-6.c b/gcc/testsuite/gcc.target/powerpc/vsu/vec-cmpne-6.c
index 550a3531afd..031a48f1ca3 100644
--- a/gcc/testsuite/gcc.target/powerpc/vsu/vec-cmpne-6.c
+++ b/gcc/testsuite/gcc.target/powerpc/vsu/vec-cmpne-6.c
@@ -1,7 +1,7 @@
/* { dg-do compile { target { powerpc*-*-* } } } */
/* { dg-skip-if "do not override -mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power9" } } */
/* { dg-require-effective-target powerpc_p9vector_ok } */
-/* { dg-options "-mcpu=power9" } */
+/* { dg-options "-mcpu=power9 -O1" } */
#include <altivec.h>
diff --git a/gcc/testsuite/gcc.target/powerpc/vsu/vec-cnttz-lsbb-2.c b/gcc/testsuite/gcc.target/powerpc/vsu/vec-cnttz-lsbb-2.c
index 969107a24f7..cd4bb9dc9f7 100644
--- a/gcc/testsuite/gcc.target/powerpc/vsu/vec-cnttz-lsbb-2.c
+++ b/gcc/testsuite/gcc.target/powerpc/vsu/vec-cnttz-lsbb-2.c
@@ -10,5 +10,5 @@ count_trailing_zero_byte_bits (vector unsigned char *arg1_p)
{
vector unsigned char arg_1 = *arg1_p;
- return __builtin_vec_vctzlsbb (arg_1); /* { dg-error "builtin function '__builtin_altivec_vctzlsbb' requires the '-mcpu=power9' option" } */
+ return __builtin_vec_vctzlsbb (arg_1); /* { dg-error "builtin function '__builtin_altivec_vctzlsbb_v16qi' requires the '-mcpu=power9' option" } */
}
diff --git a/gcc/testsuite/gfortran.dg/alloc_comp_basics_1.f90 b/gcc/testsuite/gfortran.dg/alloc_comp_basics_1.f90
index 0b5ef274cc4..44d1c8bc0e6 100644
--- a/gcc/testsuite/gfortran.dg/alloc_comp_basics_1.f90
+++ b/gcc/testsuite/gfortran.dg/alloc_comp_basics_1.f90
@@ -141,4 +141,4 @@ contains
end subroutine check_alloc2
end program alloc
-! { dg-final { scan-tree-dump-times "builtin_free" 18 "original" } }
+! { dg-final { scan-tree-dump-times "builtin_free" 21 "original" } }
diff --git a/gcc/testsuite/gfortran.dg/allocatable_scalar_9.f90 b/gcc/testsuite/gfortran.dg/allocatable_scalar_9.f90
index d36175cd8d3..802c5f7bc8d 100644
--- a/gcc/testsuite/gfortran.dg/allocatable_scalar_9.f90
+++ b/gcc/testsuite/gfortran.dg/allocatable_scalar_9.f90
@@ -5,13 +5,13 @@
!
! Contributed by Tobias Burnus <burnus@gcc.gnu.org>
-module m
-type st
- integer , allocatable :: a1
-end type st
-type at
- integer , allocatable :: a2(:)
-end type at
+module m
+type st
+ integer , allocatable :: a1
+end type st
+type at
+ integer , allocatable :: a2(:)
+end type at
type t1
type(st), allocatable :: b1
@@ -52,4 +52,4 @@ if(allocated(na4%b4)) call abort()
end block
end
-! { dg-final { scan-tree-dump-times "__builtin_free" 32 "original" } }
+! { dg-final { scan-tree-dump-times "__builtin_free" 54 "original" } }
diff --git a/gcc/testsuite/gfortran.dg/allocate_assumed_charlen_1.f90 b/gcc/testsuite/gfortran.dg/allocate_assumed_charlen_1.f90
new file mode 100644
index 00000000000..382df36375d
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/allocate_assumed_charlen_1.f90
@@ -0,0 +1,28 @@
+! { dg-do run }
+!
+! PR82934: Segfault on compilation in trans-stmt.c:5919(8.0.0).
+! The original report only had one item in the allocate list. This
+! has been doubled up to verify that the correct string length is
+! is used in the allocation.
+!
+! Contributed by FortranFan on clf.
+!
+ character(len=42), allocatable :: foo
+ character(len=22), allocatable :: foofoo
+
+ call alloc( foo , foofoo)
+
+ if (len(foo) .ne. 42) call abort
+ if (len(foofoo) .ne. 22) call abort
+
+contains
+
+ subroutine alloc( bar, barbar )
+
+ character(len=*), allocatable :: bar, barbar
+
+ allocate( character(len=*) :: bar , barbar) ! <= Here!
+
+ end subroutine
+
+end
diff --git a/gcc/testsuite/gfortran.dg/auto_dealloc_1.f90 b/gcc/testsuite/gfortran.dg/auto_dealloc_1.f90
index 4f15bcd6159..99ecd1df856 100644
--- a/gcc/testsuite/gfortran.dg/auto_dealloc_1.f90
+++ b/gcc/testsuite/gfortran.dg/auto_dealloc_1.f90
@@ -50,7 +50,7 @@ contains
m%k%i = 45
end subroutine
-end module
+end module
-! { dg-final { scan-tree-dump-times "__builtin_free" 4 "original" } }
+! { dg-final { scan-tree-dump-times "__builtin_free" 10 "original" } }
diff --git a/gcc/testsuite/gfortran.dg/class_65.f90 b/gcc/testsuite/gfortran.dg/class_65.f90
new file mode 100644
index 00000000000..a82918c2087
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/class_65.f90
@@ -0,0 +1,41 @@
+! { dg-do run }
+!
+! Test the fix for PR81447 in which a vtable was not being created
+! in the module 'm' so that x->vptr in 's' did not have the same
+! value as that in 'p'.
+!
+! Contributed by Mat Cross <mathewc@nag.co.uk>
+!
+Module m
+ Type :: t
+ integer :: i
+ End Type
+End Module
+
+Program p
+ Use m
+ Class (t), Allocatable :: x
+ Interface
+ Subroutine s(x)
+ Use m
+ Class (t), Allocatable :: x
+ End Subroutine
+ End Interface
+ Call s(x)
+ Select Type (x)
+ Type Is (t)
+ Continue
+ Class Is (t)
+ call abort
+ Class Default
+ call abort
+ End Select
+! Print *, 'ok'
+End Program
+
+Subroutine s(x)
+ Use m, Only: t
+ Implicit None
+ Class (t), Allocatable :: x
+ Allocate (t :: x)
+End Subroutine
diff --git a/gcc/testsuite/gfortran.dg/class_66.f90 b/gcc/testsuite/gfortran.dg/class_66.f90
new file mode 100644
index 00000000000..1843ea7eb69
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/class_66.f90
@@ -0,0 +1,28 @@
+! { dg- do run }
+!
+! Test the fix for PR78641 in which an ICE occured on assignment
+! of a class array constructor to a derived type array.
+!
+! Contributed by Damian Rouson <damian@sourceryinstitute.org>
+!
+ implicit none
+ type foo
+ integer :: i = 99
+ end type
+ type(foo) :: bar(4)
+ class(foo), allocatable :: barfoo
+
+ allocate(barfoo,source = f(11))
+ bar = [f(33), [f(22), barfoo], f(1)]
+ if (any (bar%i .ne. [33, 22, 11, 1])) call abort
+ deallocate (barfoo)
+
+contains
+
+ function f(arg) result(foobar)
+ class(foo), allocatable :: foobar
+ integer :: arg
+ allocate(foobar,source = foo(arg))
+ end function
+
+end program
diff --git a/gcc/testsuite/gfortran.dg/coarray/send_char_array_1.f90 b/gcc/testsuite/gfortran.dg/coarray/send_char_array_1.f90
new file mode 100644
index 00000000000..800a8acc34c
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/coarray/send_char_array_1.f90
@@ -0,0 +1,54 @@
+!{ dg-do run }
+
+program send_convert_char_array
+
+ implicit none
+
+ character(kind=1, len=:), allocatable, codimension[:] :: co_str_k1_scal
+ character(kind=1, len=:), allocatable :: str_k1_scal
+ character(kind=4, len=:), allocatable, codimension[:] :: co_str_k4_scal
+ character(kind=4, len=:), allocatable :: str_k4_scal
+
+ character(kind=1, len=:), allocatable, codimension[:] :: co_str_k1_arr(:)
+ character(kind=1, len=:), allocatable :: str_k1_arr(:)
+ character(kind=4, len=:), allocatable, codimension[:] :: co_str_k4_arr(:)
+ character(kind=4, len=:), allocatable :: str_k4_arr(:)
+
+ allocate(str_k1_scal, SOURCE='abcdefghij')
+ allocate(str_k4_scal, SOURCE=4_'abcdefghij')
+ allocate(character(len=20)::co_str_k1_scal[*]) ! allocate syncs here
+ allocate(character(kind=4, len=20)::co_str_k4_scal[*]) ! allocate syncs here
+
+ allocate(str_k1_arr, SOURCE=['abc', 'EFG', 'klm', 'NOP'])
+ allocate(str_k4_arr, SOURCE=[4_'abc', 4_'EFG', 4_'klm', 4_'NOP'])
+ allocate(character(len=5)::co_str_k1_arr(4)[*])
+ allocate(character(kind=4, len=5)::co_str_k4_arr(4)[*])
+
+ ! First check send/copy to self
+ co_str_k1_scal[1] = str_k1_scal
+ if (co_str_k1_scal /= str_k1_scal // ' ') call abort()
+
+ co_str_k4_scal[1] = str_k4_scal
+ if (co_str_k4_scal /= str_k4_scal // 4_' ') call abort()
+
+ co_str_k4_scal[1] = str_k1_scal
+ if (co_str_k4_scal /= str_k4_scal // 4_' ') call abort()
+
+ co_str_k1_scal[1] = str_k4_scal
+ if (co_str_k1_scal /= str_k1_scal // ' ') call abort()
+
+ co_str_k1_arr(:)[1] = str_k1_arr
+ if (any(co_str_k1_arr /= ['abc ', 'EFG ', 'klm ', 'NOP '])) call abort()
+
+ co_str_k4_arr(:)[1] = [4_'abc', 4_'EFG', 4_'klm', 4_'NOP']! str_k4_arr
+ if (any(co_str_k4_arr /= [4_'abc ', 4_'EFG ', 4_'klm ', 4_'NOP '])) call abort()
+
+ co_str_k4_arr(:)[1] = str_k1_arr
+ if (any(co_str_k4_arr /= [ 4_'abc ', 4_'EFG ', 4_'klm ', 4_'NOP '])) call abort()
+
+ co_str_k1_arr(:)[1] = str_k4_arr
+ if (any(co_str_k1_arr /= ['abc ', 'EFG ', 'klm ', 'NOP '])) call abort()
+
+end program send_convert_char_array
+
+! vim:ts=2:sts=2:sw=2:
diff --git a/gcc/testsuite/gfortran.dg/coarray_lib_realloc_1.f90 b/gcc/testsuite/gfortran.dg/coarray_lib_realloc_1.f90
index c55507b5821..559d880b5ac 100644
--- a/gcc/testsuite/gfortran.dg/coarray_lib_realloc_1.f90
+++ b/gcc/testsuite/gfortran.dg/coarray_lib_realloc_1.f90
@@ -21,14 +21,14 @@ x = y
end
! For comp%ii: End of scope of x + y (2x) and for the LHS of the assignment (1x)
-! { dg-final { scan-tree-dump-times "__builtin_free" 3 "original" } }
+! { dg-final { scan-tree-dump-times "__builtin_free" 6 "original" } }
! For comp%CAF: End of scope of x + y (2x); no LHS freeing for the CAF in assignment
-! { dg-final { scan-tree-dump-times "_gfortran_caf_deregister" 2 "original" } }
+! { dg-final { scan-tree-dump-times "_gfortran_caf_deregister" 3 "original" } }
! Only malloc "ii":
-! { dg-final { scan-tree-dump-times "__builtin_malloc" 1 "original" } }
+! { dg-final { scan-tree-dump-times "__builtin_malloc" 4 "original" } }
! But copy "ii" and "CAF":
-! { dg-final { scan-tree-dump-times "__builtin_memcpy|= MEM" 2 "original" } }
+! { dg-final { scan-tree-dump-times "__builtin_memcpy|= MEM" 5 "original" } }
diff --git a/gcc/testsuite/gfortran.dg/dec_structure_23.f90 b/gcc/testsuite/gfortran.dg/dec_structure_23.f90
new file mode 100644
index 00000000000..3c68489c4bd
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/dec_structure_23.f90
@@ -0,0 +1,19 @@
+! { dg-do compile }
+! { dg-options "-fdec-structure" }
+!
+! PR fortran/78240
+!
+! Test a regression where an ICE occurred attempting to create array variables
+! with non-constant array-specs in legacy clist initializers.
+!
+
+program p
+ implicit none
+ integer :: nn
+ real :: rr
+ structure /s/
+ integer x(n) /1/ ! { dg-error "xpected constant" }
+ integer xx(nn) /1/ ! { dg-error "xpected constant" }
+ integer xxx(rr) /1.0/ ! { dg-error "xpected constant" }
+ end structure
+end
diff --git a/gcc/testsuite/gfortran.dg/finalize_28.f90 b/gcc/testsuite/gfortran.dg/finalize_28.f90
index 03de5d0d28b..f0c9665252f 100644
--- a/gcc/testsuite/gfortran.dg/finalize_28.f90
+++ b/gcc/testsuite/gfortran.dg/finalize_28.f90
@@ -21,4 +21,4 @@ contains
integer, intent(out) :: edges(:,:)
end subroutine coo_dump_edges
end module coo_graphs
-! { dg-final { scan-tree-dump-times "__builtin_free" 3 "original" } }
+! { dg-final { scan-tree-dump-times "__builtin_free" 6 "original" } }
diff --git a/gcc/testsuite/gfortran.dg/hollerith_character_array_constructor.f90 b/gcc/testsuite/gfortran.dg/hollerith_character_array_constructor.f90
new file mode 100644
index 00000000000..086d46efc2e
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/hollerith_character_array_constructor.f90
@@ -0,0 +1,11 @@
+! { dg-do run }
+! { dg-options "-w" }
+! PR fortran/82884
+! Original code contributed by Gerhard Steinmetz
+program p
+ character :: c(4) = [1h(, 1hi, 1h4, 1h)]
+ if (c(1) /= '(') call abort
+ if (c(2) /= 'i') call abort
+ if (c(3) /= '4') call abort
+ if (c(4) /= ')') call abort
+end
diff --git a/gcc/testsuite/gfortran.dg/init_flag_16.f03 b/gcc/testsuite/gfortran.dg/init_flag_16.f03
new file mode 100644
index 00000000000..a39df63d772
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/init_flag_16.f03
@@ -0,0 +1,25 @@
+! { dg-do compile }
+! { dg-options "-finit-derived" }
+!
+! PR fortran/82886
+!
+! Test a regression which caused an ICE when -finit-derived was given without
+! other -finit-* flags, especially for derived-type components with potentially
+! hidden basic integer components.
+!
+
+program pr82886
+
+ use, intrinsic :: iso_c_binding, only: c_ptr, c_null_ptr
+ type t
+ type(c_ptr) :: my_c_ptr
+ end type
+
+contains
+
+ subroutine sub0() bind(c)
+ type(t), target :: my_f90_type
+ my_f90_type%my_c_ptr = c_null_ptr
+ end subroutine
+
+end
diff --git a/gcc/testsuite/gfortran.dg/interface_40.f90 b/gcc/testsuite/gfortran.dg/interface_40.f90
new file mode 100644
index 00000000000..085c6b30f39
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/interface_40.f90
@@ -0,0 +1,8 @@
+! { dg-do compile }
+! PR fortran/78814
+! Code contributed by Gerhard Steinmetz
+program p
+ class(*) :: x ! { dg-error " must be dummy, allocatable or pointer" }
+ print *, f(x)
+end
+
diff --git a/gcc/testsuite/gfortran.dg/logical_temp_io.f90 b/gcc/testsuite/gfortran.dg/logical_temp_io.f90
new file mode 100644
index 00000000000..77260a9c669
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/logical_temp_io.f90
@@ -0,0 +1,13 @@
+! { dg-do run }
+! PR 82869
+! A temp variable of type logical was incorrectly transferred
+! to the I/O library as a logical type of a different kind.
+program pr82869
+ use, intrinsic :: iso_c_binding
+ type(c_ptr) :: p = c_null_ptr
+ character(len=4) :: s
+ write (s, *) c_associated(p), c_associated(c_null_ptr)
+ if (s /= ' F F') then
+ call abort()
+ end if
+end program pr82869
diff --git a/gcc/testsuite/gfortran.dg/logical_temp_io_kind8.f90 b/gcc/testsuite/gfortran.dg/logical_temp_io_kind8.f90
new file mode 100644
index 00000000000..662289e1c34
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/logical_temp_io_kind8.f90
@@ -0,0 +1,14 @@
+! { dg-do run }
+! { dg-options "-fdefault-integer-8" }
+! PR 82869
+! A temp variable of type logical was incorrectly transferred
+! to the I/O library as a logical type of a different kind.
+program pr82869_8
+ use, intrinsic :: iso_c_binding
+ type(c_ptr) :: p = c_null_ptr
+ character(len=4) :: s
+ write (s, *) c_associated(p), c_associated(c_null_ptr)
+ if (s /= ' F F') then
+ call abort()
+ end if
+end program pr82869_8
diff --git a/gcc/testsuite/gfortran.dg/loop_interchange_1.f90 b/gcc/testsuite/gfortran.dg/loop_interchange_1.f90
new file mode 100644
index 00000000000..a061e28b35a
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/loop_interchange_1.f90
@@ -0,0 +1,22 @@
+! { dg-do compile }
+! { dg-additional-options "-O -Wfrontend-loop-interchange" }
+PROGRAM TEST_DO_SPEED
+ IMPLICIT NONE
+
+ REAL, ALLOCATABLE :: A(:,:,:), B(:,:,:), C(:,:,:)
+ REAL :: TIC
+ INTEGER :: T0, T1, T2
+ INTEGER :: I, J, K
+ INTEGER, PARAMETER :: L = 512, M = 512, N = 512
+
+ ALLOCATE( A(L,M,N), B(L,M,N), C(L,M,N) )
+ CALL RANDOM_NUMBER(A)
+ CALL RANDOM_NUMBER(B)
+
+ CALL SYSTEM_CLOCK( T0, TIC)
+
+ DO CONCURRENT( K=1:N, J=1:M, I=1:L) ! { dg-warning "Interchanging loops" }
+ C(I,J,K) = A(I,J,K) +B(I,J,K)
+ END DO
+END
+
diff --git a/gcc/testsuite/gfortran.dg/minmaxloc_8.f90 b/gcc/testsuite/gfortran.dg/minmaxloc_8.f90
new file mode 100644
index 00000000000..e9f37f2b689
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/minmaxloc_8.f90
@@ -0,0 +1,48 @@
+! { dg-do run }
+! { dg-options "-fdump-tree-original" }
+! Test that minloc and maxloc using KINDs return the right
+! kind, by using unformatted I/O for a specific kind.
+program main
+ implicit none
+ real, dimension(3) :: a
+ integer :: r1, r2, r4, r8
+ integer :: k
+ character(len=30) :: l1, l2
+
+ ! Check via I/O if the KIND is used correctly
+ a = [ 1.0, 3.0, 2.0]
+ write (unit=l1,fmt=*) 2_1
+ write (unit=l2,fmt=*) maxloc(a,kind=1)
+ if (l1 /= l2) call abort
+
+ write (unit=l1,fmt=*) 2_2
+ write (unit=l2,fmt=*) maxloc(a,kind=2)
+ if (l1 /= l2) call abort
+
+ write (unit=l1,fmt=*) 2_4
+ write (unit=l2,fmt=*) maxloc(a,kind=4)
+ if (l1 /= l2) call abort
+
+ write (unit=l1,fmt=*) 2_8
+ write (unit=l2,fmt=*) maxloc(a,kind=8)
+ if (l1 /= l2) call abort
+
+ a = [ 3.0, -1.0, 2.0]
+
+ write (unit=l1,fmt=*) 2_1
+ write (unit=l2,fmt=*) minloc(a,kind=1)
+ if (l1 /= l2) call abort
+
+ write (unit=l1,fmt=*) 2_2
+ write (unit=l2,fmt=*) minloc(a,kind=2)
+ if (l1 /= l2) call abort
+
+ write (unit=l1,fmt=*) 2_4
+ write (unit=l2,fmt=*) minloc(a,kind=4)
+ if (l1 /= l2) call abort
+
+ write (unit=l1,fmt=*) 2_8
+ write (unit=l2,fmt=*) minloc(a,kind=8)
+ if (l1 /= l2) call abort
+
+end program main
diff --git a/gcc/testsuite/gfortran.dg/move_alloc_15.f90 b/gcc/testsuite/gfortran.dg/move_alloc_15.f90
index 1c96ccba1cf..0c8cacf3cf0 100644
--- a/gcc/testsuite/gfortran.dg/move_alloc_15.f90
+++ b/gcc/testsuite/gfortran.dg/move_alloc_15.f90
@@ -84,5 +84,5 @@ contains
end do
end subroutine
end program name
-! { dg-final { scan-tree-dump-times "__builtin_malloc" 11 "original" } }
-! { dg-final { scan-tree-dump-times "__builtin_free" 11 "original" } }
+! { dg-final { scan-tree-dump-times "__builtin_malloc" 14 "original" } }
+! { dg-final { scan-tree-dump-times "__builtin_free" 14 "original" } }
diff --git a/gcc/testsuite/gfortran.dg/pr69739.f90 b/gcc/testsuite/gfortran.dg/pr69739.f90
new file mode 100644
index 00000000000..f5e2359878e
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/pr69739.f90
@@ -0,0 +1,39 @@
+! { dg-do run }
+!
+! Test the fix for PR69739 in which the statement
+! R = operate(A, X) caused an ICE.
+!
+! Contributed by John <jwmwalrus@gmail.com>
+!
+module test
+
+ implicit none
+ type, public :: sometype
+ real :: a = 0.
+ end type
+contains
+
+ function dosomething(A) result(r)
+ type(sometype), intent(IN) :: A(:,:,:)
+ integer :: N
+ real, allocatable :: R(:), X(:)
+
+ N = PRODUCT(UBOUND(A))
+ allocate (R(N),X(N))
+ X = [(real(N), N = 1, size(X, 1))]
+ R = operate(A, X)
+ end function
+
+ function operate(A, X)
+ type(sometype), intent(IN) :: A(:,:,:)
+ real, intent(IN) :: X(:)
+ real :: operate(1:PRODUCT(UBOUND(A)))
+
+ operate = x
+ end function
+end module test
+
+ use test
+ type(sometype) :: a(2, 2, 2)
+ if (any(int (dosomething(a)) .ne. [1,2,3,4,5,6])) call abort
+end
diff --git a/gcc/testsuite/gfortran.dg/pr70330.f90 b/gcc/testsuite/gfortran.dg/pr70330.f90
new file mode 100644
index 00000000000..29f5f7b6a04
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/pr70330.f90
@@ -0,0 +1,7 @@
+! { dg-do compile }
+! { dg-additional-options "-Wall -Wextra -Wno-unused-dummy-argument" }
+! PR fortran/70330 - this used to cause an ICE.
+! Test case by Vladimir Fuka
+function f(o) ! { dg-warning "Return value of function" }
+ optional o
+end function f
diff --git a/gcc/testsuite/gfortran.dg/pr78240.f90 b/gcc/testsuite/gfortran.dg/pr78240.f90
new file mode 100644
index 00000000000..5373b555a50
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/pr78240.f90
@@ -0,0 +1,15 @@
+! { dg-do compile }
+! { dg-options "-w" }
+!
+! PR fortran/78240
+!
+! Test a regression where an ICE occurred by passing an invalid reference
+! to the error handling routine for non-constant array-specs in DATA list
+! initializers.
+!
+
+program p
+ integer x(n) /1/ ! { dg-error "cannot appear in the expression" }
+end
+! { dg-prune-output "module or main program" }
+! { dg-prune-output "Nonconstant array" }
diff --git a/gcc/testsuite/gfortran.dg/pr78619.f90 b/gcc/testsuite/gfortran.dg/pr78619.f90
new file mode 100644
index 00000000000..5fbe185cfab
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/pr78619.f90
@@ -0,0 +1,21 @@
+! { dg-do compile }
+! { dg-options "-Werror -O3" }
+!
+! Tests the fix for PR78619, in which the recursive use of 'f' at line 13
+! caused an ICE.
+!
+! Contributed by Gerhard Steinmetz <gerhard.steinmetz.fortran@t-online.de>
+!
+ print *, g(1.0) ! 'g' is OK
+contains
+ function f(x) result(z)
+ real :: x, z
+ z = sign(1.0, f) ! { dg-error "calling itself recursively|must be the same type" }
+ end
+ real function g(x)
+ real :: x
+ g = -1
+ g = -sign(1.0, g) ! This is OK.
+ end
+end
+! { dg-message "all warnings being treated as errors" "" { target *-*-* } 0 }
diff --git a/gcc/testsuite/gfortran.dg/transfer_simplify_11.f90 b/gcc/testsuite/gfortran.dg/transfer_simplify_11.f90
new file mode 100644
index 00000000000..ce7a4ad5e56
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/transfer_simplify_11.f90
@@ -0,0 +1,8 @@
+! { dg-do run }
+! PR Fortran/82841
+!
+ integer, parameter :: N = 2
+ character(len=1) :: chr(N)
+ chr = transfer(repeat("x",ncopies=N),[character(len=1) ::], N)
+ if (chr(1) /= 'x' .and. chr(2) /= 'x') call abort
+end
diff --git a/gcc/testsuite/gfortran.dg/typebound_call_29.f90 b/gcc/testsuite/gfortran.dg/typebound_call_29.f90
new file mode 100644
index 00000000000..b07e67ff72c
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/typebound_call_29.f90
@@ -0,0 +1,46 @@
+! { dg-do compile }
+!
+! PR 82932: [OOP] ICE in update_compcall_arglist, at fortran/resolve.c:5837
+!
+! Contributed by Janus Weil <janus@gcc.gnu.org>
+
+module m
+
+ implicit none
+
+ type, abstract :: AT
+ contains
+ procedure(init_ifc), deferred :: sinit
+ procedure(missing_ifc), deferred :: missing
+ generic :: init => sinit
+ end type
+
+ abstract interface
+ subroutine init_ifc(data)
+ import AT
+ class(AT) :: data
+ end subroutine
+ subroutine missing_ifc(data)
+ import AT
+ class(AT) :: data
+ end subroutine
+ end interface
+
+end module
+
+
+program p
+
+ use m
+
+ implicit none
+
+ type, extends(AT) :: ET ! { dg-error "must be ABSTRACT" }
+ contains
+ procedure :: sinit
+ end type
+
+ type(ET) :: c
+ call c%init()
+
+end
diff --git a/gcc/testsuite/gfortran.dg/typebound_proc_27.f03 b/gcc/testsuite/gfortran.dg/typebound_proc_27.f03
index 29332c4169c..06484942277 100644
--- a/gcc/testsuite/gfortran.dg/typebound_proc_27.f03
+++ b/gcc/testsuite/gfortran.dg/typebound_proc_27.f03
@@ -1,6 +1,6 @@
! { dg-do run }
! { dg-options "-fdump-tree-original" }
-!
+!
! PR fortran/47586
! Missing deep copy for data pointer returning functions when the type
! has allocatable components
@@ -77,15 +77,15 @@ end program prog
! statements.
! It is assumed that if the number of allocate is right, the number of
! deep copies is right too.
-! { dg-final { scan-tree-dump-times "__builtin_malloc" 12 "original" } }
+! { dg-final { scan-tree-dump-times "__builtin_malloc" 15 "original" } }
!
! Realloc are only used for assignments to `that%i'. Don't know why.
! { dg-final { scan-tree-dump-times "__builtin_realloc" 6 "original" } }
-!
+!
! No leak: Only assignments to `this' use malloc. Assignments to `that%i'
! take the realloc path after the first assignment, so don't count as a malloc.
-! { dg-final { scan-tree-dump-times "__builtin_free" 7 "original" } }
+! { dg-final { scan-tree-dump-times "__builtin_free" 10 "original" } }
!
diff --git a/gcc/testsuite/gfortran.dg/vect/vect-8.f90 b/gcc/testsuite/gfortran.dg/vect/vect-8.f90
index c86cf008dd2..2a60c81bde0 100644
--- a/gcc/testsuite/gfortran.dg/vect/vect-8.f90
+++ b/gcc/testsuite/gfortran.dg/vect/vect-8.f90
@@ -705,5 +705,5 @@ RETURN
END SUBROUTINE kernel
! { dg-final { scan-tree-dump-times "vectorized 17 loops" 1 "vect" { target { ! vect_intdouble_cvt } } } }
-! { dg-final { scan-tree-dump-times "vectorized 21 loops" 1 "vect" { target { vect_intdouble_cvt && { ! vect_ieee_add_reduc } } } } }
-! { dg-final { scan-tree-dump-times "vectorized 25 loops" 1 "vect" { target { vect_intdouble_cvt && vect_ieee_add_reduc } } } }
+! { dg-final { scan-tree-dump-times "vectorized 21 loops" 1 "vect" { target { vect_intdouble_cvt && { ! vect_fold_left_plus } } } } }
+! { dg-final { scan-tree-dump-times "vectorized 23 loops" 1 "vect" { target { vect_intdouble_cvt && vect_fold_left_plus } } } }
diff --git a/gcc/testsuite/gnat.dg/controlled2.adb b/gcc/testsuite/gnat.dg/controlled2.adb
index 4fa61aff805..69665c942ae 100644
--- a/gcc/testsuite/gnat.dg/controlled2.adb
+++ b/gcc/testsuite/gnat.dg/controlled2.adb
@@ -1,4 +1,5 @@
--- { dg-do compile }
+-- { dg-do compile }
+-- { dg-options "-gnatws" }
with controlled1; use controlled1;
package body controlled2 is
diff --git a/gcc/testsuite/gnat.dg/controlled4.adb b/gcc/testsuite/gnat.dg/controlled4.adb
index b823cc9f4e5..f8159c9204d 100644
--- a/gcc/testsuite/gnat.dg/controlled4.adb
+++ b/gcc/testsuite/gnat.dg/controlled4.adb
@@ -1,4 +1,5 @@
--- { dg-do compile }
+-- { dg-do compile }
+-- { dg-options "-gnatws" }
package body controlled4 is
procedure Test_Suite is
diff --git a/gcc/testsuite/gnat.dg/delta_aggr.adb b/gcc/testsuite/gnat.dg/delta_aggr.adb
new file mode 100644
index 00000000000..57e0a69693a
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/delta_aggr.adb
@@ -0,0 +1,51 @@
+-- { dg-do compile }
+-- { dg-options "-gnat2020" }
+
+procedure Delta_Aggr is
+ type T1 is tagged record
+ F1, F2, F3 : Integer := 0;
+ end record;
+
+ function Make (X : Integer) return T1 is
+ begin
+ return (10, 20, 30);
+ end Make;
+
+ package Pkg is
+ type T2 is new T1 with private;
+ X, Y : constant T2;
+ function Make (X : Integer) return T2;
+ private
+ type T2 is new T1 with
+ record
+ F4 : Integer := 0;
+ end record;
+ X : constant T2 := (0, 0, 0, 0);
+ Y : constant T2 := (1, 2, 0, 0);
+ end Pkg;
+
+ package body Pkg is
+ function Make (X : Integer) return T2 is
+ begin
+ return (X, X ** 2, X ** 3, X ** 4);
+ end Make;
+ end Pkg;
+
+ use Pkg;
+
+ Z : T2 := (Y with delta F1 => 111);
+
+ -- a legal delta aggregate whose type is a private extension
+ pragma Assert (Y = (X with delta F1 => 1, F2 => 2));
+ pragma assert (Y.F2 = X.F1);
+
+begin
+ Z := (X with delta F1 => 1);
+
+ -- The base of the delta aggregate can be overloaded, in which case
+ -- the candidate interpretations for the aggregate are those of the
+ -- base, to be resolved from context.
+
+ Z := (Make (2) with delta F1 => 1);
+ null;
+end Delta_Aggr;
diff --git a/gcc/testsuite/gnat.dg/elab3.adb b/gcc/testsuite/gnat.dg/elab3.adb
new file mode 100644
index 00000000000..2c0a4b2df25
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/elab3.adb
@@ -0,0 +1,9 @@
+-- { dg-do compile }
+
+with Elab3_Pkg;
+
+package body Elab3 is
+ package Inst is new Elab3_Pkg (False, ABE);
+
+ procedure ABE is begin null; end ABE;
+end Elab3;
diff --git a/gcc/testsuite/gnat.dg/elab3.ads b/gcc/testsuite/gnat.dg/elab3.ads
new file mode 100644
index 00000000000..92fd4c3821b
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/elab3.ads
@@ -0,0 +1,3 @@
+package Elab3 is
+ procedure ABE;
+end Elab3;
diff --git a/gcc/testsuite/gnat.dg/elab3_pkg.adb b/gcc/testsuite/gnat.dg/elab3_pkg.adb
new file mode 100644
index 00000000000..76616d00eaf
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/elab3_pkg.adb
@@ -0,0 +1,11 @@
+package body Elab3_Pkg is
+ procedure Elaborator is
+ begin
+ Proc;
+ end Elaborator;
+
+begin
+ if Elaborate then
+ Elaborator;
+ end if;
+end Elab3_Pkg;
diff --git a/gcc/testsuite/gnat.dg/elab3_pkg.ads b/gcc/testsuite/gnat.dg/elab3_pkg.ads
new file mode 100644
index 00000000000..b4abf3a6a42
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/elab3_pkg.ads
@@ -0,0 +1,7 @@
+generic
+ Elaborate : Boolean := True;
+ with procedure Proc;
+
+package Elab3_Pkg is
+ procedure Elaborator;
+end Elab3_Pkg;
diff --git a/gcc/testsuite/gnat.dg/finalized.adb b/gcc/testsuite/gnat.dg/finalized.adb
index 36400d53ecc..a8d2f8808c6 100644
--- a/gcc/testsuite/gnat.dg/finalized.adb
+++ b/gcc/testsuite/gnat.dg/finalized.adb
@@ -1,4 +1,5 @@
-- { dg-do compile }
+-- { dg-options "-gnatws" }
with Ada.Finalization; use Ada.Finalization;
procedure finalized is
diff --git a/gcc/testsuite/gnat.dg/gcov/check.adb b/gcc/testsuite/gnat.dg/gcov/check.adb
new file mode 100644
index 00000000000..b3cb8e36b92
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/gcov/check.adb
@@ -0,0 +1,27 @@
+-- { dg-options "-fprofile-arcs -ftest-coverage" }
+-- { dg-do run { target native } } */
+
+procedure Check is
+
+ function Add1 (I1, I2 : Integer) return Integer is
+ begin
+ return I1 + I2; -- count(1)
+ end;
+
+ function Add2 (I1, I2 : Integer) return Integer is
+ pragma Suppress (Overflow_Check);
+ begin
+ return I1 + I2; -- count(1)
+ end;
+
+begin
+ if Add1 (1, 2) /= 3 then
+ raise Program_Error;
+ end if;
+
+ if Add2 (1, 2) /= 3 then
+ raise Program_Error;
+ end if;
+end;
+
+-- { dg-final { run-gcov check.adb } }
diff --git a/gcc/testsuite/gnat.dg/gcov/gcov.exp b/gcc/testsuite/gnat.dg/gcov/gcov.exp
new file mode 100644
index 00000000000..732ff877638
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/gcov/gcov.exp
@@ -0,0 +1,44 @@
+# Copyright (C) 1997-2017 Free Software Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# Gcov test driver.
+
+# Load support procs.
+load_lib gnat-dg.exp
+load_lib gcov.exp
+
+global GCC_UNDER_TEST
+
+# For now find gcov in the same directory as $GCC_UNDER_TEST.
+if { ![is_remote host] && [string match "*/*" [lindex $GCC_UNDER_TEST 0]] } {
+ set GCOV [file dirname [lindex $GCC_UNDER_TEST 0]]/gcov
+} else {
+ set GCOV gcov
+}
+
+# Initialize harness.
+dg-init
+
+# Delete old .gcda files.
+set files [glob -nocomplain *.gcda]
+if { $files != "" } {
+ eval "remote_file build delete $files"
+}
+
+# Main loop.
+dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.adb]] "" ""
+
+dg-finish
diff --git a/gcc/testsuite/gnat.dg/opt69.adb b/gcc/testsuite/gnat.dg/opt69.adb
new file mode 100644
index 00000000000..e8c94dae2dc
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/opt69.adb
@@ -0,0 +1,28 @@
+-- { dg-do compile }
+-- { dg-options "-O" }
+
+with Ada.Text_IO;
+
+procedure Opt69 is
+
+ procedure Inner
+ (A : String := (1 .. 15 => ASCII.NUL);
+ B : String := (1 .. 5 => ASCII.NUL);
+ C : String := (1 .. 5 => ASCII.NUL))
+ is
+ Aa : String (1 .. 15);
+ Bb : String (1 .. 5);
+ Cc : String (1 .. 5);
+ begin
+ Aa := A;
+ Bb := B;
+ Cc := C;
+
+ Ada.Text_IO.Put_Line (Aa);
+ Ada.Text_IO.Put_Line (Bb);
+ Ada.Text_IO.Put_Line (Cc);
+ end;
+
+begin
+ Inner;
+end;
diff --git a/gcc/testsuite/gnat.dg/out_param.adb b/gcc/testsuite/gnat.dg/out_param.adb
new file mode 100644
index 00000000000..14a2f94ea2a
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/out_param.adb
@@ -0,0 +1,21 @@
+-- { dg-do compile }
+-- { dg-options "-gnat83" }
+
+procedure Out_Param
+ (Source : in String; Dest : out String; Char_Count : out Natural) is
+begin
+ --| Logic_Step:
+ --| Copy string Source to string Dest
+ Dest := (others => ' ');
+ Char_Count := 0;
+ if Source'Length > 0 and then Dest'Length > 0 then
+ if Source'Length > Dest'Length then
+ Char_Count := Dest'Length;
+ else
+ Dest (Dest'First .. (Dest'First + Source'Length - 1)) := Source;
+ Char_Count := Source'Length;
+ end if;
+ else
+ null;
+ end if;
+end Out_Param;
diff --git a/gcc/testsuite/gnat.dg/overriding_ops2.adb b/gcc/testsuite/gnat.dg/overriding_ops2.adb
new file mode 100644
index 00000000000..9ab2f5c507e
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/overriding_ops2.adb
@@ -0,0 +1,8 @@
+-- { dg-do compile }
+
+package body Overriding_Ops2 is
+ overriding procedure Finalize (Self : in out Consumer) is
+ begin
+ null;
+ end Finalize;
+end Overriding_Ops2;
diff --git a/gcc/testsuite/gnat.dg/overriding_ops2.ads b/gcc/testsuite/gnat.dg/overriding_ops2.ads
new file mode 100644
index 00000000000..695cffb1947
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/overriding_ops2.ads
@@ -0,0 +1,12 @@
+with Overriding_Ops2_Pkg.High;
+
+package Overriding_Ops2 is
+ type Consumer is tagged limited private;
+private
+ type Consumer is
+ limited
+ new Overriding_Ops2_Pkg.High.High_Level_Session
+ with null record;
+
+ overriding procedure Finalize (Self : in out Consumer);
+end Overriding_Ops2;
diff --git a/gcc/testsuite/gnat.dg/overriding_ops2_pkg-high.ads b/gcc/testsuite/gnat.dg/overriding_ops2_pkg-high.ads
new file mode 100644
index 00000000000..46eb4629f4e
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/overriding_ops2_pkg-high.ads
@@ -0,0 +1,5 @@
+package Overriding_Ops2_Pkg.High is
+ type High_Level_Session is new Session_Type with private;
+private
+ type High_Level_Session is new Session_Type with null record;
+end Overriding_Ops2_Pkg.High;
diff --git a/gcc/testsuite/gnat.dg/overriding_ops2_pkg.ads b/gcc/testsuite/gnat.dg/overriding_ops2_pkg.ads
new file mode 100644
index 00000000000..85c8f0b6afb
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/overriding_ops2_pkg.ads
@@ -0,0 +1,9 @@
+with Ada.Finalization;
+
+package Overriding_Ops2_Pkg is
+ type Session_Type is abstract tagged limited private;
+ procedure Finalize (Session : in out Session_Type);
+private
+ type Session_Type is
+ abstract new Ada.Finalization.Limited_Controlled with null record;
+end Overriding_Ops2_Pkg;
diff --git a/gcc/testsuite/gnat.dg/unreferenced.adb b/gcc/testsuite/gnat.dg/unreferenced.adb
new file mode 100644
index 00000000000..5b047c26a61
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/unreferenced.adb
@@ -0,0 +1,11 @@
+-- { dg-do compile }
+-- { dg-options "-gnatd.F" }
+
+procedure Unreferenced is
+ X : aliased Integer;
+ Y : access Integer := X'Access;
+ Z : Integer renames Y.all;
+ pragma Unreferenced (Z);
+begin
+ null;
+end Unreferenced;
diff --git a/gcc/testsuite/gnat.dg/vect18.adb b/gcc/testsuite/gnat.dg/vect18.adb
index 91b1175248d..8739f9f1eb6 100644
--- a/gcc/testsuite/gnat.dg/vect18.adb
+++ b/gcc/testsuite/gnat.dg/vect18.adb
@@ -1,5 +1,5 @@
-- { dg-do compile { target i?86-*-* x86_64-*-* } }
--- { dg-options "-O3 -msse2 -fdump-tree-vect-details" }
+-- { dg-options "-O3 -msse2 -fdump-tree-vect-details -fno-predictive-commoning" }
package body Vect18 is
diff --git a/gcc/testsuite/lib/gcc-dg.exp b/gcc/testsuite/lib/gcc-dg.exp
index d8f9b7bd2bb..98366dc1fc3 100644
--- a/gcc/testsuite/lib/gcc-dg.exp
+++ b/gcc/testsuite/lib/gcc-dg.exp
@@ -1092,24 +1092,27 @@ proc process-message { msgproc msgprefix dgargs } {
set newentry [lindex ${dg-messages} end]
set expmsg [lindex $newentry 2]
+ set column ""
# Handle column numbers from the specified expression (if there is
# one) and set up the search expression that will be used by DejaGnu.
- if [regexp "^(\[0-9\]+):" $expmsg "" column] {
+ if [regexp {^-:} $expmsg] {
+ # The expected column is -, so shouldn't appear.
+ set expmsg [string range $expmsg 2 end]
+ } elseif [regexp {^[0-9]+:} $expmsg column] {
# The expression in the directive included a column number.
- # Remove "COLUMN:" from the original expression and move it
+ # Remove it from the original expression and move it
# to the proper place in the search expression.
- regsub "^\[0-9\]+:" $expmsg "" expmsg
- set expmsg "$column: $msgprefix\[^\n\]*$expmsg"
+ set expmsg [string range $expmsg [string length $column] end]
} elseif [string match "" [lindex $newentry 0]] {
# The specified line number is 0; don't expect a column number.
- set expmsg "$msgprefix\[^\n\]*$expmsg"
} else {
# There is no column number in the search expression, but we
# should expect one in the message itself.
- set expmsg "\[0-9\]+: $msgprefix\[^\n\]*$expmsg"
+ set column {[0-9]+:}
}
-
+ set expmsg "$column $msgprefix\[^\n\]*$expmsg"
set newentry [lreplace $newentry 2 2 $expmsg]
+
set dg-messages [lreplace ${dg-messages} end end $newentry]
verbose "process-message:\n${dg-messages}" 2
}
diff --git a/gcc/testsuite/lib/scanasm.exp b/gcc/testsuite/lib/scanasm.exp
index a66bb282531..33286152f30 100644
--- a/gcc/testsuite/lib/scanasm.exp
+++ b/gcc/testsuite/lib/scanasm.exp
@@ -78,7 +78,9 @@ proc dg-scan { name positive testcase output_file orig_args } {
proc scan-assembler { args } {
set testcase [testname-for-summary]
- set output_file "[file rootname [file tail $testcase]].s"
+ # The name might include a list of options; extract the file name.
+ set filename [lindex $testcase 0]
+ set output_file "[file rootname [file tail $filename]].s"
dg-scan "scan-assembler" 1 $testcase $output_file $args
}
@@ -89,7 +91,9 @@ force_conventional_output_for scan-assembler
proc scan-assembler-not { args } {
set testcase [testname-for-summary]
- set output_file "[file rootname [file tail $testcase]].s"
+ # The name might include a list of options; extract the file name.
+ set filename [lindex $testcase 0]
+ set output_file "[file rootname [file tail $filename]].s"
dg-scan "scan-assembler-not" 0 $testcase $output_file $args
}
@@ -117,7 +121,9 @@ proc hidden-scan-for { symbol } {
proc scan-hidden { args } {
set testcase [testname-for-summary]
- set output_file "[file rootname [file tail $testcase]].s"
+ # The name might include a list of options; extract the file name.
+ set filename [lindex $testcase 0]
+ set output_file "[file rootname [file tail $filename]].s"
set symbol [lindex $args 0]
@@ -133,7 +139,9 @@ proc scan-hidden { args } {
proc scan-not-hidden { args } {
set testcase [testname-for-summary]
- set output_file "[file rootname [file tail $testcase]].s"
+ # The name might include a list of options; extract the file name.
+ set filename [lindex $testcase 0]
+ set output_file "[file rootname [file tail $filename]].s"
set symbol [lindex $args 0]
set hidden_scan [hidden-scan-for $symbol]
@@ -163,7 +171,9 @@ proc scan-file-not { output_file args } {
proc scan-stack-usage { args } {
set testcase [testname-for-summary]
- set output_file "[file rootname [file tail $testcase]].su"
+ # The name might include a list of options; extract the file name.
+ set filename [lindex $testcase 0]
+ set output_file "[file rootname [file tail $filename]].su"
dg-scan "scan-file" 1 $testcase $output_file $args
}
@@ -173,7 +183,9 @@ proc scan-stack-usage { args } {
proc scan-stack-usage-not { args } {
set testcase [testname-for-summary]
- set output_file "[file rootname [file tail $testcase]].su"
+ # The name might include a list of options; extract the file name.
+ set filename [lindex $testcase 0]
+ set output_file "[file rootname [file tail $filename]].su"
dg-scan "scan-file-not" 0 $testcase $output_file $args
}
@@ -230,12 +242,14 @@ proc scan-assembler-times { args } {
}
set testcase [testname-for-summary]
+ # The name might include a list of options; extract the file name.
+ set filename [lindex $testcase 0]
set pattern [lindex $args 0]
set times [lindex $args 1]
set pp_pattern [make_pattern_printable $pattern]
# This must match the rule in gcc-dg.exp.
- set output_file "[file rootname [file tail $testcase]].s"
+ set output_file "[file rootname [file tail $filename]].s"
set files [glob -nocomplain $output_file]
if { $files == "" } {
@@ -292,9 +306,11 @@ proc scan-assembler-dem { args } {
}
set testcase [testname-for-summary]
+ # The name might include a list of options; extract the file name.
+ set filename [lindex $testcase 0]
set pattern [lindex $args 0]
set pp_pattern [make_pattern_printable $pattern]
- set output_file "[file rootname [file tail $testcase]].s"
+ set output_file "[file rootname [file tail $filename]].s"
set files [glob -nocomplain $output_file]
if { $files == "" } {
@@ -346,9 +362,11 @@ proc scan-assembler-dem-not { args } {
}
set testcase [testname-for-summary]
+ # The name might include a list of options; extract the file name.
+ set filename [lindex $testcase 0]
set pattern [lindex $args 0]
set pp_pattern [make_pattern_printable $pattern]
- set output_file "[file rootname [file tail $testcase]].s"
+ set output_file "[file rootname [file tail $filename]].s"
set files [glob -nocomplain $output_file]
if { $files == "" } {
@@ -401,6 +419,8 @@ proc object-size { args } {
}
set testcase [testname-for-summary]
+ # The name might include a list of options; extract the file name.
+ set filename [lindex $testcase 0]
set what [lindex $args 0]
set where [lsearch { text data bss total } $what]
if { $where == -1 } {
@@ -418,7 +438,7 @@ proc object-size { args } {
return
}
- set output_file "[file rootname [file tail $testcase]].o"
+ set output_file "[file rootname [file tail $filename]].o"
if ![file_on_host exists $output_file] {
verbose -log "$testcase: $output_file does not exist"
unresolved "$testcase object-size $what $cmp $with"
@@ -512,7 +532,9 @@ proc dg-function-on-line { args } {
proc scan-lto-assembler { args } {
set testcase [testname-for-summary]
- set output_file "[file rootname [file tail $testcase]].exe.ltrans0.s"
+ # The name might include a list of options; extract the file name.
+ set filename [lindex $testcase 0]
+ set output_file "[file rootname [file tail $filename]].exe.ltrans0.s"
verbose "output_file: $output_file"
dg-scan "scan-assembler" 1 $testcase $output_file $args
}
diff --git a/gcc/testsuite/lib/scandump.exp b/gcc/testsuite/lib/scandump.exp
index 4a64ac6e05d..a2425a23c1e 100644
--- a/gcc/testsuite/lib/scandump.exp
+++ b/gcc/testsuite/lib/scandump.exp
@@ -45,11 +45,13 @@ proc scan-dump { args } {
}
set testcase [testname-for-summary]
+ # The name might include a list of options; extract the file name.
+ set filename [lindex $testcase 0]
set printable_pattern [make_pattern_printable [lindex $args 1]]
set suf [dump-suffix [lindex $args 2]]
set testname "$testcase scan-[lindex $args 0]-dump $suf \"$printable_pattern\""
- set src [file tail [lindex $testcase 0]]
+ set src [file tail $filename]
set output_file "[glob -nocomplain $src.[lindex $args 2]]"
if { $output_file == "" } {
verbose -log "$testcase: dump file does not exist"
@@ -86,11 +88,13 @@ proc scan-dump-times { args } {
}
set testcase [testname-for-summary]
+ # The name might include a list of options; extract the file name.
+ set filename [lindex $testcase 0]
set times [lindex $args 2]
set suf [dump-suffix [lindex $args 3]]
set printable_pattern [make_pattern_printable [lindex $args 1]]
set testname "$testcase scan-[lindex $args 0]-dump-times $suf \"$printable_pattern\" [lindex $args 2]"
- set src [file tail [lindex $testcase 0]]
+ set src [file tail $filename]
set output_file "[glob -nocomplain $src.[lindex $args 3]]"
if { $output_file == "" } {
verbose -log "$testcase: dump file does not exist"
@@ -128,10 +132,12 @@ proc scan-dump-not { args } {
}
set testcase [testname-for-summary]
+ # The name might include a list of options; extract the file name.
+ set filename [lindex $testcase 0]
set printable_pattern [make_pattern_printable [lindex $args 1]]
set suf [dump-suffix [lindex $args 2]]
set testname "$testcase scan-[lindex $args 0]-dump-not $suf \"$printable_pattern\""
- set src [file tail [lindex $testcase 0]]
+ set src [file tail $filename]
set output_file "[glob -nocomplain $src.[lindex $args 2]]"
if { $output_file == "" } {
verbose -log "$testcase: dump file does not exist"
@@ -181,10 +187,12 @@ proc scan-dump-dem { args } {
}
set testcase [testname-for-summary]
+ # The name might include a list of options; extract the file name.
+ set filename [lindex $testcase 0]
set printable_pattern [make_pattern_printable [lindex $args 1]]
set suf [dump-suffix [lindex $args 2]]
set testname "$testcase scan-[lindex $args 0]-dump-dem $suf \"$printable_pattern\""
- set src [file tail [lindex $testcase 0]]
+ set src [file tail $filename]
set output_file "[glob -nocomplain $src.[lindex $args 2]]"
if { $output_file == "" } {
verbose -log "$testcase: dump file does not exist"
@@ -233,10 +241,12 @@ proc scan-dump-dem-not { args } {
}
set testcase [testname-for-summary]
+ # The name might include a list of options; extract the file name.
+ set filename [lindex $testcase 0]
set printable_pattern [make_pattern_printable [lindex $args 1]
set suf [dump-suffix [lindex $args 2]]
set testname "$testcase scan-[lindex $args 0]-dump-dem-not $suf \"$printable_pattern\""
- set src [file tail [lindex $testcase 0]]
+ set src [file tail $filename]
set output_file "[glob -nocomplain $src.[lindex $args 2]]"
if { $output_file == "" } {
verbose -log "$testcase: dump file does not exist"
diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
index b2096723426..fc656b96b6a 100644
--- a/gcc/testsuite/lib/target-supports.exp
+++ b/gcc/testsuite/lib/target-supports.exp
@@ -1175,8 +1175,8 @@ proc check_effective_target_pie { } {
return 1;
}
if { [istarget *-*-solaris2.1\[1-9\]*] } {
- # Full PIE support was added in Solaris 11.x and Solaris 12, but gcc
- # errors out if missing, so check for that.
+ # Full PIE support was added in Solaris 11.3, but gcc errors out
+ # if missing, so check for that.
return [check_no_compiler_messages pie executable {
int main (void) { return 0; }
} "-pie -fpie"]
@@ -3450,6 +3450,19 @@ proc check_effective_target_arm_vect_no_misalign { } {
}
+# Return 1 if this is an ARM target supporting -mfloat-abi=soft. Some
+# multilibs may be incompatible with this option.
+
+proc check_effective_target_arm_soft_ok { } {
+ if { [check_effective_target_arm32] } {
+ return [check_no_compiler_messages arm_soft_ok executable {
+ int main() { return 0;}
+ } "-mfloat-abi=soft"]
+ } else {
+ return 0
+ }
+}
+
# Return 1 if this is an ARM target supporting -mfpu=vfp
# -mfloat-abi=softfp. Some multilibs may be incompatible with these
# options.
@@ -5700,14 +5713,6 @@ proc check_effective_target_vect_perm3_int { } {
&& [vect_perm_supported 3 32] }]
}
-# Return 1 if the target supports SLP permutation of 5 vectors when each
-# element has 32 bits.
-
-proc check_effective_target_vect_perm5_int { } {
- return [expr { [check_effective_target_vect_perm]
- && [vect_perm_supported 5 32] }]
-}
-
# Return 1 if the target plus current options supports vector permutation
# on byte-sized elements, 0 otherwise.
#
@@ -6542,6 +6547,12 @@ proc check_effective_target_vect_masked_store { } {
return [check_effective_target_aarch64_sve]
}
+# Return 1 if the target supports vector scatter stores.
+
+proc check_effective_target_vect_scatter_store { } {
+ return [check_effective_target_aarch64_sve]
+}
+
# Return 1 if the target supports vector conditional operations, 0 otherwise.
proc check_effective_target_vect_condition { } {
@@ -6844,12 +6855,6 @@ proc check_effective_target_vect64 { } {
return [expr { [lsearch -exact [available_vector_sizes] 64] >= 0 }]
}
-# Return 1 if the target supports vectors of 256 bits.
-
-proc check_effective_target_vect256 { } {
- return [expr { [lsearch -exact [available_vector_sizes] 256] >= 0 }]
-}
-
# Return 1 if the target supports vector copysignf calls.
proc check_effective_target_vect_call_copysignf { } {
@@ -7176,42 +7181,21 @@ proc check_effective_target_vect_call_roundf { } {
return $et_vect_call_roundf_saved($et_index)
}
-# Return 1 if the target supports vector gather operations.
-
-proc check_effective_target_vect_gather { } {
- return [check_effective_target_aarch64_sve]
-}
-
-# Return 1 if the target supports vector scatter operations.
-
-proc check_effective_target_vect_scatter { } {
- return [check_effective_target_aarch64_sve]
-}
-
-# Return 1 if the target supports both vector gather and vector scatter
-# operations.
-
-proc check_effective_target_vect_gather_scatter { } {
- return [expr { [check_effective_target_vect_gather]
- && [check_effective_target_vect_scatter] }]
-}
-
-# Return 1 if the target supports a non-reassociating form of floating-point
-# addition reduction, i.e. one that is suitable for -fno-associative-math.
+# Return 1 if the target supports AND, OR and XOR reduction.
-proc check_effective_target_vect_ieee_add_reduc { } {
+proc check_effective_target_vect_logical_reduc { } {
return [check_effective_target_aarch64_sve]
}
-# Return 1 if the target supports AND, OR and XOR reduction.
+# Return 1 if the target supports the fold_extract_last optab.
-proc check_effective_target_vect_logical_reduc { } {
+proc check_effective_target_vect_fold_extract_last { } {
return [check_effective_target_aarch64_sve]
}
-# Return 1 if the target supports last-selected-element reduction.
+# Return 1 if the target supports the fold_left_plus optab.
-proc check_effective_target_vect_last_reduc { } {
+proc check_effective_target_vect_fold_left_plus { } {
return [check_effective_target_aarch64_sve]
}
@@ -9204,10 +9188,6 @@ proc check_effective_target_supports_stack_clash_protection { } {
# Return 1 if the target creates a frame pointer for non-leaf functions
# Note we ignore cases where we apply tail call optimization here.
proc check_effective_target_frame_pointer_for_non_leaf { } {
- if { [istarget aarch*-*-*] } {
- return 1
- }
-
# Solaris/x86 defaults to -fno-omit-frame-pointer.
if { [istarget i?86-*-solaris*] || [istarget x86_64-*-solaris*] } {
return 1
diff --git a/gcc/testsuite/obj-c++.dg/comp-types-8.mm b/gcc/testsuite/obj-c++.dg/comp-types-8.mm
index 490f4ff1938..6db76bb20f3 100644
--- a/gcc/testsuite/obj-c++.dg/comp-types-8.mm
+++ b/gcc/testsuite/obj-c++.dg/comp-types-8.mm
@@ -1,4 +1,5 @@
/* { dg-do compile } */
+/* { dg-additional-options "-Wno-return-type" } */
/* We used to ICE because we removed the cast to List_linked*
in -[ListIndex_linked next]. */
diff --git a/gcc/testsuite/obj-c++.dg/demangle-3.mm b/gcc/testsuite/obj-c++.dg/demangle-3.mm
index 01e6c618e5e..afb83d75bfe 100644
--- a/gcc/testsuite/obj-c++.dg/demangle-3.mm
+++ b/gcc/testsuite/obj-c++.dg/demangle-3.mm
@@ -1,5 +1,6 @@
/* Test demangling an Objective-C method in error messages. */
/* { dg-do compile } */
+/* { dg-additional-options "-Wno-return-type" } */
#include <objc/objc.h>
diff --git a/gcc/testsuite/obj-c++.dg/super-class-1.mm b/gcc/testsuite/obj-c++.dg/super-class-1.mm
index f8cccb3d4c8..ad14a58c29a 100644
--- a/gcc/testsuite/obj-c++.dg/super-class-1.mm
+++ b/gcc/testsuite/obj-c++.dg/super-class-1.mm
@@ -1,6 +1,7 @@
/* Test calling super from within a category method. */
/* { dg-do compile } */
+/* { dg-additional-options "-Wno-return-type" } */
#include <objc/objc.h>
diff --git a/gcc/toplev.c b/gcc/toplev.c
index e5292d4b314..2598ced1128 100644
--- a/gcc/toplev.c
+++ b/gcc/toplev.c
@@ -83,6 +83,7 @@ along with GCC; see the file COPYING3. If not see
#include "edit-context.h"
#include "tree-pass.h"
#include "dumpfile.h"
+#include "ipa-fnsummary.h"
#if defined(DBX_DEBUGGING_INFO) || defined(XCOFF_DEBUGGING_INFO)
#include "dbxout.h"
@@ -524,10 +525,9 @@ compile_file (void)
/* Do dbx symbols. */
timevar_push (TV_SYMOUT);
- #if defined DWARF2_DEBUGGING_INFO || defined DWARF2_UNWIND_INFO
- if (dwarf2out_do_frame ())
- dwarf2out_frame_finish ();
- #endif
+#if defined DWARF2_DEBUGGING_INFO || defined DWARF2_UNWIND_INFO
+ dwarf2out_frame_finish ();
+#endif
(*debug_hooks->finish) (main_input_filename);
timevar_pop (TV_SYMOUT);
@@ -2249,6 +2249,7 @@ toplev::finalize (void)
/* Needs to be called before cgraph_c_finalize since it uses symtab. */
ipa_reference_c_finalize ();
+ ipa_fnsummary_c_finalize ();
cgraph_c_finalize ();
cgraphunit_c_finalize ();
diff --git a/gcc/tracer.c b/gcc/tracer.c
index 58caf13b0de..0c7a9536735 100644
--- a/gcc/tracer.c
+++ b/gcc/tracer.c
@@ -135,8 +135,6 @@ better_p (const_edge e1, const_edge e2)
if (e1->count ().initialized_p () && e2->count ().initialized_p ()
&& ((e1->count () > e2->count ()) || (e1->count () < e2->count ())))
return e1->count () > e2->count ();
- if (EDGE_FREQUENCY (e1) != EDGE_FREQUENCY (e2))
- return EDGE_FREQUENCY (e1) > EDGE_FREQUENCY (e2);
/* This is needed to avoid changes in the decision after
CFG is modified. */
if (e1->src != e2->src)
diff --git a/gcc/trans-mem.c b/gcc/trans-mem.c
index ef5655aa61a..63cac31829e 100644
--- a/gcc/trans-mem.c
+++ b/gcc/trans-mem.c
@@ -5065,9 +5065,7 @@ ipa_tm_insert_irr_call (struct cgraph_node *node, struct tm_region *region,
node->create_edge (cgraph_node::get_create
(builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE)),
- g, gimple_bb (g)->count,
- compute_call_stmt_bb_frequency (node->decl,
- gimple_bb (g)));
+ g, gimple_bb (g)->count);
}
/* Construct a call to TM_GETTMCLONE and insert it before GSI. */
@@ -5116,9 +5114,7 @@ ipa_tm_insert_gettmclone_call (struct cgraph_node *node,
gsi_insert_before (gsi, g, GSI_SAME_STMT);
- node->create_edge (cgraph_node::get_create (gettm_fn), g, gimple_bb (g)->count,
- compute_call_stmt_bb_frequency (node->decl,
- gimple_bb (g)));
+ node->create_edge (cgraph_node::get_create (gettm_fn), g, gimple_bb (g)->count);
/* Cast return value from tm_gettmclone* into appropriate function
pointer. */
diff --git a/gcc/tree-cfg.c b/gcc/tree-cfg.c
index 105e5a1dde7..d75ea80c956 100644
--- a/gcc/tree-cfg.c
+++ b/gcc/tree-cfg.c
@@ -4118,7 +4118,7 @@ verify_gimple_assign_binary (gassign *stmt)
/* Continue with generic binary expression handling. */
break;
- case STRICT_REDUC_PLUS_EXPR:
+ case FOLD_LEFT_PLUS_EXPR:
if (!VECTOR_TYPE_P (rhs2_type)
|| !useless_type_conversion_p (lhs_type, TREE_TYPE (rhs2_type))
|| !useless_type_conversion_p (lhs_type, rhs1_type))
@@ -9105,7 +9105,7 @@ pass_warn_function_return::execute (function *fun)
/* If we see "return;" in some basic block, then we do reach the end
without returning a value. */
- else if (warn_return_type
+ else if (warn_return_type > 0
&& !TREE_NO_WARNING (fun->decl)
&& EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (fun)->preds) > 0
&& !VOID_TYPE_P (TREE_TYPE (TREE_TYPE (fun->decl))))
@@ -9259,7 +9259,7 @@ execute_fixup_cfg (void)
gimple_stmt_iterator gsi;
int todo = 0;
cgraph_node *node = cgraph_node::get (current_function_decl);
- profile_count num = node->count;
+ profile_count num = node->count.ipa ();
profile_count den = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;
bool scale = num.initialized_p () && den.ipa_p ()
&& (den.nonzero_p () || num == profile_count::zero ())
@@ -9275,7 +9275,15 @@ execute_fixup_cfg (void)
FOR_EACH_BB_FN (bb, cfun)
{
if (scale)
- bb->count = bb->count.apply_scale (num, den);
+ {
+ if (num == profile_count::zero ())
+ {
+ if (!(bb->count == profile_count::zero ()))
+ bb->count = bb->count.global0 ();
+ }
+ else
+ bb->count = bb->count.apply_scale (num, den);
+ }
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);)
{
gimple *stmt = gsi_stmt (gsi);
@@ -9365,11 +9373,8 @@ execute_fixup_cfg (void)
if (!cfun->after_inlining)
{
gcall *call_stmt = dyn_cast <gcall *> (stmt);
- int freq
- = compute_call_stmt_bb_frequency (current_function_decl,
- bb);
node->create_edge (cgraph_node::get_create (fndecl),
- call_stmt, bb->count, freq);
+ call_stmt, bb->count);
}
}
}
diff --git a/gcc/tree-cfgcleanup.c b/gcc/tree-cfgcleanup.c
index 9b7f08c586c..526793723dc 100644
--- a/gcc/tree-cfgcleanup.c
+++ b/gcc/tree-cfgcleanup.c
@@ -122,8 +122,7 @@ convert_single_case_switch (gswitch *swtch, gimple_stmt_iterator &gsi)
at block BB. */
static bool
-cleanup_control_expr_graph (basic_block bb, gimple_stmt_iterator gsi,
- bool first_p)
+cleanup_control_expr_graph (basic_block bb, gimple_stmt_iterator gsi)
{
edge taken_edge;
bool retval = false;
@@ -146,25 +145,14 @@ cleanup_control_expr_graph (basic_block bb, gimple_stmt_iterator gsi,
switch (gimple_code (stmt))
{
case GIMPLE_COND:
- /* During a first iteration on the CFG only remove trivially
- dead edges but mark other conditions for re-evaluation. */
- if (first_p)
- {
- val = const_binop (gimple_cond_code (stmt), boolean_type_node,
- gimple_cond_lhs (stmt),
- gimple_cond_rhs (stmt));
- if (! val)
- bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
- }
- else
- {
- code_helper rcode;
- tree ops[3] = {};
- if (gimple_simplify (stmt, &rcode, ops, NULL, no_follow_ssa_edges,
- no_follow_ssa_edges)
- && rcode == INTEGER_CST)
- val = ops[0];
- }
+ {
+ code_helper rcode;
+ tree ops[3] = {};
+ if (gimple_simplify (stmt, &rcode, ops, NULL, no_follow_ssa_edges,
+ no_follow_ssa_edges)
+ && rcode == INTEGER_CST)
+ val = ops[0];
+ }
break;
case GIMPLE_SWITCH:
@@ -235,7 +223,7 @@ cleanup_call_ctrl_altering_flag (gimple *bb_end)
true if anything changes. */
static bool
-cleanup_control_flow_bb (basic_block bb, bool first_p)
+cleanup_control_flow_bb (basic_block bb)
{
gimple_stmt_iterator gsi;
bool retval = false;
@@ -258,7 +246,7 @@ cleanup_control_flow_bb (basic_block bb, bool first_p)
|| gimple_code (stmt) == GIMPLE_SWITCH)
{
gcc_checking_assert (gsi_stmt (gsi_last_bb (bb)) == stmt);
- retval |= cleanup_control_expr_graph (bb, gsi, first_p);
+ retval |= cleanup_control_expr_graph (bb, gsi);
}
else if (gimple_code (stmt) == GIMPLE_GOTO
&& TREE_CODE (gimple_goto_dest (stmt)) == ADDR_EXPR
@@ -732,6 +720,45 @@ cleanup_tree_cfg_bb (basic_block bb)
return false;
}
+/* Do cleanup_control_flow_bb in PRE order. */
+
+static bool
+cleanup_control_flow_pre ()
+{
+ bool retval = false;
+
+ auto_vec<edge_iterator, 20> stack (n_basic_blocks_for_fn (cfun) + 1);
+ auto_sbitmap visited (last_basic_block_for_fn (cfun));
+ bitmap_clear (visited);
+
+ stack.quick_push (ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs));
+
+ while (! stack.is_empty ())
+ {
+ /* Look at the edge on the top of the stack. */
+ edge_iterator ei = stack.last ();
+ basic_block dest = ei_edge (ei)->dest;
+
+ if (dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
+ && ! bitmap_bit_p (visited, dest->index))
+ {
+ bitmap_set_bit (visited, dest->index);
+ retval |= cleanup_control_flow_bb (dest);
+ if (EDGE_COUNT (dest->succs) > 0)
+ stack.quick_push (ei_start (dest->succs));
+ }
+ else
+ {
+ if (!ei_one_before_end_p (ei))
+ ei_next (&stack.last ());
+ else
+ stack.pop ();
+ }
+ }
+
+ return retval;
+}
+
/* Iterate the cfg cleanups, while anything changes. */
static bool
@@ -752,17 +779,11 @@ cleanup_tree_cfg_1 (void)
/* We cannot use FOR_EACH_BB_FN for the BB iterations below
since the basic blocks may get removed. */
- /* Start by iterating over all basic blocks looking for edge removal
- opportunities. Do this first because incoming SSA form may be
- invalid and we want to avoid performing SSA related tasks such
+ /* Start by iterating over all basic blocks in PRE order looking for
+ edge removal opportunities. Do this first because incoming SSA form
+ may be invalid and we want to avoid performing SSA related tasks such
as propgating out a PHI node during BB merging in that state. */
- n = last_basic_block_for_fn (cfun);
- for (i = NUM_FIXED_BLOCKS; i < n; i++)
- {
- bb = BASIC_BLOCK_FOR_FN (cfun, i);
- if (bb)
- retval |= cleanup_control_flow_bb (bb, true);
- }
+ retval |= cleanup_control_flow_pre ();
/* After doing the above SSA form should be valid (or an update SSA
should be required). */
@@ -789,7 +810,7 @@ cleanup_tree_cfg_1 (void)
if (!bb)
continue;
- retval |= cleanup_control_flow_bb (bb, false);
+ retval |= cleanup_control_flow_bb (bb);
retval |= cleanup_tree_cfg_bb (bb);
}
diff --git a/gcc/tree-chkp.c b/gcc/tree-chkp.c
index f73db4000ce..b7c3fd50786 100644
--- a/gcc/tree-chkp.c
+++ b/gcc/tree-chkp.c
@@ -4232,18 +4232,12 @@ chkp_copy_bounds_for_assign (gimple *assign, struct cgraph_edge *edge)
{
tree fndecl = gimple_call_fndecl (stmt);
struct cgraph_node *callee = cgraph_node::get_create (fndecl);
- struct cgraph_edge *new_edge;
gcc_assert (chkp_gimple_call_builtin_p (stmt, BUILT_IN_CHKP_BNDSTX)
|| chkp_gimple_call_builtin_p (stmt, BUILT_IN_CHKP_BNDLDX)
|| chkp_gimple_call_builtin_p (stmt, BUILT_IN_CHKP_BNDRET));
- new_edge = edge->caller->create_edge (callee,
- as_a <gcall *> (stmt),
- edge->count,
- edge->frequency);
- new_edge->frequency = compute_call_stmt_bb_frequency
- (edge->caller->decl, gimple_bb (stmt));
+ edge->caller->create_edge (callee, as_a <gcall *> (stmt), edge->count);
}
gsi_prev (&iter);
}
diff --git a/gcc/tree-data-ref.c b/gcc/tree-data-ref.c
index 54705940a24..1cf25bac569 100644
--- a/gcc/tree-data-ref.c
+++ b/gcc/tree-data-ref.c
@@ -5335,9 +5335,9 @@ free_data_refs (vec<data_reference_p> datarefs)
}
/* Common routine implementing both dr_direction_indicator and
- dr_zero_step_indicator. Return USEFUL_MIN if the indicator
- is known to be >= USEFUL_MIN and -1 if the indicator is
- known to be negative. */
+ dr_zero_step_indicator. Return USEFUL_MIN if the indicator is known
+ to be >= USEFUL_MIN and -1 if the indicator is known to be negative.
+ Return the step as the indicator otherwise. */
static tree
dr_step_indicator (struct data_reference *dr, int useful_min)
diff --git a/gcc/tree-data-ref.h b/gcc/tree-data-ref.h
index be5a3b32e63..ceeca4c08c6 100644
--- a/gcc/tree-data-ref.h
+++ b/gcc/tree-data-ref.h
@@ -475,9 +475,8 @@ dr_alignment (data_reference *dr)
extern bool dr_may_alias_p (const struct data_reference *,
const struct data_reference *, bool);
-extern tree dr_direction_indicator (struct data_reference *);
-extern tree dr_zero_step_indicator (struct data_reference *);
-extern bool dr_known_forward_stride_p (struct data_reference *);
+extern bool dr_equal_offsets_p (struct data_reference *,
+ struct data_reference *);
extern bool runtime_alias_check_p (ddr_p, struct loop *, bool);
extern int data_ref_compare_tree (tree, tree);
diff --git a/gcc/tree-emutls.c b/gcc/tree-emutls.c
index 951e7d3f513..9136a0b2856 100644
--- a/gcc/tree-emutls.c
+++ b/gcc/tree-emutls.c
@@ -417,7 +417,7 @@ gen_emutls_addr (tree decl, struct lower_emutls_data *d)
gimple_seq_add_stmt (&d->seq, x);
- d->cfun_node->create_edge (d->builtin_node, x, d->bb->count, d->bb_freq);
+ d->cfun_node->create_edge (d->builtin_node, x, d->bb->count);
/* We may be adding a new reference to a new variable to the function.
This means we have to play with the ipa-reference web. */
diff --git a/gcc/tree-inline.c b/gcc/tree-inline.c
index 9eac215e4dc..1cc6a87343d 100644
--- a/gcc/tree-inline.c
+++ b/gcc/tree-inline.c
@@ -59,6 +59,7 @@ along with GCC; see the file COPYING3. If not see
#include "tree-chkp.h"
#include "stringpool.h"
#include "attribs.h"
+#include "sreal.h"
/* I'm not real happy about this, but we need to handle gimple and
non-gimple trees. */
@@ -1770,8 +1771,8 @@ copy_bb (copy_body_data *id, basic_block bb,
basic_block copy_basic_block;
tree decl;
basic_block prev;
- bool scale = !num.initialized_p ()
- || (den.nonzero_p () || num == profile_count::zero ());
+
+ profile_count::adjust_for_ipa_scaling (&num, &den);
/* Search for previous copied basic block. */
prev = bb->prev_bb;
@@ -1781,10 +1782,7 @@ copy_bb (copy_body_data *id, basic_block bb,
/* create_basic_block() will append every new block to
basic_block_info automatically. */
copy_basic_block = create_basic_block (NULL, (basic_block) prev->aux);
- if (scale)
- copy_basic_block->count = bb->count.apply_scale (num, den);
- else if (num.initialized_p ())
- copy_basic_block->count = bb->count;
+ copy_basic_block->count = bb->count.apply_scale (num, den);
copy_gsi = gsi_start_bb (copy_basic_block);
@@ -2004,23 +2002,16 @@ copy_bb (copy_body_data *id, basic_block bb,
edge = id->src_node->get_edge (orig_stmt);
if (edge)
{
- int edge_freq = edge->frequency;
- int new_freq;
struct cgraph_edge *old_edge = edge;
+ profile_count old_cnt = edge->count;
edge = edge->clone (id->dst_node, call_stmt,
gimple_uid (stmt),
- profile_count::one (),
- profile_count::one (),
- CGRAPH_FREQ_BASE,
+ num, den,
true);
- /* We could also just rescale the frequency, but
- doing so would introduce roundoff errors and make
- verifier unhappy. */
- new_freq = compute_call_stmt_bb_frequency (id->dst_node->decl,
- copy_basic_block);
-
- /* Speculative calls consist of two edges - direct and indirect.
- Duplicate the whole thing and distribute frequencies accordingly. */
+
+ /* Speculative calls consist of two edges - direct and
+ indirect. Duplicate the whole thing and distribute
+ frequencies accordingly. */
if (edge->speculative)
{
struct cgraph_edge *direct, *indirect;
@@ -2028,42 +2019,22 @@ copy_bb (copy_body_data *id, basic_block bb,
gcc_assert (!edge->indirect_unknown_callee);
old_edge->speculative_call_info (direct, indirect, ref);
+
+ profile_count indir_cnt = indirect->count;
indirect = indirect->clone (id->dst_node, call_stmt,
gimple_uid (stmt),
- profile_count::one (),
- profile_count::one (),
- CGRAPH_FREQ_BASE,
+ num, den,
true);
- if (old_edge->frequency + indirect->frequency)
- {
- edge->frequency = MIN (RDIV ((gcov_type)new_freq * old_edge->frequency,
- (old_edge->frequency + indirect->frequency)),
- CGRAPH_FREQ_MAX);
- indirect->frequency = MIN (RDIV ((gcov_type)new_freq * indirect->frequency,
- (old_edge->frequency + indirect->frequency)),
- CGRAPH_FREQ_MAX);
- }
+
+ profile_probability prob
+ = indir_cnt.probability_in (old_cnt + indir_cnt);
+ indirect->count
+ = copy_basic_block->count.apply_probability (prob);
+ edge->count = copy_basic_block->count - indirect->count;
id->dst_node->clone_reference (ref, stmt);
}
else
- {
- edge->frequency = new_freq;
- if (dump_file
- && profile_status_for_fn (cfun) != PROFILE_ABSENT
- && (edge_freq > edge->frequency + 10
- || edge_freq < edge->frequency - 10))
- {
- fprintf (dump_file, "Edge frequency estimated by "
- "cgraph %i diverge from inliner's estimate %i\n",
- edge_freq,
- edge->frequency);
- fprintf (dump_file,
- "Orig bb: %i, orig bb freq %i, new bb freq %i\n",
- bb->index,
- bb->count.to_frequency (cfun),
- copy_basic_block->count.to_frequency (cfun));
- }
- }
+ edge->count = copy_basic_block->count;
}
break;
@@ -2106,15 +2077,10 @@ copy_bb (copy_body_data *id, basic_block bb,
if (id->transform_call_graph_edges == CB_CGE_MOVE_CLONES)
id->dst_node->create_edge_including_clones
(dest, orig_stmt, call_stmt, bb->count,
- compute_call_stmt_bb_frequency (id->dst_node->decl,
- copy_basic_block),
CIF_ORIGINALLY_INDIRECT_CALL);
else
id->dst_node->create_edge (dest, call_stmt,
- bb->count,
- compute_call_stmt_bb_frequency
- (id->dst_node->decl,
- copy_basic_block))->inline_failed
+ bb->count)->inline_failed
= CIF_ORIGINALLY_INDIRECT_CALL;
if (dump_file)
{
@@ -2691,6 +2657,8 @@ copy_cfg_body (copy_body_data * id, profile_count,
profile_count den = ENTRY_BLOCK_PTR_FOR_FN (src_cfun)->count;
profile_count num = entry_block_map->count;
+ profile_count::adjust_for_ipa_scaling (&num, &den);
+
cfun_to_copy = id->src_cfun = DECL_STRUCT_FUNCTION (callee_fndecl);
/* Register specific tree functions. */
@@ -3912,7 +3880,7 @@ estimate_operator_cost (enum tree_code code, eni_weights *weights,
case REDUC_AND_EXPR:
case REDUC_IOR_EXPR:
case REDUC_XOR_EXPR:
- case STRICT_REDUC_PLUS_EXPR:
+ case FOLD_LEFT_PLUS_EXPR:
case WIDEN_SUM_EXPR:
case WIDEN_MULT_EXPR:
case DOT_PROD_EXPR:
@@ -4482,7 +4450,6 @@ expand_call_inline (basic_block bb, gimple *stmt, copy_body_data *id)
{
cgraph_edge *edge;
tree virtual_offset = NULL;
- int freq = cg_edge->frequency;
profile_count count = cg_edge->count;
tree op;
gimple_stmt_iterator iter = gsi_for_stmt (stmt);
@@ -4492,9 +4459,7 @@ expand_call_inline (basic_block bb, gimple *stmt, copy_body_data *id)
gimple_uid (stmt),
profile_count::one (),
profile_count::one (),
- CGRAPH_FREQ_BASE,
true);
- edge->frequency = freq;
edge->count = count;
if (id->src_node->thunk.virtual_offset_p)
virtual_offset = size_int (id->src_node->thunk.virtual_value);
@@ -4712,11 +4677,12 @@ expand_call_inline (basic_block bb, gimple *stmt, copy_body_data *id)
if (dump_file && (dump_flags & TDF_DETAILS))
{
- fprintf (dump_file, "Inlining ");
- print_generic_expr (dump_file, id->src_fn);
- fprintf (dump_file, " to ");
- print_generic_expr (dump_file, id->dst_fn);
- fprintf (dump_file, " with frequency %i\n", cg_edge->frequency);
+ fprintf (dump_file, "Inlining %s to %s with frequency %4.2f\n",
+ xstrdup_for_dump (id->src_node->dump_name ()),
+ xstrdup_for_dump (id->dst_node->dump_name ()),
+ cg_edge->sreal_frequency ().to_double ());
+ id->src_node->dump (dump_file);
+ id->dst_node->dump (dump_file);
}
/* This is it. Duplicate the callee body. Assume callee is
@@ -5099,7 +5065,7 @@ optimize_inline_calls (tree fn)
}
/* Fold queued statements. */
- counts_to_freqs ();
+ update_max_bb_count ();
fold_marked_statements (last, id.statements_to_fold);
delete id.statements_to_fold;
@@ -6076,7 +6042,7 @@ tree_function_versioning (tree old_decl, tree new_decl,
free_dominance_info (CDI_DOMINATORS);
free_dominance_info (CDI_POST_DOMINATORS);
- counts_to_freqs ();
+ update_max_bb_count ();
fold_marked_statements (0, id.statements_to_fold);
delete id.statements_to_fold;
delete_unreachable_blocks_update_callgraph (&id);
@@ -6096,20 +6062,16 @@ tree_function_versioning (tree old_decl, tree new_decl,
struct cgraph_edge *e;
rebuild_frequencies ();
- new_version_node->count = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count.ipa ();
+ new_version_node->count = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;
for (e = new_version_node->callees; e; e = e->next_callee)
{
basic_block bb = gimple_bb (e->call_stmt);
- e->frequency = compute_call_stmt_bb_frequency (current_function_decl,
- bb);
- e->count = bb->count.ipa ();
+ e->count = bb->count;
}
for (e = new_version_node->indirect_calls; e; e = e->next_callee)
{
basic_block bb = gimple_bb (e->call_stmt);
- e->frequency = compute_call_stmt_bb_frequency (current_function_decl,
- bb);
- e->count = bb->count.ipa ();
+ e->count = bb->count;
}
}
diff --git a/gcc/tree-parloops.c b/gcc/tree-parloops.c
index 53b0a7e4440..df9b280a73e 100644
--- a/gcc/tree-parloops.c
+++ b/gcc/tree-parloops.c
@@ -2541,7 +2541,7 @@ valid_reduction_p (gimple *stmt)
allowed for in-order reductions. */
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
vect_reduction_type reduc_type = STMT_VINFO_REDUC_TYPE (stmt_info);
- return reduc_type != STRICT_FP_REDUCTION;
+ return reduc_type != FOLD_LEFT_REDUCTION;
}
/* Detect all reductions in the LOOP, insert them into REDUCTION_LIST. */
diff --git a/gcc/tree-pass.h b/gcc/tree-pass.h
index fbd0dbdf924..9b97c02e06d 100644
--- a/gcc/tree-pass.h
+++ b/gcc/tree-pass.h
@@ -370,6 +370,7 @@ extern gimple_opt_pass *make_pass_tree_loop_init (gcc::context *ctxt);
extern gimple_opt_pass *make_pass_lim (gcc::context *ctxt);
extern gimple_opt_pass *make_pass_tree_unswitch (gcc::context *ctxt);
extern gimple_opt_pass *make_pass_loop_split (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_early_predcom (gcc::context *ctxt);
extern gimple_opt_pass *make_pass_predcom (gcc::context *ctxt);
extern gimple_opt_pass *make_pass_iv_canon (gcc::context *ctxt);
extern gimple_opt_pass *make_pass_scev_cprop (gcc::context *ctxt);
diff --git a/gcc/tree-predcom.c b/gcc/tree-predcom.c
index 2cf47930dbe..73ebd951dba 100644
--- a/gcc/tree-predcom.c
+++ b/gcc/tree-predcom.c
@@ -234,7 +234,7 @@ along with GCC; see the file COPYING3. If not see
/* The maximum number of iterations between the considered memory
references. */
-#define MAX_DISTANCE (target_avail_regs < 16 ? 4 : 8)
+#define MAX_DISTANCE (target_avail_regs < 16 ? 3 : 7)
/* Data references (or phi nodes that carry data reference values across
loop iterations). */
@@ -379,6 +379,71 @@ static bitmap looparound_phis;
static hash_map<tree, name_expansion *> *name_expansions;
+/* True if we're running the early predcom pass and should only handle
+ cases that aid vectorization. Specifically this means that:
+
+ - only CT_INVARIANT and CT_STORE_LOAD chains are used
+ - the maximum distance for a CT_STORE_LOAD chain is 1 iteration,
+ and at that distance the store must come after the load
+ - there's no unrolling or detection of looparound phis.
+
+ The idea is handle inductions that go via memory, such as:
+
+ for (int i = 1; i < n; ++i)
+ x[i] = x[i - 1] + 1;
+
+ As it stands this loop could never be vectorized, because a loop body
+ that contains a read of x[j] followed by a write to x[j + 1] would
+ have its vectorization factor limited to 1. Transforming it to:
+
+ int tmp = x[0];
+ for (int i = 0; i < n; ++i)
+ {
+ tmp += 1;
+ x[i] = tmp:
+ }
+
+ exposes the fact that the stored value is a simple vectorizable
+ induction with start value x[0] and step 1.
+
+ [ Commoning is not always useful even in this situation. For example,
+ carrying over the value of x[i] won't help us to vectorize:
+
+ for (int i = 1; i < n; ++i)
+ {
+ y[i] = x[i - 1];
+ x[i] += i;
+ }
+
+ There's no real need to restrict things further though, because we're
+ unable to vectorize these load/store combinations in their current
+ form whatever happens. ]
+
+ We require the store to come after the load when the distance is 1
+ to avoid cases like:
+
+ for (int i = 1; i < n; ++i)
+ {
+ x[i] = ...;
+ ... = x[i - 1];
+ }
+
+ These accesses effectively have a distance somewhere between 1 and 2,
+ since after commoning the value stored in the previous iteration would
+ still be live at the next store. This means that the combination
+ isn't useful for exposing simple inductions.
+
+ Also, unlike the motivating case above, this combination does not
+ prevent vectorization. If a write to x[j + 1] comes before a read
+ of x[j], the vectorized write completes for all vector elements
+ before the read starts for any vector elements. */
+
+static bool only_simple_p;
+
+/* The maximum loop carry distance for this execution of the pass. */
+
+static int max_distance;
+
/* Dumps data reference REF to FILE. */
extern void dump_dref (FILE *, dref);
@@ -1024,6 +1089,17 @@ order_drefs (const void *a, const void *b)
return (*da)->pos - (*db)->pos;
}
+/* Compares two drefs A and B by their position. Callback for qsort. */
+
+static int
+order_drefs_by_pos (const void *a, const void *b)
+{
+ const dref *const da = (const dref *) a;
+ const dref *const db = (const dref *) b;
+
+ return (*da)->pos - (*db)->pos;
+}
+
/* Returns root of the CHAIN. */
static inline dref
@@ -1056,7 +1132,13 @@ add_ref_to_chain (chain_p chain, dref ref)
gcc_assert (wi::les_p (root->offset, ref->offset));
widest_int dist = ref->offset - root->offset;
- if (wi::leu_p (MAX_DISTANCE, dist))
+ /* When running before vectorization, only allow the maximum distance
+ if the consumer comes before the producer. See the comment above
+ ONLY_SIMPLE_P for details. */
+ if (wi::ltu_p (max_distance, dist)
+ || (only_simple_p
+ && wi::eq_p (max_distance, dist)
+ && root->pos < ref->pos))
{
free (ref);
return;
@@ -1296,7 +1378,9 @@ add_looparound_copies (struct loop *loop, chain_p chain)
dref ref, root = get_chain_root (chain);
gphi *phi;
- if (chain->type == CT_STORE_STORE)
+ /* There's no point doing this when running before vectorization,
+ since we won't unroll the loop or combine chains. */
+ if (only_simple_p || chain->type == CT_STORE_STORE)
return;
FOR_EACH_VEC_ELT (chain->refs, i, ref)
@@ -1335,14 +1419,21 @@ determine_roots_comp (struct loop *loop,
/* Trivial component. */
if (comp->refs.length () <= 1)
- return;
+ {
+ if (comp->refs.length () == 1)
+ {
+ free (comp->refs[0]);
+ comp->refs.truncate (0);
+ }
+ return;
+ }
comp->refs.qsort (order_drefs);
FOR_EACH_VEC_ELT (comp->refs, i, a)
{
if (!chain
|| (!comp->eliminate_store_p && DR_IS_WRITE (a->ref))
- || wi::leu_p (MAX_DISTANCE, a->offset - last_ofs))
+ || wi::ltu_p (max_distance, a->offset - last_ofs))
{
if (nontrivial_chain_p (chain))
{
@@ -1352,6 +1443,15 @@ determine_roots_comp (struct loop *loop,
else
release_chain (chain);
+ /* Only create CT_STORE_LOAD and CT_INVARIANT chains when
+ running before vectorization. */
+ if (only_simple_p && !DR_IS_WRITE (a->ref))
+ {
+ free (a);
+ chain = NULL;
+ continue;
+ }
+
if (DR_IS_READ (a->ref))
type = CT_LOAD;
else
@@ -2116,6 +2216,10 @@ determine_unroll_factor (vec<chain_p> chains)
unsigned factor = 1, af, nfactor, i;
unsigned max = PARAM_VALUE (PARAM_MAX_UNROLL_TIMES);
+ /* Do not unroll when running before vectorization. */
+ if (only_simple_p)
+ return 1;
+
FOR_EACH_VEC_ELT (chains, i, chain)
{
if (chain->type == CT_INVARIANT)
@@ -2524,11 +2628,10 @@ remove_name_from_operation (gimple *stmt, tree op)
}
/* Reassociates the expression in that NAME1 and NAME2 are used so that they
- are combined in a single statement, and returns this statement. Note the
- statement is inserted before INSERT_BEFORE if it's not NULL. */
+ are combined in a single statement, and returns this statement. */
static gimple *
-reassociate_to_the_same_stmt (tree name1, tree name2, gimple *insert_before)
+reassociate_to_the_same_stmt (tree name1, tree name2)
{
gimple *stmt1, *stmt2, *root1, *root2, *s1, *s2;
gassign *new_stmt, *tmp_stmt;
@@ -2585,12 +2688,6 @@ reassociate_to_the_same_stmt (tree name1, tree name2, gimple *insert_before)
var = create_tmp_reg (type, "predreastmp");
new_name = make_ssa_name (var);
new_stmt = gimple_build_assign (new_name, code, name1, name2);
- if (insert_before && stmt_dominates_stmt_p (insert_before, s1))
- bsi = gsi_for_stmt (insert_before);
- else
- bsi = gsi_for_stmt (s1);
-
- gsi_insert_before (&bsi, new_stmt, GSI_SAME_STMT);
var = create_tmp_reg (type, "predreastmp");
tmp_name = make_ssa_name (var);
@@ -2607,6 +2704,7 @@ reassociate_to_the_same_stmt (tree name1, tree name2, gimple *insert_before)
s1 = gsi_stmt (bsi);
update_stmt (s1);
+ gsi_insert_before (&bsi, new_stmt, GSI_SAME_STMT);
gsi_insert_before (&bsi, tmp_stmt, GSI_SAME_STMT);
return new_stmt;
@@ -2615,11 +2713,10 @@ reassociate_to_the_same_stmt (tree name1, tree name2, gimple *insert_before)
/* Returns the statement that combines references R1 and R2. In case R1
and R2 are not used in the same statement, but they are used with an
associative and commutative operation in the same expression, reassociate
- the expression so that they are used in the same statement. The combined
- statement is inserted before INSERT_BEFORE if it's not NULL. */
+ the expression so that they are used in the same statement. */
static gimple *
-stmt_combining_refs (dref r1, dref r2, gimple *insert_before)
+stmt_combining_refs (dref r1, dref r2)
{
gimple *stmt1, *stmt2;
tree name1 = name_for_ref (r1);
@@ -2630,7 +2727,7 @@ stmt_combining_refs (dref r1, dref r2, gimple *insert_before)
if (stmt1 == stmt2)
return stmt1;
- return reassociate_to_the_same_stmt (name1, name2, insert_before);
+ return reassociate_to_the_same_stmt (name1, name2);
}
/* Tries to combine chains CH1 and CH2 together. If this succeeds, the
@@ -2643,8 +2740,7 @@ combine_chains (chain_p ch1, chain_p ch2)
enum tree_code op = ERROR_MARK;
bool swap = false;
chain_p new_chain;
- int i, j, num;
- gimple *root_stmt;
+ unsigned i;
tree rslt_type = NULL_TREE;
if (ch1 == ch2)
@@ -2665,9 +2761,6 @@ combine_chains (chain_p ch1, chain_p ch2)
return NULL;
}
- ch1->combined = true;
- ch2->combined = true;
-
if (swap)
std::swap (ch1, ch2);
@@ -2679,69 +2772,65 @@ combine_chains (chain_p ch1, chain_p ch2)
new_chain->rslt_type = rslt_type;
new_chain->length = ch1->length;
- gimple *insert = NULL;
- num = ch1->refs.length ();
- i = (new_chain->length == 0) ? num - 1 : 0;
- j = (new_chain->length == 0) ? -1 : 1;
- /* For ZERO length chain, process refs in reverse order so that dominant
- position is ready when it comes to the root ref.
- For non-ZERO length chain, process refs in order. See PR79663. */
- for (; num > 0; num--, i += j)
- {
- r1 = ch1->refs[i];
- r2 = ch2->refs[i];
+ for (i = 0; (ch1->refs.iterate (i, &r1)
+ && ch2->refs.iterate (i, &r2)); i++)
+ {
nw = XCNEW (struct dref_d);
+ nw->stmt = stmt_combining_refs (r1, r2);
nw->distance = r1->distance;
- /* For ZERO length chain, insert combined stmt of root ref at dominant
- position. */
- nw->stmt = stmt_combining_refs (r1, r2, i == 0 ? insert : NULL);
- /* For ZERO length chain, record dominant position where combined stmt
- of root ref should be inserted. In this case, though all root refs
- dominate following ones, it's possible that combined stmt doesn't.
- See PR70754. */
- if (new_chain->length == 0
- && (insert == NULL || stmt_dominates_stmt_p (nw->stmt, insert)))
- insert = nw->stmt;
-
new_chain->refs.safe_push (nw);
}
- if (new_chain->length == 0)
- {
- /* Restore the order for ZERO length chain's refs. */
- num = new_chain->refs.length () >> 1;
- for (i = 0, j = new_chain->refs.length () - 1; i < num; i++, j--)
- std::swap (new_chain->refs[i], new_chain->refs[j]);
- /* For ZERO length chain, has_max_use_after must be true since root
- combined stmt must dominates others. */
- new_chain->has_max_use_after = true;
- return new_chain;
- }
+ ch1->combined = true;
+ ch2->combined = true;
+ return new_chain;
+}
- new_chain->has_max_use_after = false;
- root_stmt = get_chain_root (new_chain)->stmt;
- for (i = 1; new_chain->refs.iterate (i, &nw); i++)
- {
- if (nw->distance == new_chain->length
- && !stmt_dominates_stmt_p (nw->stmt, root_stmt))
- {
- new_chain->has_max_use_after = true;
- break;
- }
- }
+/* Recursively update position information of all offspring chains to ROOT
+ chain's position information. */
- return new_chain;
+static void
+update_pos_for_combined_chains (chain_p root)
+{
+ chain_p ch1 = root->ch1, ch2 = root->ch2;
+ dref ref, ref1, ref2;
+ for (unsigned j = 0; (root->refs.iterate (j, &ref)
+ && ch1->refs.iterate (j, &ref1)
+ && ch2->refs.iterate (j, &ref2)); ++j)
+ ref1->pos = ref2->pos = ref->pos;
+
+ if (ch1->type == CT_COMBINATION)
+ update_pos_for_combined_chains (ch1);
+ if (ch2->type == CT_COMBINATION)
+ update_pos_for_combined_chains (ch2);
}
-/* Try to combine the CHAINS. */
+/* Returns true if statement S1 dominates statement S2. */
+
+static bool
+pcom_stmt_dominates_stmt_p (gimple *s1, gimple *s2)
+{
+ basic_block bb1 = gimple_bb (s1), bb2 = gimple_bb (s2);
+
+ if (!bb1 || s1 == s2)
+ return true;
+
+ if (bb1 == bb2)
+ return gimple_uid (s1) < gimple_uid (s2);
+
+ return dominated_by_p (CDI_DOMINATORS, bb2, bb1);
+}
+
+/* Try to combine the CHAINS in LOOP. */
static void
-try_combine_chains (vec<chain_p> *chains)
+try_combine_chains (struct loop *loop, vec<chain_p> *chains)
{
unsigned i, j;
chain_p ch1, ch2, cch;
auto_vec<chain_p> worklist;
+ bool combined_p = false;
FOR_EACH_VEC_ELT (*chains, i, ch1)
if (chain_can_be_combined_p (ch1))
@@ -2763,6 +2852,78 @@ try_combine_chains (vec<chain_p> *chains)
{
worklist.safe_push (cch);
chains->safe_push (cch);
+ combined_p = true;
+ break;
+ }
+ }
+ }
+ if (!combined_p)
+ return;
+
+ /* Setup UID for all statements in dominance order. */
+ basic_block *bbs = get_loop_body (loop);
+ renumber_gimple_stmt_uids_in_blocks (bbs, loop->num_nodes);
+ free (bbs);
+
+ /* Re-association in combined chains may generate statements different to
+ order of references of the original chain. We need to keep references
+ of combined chain in dominance order so that all uses will be inserted
+ after definitions. Note:
+ A) This is necessary for all combined chains.
+ B) This is only necessary for ZERO distance references because other
+ references inherit value from loop carried PHIs.
+
+ We first update position information for all combined chains. */
+ dref ref;
+ for (i = 0; chains->iterate (i, &ch1); ++i)
+ {
+ if (ch1->type != CT_COMBINATION || ch1->combined)
+ continue;
+
+ for (j = 0; ch1->refs.iterate (j, &ref); ++j)
+ ref->pos = gimple_uid (ref->stmt);
+
+ update_pos_for_combined_chains (ch1);
+ }
+ /* Then sort references according to newly updated position information. */
+ for (i = 0; chains->iterate (i, &ch1); ++i)
+ {
+ if (ch1->type != CT_COMBINATION && !ch1->combined)
+ continue;
+
+ /* Find the first reference with non-ZERO distance. */
+ if (ch1->length == 0)
+ j = ch1->refs.length();
+ else
+ {
+ for (j = 0; ch1->refs.iterate (j, &ref); ++j)
+ if (ref->distance != 0)
+ break;
+ }
+
+ /* Sort all ZERO distance references by position. */
+ qsort (&ch1->refs[0], j, sizeof (ch1->refs[0]), order_drefs_by_pos);
+
+ if (ch1->combined)
+ continue;
+
+ /* For ZERO length chain, has_max_use_after must be true since root
+ combined stmt must dominates others. */
+ if (ch1->length == 0)
+ {
+ ch1->has_max_use_after = true;
+ continue;
+ }
+ /* Check if there is use at max distance after root for combined chains
+ and set flag accordingly. */
+ ch1->has_max_use_after = false;
+ gimple *root_stmt = get_chain_root (ch1)->stmt;
+ for (j = 1; ch1->refs.iterate (j, &ref); ++j)
+ {
+ if (ref->distance == ch1->length
+ && !pcom_stmt_dominates_stmt_p (ref->stmt, root_stmt))
+ {
+ ch1->has_max_use_after = true;
break;
}
}
@@ -3099,8 +3260,11 @@ tree_predictive_commoning_loop (struct loop *loop)
prepare_initializers (loop, chains);
loop_closed_ssa = prepare_finalizers (loop, chains);
- /* Try to combine the chains that are always worked with together. */
- try_combine_chains (&chains);
+ /* During the main pass, try to combine the chains that are always
+ worked with together. For the early pass it should be better
+ to leave this to the vectorizer. */
+ if (!only_simple_p)
+ try_combine_chains (loop, &chains);
insert_init_seqs (loop, chains);
@@ -3162,14 +3326,18 @@ end: ;
return (unroll ? 1 : 0) | (loop_closed_ssa ? 2 : 0);
}
-/* Runs predictive commoning. */
+/* Runs predictive commoning. EARLY_P is true if we are running before
+ vectorization. */
unsigned
-tree_predictive_commoning (void)
+tree_predictive_commoning (bool early_p)
{
struct loop *loop;
unsigned ret = 0, changed = 0;
+ only_simple_p = early_p;
+ max_distance = early_p ? 1 : MAX_DISTANCE;
+
initialize_original_copy_tables ();
FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
if (optimize_loop_for_speed_p (loop))
@@ -3191,19 +3359,51 @@ tree_predictive_commoning (void)
return ret;
}
-/* Predictive commoning Pass. */
+/* Predictive commoning pass. EARLY_P is true if we are running before
+ vectorization. */
static unsigned
-run_tree_predictive_commoning (struct function *fun)
+run_tree_predictive_commoning (struct function *fun, bool early_p)
{
if (number_of_loops (fun) <= 1)
return 0;
- return tree_predictive_commoning ();
+ return tree_predictive_commoning (early_p);
}
namespace {
+const pass_data pass_data_early_predcom =
+{
+ GIMPLE_PASS, /* type */
+ "epcom", /* name */
+ OPTGROUP_LOOP, /* optinfo_flags */
+ TV_PREDCOM, /* tv_id */
+ PROP_cfg, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ TODO_update_ssa_only_virtuals, /* todo_flags_finish */
+};
+
+class pass_early_predcom : public gimple_opt_pass
+{
+public:
+ pass_early_predcom (gcc::context *ctxt)
+ : gimple_opt_pass (pass_data_early_predcom, ctxt)
+ {}
+
+ /* opt_pass methods: */
+ virtual bool gate (function *)
+ {
+ return flag_predictive_commoning && flag_tree_loop_vectorize;
+ }
+ virtual unsigned int execute (function *fun)
+ {
+ return run_tree_predictive_commoning (fun, true);
+ }
+}; // class pass_early_predcom
+
const pass_data pass_data_predcom =
{
GIMPLE_PASS, /* type */
@@ -3228,7 +3428,7 @@ public:
virtual bool gate (function *) { return flag_predictive_commoning != 0; }
virtual unsigned int execute (function *fun)
{
- return run_tree_predictive_commoning (fun);
+ return run_tree_predictive_commoning (fun, false);
}
}; // class pass_predcom
@@ -3236,9 +3436,13 @@ public:
} // anon namespace
gimple_opt_pass *
+make_pass_early_predcom (gcc::context *ctxt)
+{
+ return new pass_early_predcom (ctxt);
+}
+
+gimple_opt_pass *
make_pass_predcom (gcc::context *ctxt)
{
return new pass_predcom (ctxt);
}
-
-
diff --git a/gcc/tree-pretty-print.c b/gcc/tree-pretty-print.c
index a39cebff2b0..00a94c5f15c 100644
--- a/gcc/tree-pretty-print.c
+++ b/gcc/tree-pretty-print.c
@@ -2786,7 +2786,15 @@ dump_generic_node (pretty_printer *pp, tree node, int spc, dump_flags_t flags,
pp_string (pp, "OBJ_TYPE_REF(");
dump_generic_node (pp, OBJ_TYPE_REF_EXPR (node), spc, flags, false);
pp_semicolon (pp);
- if (!(flags & TDF_SLIM) && virtual_method_call_p (node))
+ /* We omit the class type for -fcompare-debug because we may
+ drop TYPE_BINFO early depending on debug info, and then
+ virtual_method_call_p would return false, whereas when
+ TYPE_BINFO is preserved it may still return true and then
+ we'd print the class type. Compare tree and rtl dumps for
+ libstdc++-prettyprinters/shared_ptr.cc with and without -g,
+ for example, at occurrences of OBJ_TYPE_REF. */
+ if (!(flags & (TDF_SLIM | TDF_COMPARE_DEBUG))
+ && virtual_method_call_p (node))
{
pp_string (pp, "(");
dump_generic_node (pp, obj_type_ref_class (node), spc, flags, false);
@@ -3214,7 +3222,7 @@ dump_generic_node (pretty_printer *pp, tree node, int spc, dump_flags_t flags,
break;
case VEC_SERIES_EXPR:
- case STRICT_REDUC_PLUS_EXPR:
+ case FOLD_LEFT_PLUS_EXPR:
case VEC_WIDEN_MULT_HI_EXPR:
case VEC_WIDEN_MULT_LO_EXPR:
case VEC_WIDEN_MULT_EVEN_EXPR:
@@ -3610,7 +3618,7 @@ op_code_prio (enum tree_code code)
case REDUC_MAX_EXPR:
case REDUC_MIN_EXPR:
case REDUC_PLUS_EXPR:
- case STRICT_REDUC_PLUS_EXPR:
+ case FOLD_LEFT_PLUS_EXPR:
case VEC_UNPACK_HI_EXPR:
case VEC_UNPACK_LO_EXPR:
case VEC_UNPACK_FLOAT_HI_EXPR:
@@ -3732,8 +3740,8 @@ op_symbol_code (enum tree_code code)
case REDUC_PLUS_EXPR:
return "r+";
- case STRICT_REDUC_PLUS_EXPR:
- return "strictr+";
+ case FOLD_LEFT_PLUS_EXPR:
+ return "fl+";
case WIDEN_SUM_EXPR:
return "w+";
diff --git a/gcc/tree-sra.c b/gcc/tree-sra.c
index 61653afaab7..705fb7be9e4 100644
--- a/gcc/tree-sra.c
+++ b/gcc/tree-sra.c
@@ -97,6 +97,7 @@ along with GCC; see the file COPYING3. If not see
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "symbol-summary.h"
+#include "ipa-param-manipulation.h"
#include "ipa-prop.h"
#include "params.h"
#include "dbgcnt.h"
diff --git a/gcc/tree-ssa-alias.c b/gcc/tree-ssa-alias.c
index b9f25a3ad78..2a95d7fc1f0 100644
--- a/gcc/tree-ssa-alias.c
+++ b/gcc/tree-ssa-alias.c
@@ -635,15 +635,11 @@ tree
ao_ref_base (ao_ref *ref)
{
bool reverse;
- poly_int64 offset, size, max_size;
if (ref->base)
return ref->base;
- ref->base = get_ref_base_and_extent (ref->ref, &offset, &size,
- &max_size, &reverse);
- ref->offset = offset;
- ref->size = size;
- ref->max_size = max_size;
+ ref->base = get_ref_base_and_extent (ref->ref, &ref->offset, &ref->size,
+ &ref->max_size, &reverse);
return ref->base;
}
diff --git a/gcc/tree-ssa-coalesce.c b/gcc/tree-ssa-coalesce.c
index 057d51dcf37..68d3c3df8f7 100644
--- a/gcc/tree-ssa-coalesce.c
+++ b/gcc/tree-ssa-coalesce.c
@@ -164,7 +164,8 @@ coalesce_cost (int frequency, bool optimize_for_size)
static inline int
coalesce_cost_bb (basic_block bb)
{
- return coalesce_cost (bb->count.to_frequency (cfun), optimize_bb_for_size_p (bb));
+ return coalesce_cost (bb->count.to_frequency (cfun),
+ optimize_bb_for_size_p (bb));
}
diff --git a/gcc/tree-ssa-loop-im.c b/gcc/tree-ssa-loop-im.c
index 5103d12cf87..59393d190ef 100644
--- a/gcc/tree-ssa-loop-im.c
+++ b/gcc/tree-ssa-loop-im.c
@@ -1780,7 +1780,6 @@ execute_sm_if_changed (edge ex, tree mem, tree tmp_var, tree flag,
struct prev_flag_edges *prev_edges = (struct prev_flag_edges *) ex->aux;
bool irr = ex->flags & EDGE_IRREDUCIBLE_LOOP;
- int freq_sum = 0;
profile_count count_sum = profile_count::zero ();
int nbbs = 0, ncount = 0;
profile_probability flag_probability = profile_probability::uninitialized ();
@@ -1802,7 +1801,6 @@ execute_sm_if_changed (edge ex, tree mem, tree tmp_var, tree flag,
for (hash_set<basic_block>::iterator it = flag_bbs->begin ();
it != flag_bbs->end (); ++it)
{
- freq_sum += (*it)->count.to_frequency (cfun);
if ((*it)->count.initialized_p ())
count_sum += (*it)->count, ncount ++;
if (dominated_by_p (CDI_DOMINATORS, ex->src, *it))
diff --git a/gcc/tree-ssa-loop-ivopts.c b/gcc/tree-ssa-loop-ivopts.c
index e550c850e25..224732e7059 100644
--- a/gcc/tree-ssa-loop-ivopts.c
+++ b/gcc/tree-ssa-loop-ivopts.c
@@ -1525,14 +1525,14 @@ find_induction_variables (struct ivopts_data *data)
static struct iv_use *
record_use (struct iv_group *group, tree *use_p, struct iv *iv,
- gimple *stmt, enum use_type use_type, tree mem_type,
+ gimple *stmt, enum use_type type, tree mem_type,
tree addr_base, poly_uint64 addr_offset)
{
struct iv_use *use = XCNEW (struct iv_use);
use->id = group->vuses.length ();
use->group_id = group->id;
- use->type = use_type;
+ use->type = type;
use->mem_type = mem_type;
use->iv = iv;
use->stmt = stmt;
@@ -1588,11 +1588,13 @@ record_group (struct ivopts_data *data, enum use_type type)
}
/* Record a use of TYPE at *USE_P in STMT whose value is IV in a group.
- New group will be created if there is no existing group for the use. */
+ New group will be created if there is no existing group for the use.
+ MEM_TYPE is the type of memory being addressed, or NULL if this
+ isn't an address reference. */
static struct iv_use *
record_group_use (struct ivopts_data *data, tree *use_p,
- struct iv *iv, gimple *stmt, enum use_type use_type,
+ struct iv *iv, gimple *stmt, enum use_type type,
tree mem_type)
{
tree addr_base = NULL;
@@ -1600,7 +1602,7 @@ record_group_use (struct ivopts_data *data, tree *use_p,
poly_uint64 addr_offset = 0;
/* Record non address type use in a new group. */
- if (address_p (use_type) && iv->base_object)
+ if (address_p (type) && iv->base_object)
{
unsigned int i;
@@ -1625,9 +1627,9 @@ record_group_use (struct ivopts_data *data, tree *use_p,
}
if (!group)
- group = record_group (data, use_type);
+ group = record_group (data, type);
- return record_use (group, use_p, iv, stmt, use_type, mem_type,
+ return record_use (group, use_p, iv, stmt, type, mem_type,
addr_base, addr_offset);
}
diff --git a/gcc/tree-ssa-phiprop.c b/gcc/tree-ssa-phiprop.c
index 7dcb9ee49a4..494158be0d1 100644
--- a/gcc/tree-ssa-phiprop.c
+++ b/gcc/tree-ssa-phiprop.c
@@ -354,7 +354,7 @@ propagate_with_phi (basic_block bb, gphi *phi, struct phiprop_d *phivn,
/* Found a proper dereference with an aggregate copy. Just
insert aggregate copies on the edges instead. */
- if (!is_gimple_reg_type (TREE_TYPE (TREE_TYPE (ptr))))
+ if (!is_gimple_reg_type (TREE_TYPE (gimple_assign_lhs (use_stmt))))
{
if (!gimple_vdef (use_stmt))
goto next;
diff --git a/gcc/tree-ssa-sink.c b/gcc/tree-ssa-sink.c
index 1c5d7dd7556..5d801d35921 100644
--- a/gcc/tree-ssa-sink.c
+++ b/gcc/tree-ssa-sink.c
@@ -226,8 +226,10 @@ select_best_block (basic_block early_bb,
/* If BEST_BB is at the same nesting level, then require it to have
significantly lower execution frequency to avoid gratutious movement. */
if (bb_loop_depth (best_bb) == bb_loop_depth (early_bb)
- && best_bb->count.to_frequency (cfun)
- < (early_bb->count.to_frequency (cfun) * threshold / 100.0))
+ /* If result of comparsion is unknown, preffer EARLY_BB.
+ Thus use !(...>=..) rather than (...<...) */
+ && !(best_bb->count.apply_scale (100, 1)
+ > (early_bb->count.apply_scale (threshold, 1))))
return best_bb;
/* No better block found, so return EARLY_BB, which happens to be the
diff --git a/gcc/tree-ssa-strlen.c b/gcc/tree-ssa-strlen.c
index 5922616d2cc..db272aa1acb 100644
--- a/gcc/tree-ssa-strlen.c
+++ b/gcc/tree-ssa-strlen.c
@@ -40,12 +40,17 @@ along with GCC; see the file COPYING3. If not see
#include "expr.h"
#include "tree-dfa.h"
#include "domwalk.h"
+#include "tree-ssa-alias.h"
#include "tree-ssa-propagate.h"
#include "params.h"
#include "ipa-chkp.h"
#include "tree-hash-traits.h"
#include "builtins.h"
#include "target.h"
+#include "diagnostic-core.h"
+#include "diagnostic.h"
+#include "intl.h"
+#include "attribs.h"
/* A vector indexed by SSA_NAME_VERSION. 0 means unknown, positive value
is an index into strinfo vector, negative value stands for
@@ -147,6 +152,9 @@ struct decl_stridxlist_map
mappings. */
static hash_map<tree_decl_hash, stridxlist> *decl_to_stridxlist_htab;
+typedef std::pair<int, location_t> stridx_strlenloc;
+static hash_map<tree, stridx_strlenloc> strlen_to_stridx;
+
/* Obstack for struct stridxlist and struct decl_stridxlist_map. */
static struct obstack stridx_obstack;
@@ -1200,6 +1208,9 @@ handle_builtin_strlen (gimple_stmt_iterator *gsi)
si->nonzero_chars = lhs;
gcc_assert (si->full_string_p);
}
+
+ location_t loc = gimple_location (stmt);
+ strlen_to_stridx.put (lhs, stridx_strlenloc (idx, loc));
return;
}
}
@@ -1243,6 +1254,9 @@ handle_builtin_strlen (gimple_stmt_iterator *gsi)
strinfo *si = new_strinfo (src, idx, lhs, true);
set_strinfo (idx, si);
find_equal_ptrs (src, idx);
+
+ location_t loc = gimple_location (stmt);
+ strlen_to_stridx.put (lhs, stridx_strlenloc (idx, loc));
}
}
@@ -1609,6 +1623,368 @@ handle_builtin_strcpy (enum built_in_function bcode, gimple_stmt_iterator *gsi)
fprintf (dump_file, "not possible.\n");
}
+/* Return true if LEN depends on a call to strlen(SRC) in an interesting
+ way. LEN can either be an integer expression, or a pointer (to char).
+ When it is the latter (such as in recursive calls to self) is is
+ assumed to be the argument in some call to strlen() whose relationship
+ to SRC is being ascertained. */
+
+static bool
+is_strlen_related_p (tree src, tree len)
+{
+ if (TREE_CODE (TREE_TYPE (len)) == POINTER_TYPE
+ && operand_equal_p (src, len, 0))
+ return true;
+
+ if (TREE_CODE (len) != SSA_NAME)
+ return false;
+
+ gimple *def_stmt = SSA_NAME_DEF_STMT (len);
+ if (!def_stmt)
+ return false;
+
+ if (is_gimple_call (def_stmt))
+ {
+ tree func = gimple_call_fndecl (def_stmt);
+ if (!valid_builtin_call (def_stmt)
+ || DECL_FUNCTION_CODE (func) != BUILT_IN_STRLEN)
+ return false;
+
+ tree arg = gimple_call_arg (def_stmt, 0);
+ return is_strlen_related_p (src, arg);
+ }
+
+ if (!is_gimple_assign (def_stmt))
+ return false;
+
+ tree_code code = gimple_assign_rhs_code (def_stmt);
+ tree rhs1 = gimple_assign_rhs1 (def_stmt);
+ tree rhstype = TREE_TYPE (rhs1);
+
+ if ((TREE_CODE (rhstype) == POINTER_TYPE && code == POINTER_PLUS_EXPR)
+ || (INTEGRAL_TYPE_P (rhstype)
+ && (code == BIT_AND_EXPR
+ || code == NOP_EXPR)))
+ {
+ /* Pointer plus (an integer) and integer cast or truncation are
+ considered among the (potentially) related expressions to strlen.
+ Others are not. */
+ return is_strlen_related_p (src, rhs1);
+ }
+
+ return false;
+}
+
+/* A helper of handle_builtin_stxncpy. Check to see if the specified
+ bound is a) equal to the size of the destination DST and if so, b)
+ if it's immediately followed by DST[CNT - 1] = '\0'. If a) holds
+ and b) does not, warn. Otherwise, do nothing. Return true if
+ diagnostic has been issued.
+
+ The purpose is to diagnose calls to strncpy and stpncpy that do
+ not nul-terminate the copy while allowing for the idiom where
+ such a call is immediately followed by setting the last element
+ to nul, as in:
+ char a[32];
+ strncpy (a, s, sizeof a);
+ a[sizeof a - 1] = '\0';
+*/
+
+static bool
+maybe_diag_stxncpy_trunc (gimple_stmt_iterator gsi, tree src, tree cnt)
+{
+ if (!warn_stringop_truncation)
+ return false;
+
+ gimple *stmt = gsi_stmt (gsi);
+
+ wide_int cntrange[2];
+
+ if (TREE_CODE (cnt) == INTEGER_CST)
+ cntrange[0] = cntrange[1] = wi::to_wide (cnt);
+ else if (TREE_CODE (cnt) == SSA_NAME)
+ {
+ enum value_range_type rng = get_range_info (cnt, cntrange, cntrange + 1);
+ if (rng == VR_RANGE)
+ ;
+ else if (rng == VR_ANTI_RANGE)
+ {
+ wide_int maxobjsize = wi::to_wide (TYPE_MAX_VALUE (ptrdiff_type_node));
+
+ if (wi::ltu_p (cntrange[1], maxobjsize))
+ {
+ cntrange[0] = cntrange[1] + 1;
+ cntrange[1] = maxobjsize;
+ }
+ else
+ {
+ cntrange[1] = cntrange[0] - 1;
+ cntrange[0] = wi::zero (TYPE_PRECISION (TREE_TYPE (cnt)));
+ }
+ }
+ else
+ return false;
+ }
+ else
+ return false;
+
+ /* Negative value is the constant string length. If it's less than
+ the lower bound there is no truncation. */
+ int sidx = get_stridx (src);
+ if (sidx < 0 && wi::gtu_p (cntrange[0], ~sidx))
+ return false;
+
+ tree dst = gimple_call_arg (stmt, 0);
+
+ /* See if the destination is declared with attribute "nonstring"
+ and if so, avoid the truncation warning. */
+ if (TREE_CODE (dst) == SSA_NAME)
+ {
+ if (SSA_NAME_IS_DEFAULT_DEF (dst))
+ dst = SSA_NAME_VAR (dst);
+ else
+ {
+ gimple *def = SSA_NAME_DEF_STMT (dst);
+
+ if (is_gimple_assign (def)
+ && gimple_assign_rhs_code (def) == ADDR_EXPR)
+ dst = gimple_assign_rhs1 (def);
+ }
+ }
+
+ tree dstdecl = dst;
+ if (TREE_CODE (dstdecl) == ADDR_EXPR)
+ dstdecl = TREE_OPERAND (dstdecl, 0);
+
+ {
+ tree d = dstdecl;
+ if (TREE_CODE (d) == COMPONENT_REF)
+ d = TREE_OPERAND (d, 1);
+
+ if (DECL_P (d) && lookup_attribute ("nonstring", DECL_ATTRIBUTES (d)))
+ return false;
+ }
+
+ /* Look for dst[i] = '\0'; after the stxncpy() call and if found
+ avoid the truncation warning. */
+ gsi_next (&gsi);
+ gimple *next_stmt = gsi_stmt (gsi);
+
+ if (!gsi_end_p (gsi) && is_gimple_assign (next_stmt))
+ {
+ poly_int64 off;
+ dstdecl = get_addr_base_and_unit_offset (dstdecl, &off);
+
+ tree lhs = gimple_assign_lhs (next_stmt);
+ tree lhsbase = get_addr_base_and_unit_offset (lhs, &off);
+ if (lhsbase && operand_equal_p (dstdecl, lhsbase, 0))
+ return false;
+ }
+
+ int prec = TYPE_PRECISION (TREE_TYPE (cnt));
+ wide_int lenrange[2];
+ if (strinfo *sisrc = sidx > 0 ? get_strinfo (sidx) : NULL)
+ {
+ lenrange[0] = (sisrc->nonzero_chars
+ && TREE_CODE (sisrc->nonzero_chars) == INTEGER_CST
+ ? wi::to_wide (sisrc->nonzero_chars)
+ : wi::zero (prec));
+ lenrange[1] = lenrange[0];
+ }
+ else if (sidx < 0)
+ lenrange[0] = lenrange[1] = wi::shwi (~sidx, prec);
+ else
+ {
+ tree range[2];
+ get_range_strlen (src, range);
+ if (range[0])
+ {
+ lenrange[0] = wi::to_wide (range[0], prec);
+ lenrange[1] = wi::to_wide (range[1], prec);
+ }
+ else
+ {
+ lenrange[0] = wi::shwi (0, prec);
+ lenrange[1] = wi::shwi (-1, prec);
+ }
+ }
+
+ location_t callloc = gimple_location (stmt);
+ tree func = gimple_call_fndecl (stmt);
+
+ if (lenrange[0] != 0 || !wi::neg_p (lenrange[1]))
+ {
+ /* If the longest source string is shorter than the lower bound
+ of the specified count the copy is definitely nul-terminated. */
+ if (wi::ltu_p (lenrange[1], cntrange[0]))
+ return false;
+
+ if (wi::neg_p (lenrange[1]))
+ {
+ /* The length of one of the strings is unknown but at least
+ one has non-zero length and that length is stored in
+ LENRANGE[1]. Swap the bounds to force a "may be truncated"
+ warning below. */
+ lenrange[1] = lenrange[0];
+ lenrange[0] = wi::shwi (0, prec);
+ }
+
+ if (wi::geu_p (lenrange[0], cntrange[1]))
+ {
+ /* The shortest string is longer than the upper bound of
+ the count so the truncation is certain. */
+ if (cntrange[0] == cntrange[1])
+ return warning_at (callloc, OPT_Wstringop_truncation,
+ integer_onep (cnt)
+ ? G_("%qD output truncated copying %E byte "
+ "from a string of length %wu")
+ : G_("%qD output truncated copying %E bytes "
+ "from a string of length %wu"),
+ func, cnt, lenrange[0].to_uhwi ());
+
+ return warning_at (callloc, OPT_Wstringop_truncation,
+ "%qD output truncated copying between %wu "
+ "and %wu bytes from a string of length %wu",
+ func, cntrange[0].to_uhwi (),
+ cntrange[1].to_uhwi (), lenrange[0].to_uhwi ());
+ }
+ else if (wi::geu_p (lenrange[1], cntrange[1]))
+ {
+ /* The longest string is longer than the upper bound of
+ the count so the truncation is possible. */
+ if (cntrange[0] == cntrange[1])
+ return warning_at (callloc, OPT_Wstringop_truncation,
+ integer_onep (cnt)
+ ? G_("%qD output may be truncated copying %E "
+ "byte from a string of length %wu")
+ : G_("%qD output may be truncated copying %E "
+ "bytes from a string of length %wu"),
+ func, cnt, lenrange[1].to_uhwi ());
+
+ return warning_at (callloc, OPT_Wstringop_truncation,
+ "%qD output may be truncated copying between %wu "
+ "and %wu bytes from a string of length %wu",
+ func, cntrange[0].to_uhwi (),
+ cntrange[1].to_uhwi (), lenrange[1].to_uhwi ());
+ }
+
+ if (cntrange[0] != cntrange[1]
+ && wi::leu_p (cntrange[0], lenrange[0])
+ && wi::leu_p (cntrange[1], lenrange[0] + 1))
+ {
+ /* If the source (including the terminating nul) is longer than
+ the lower bound of the specified count but shorter than the
+ upper bound the copy may (but need not) be truncated. */
+ return warning_at (callloc, OPT_Wstringop_truncation,
+ "%qD output may be truncated copying between %wu "
+ "and %wu bytes from a string of length %wu",
+ func, cntrange[0].to_uhwi (),
+ cntrange[1].to_uhwi (), lenrange[0].to_uhwi ());
+ }
+ }
+
+ if (tree dstsize = compute_objsize (dst, 1))
+ {
+ /* The source length is uknown. Try to determine the destination
+ size and see if it matches the specified bound. If not, bail.
+ Otherwise go on to see if it should be diagnosed for possible
+ truncation. */
+ if (!dstsize)
+ return false;
+
+ if (wi::to_wide (dstsize) != cntrange[1])
+ return false;
+
+ if (cntrange[0] == cntrange[1])
+ return warning_at (callloc, OPT_Wstringop_truncation,
+ "%qD specified bound %E equals destination size",
+ func, cnt);
+ }
+
+ return false;
+}
+
+/* Check the size argument to the built-in forms of stpncpy and strncpy
+ to see if it's derived from calling strlen() on the source argument
+ and if so, issue a warning. */
+
+static void
+handle_builtin_stxncpy (built_in_function, gimple_stmt_iterator *gsi)
+{
+ gimple *stmt = gsi_stmt (*gsi);
+
+ bool with_bounds = gimple_call_with_bounds_p (stmt);
+
+ tree src = gimple_call_arg (stmt, with_bounds ? 2 : 1);
+ tree len = gimple_call_arg (stmt, with_bounds ? 3 : 2);
+
+ /* If the length argument was computed from strlen(S) for some string
+ S retrieve the strinfo index for the string (PSS->FIRST) alonng with
+ the location of the strlen() call (PSS->SECOND). */
+ stridx_strlenloc *pss = strlen_to_stridx.get (len);
+ if (!pss || pss->first <= 0)
+ {
+ if (maybe_diag_stxncpy_trunc (*gsi, src, len))
+ gimple_set_no_warning (stmt, true);
+
+ return;
+ }
+
+ int sidx = get_stridx (src);
+ strinfo *sisrc = sidx > 0 ? get_strinfo (sidx) : NULL;
+
+ /* Strncpy() et al. cannot modify the source string. Prevent the rest
+ of the pass from invalidating the strinfo data. */
+ if (sisrc)
+ sisrc->dont_invalidate = true;
+
+ /* Retrieve the strinfo data for the string S that LEN was computed
+ from as some function F of strlen (S) (i.e., LEN need not be equal
+ to strlen(S)). */
+ strinfo *silen = get_strinfo (pss->first);
+
+ location_t callloc = gimple_location (stmt);
+
+ tree func = gimple_call_fndecl (stmt);
+
+ bool warned = false;
+
+ /* When -Wstringop-truncation is set, try to determine truncation
+ before diagnosing possible overflow. Truncation is implied by
+ the LEN argument being equal to strlen(SRC), regardless of
+ whether its value is known. Otherwise, issue the more generic
+ -Wstringop-overflow which triggers for LEN arguments that in
+ any meaningful way depend on strlen(SRC). */
+ if (warn_stringop_truncation
+ && sisrc == silen
+ && is_strlen_related_p (src, len))
+ warned = warning_at (callloc, OPT_Wstringop_truncation,
+ "%qD output truncated before terminating nul "
+ "copying as many bytes from a string as its length",
+ func);
+ else if (silen && is_strlen_related_p (src, silen->ptr))
+ warned = warning_at (callloc, OPT_Wstringop_overflow_,
+ "%qD specified bound depends on the length "
+ "of the source argument", func);
+ if (warned)
+ {
+ location_t strlenloc = pss->second;
+ if (strlenloc != UNKNOWN_LOCATION && strlenloc != callloc)
+ inform (strlenloc, "length computed here");
+ }
+}
+
+/* Check the size argument to the built-in forms of strncat to see if
+ it's derived from calling strlen() on the source argument and if so,
+ issue a warning. */
+
+static void
+handle_builtin_strncat (built_in_function bcode, gimple_stmt_iterator *gsi)
+{
+ /* Same as stxncpy(). */
+ handle_builtin_stxncpy (bcode, gsi);
+}
+
/* Handle a memcpy-like ({mem{,p}cpy,__mem{,p}cpy_chk}) call.
If strlen of the second argument is known and length of the third argument
is that plus one, strlen of the first argument is the same after this
@@ -2515,6 +2891,19 @@ strlen_optimize_stmt (gimple_stmt_iterator *gsi)
case BUILT_IN_STPCPY_CHK_CHKP:
handle_builtin_strcpy (DECL_FUNCTION_CODE (callee), gsi);
break;
+
+ case BUILT_IN_STRNCAT:
+ case BUILT_IN_STRNCAT_CHK:
+ handle_builtin_strncat (DECL_FUNCTION_CODE (callee), gsi);
+ break;
+
+ case BUILT_IN_STPNCPY:
+ case BUILT_IN_STPNCPY_CHK:
+ case BUILT_IN_STRNCPY:
+ case BUILT_IN_STRNCPY_CHK:
+ handle_builtin_stxncpy (DECL_FUNCTION_CODE (callee), gsi);
+ break;
+
case BUILT_IN_MEMCPY:
case BUILT_IN_MEMCPY_CHK:
case BUILT_IN_MEMPCPY:
@@ -2578,6 +2967,10 @@ strlen_optimize_stmt (gimple_stmt_iterator *gsi)
else if (code == EQ_EXPR || code == NE_EXPR)
fold_strstr_to_strncmp (gimple_assign_rhs1 (stmt),
gimple_assign_rhs2 (stmt), stmt);
+
+ tree rhs1 = gimple_assign_rhs1 (stmt);
+ if (stridx_strlenloc *ps = strlen_to_stridx.get (rhs1))
+ strlen_to_stridx.put (lhs, stridx_strlenloc (*ps));
}
else if (TREE_CODE (lhs) != SSA_NAME && !TREE_SIDE_EFFECTS (lhs))
{
@@ -2829,6 +3222,8 @@ pass_strlen::execute (function *fun)
laststmt.len = NULL_TREE;
laststmt.stridx = 0;
+ strlen_to_stridx.empty ();
+
return 0;
}
diff --git a/gcc/tree-ssa-tail-merge.c b/gcc/tree-ssa-tail-merge.c
index 97e90233d58..e90283b9cc8 100644
--- a/gcc/tree-ssa-tail-merge.c
+++ b/gcc/tree-ssa-tail-merge.c
@@ -1556,52 +1556,33 @@ replace_block_by (basic_block bb1, basic_block bb2)
pred_edge, UNKNOWN_LOCATION);
}
- bb2->count += bb1->count;
- /* FIXME: Fix merging of probabilities. They need to be redistributed
- according to the relative counts of merged BBs. */
-#if 0
/* Merge the outgoing edge counts from bb1 onto bb2. */
- profile_count out_sum = profile_count::zero ();
- int out_freq_sum = 0;
edge e1, e2;
+ edge_iterator ei;
- /* Recompute the edge probabilities from the new merged edge count.
- Use the sum of the new merged edge counts computed above instead
- of bb2's merged count, in case there are profile count insanities
- making the bb count inconsistent with the edge weights. */
- FOR_EACH_EDGE (e1, ei, bb1->succs)
- {
- if (e1->count ().initialized_p ())
- out_sum += e1->count ();
- out_freq_sum += EDGE_FREQUENCY (e1);
- }
- FOR_EACH_EDGE (e1, ei, bb2->succs)
- {
- if (e1->count ().initialized_p ())
- out_sum += e1->count ();
- out_freq_sum += EDGE_FREQUENCY (e1);
- }
- FOR_EACH_EDGE (e1, ei, bb1->succs)
- {
- e2 = find_edge (bb2, e1->dest);
- gcc_assert (e2);
- if (out_sum > 0 && e2->count ().initialized_p ())
- {
- e2->probability = e2->count ().probability_in (bb2->count);
- }
- else if (bb1->count.to_frequency (cfun) && bb2->count.to_frequency (cfun))
- e2->probability = e1->probability;
- else if (bb2->count.to_frequency (cfun) && !bb1->count.to_frequency (cfun))
- ;
- else if (out_freq_sum)
- e2->probability = profile_probability::from_reg_br_prob_base
- (GCOV_COMPUTE_SCALE (EDGE_FREQUENCY (e1)
- + EDGE_FREQUENCY (e2),
- out_freq_sum));
- out_sum += e2->count ();
- }
-#endif
+ if (bb2->count.initialized_p ())
+ FOR_EACH_EDGE (e1, ei, bb1->succs)
+ {
+ e2 = find_edge (bb2, e1->dest);
+ gcc_assert (e2);
+
+ /* If probabilities are same, we are done.
+ If counts are nonzero we can distribute accordingly. In remaining
+ cases just avreage the values and hope for the best. */
+ if (e1->probability == e2->probability)
+ ;
+ else if (bb1->count.nonzero_p () || bb2->count.nonzero_p ())
+ e2->probability
+ = e2->probability
+ * bb2->count.probability_in (bb1->count + bb2->count)
+ + e1->probability
+ * bb1->count.probability_in (bb1->count + bb2->count);
+ else
+ e2->probability = e2->probability * profile_probability::even ()
+ + e1->probability * profile_probability::even ();
+ }
+ bb2->count += bb1->count;
/* Move over any user labels from bb1 after the bb2 labels. */
gimple_stmt_iterator gsi1 = gsi_start_bb (bb1);
diff --git a/gcc/tree-ssa-threadupdate.c b/gcc/tree-ssa-threadupdate.c
index 1dab0f1fab4..045905eceb7 100644
--- a/gcc/tree-ssa-threadupdate.c
+++ b/gcc/tree-ssa-threadupdate.c
@@ -691,8 +691,7 @@ static bool
compute_path_counts (struct redirection_data *rd,
ssa_local_info_t *local_info,
profile_count *path_in_count_ptr,
- profile_count *path_out_count_ptr,
- int *path_in_freq_ptr)
+ profile_count *path_out_count_ptr)
{
edge e = rd->incoming_edges->e;
vec<jump_thread_edge *> *path = THREAD_PATH (e);
@@ -700,7 +699,6 @@ compute_path_counts (struct redirection_data *rd,
profile_count nonpath_count = profile_count::zero ();
bool has_joiner = false;
profile_count path_in_count = profile_count::zero ();
- int path_in_freq = 0;
/* Start by accumulating incoming edge counts to the path's first bb
into a couple buckets:
@@ -740,7 +738,6 @@ compute_path_counts (struct redirection_data *rd,
source block. */
gcc_assert (ein_path->last ()->e == elast);
path_in_count += ein->count ();
- path_in_freq += EDGE_FREQUENCY (ein);
}
else if (!ein_path)
{
@@ -751,10 +748,6 @@ compute_path_counts (struct redirection_data *rd,
}
}
- /* This is needed due to insane incoming frequencies. */
- if (path_in_freq > BB_FREQ_MAX)
- path_in_freq = BB_FREQ_MAX;
-
/* Now compute the fraction of the total count coming into the first
path bb that is from the current threading path. */
profile_count total_count = e->dest->count;
@@ -843,7 +836,6 @@ compute_path_counts (struct redirection_data *rd,
*path_in_count_ptr = path_in_count;
*path_out_count_ptr = path_out_count;
- *path_in_freq_ptr = path_in_freq;
return has_joiner;
}
@@ -954,7 +946,6 @@ ssa_fix_duplicate_block_edges (struct redirection_data *rd,
edge elast = path->last ()->e;
profile_count path_in_count = profile_count::zero ();
profile_count path_out_count = profile_count::zero ();
- int path_in_freq = 0;
/* First determine how much profile count to move from original
path to the duplicate path. This is tricky in the presence of
@@ -963,8 +954,7 @@ ssa_fix_duplicate_block_edges (struct redirection_data *rd,
non-joiner case the path_in_count and path_out_count should be the
same. */
bool has_joiner = compute_path_counts (rd, local_info,
- &path_in_count, &path_out_count,
- &path_in_freq);
+ &path_in_count, &path_out_count);
for (unsigned int count = 0, i = 1; i < path->length (); i++)
{
@@ -2184,7 +2174,6 @@ thread_through_all_blocks (bool may_peel_loop_headers)
{
bool retval = false;
unsigned int i;
- bitmap_iterator bi;
struct loop *loop;
auto_bitmap threaded_blocks;
@@ -2288,14 +2277,33 @@ thread_through_all_blocks (bool may_peel_loop_headers)
initialize_original_copy_tables ();
- /* First perform the threading requests that do not affect
- loop structure. */
- EXECUTE_IF_SET_IN_BITMAP (threaded_blocks, 0, i, bi)
- {
- basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
+ /* The order in which we process jump threads can be important.
+
+ Consider if we have two jump threading paths A and B. If the
+ target edge of A is the starting edge of B and we thread path A
+ first, then we create an additional incoming edge into B->dest that
+ we can not discover as a jump threading path on this iteration.
+
+ If we instead thread B first, then the edge into B->dest will have
+ already been redirected before we process path A and path A will
+ natually, with no further work, target the redirected path for B.
- if (EDGE_COUNT (bb->preds) > 0)
- retval |= thread_block (bb, true);
+ An post-order is sufficient here. Compute the ordering first, then
+ process the blocks. */
+ if (!bitmap_empty_p (threaded_blocks))
+ {
+ int *postorder = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
+ unsigned int postorder_num = post_order_compute (postorder, false, false);
+ for (unsigned int i = 0; i < postorder_num; i++)
+ {
+ unsigned int indx = postorder[i];
+ if (bitmap_bit_p (threaded_blocks, indx))
+ {
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, indx);
+ retval |= thread_block (bb, true);
+ }
+ }
+ free (postorder);
}
/* Then perform the threading through loop headers. We start with the
diff --git a/gcc/tree-vect-data-refs.c b/gcc/tree-vect-data-refs.c
index 446a3917238..694cec6b0e6 100644
--- a/gcc/tree-vect-data-refs.c
+++ b/gcc/tree-vect-data-refs.c
@@ -169,11 +169,10 @@ vect_mark_for_runtime_alias_test (ddr_p ddr, loop_vec_info loop_vinfo)
return true;
}
-/* If VALUE is not an integer, record that loop LOOP_VINFO needs to
- check that VALUE is nonzero. */
+/* Record that loop LOOP_VINFO needs to check that VALUE is nonzero. */
static void
-check_nonzero_value (loop_vec_info loop_vinfo, tree value)
+vect_check_nonzero_value (loop_vec_info loop_vinfo, tree value)
{
vec<tree> checks = LOOP_VINFO_CHECK_NONZERO (loop_vinfo);
for (unsigned int i = 0; i < checks.length(); ++i)
@@ -199,16 +198,12 @@ vect_preserves_scalar_order_p (gimple *stmt_a, gimple *stmt_b)
stmt_vec_info stmtinfo_a = vinfo_for_stmt (stmt_a);
stmt_vec_info stmtinfo_b = vinfo_for_stmt (stmt_b);
- /* Check whether all statements grouped with STMT_A come after
- all statements grouped with STMT_B. In this case vectorized
- STMT_A will come after vectorized STMT_B. */
+ /* Check whether the groups that contain the statements overlap. */
if (vect_group_first_uid (stmtinfo_a)
- >= vect_group_last_uid (stmtinfo_b))
+ > vect_group_last_uid (stmtinfo_b))
return true;
-
- /* Likewise with the roles reversed. */
if (vect_group_first_uid (stmtinfo_b)
- >= vect_group_last_uid (stmtinfo_a))
+ > vect_group_last_uid (stmtinfo_a))
return true;
/* STMT_A and STMT_B belong to overlapping groups. All loads in a
@@ -478,7 +473,7 @@ vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
{
tree indicator = dr_zero_step_indicator (dra);
if (TREE_CODE (indicator) != INTEGER_CST)
- check_nonzero_value (loop_vinfo, indicator);
+ vect_check_nonzero_value (loop_vinfo, indicator);
else if (integer_zerop (indicator))
{
if (dump_enabled_p ())
@@ -2643,7 +2638,7 @@ vect_analyze_group_access_1 (struct data_reference *dr)
groupsize = 0;
/* Not consecutive access is possible only if it is a part of interleaving. */
- if (!GROUP_FIRST_ELEMENT (stmt_info))
+ if (!GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
{
/* Check if it this DR is a part of interleaving, and is a single
element of the group that is accessed in the loop. */
@@ -2654,9 +2649,8 @@ vect_analyze_group_access_1 (struct data_reference *dr)
&& (dr_step % type_size) == 0
&& groupsize > 0)
{
- GROUP_FIRST_ELEMENT (stmt_info) = stmt;
- GROUP_SIZE (stmt_info) = groupsize;
- GROUP_NUM_STMTS (stmt_info) = 1;
+ GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = stmt;
+ GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
GROUP_GAP (stmt_info) = groupsize - 1;
if (dump_enabled_p ())
{
@@ -2690,10 +2684,10 @@ vect_analyze_group_access_1 (struct data_reference *dr)
return true;
}
- if (GROUP_FIRST_ELEMENT (stmt_info) == stmt)
+ if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt)
{
/* First stmt in the interleaving chain. Check the chain. */
- gimple *next = GROUP_NEXT_ELEMENT (stmt_info);
+ gimple *next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
struct data_reference *data_ref = dr;
unsigned int count = 1;
tree prev_init = DR_INIT (data_ref);
@@ -2750,8 +2744,13 @@ vect_analyze_group_access_1 (struct data_reference *dr)
{
/* FORNOW: SLP of accesses with gaps is not supported. */
slp_impossible = true;
- /* By construction, there can be no gap here for stores. */
- gcc_assert (DR_IS_READ (data_ref));
+ if (DR_IS_WRITE (data_ref))
+ {
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "interleaved store with gaps\n");
+ return false;
+ }
gaps += diff - 1;
}
@@ -2783,16 +2782,22 @@ vect_analyze_group_access_1 (struct data_reference *dr)
/* Check that the size of the interleaving is equal to count for stores,
i.e., that there are no gaps. */
- if (groupsize != count && !DR_IS_READ (dr))
- slp_impossible = true;
+ if (groupsize != count
+ && !DR_IS_READ (dr))
+ {
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "interleaved store with gaps\n");
+ return false;
+ }
/* If there is a gap after the last load in the group it is the
difference between the groupsize and the last accessed
element.
When there is no gap, this difference should be 0. */
- GROUP_GAP (stmt_info) = groupsize - last_accessed_element;
- GROUP_NUM_STMTS (stmt_info) = count;
- GROUP_SIZE (stmt_info) = groupsize;
+ GROUP_GAP (vinfo_for_stmt (stmt)) = groupsize - last_accessed_element;
+
+ GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
@@ -2804,10 +2809,10 @@ vect_analyze_group_access_1 (struct data_reference *dr)
dump_printf (MSG_NOTE, "of size %u starting with ",
(unsigned)groupsize);
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
- if (GROUP_GAP (stmt_info) != 0)
+ if (GROUP_GAP (vinfo_for_stmt (stmt)) != 0)
dump_printf_loc (MSG_NOTE, vect_location,
"There is a gap of %u elements after the group\n",
- GROUP_GAP (stmt_info));
+ GROUP_GAP (vinfo_for_stmt (stmt)));
}
/* SLP: create an SLP data structure for every interleaving group of
@@ -2864,6 +2869,9 @@ vect_analyze_data_ref_access (struct data_reference *dr)
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
struct loop *loop = NULL;
+ if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
+ return true;
+
if (loop_vinfo)
loop = LOOP_VINFO_LOOP (loop_vinfo);
@@ -3264,8 +3272,11 @@ vect_vfa_access_size (data_reference *dr)
if (STMT_VINFO_VEC_STMT (stmt_vinfo)
&& (vect_supportable_dr_alignment (dr, false)
== dr_explicit_realign_optimized))
- /* We might access a full vector's worth. */
- access_size += tree_to_uhwi (STMT_VINFO_VECTYPE (stmt_vinfo)) - ref_size;
+ {
+ /* We might access a full vector's worth. */
+ tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
+ access_size += tree_to_uhwi (TYPE_SIZE_UNIT (vectype)) - ref_size;
+ }
return access_size;
}
@@ -3315,8 +3326,8 @@ vect_compile_time_alias (struct data_reference *a, struct data_reference *b,
else
const_length_b = tree_to_poly_uint64 (segment_length_b);
- segment_length_a += access_size_a;
- segment_length_b += access_size_b;
+ const_length_a += access_size_a;
+ const_length_b += access_size_b;
if (ranges_must_overlap_p (offset_a, const_length_a,
offset_b, const_length_b))
@@ -3382,8 +3393,8 @@ dump_lower_bound (int dump_kind, const vec_lower_bound &lower_bound)
by EXPR, UNSIGNED_P and MIN_VALUE. */
static void
-check_lower_bound (loop_vec_info loop_vinfo, tree expr, bool unsigned_p,
- poly_uint64 min_value)
+vect_check_lower_bound (loop_vec_info loop_vinfo, tree expr, bool unsigned_p,
+ poly_uint64 min_value)
{
vec<vec_lower_bound> lower_bounds = LOOP_VINFO_LOWER_BOUNDS (loop_vinfo);
for (unsigned int i = 0; i < lower_bounds.length (); ++i)
@@ -3463,7 +3474,8 @@ vectorizable_with_step_bound_p (data_reference *dr_a, data_reference *dr_b,
&& !vect_preserves_scalar_order_p (DR_STMT (dr_a), DR_STMT (dr_b)))
return false;
- /* There is no alias if abs (DR_STEP) >= GAP. */
+ /* There is no alias if abs (DR_STEP) is greater than or equal to
+ the bytes spanned by the combination of the two accesses. */
*lower_bound_out = init_b + vect_get_scalar_dr_size (dr_b) - init_a;
return true;
}
@@ -3507,7 +3519,7 @@ vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
/* Convert the checks for nonzero steps into bound tests. */
tree value;
FOR_EACH_VEC_ELT (LOOP_VINFO_CHECK_NONZERO (loop_vinfo), i, value)
- check_lower_bound (loop_vinfo, value, true, 1);
+ vect_check_lower_bound (loop_vinfo, value, true, 1);
}
if (may_alias_ddrs.is_empty ())
@@ -3609,8 +3621,8 @@ vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
dump_dec (MSG_NOTE, lower_bound);
dump_printf (MSG_NOTE, ")\n");
}
- check_lower_bound (loop_vinfo, DR_STEP (dr_a), unsigned_p,
- lower_bound);
+ vect_check_lower_bound (loop_vinfo, DR_STEP (dr_a), unsigned_p,
+ lower_bound);
continue;
}
@@ -3733,14 +3745,80 @@ vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
return true;
}
-/* Return true if a non-affine read or write in STMT is suitable for a
- gather load or scatter store. Describe the operation in *INFO if so.
+/* Check whether we can use an internal function for a gather load
+ or scatter store. READ_P is true for loads and false for stores.
+ MASKED_P is true if the load or store is conditional. MEMORY_TYPE is
+ the type of the memory elements being loaded or stored. OFFSET_BITS
+ is the number of bits in each scalar offset and OFFSET_SIGN is the
+ sign of the offset. SCALE is the amount by which the offset should
+ be multiplied *after* it has been converted to address width.
- MASKED_P says whether the load or store must be masked. */
+ Return true if the function is supported, storing the function
+ id in *IFN_OUT and the type of a vector element in *ELEMENT_TYPE_OUT. */
+
+bool
+vect_gather_scatter_fn_p (bool read_p, bool masked_p, tree vectype,
+ tree memory_type, unsigned int offset_bits,
+ signop offset_sign, int scale,
+ internal_fn *ifn_out, tree *element_type_out)
+{
+ unsigned int memory_bits = tree_to_uhwi (TYPE_SIZE (memory_type));
+ unsigned int element_bits = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (vectype)));
+ if (offset_bits > element_bits)
+ /* Internal functions require the offset to be the same width as
+ the vector elements. We can extend narrower offsets, but it isn't
+ safe to truncate wider offsets. */
+ return false;
+
+ if (element_bits != memory_bits)
+ /* For now the vector elements must be the same width as the
+ memory elements. */
+ return false;
+
+ /* Work out which function we need. */
+ internal_fn ifn;
+ if (read_p)
+ ifn = masked_p ? IFN_MASK_GATHER_LOAD : IFN_GATHER_LOAD;
+ else
+ ifn = masked_p ? IFN_MASK_SCATTER_STORE : IFN_SCATTER_STORE;
+
+ /* Test whether the target supports this combination. */
+ if (!internal_gather_scatter_fn_supported_p (ifn, vectype, memory_type,
+ offset_sign, scale))
+ return false;
+
+ *ifn_out = ifn;
+ *element_type_out = TREE_TYPE (vectype);
+ return true;
+}
+
+/* CALL is a call to an internal gather load or scatter store function.
+ Describe the operation in INFO. */
+
+static void
+vect_describe_gather_scatter_call (gcall *call, gather_scatter_info *info)
+{
+ stmt_vec_info stmt_info = vinfo_for_stmt (call);
+ tree vectype = STMT_VINFO_VECTYPE (stmt_info);
+ data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
+
+ info->ifn = gimple_call_internal_fn (call);
+ info->decl = NULL_TREE;
+ info->base = gimple_call_arg (call, 0);
+ info->offset = gimple_call_arg (call, 1);
+ info->offset_dt = vect_unknown_def_type;
+ info->offset_vectype = NULL_TREE;
+ info->scale = TREE_INT_CST_LOW (gimple_call_arg (call, 2));
+ info->element_type = TREE_TYPE (vectype);
+ info->memory_type = TREE_TYPE (DR_REF (dr));
+}
+
+/* Return true if a non-affine read or write in STMT is suitable for a
+ gather load or scatter store. Describe the operation in *INFO if so. */
bool
vect_check_gather_scatter (gimple *stmt, loop_vec_info loop_vinfo,
- gather_scatter_info *info, bool masked_p)
+ gather_scatter_info *info)
{
HOST_WIDE_INT scale = 1;
poly_int64 pbitpos, pbitsize;
@@ -3748,18 +3826,39 @@ vect_check_gather_scatter (gimple *stmt, loop_vec_info loop_vinfo,
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
tree offtype = NULL_TREE;
- tree decl = NULL_TREE;
- tree base, off;
+ tree decl = NULL_TREE, base, off;
+ tree vectype = STMT_VINFO_VECTYPE (stmt_info);
+ tree memory_type = TREE_TYPE (DR_REF (dr));
machine_mode pmode;
int punsignedp, reversep, pvolatilep = 0;
+ internal_fn ifn;
+ tree element_type;
+ bool masked_p = false;
+
+ /* See whether this is already a call to a gather/scatter internal function.
+ If not, see whether it's a masked load or store. */
+ gcall *call = dyn_cast <gcall *> (stmt);
+ if (call && gimple_call_internal_p (call))
+ {
+ ifn = gimple_call_internal_fn (stmt);
+ if (internal_gather_scatter_fn_p (ifn))
+ {
+ vect_describe_gather_scatter_call (call, info);
+ return true;
+ }
+ masked_p = (ifn == IFN_MASK_LOAD || ifn == IFN_MASK_STORE);
+ }
+
+ /* True if we should aim to use internal functions rather than
+ built-in functions. */
+ bool use_ifn_p = (DR_IS_READ (dr)
+ ? supports_vec_gather_load_p ()
+ : supports_vec_scatter_store_p ());
base = DR_REF (dr);
/* For masked loads/stores, DR_REF (dr) is an artificial MEM_REF,
see if we can use the def stmt of the address. */
- if (is_gimple_call (stmt)
- && gimple_call_internal_p (stmt)
- && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
- || gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
+ if (masked_p
&& TREE_CODE (base) == MEM_REF
&& TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME
&& integer_zerop (TREE_OPERAND (base, 1))
@@ -3890,7 +3989,17 @@ vect_check_gather_scatter (gimple *stmt, loop_vec_info loop_vinfo,
case MULT_EXPR:
if (scale == 1 && tree_fits_shwi_p (op1))
{
- scale = tree_to_shwi (op1);
+ int new_scale = tree_to_shwi (op1);
+ /* Only treat this as a scaling operation if the target
+ supports it. */
+ if (use_ifn_p
+ && !vect_gather_scatter_fn_p (DR_IS_READ (dr), masked_p,
+ vectype, memory_type, 1,
+ TYPE_SIGN (TREE_TYPE (op0)),
+ new_scale, &ifn,
+ &element_type))
+ break;
+ scale = new_scale;
off = op0;
continue;
}
@@ -3908,6 +4017,15 @@ vect_check_gather_scatter (gimple *stmt, loop_vec_info loop_vinfo,
off = op0;
continue;
}
+
+ /* The internal functions need the offset to be the same width
+ as the elements of VECTYPE. Don't include operations that
+ cast the offset from that width to a different width. */
+ if (use_ifn_p
+ && (int_size_in_bytes (TREE_TYPE (vectype))
+ == int_size_in_bytes (TREE_TYPE (off))))
+ break;
+
if (TYPE_PRECISION (TREE_TYPE (op0))
< TYPE_PRECISION (TREE_TYPE (off)))
{
@@ -3932,53 +4050,43 @@ vect_check_gather_scatter (gimple *stmt, loop_vec_info loop_vinfo,
if (offtype == NULL_TREE)
offtype = TREE_TYPE (off);
- tree vectype = STMT_VINFO_VECTYPE (stmt_info);
- if (DR_IS_READ (dr)
- ? targetm.vectorize.builtin_gather
- : targetm.vectorize.builtin_scatter)
+ if (use_ifn_p)
{
- if (DR_IS_READ (dr))
- decl = targetm.vectorize.builtin_gather (vectype, offtype, scale);
- else
- decl = targetm.vectorize.builtin_scatter (vectype, offtype, scale);
-
- if (!decl)
+ if (!vect_gather_scatter_fn_p (DR_IS_READ (dr), masked_p, vectype,
+ memory_type, TYPE_PRECISION (offtype),
+ TYPE_SIGN (offtype), scale, &ifn,
+ &element_type))
return false;
}
else
{
- machine_mode vecmode = TYPE_MODE (vectype);
- bool off_unsigned = TYPE_UNSIGNED (offtype);
-
- /* The offset will eventually need to be converted into the same mode as
- the vector mode before it can be used in the gather/scatter. */
-
- scalar_int_mode offmode;
- if (!int_mode_for_mode (GET_MODE_INNER (vecmode)).exists (&offmode))
- return false;
+ if (DR_IS_READ (dr))
+ {
+ if (targetm.vectorize.builtin_gather)
+ decl = targetm.vectorize.builtin_gather (vectype, offtype, scale);
+ }
+ else
+ {
+ if (targetm.vectorize.builtin_scatter)
+ decl = targetm.vectorize.builtin_scatter (vectype, offtype, scale);
+ }
- unsigned int offmode_bitsize = GET_MODE_BITSIZE (offmode);
- if (!targetm.gather_scatter_supports_scale_p
- (DR_IS_READ (dr), offmode_bitsize, scale))
+ if (!decl)
return false;
- offtype = (off_unsigned
- ? make_unsigned_type (offmode_bitsize)
- : make_signed_type (offmode_bitsize));
-
- if (get_gather_scatter_internal_fn (DR_IS_READ (dr), vectype,
- offtype, masked_p) == IFN_LAST)
- return false;
+ ifn = IFN_LAST;
+ element_type = TREE_TYPE (vectype);
}
+ info->ifn = ifn;
info->decl = decl;
info->base = base;
- info->u.offset = off;
- info->offset_type = TREE_TYPE (off);
- info->widened_offset_type = offtype;
+ info->offset = off;
info->offset_dt = vect_unknown_def_type;
info->offset_vectype = NULL_TREE;
info->scale = scale;
+ info->element_type = element_type;
+ info->memory_type = memory_type;
return true;
}
@@ -4060,7 +4168,7 @@ again:
= DR_IS_READ (dr)
&& !TREE_THIS_VOLATILE (DR_REF (dr))
&& (targetm.vectorize.builtin_gather != NULL
- || supports_vec_gather_load_p ());
+ || supports_vec_gather_load_p ());
bool maybe_scatter
= DR_IS_WRITE (dr)
&& !TREE_THIS_VOLATILE (DR_REF (dr))
@@ -4415,8 +4523,8 @@ again:
{
gather_scatter_info gs_info;
if (!vect_check_gather_scatter (stmt, as_a <loop_vec_info> (vinfo),
- &gs_info, false)
- || !get_vectype_for_scalar_type (TREE_TYPE (gs_info.u.offset)))
+ &gs_info)
+ || !get_vectype_for_scalar_type (TREE_TYPE (gs_info.offset)))
{
STMT_VINFO_DATA_REF (stmt_info) = NULL;
free_data_ref (dr);
@@ -4839,27 +4947,6 @@ vect_create_addr_base_for_vector_ref (gimple *stmt,
return entry->final_addr;
}
-/* Return a tree that represents STEP multiplied by the vectorization
- factor. */
-
-static tree
-vect_mult_by_vf (loop_vec_info loop_vinfo, tree step)
-{
- hash_map<tree, tree> *map = &LOOP_VINFO_VF_MULT_MAP (loop_vinfo);
- bool existed;
- tree &entry = map->get_or_insert (step, &existed);
- if (!existed)
- {
- gimple_seq seq = NULL;
- tree vf = LOOP_VINFO_CAP (loop_vinfo).niters;
- vf = gimple_convert (&seq, TREE_TYPE (step), vf);
- entry = gimple_build (&seq, MULT_EXPR, TREE_TYPE (step), vf, step);
- edge pe = loop_preheader_edge (LOOP_VINFO_LOOP (loop_vinfo));
- gsi_insert_seq_on_edge_immediate (pe, seq);
- }
- return entry;
-}
-
/* Function vect_create_data_ref_ptr.
Create a new pointer-to-AGGR_TYPE variable (ap), that points to the first
@@ -4869,40 +4956,26 @@ vect_mult_by_vf (loop_vec_info loop_vinfo, tree step)
the callers to this function to create a memory reference expression for
vector load/store access.
- Some loops operate on more than one consecutive instance of AGGR_TYPE.
- There are two approaches to updating ap in this case. The usual one
- is to advance ap by only one AGGR_TYPE at first and leave callers
- to use bump_vector_ptr both to access other AGGR_TYPEs and to adjust
- the increment of ap. See the comment above bump_vector_ptr for details.
- The advantage of this approach is that only one pointer is live at once.
- ivopts can later introduce multiple pointers if that's more efficient,
- but we don't have enough information to make that decision during
- vectorization.
-
- However, when using a runtime-capped VF, the increment of ap
- depends on the runtime VF and we can't easily defer parts of it to
- bump_vector_ptr. In this case we emit the full increment now.
- The GROUP_SIZE parameter exists for this case.
-
Input:
1. STMT: a stmt that references memory. Expected to be of the form
GIMPLE_ASSIGN <name, data-ref> or
GIMPLE_ASSIGN <data-ref, name>.
2. AGGR_TYPE: the type of the reference, which should be either a vector
or an array.
- 3. GROUP_SIZE: how many consecutive vectors the loop accesses.
- The loop should advance ap by GROUP_SIZE * VF elements of
- type TREE_TYPE (AGGR_TYPE).
- 4. AT_LOOP: the loop where the vector memref is to be created.
- 5. OFFSET (optional): an offset to be added to the initial address accessed
+ 3. AT_LOOP: the loop where the vector memref is to be created.
+ 4. OFFSET (optional): an offset to be added to the initial address accessed
by the data-ref in STMT.
- 6. BSI: location where the new stmts are to be placed if there is no loop
- 7. ONLY_INIT: indicate if ap is to be updated in the loop, or remain
+ 5. BSI: location where the new stmts are to be placed if there is no loop
+ 6. ONLY_INIT: indicate if ap is to be updated in the loop, or remain
pointing to the initial address.
- 8. BYTE_OFFSET (optional, defaults to NULL): a byte offset to be added
+ 7. BYTE_OFFSET (optional, defaults to NULL): a byte offset to be added
to the initial address accessed by the data-ref in STMT. This is
similar to OFFSET, but OFFSET is counted in elements, while BYTE_OFFSET
in bytes.
+ 8. IV_STEP (optional, defaults to NULL): the amount that should be added
+ to the IV during each iteration of the loop. NULL says to move
+ by one copy of AGGR_TYPE up or down, depending on the step of the
+ data reference.
Output:
1. Declare a new ptr to vector_type, and have it point to the base of the
@@ -4932,11 +5005,11 @@ vect_mult_by_vf (loop_vec_info loop_vinfo, tree step)
4. Return the pointer. */
tree
-vect_create_data_ref_ptr (gimple *stmt, tree aggr_type,
- unsigned int group_size, struct loop *at_loop,
+vect_create_data_ref_ptr (gimple *stmt, tree aggr_type, struct loop *at_loop,
tree offset, tree *initial_address,
gimple_stmt_iterator *gsi, gimple **ptr_incr,
- bool only_init, bool *inv_p, tree byte_offset)
+ bool only_init, bool *inv_p, tree byte_offset,
+ tree iv_step)
{
const char *base_name;
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
@@ -4960,9 +5033,9 @@ vect_create_data_ref_ptr (gimple *stmt, tree aggr_type,
tree step;
bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
- gcc_assert (TREE_CODE (aggr_type) == ARRAY_TYPE
+ gcc_assert (iv_step != NULL_TREE
+ || TREE_CODE (aggr_type) == ARRAY_TYPE
|| TREE_CODE (aggr_type) == VECTOR_TYPE);
- gcc_assert (only_init || !loop_vinfo || group_size != 0);
if (loop_vinfo)
{
@@ -5102,42 +5175,29 @@ vect_create_data_ref_ptr (gimple *stmt, tree aggr_type,
aptr = aggr_ptr_init;
else
{
- standard_iv_increment_position (loop, &incr_gsi, &insert_after);
-
- tree iv_step;
- if (LOOP_VINFO_FIRSTFAULTING_EXECUTION (loop_vinfo))
+ if (iv_step == NULL_TREE)
{
- /* The step is the first faulting iter multiplied by the type
- size. */
- gimple_seq seq = NULL;
- iv_step = gimple_build (&seq, MULT_EXPR, sizetype,
- LOOP_VINFO_NONFAULTING (loop_vinfo).niters,
- TYPE_SIZE_UNIT (TREE_TYPE (aggr_type)));
-
- gsi_insert_seq_before (&incr_gsi, seq, GSI_SAME_STMT);
- }
- else if (*inv_p)
- iv_step = size_zero_node;
- else
- {
- if (use_capped_vf (loop_vinfo))
- {
- gcc_assert (group_size != 0);
- tree elt_type = TREE_TYPE (DR_REF (dr));
- iv_step = size_binop (MULT_EXPR, TYPE_SIZE_UNIT (elt_type),
- size_int (group_size));
- iv_step = vect_mult_by_vf (loop_vinfo, iv_step);
- }
- else
- /* The step of the aggregate pointer is the type size. */
- iv_step = TYPE_SIZE_UNIT (aggr_type);
- if (tree_int_cst_sgn (step) == -1)
- iv_step = fold_build1 (NEGATE_EXPR, TREE_TYPE (iv_step),
- iv_step);
+ /* The caller must provide an IV_STEP for capped VF and
+ first-faulting loads. */
+ gcc_assert (!use_capped_vf (loop_vinfo)
+ && !LOOP_VINFO_FIRSTFAULTING_EXECUTION (loop_vinfo));
+
+ /* The step of the aggregate pointer is the type size. */
+ iv_step = TYPE_SIZE_UNIT (aggr_type);
+ /* One exception to the above is when the scalar step of the load in
+ LOOP is zero. In this case the step here is also zero. */
+ if (*inv_p)
+ iv_step = size_zero_node;
+ else if (tree_int_cst_sgn (step) == -1)
+ iv_step = fold_build1 (NEGATE_EXPR, TREE_TYPE (iv_step), iv_step);
}
- create_iv (aggr_ptr_init, iv_step, aggr_ptr, loop, &incr_gsi,
- insert_after, &indx_before_incr, &indx_after_incr);
+ standard_iv_increment_position (loop, &incr_gsi, &insert_after);
+
+ create_iv (aggr_ptr_init,
+ fold_convert (aggr_ptr_type, iv_step),
+ aggr_ptr, loop, &incr_gsi, insert_after,
+ &indx_before_incr, &indx_after_incr);
incr = gsi_stmt (incr_gsi);
set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));
@@ -5263,7 +5323,7 @@ bump_vector_ptr (tree dataref_ptr, gimple *ptr_incr, gimple_stmt_iterator *gsi,
if (use == dataref_ptr)
SET_USE (use_p, new_dataref_ptr);
else
- gcc_assert (tree_int_cst_compare (use, update) == 0);
+ gcc_assert (operand_equal_p (use, update, 0));
}
return new_dataref_ptr;
@@ -5290,15 +5350,18 @@ vect_create_destination_var (tree scalar_dest, tree vectype)
: vect_scalar_var;
type = vectype ? vectype : TREE_TYPE (scalar_dest);
- gcc_assert (TREE_CODE (scalar_dest) == SSA_NAME);
-
- name = get_name (scalar_dest);
- if (name)
- new_name = xasprintf ("%s_%u", name, SSA_NAME_VERSION (scalar_dest));
+ if (TREE_CODE (scalar_dest) == SSA_NAME)
+ {
+ name = get_name (scalar_dest);
+ if (name)
+ new_name = xasprintf ("%s_%u", name, SSA_NAME_VERSION (scalar_dest));
+ else
+ new_name = xasprintf ("_%u", SSA_NAME_VERSION (scalar_dest));
+ vec_dest = vect_get_new_vect_var (type, kind, new_name);
+ free (new_name);
+ }
else
- new_name = xasprintf ("_%u", SSA_NAME_VERSION (scalar_dest));
- vec_dest = vect_get_new_vect_var (type, kind, new_name);
- free (new_name);
+ vec_dest = vect_get_new_vect_var (type, kind, NULL);
return vec_dest;
}
@@ -5327,9 +5390,9 @@ vect_grouped_store_supported (tree vectype, unsigned HOST_WIDE_INT count)
/* Powers of 2 use a tree of interleaving operations. See whether the
target supports them directly. */
if (count != 3
- && direct_internal_fn_supported_p (IFN_VEC_INTERLEAVE_HI, vectype,
- OPTIMIZE_FOR_SPEED)
&& direct_internal_fn_supported_p (IFN_VEC_INTERLEAVE_LO, vectype,
+ OPTIMIZE_FOR_SPEED)
+ && direct_internal_fn_supported_p (IFN_VEC_INTERLEAVE_HI, vectype,
OPTIMIZE_FOR_SPEED))
return true;
@@ -5415,7 +5478,7 @@ vect_grouped_store_supported (tree vectype, unsigned HOST_WIDE_INT count)
/* Return TRUE if vec_{mask_}store_lanes is available for COUNT vectors of
- type VECTYPE. MASKED_P says whether masked form is needed. */
+ type VECTYPE. MASKED_P says whether the masked form is needed. */
bool
vect_store_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count,
@@ -5578,15 +5641,15 @@ vect_permute_store_chain (vec<tree> dr_chain,
/* If length is not equal to 3 then only power of 2 is supported. */
gcc_assert (pow2p_hwi (length));
- if (direct_internal_fn_supported_p (IFN_VEC_INTERLEAVE_HI, vectype,
+ if (direct_internal_fn_supported_p (IFN_VEC_INTERLEAVE_LO, vectype,
OPTIMIZE_FOR_SPEED)
- && direct_internal_fn_supported_p (IFN_VEC_INTERLEAVE_LO, vectype,
+ && direct_internal_fn_supported_p (IFN_VEC_INTERLEAVE_HI, vectype,
OPTIMIZE_FOR_SPEED))
{
/* We could support the case where only one of the optabs is
implemented, but that seems unlikely. */
- perm_mask_high = NULL_TREE;
perm_mask_low = NULL_TREE;
+ perm_mask_high = NULL_TREE;
}
else
{
@@ -5599,11 +5662,11 @@ vect_permute_store_chain (vec<tree> dr_chain,
sel[i * 2] = i;
sel[i * 2 + 1] = i + nelt;
}
- perm_mask_high = vect_gen_perm_mask_checked (vectype, sel);
+ perm_mask_low = vect_gen_perm_mask_checked (vectype, sel);
for (i = 0; i < nelt; i++)
sel[i] += nelt / 2;
- perm_mask_low = vect_gen_perm_mask_checked (vectype, sel);
+ perm_mask_high = vect_gen_perm_mask_checked (vectype, sel);
}
for (i = 0, n = log_length; i < n; i++)
@@ -5616,36 +5679,36 @@ vect_permute_store_chain (vec<tree> dr_chain,
/* Create interleaving stmt:
high = VEC_PERM_EXPR <vect1, vect2,
{0, nelt, 1, nelt + 1, ...}> */
- high = make_temp_ssa_name (vectype, NULL, "vect_inter_high");
- if (perm_mask_high)
- perm_stmt = gimple_build_assign (high, VEC_PERM_EXPR, vect1,
- vect2, perm_mask_high);
+ low = make_temp_ssa_name (vectype, NULL, "vect_inter_low");
+ if (perm_mask_low)
+ perm_stmt = gimple_build_assign (low, VEC_PERM_EXPR, vect1,
+ vect2, perm_mask_low);
else
{
perm_stmt = gimple_build_call_internal
- (IFN_VEC_INTERLEAVE_HI, 2, vect1, vect2);
- gimple_set_lhs (perm_stmt, high);
+ (IFN_VEC_INTERLEAVE_LO, 2, vect1, vect2);
+ gimple_set_lhs (perm_stmt, low);
}
vect_finish_stmt_generation (stmt, perm_stmt, gsi);
- (*result_chain)[2 * j] = high;
+ (*result_chain)[2 * j] = low;
/* Create interleaving stmt:
- low = VEC_PERM_EXPR <vect1, vect2,
+ high = VEC_PERM_EXPR <vect1, vect2,
{nelt / 2, nelt * 3 / 2,
nelt / 2 + 1, nelt * 3 / 2 + 1,
...}> */
- low = make_temp_ssa_name (vectype, NULL, "vect_inter_low");
- if (perm_mask_low)
- perm_stmt = gimple_build_assign (low, VEC_PERM_EXPR, vect1,
- vect2, perm_mask_low);
+ high = make_temp_ssa_name (vectype, NULL, "vect_inter_high");
+ if (perm_mask_high)
+ perm_stmt = gimple_build_assign (high, VEC_PERM_EXPR, vect1,
+ vect2, perm_mask_high);
else
{
perm_stmt = gimple_build_call_internal
- (IFN_VEC_INTERLEAVE_LO, 2, vect1, vect2);
- gimple_set_lhs (perm_stmt, low);
+ (IFN_VEC_INTERLEAVE_HI, 2, vect1, vect2);
+ gimple_set_lhs (perm_stmt, high);
}
vect_finish_stmt_generation (stmt, perm_stmt, gsi);
- (*result_chain)[2 * j + 1] = low;
+ (*result_chain)[2 * j + 1] = high;
}
memcpy (dr_chain.address (), result_chain->address (),
length * sizeof (tree));
@@ -5820,8 +5883,7 @@ vect_setup_realignment (gimple *stmt, gimple_stmt_iterator *gsi,
gcc_assert (!compute_in_loop);
vec_dest = vect_create_destination_var (scalar_dest, vectype);
- ptr = vect_create_data_ref_ptr (stmt, vectype, 0,
- loop_for_initial_load,
+ ptr = vect_create_data_ref_ptr (stmt, vectype, loop_for_initial_load,
NULL_TREE, &init_addr, NULL, &inc,
true, &inv_p);
if (TREE_CODE (ptr) == SSA_NAME)
diff --git a/gcc/tree-vect-loop-manip.c b/gcc/tree-vect-loop-manip.c
index 8811679e2bf..df7dd3b7a1a 100644
--- a/gcc/tree-vect-loop-manip.c
+++ b/gcc/tree-vect-loop-manip.c
@@ -347,9 +347,9 @@ vect_maybe_permute_loop_masks (gimple_seq *seq, rgroup_masks *dest_rgm,
return true;
}
if (dest_masktype == src_masktype
- && direct_internal_fn_supported_p (IFN_VEC_INTERLEAVE_HI, src_masktype,
- OPTIMIZE_FOR_SPEED)
&& direct_internal_fn_supported_p (IFN_VEC_INTERLEAVE_LO, src_masktype,
+ OPTIMIZE_FOR_SPEED)
+ && direct_internal_fn_supported_p (IFN_VEC_INTERLEAVE_HI, src_masktype,
OPTIMIZE_FOR_SPEED))
{
/* The destination requires twice as many mask bits as the source, so
@@ -358,8 +358,8 @@ vect_maybe_permute_loop_masks (gimple_seq *seq, rgroup_masks *dest_rgm,
{
tree src = src_rgm->masks[i / 2];
tree dest = dest_rgm->masks[i];
- internal_fn ifn = (i & 1 ? IFN_VEC_INTERLEAVE_LO
- : IFN_VEC_INTERLEAVE_HI);
+ internal_fn ifn = (i & 1 ? IFN_VEC_INTERLEAVE_HI
+ : IFN_VEC_INTERLEAVE_LO);
gcall *stmt = gimple_build_call_internal (ifn, 2, src, src);
gimple_call_set_lhs (stmt, dest);
gimple_seq_add_stmt (seq, stmt);
@@ -627,11 +627,10 @@ vect_set_nonspeculative_masks (loop_vec_info loop_vinfo,
of the vectorized loop handles CAPPED_VF iterations of the scalar loop,
where CAPPED_VF is bounded by the compile-time vectorization factor.
- If NSCALARITERS_SKIP is nonnull, the first iteration of the
- vectorized loop starts with NSCALARITERS_SKIP dummy iterations of the
- scalar loop before the real work starts. The mask elements for these
- dummy iterations must be 0, to ensure that the extra iterations do not
- have an effect.
+ If NITERS_SKIP is nonnull, the first iteration of the vectorized loop
+ starts with NITERS_SKIP dummy iterations of the scalar loop before
+ the real work starts. The mask elements for these dummy iterations
+ must be 0, to ensure that the extra iterations do not have an effect.
It is known that:
@@ -658,10 +657,6 @@ vect_set_loop_masks_directly (struct loop *loop, loop_vec_info loop_vinfo,
tree niters, tree niters_skip,
bool might_wrap_p)
{
- tree index_before_incr, index_after_incr;
- gimple_stmt_iterator incr_gsi;
- bool insert_after;
-
tree compare_type = LOOP_VINFO_MASK_COMPARE_TYPE (loop_vinfo);
tree mask_type = rgm->mask_type;
unsigned int nscalars_per_iter = rgm->max_nscalars_per_iter;
@@ -690,6 +685,9 @@ vect_set_loop_masks_directly (struct loop *loop, loop_vec_info loop_vinfo,
/* Create an induction variable that counts the number of scalars
processed. */
+ tree index_before_incr, index_after_incr;
+ gimple_stmt_iterator incr_gsi;
+ bool insert_after;
tree zero_index = build_int_cst (compare_type, 0);
standard_iv_increment_position (loop, &incr_gsi, &insert_after);
create_iv (zero_index, nscalars_step, NULL_TREE, loop, &incr_gsi,
@@ -875,7 +873,9 @@ vect_set_loop_masks_directly (struct loop *loop, loop_vec_info loop_vinfo,
/* Make LOOP iterate NITERS times using masking and WHILE_ULT calls.
LOOP_VINFO describes the vectorization of LOOP. NITERS is the
- number of scalar iterations that should be handled by the vector loop.
+ number of iterations of the original scalar loop that should be
+ handled by the vector loop. NITERS_MAYBE_ZERO and FINAL_IV are
+ as for vect_set_loop_condition.
Insert the branch-back condition before LOOP_COND_GSI and return the
final gcond. */
@@ -894,11 +894,11 @@ vect_set_loop_condition_masked (struct loop *loop, loop_vec_info loop_vinfo,
unsigned HOST_WIDE_INT max_vf = vect_max_vf (loop_vinfo);
tree orig_niters = niters;
- /* Type of the initial value of niters. */
+ /* Type of the initial value of NITERS. */
tree ni_actual_type = TREE_TYPE (niters);
unsigned int ni_actual_precision = TYPE_PRECISION (ni_actual_type);
- /* Convert niters to the same size as the compare. */
+ /* Convert NITERS to the same size as the compare. */
if (compare_precision > ni_actual_precision
&& niters_maybe_zero)
{
@@ -1011,8 +1011,8 @@ vect_set_loop_condition_masked (struct loop *loop, loop_vec_info loop_vinfo,
Subtract one from this to get the latch count. */
tree step = build_int_cst (compare_type,
LOOP_VINFO_VECT_FACTOR (loop_vinfo));
- tree niters_minus_one = fold_build2 (MINUS_EXPR, compare_type, niters,
- build_one_cst (compare_type));
+ tree niters_minus_one = fold_build2 (PLUS_EXPR, compare_type, niters,
+ build_minus_one_cst (compare_type));
loop->nb_iterations = fold_build2 (TRUNC_DIV_EXPR, compare_type,
niters_minus_one, step);
@@ -1159,14 +1159,22 @@ vect_set_loop_condition_unmasked (struct loop *loop, tree niters,
return cond_stmt;
}
-/* Make LOOP iterate N == (NITERS - STEP) / STEP + 1 times,
- where NITERS is known to be outside the range [1, STEP - 1].
- This is equivalent to making the loop execute NITERS / STEP
- times when NITERS is nonzero and (1 << M) / STEP times otherwise,
- where M is the precision of NITERS.
+/* If we're using fully-masked loops, make LOOP iterate:
- NITERS_MAYBE_ZERO is true if NITERS can be zero, false it is known
- to be >= STEP. In the latter case N is always NITERS / STEP.
+ N == (NITERS - 1) / STEP + 1
+
+ times. When NITERS is zero, this is equivalent to making the loop
+ execute (1 << M) / STEP times, where M is the precision of NITERS.
+ NITERS_MAYBE_ZERO is true if this last case might occur.
+
+ If we're not using fully-masked loops, make LOOP iterate:
+
+ N == (NITERS - STEP) / STEP + 1
+
+ times, where NITERS is known to be outside the range [1, STEP - 1].
+ This is equivalent to making the loop execute NITERS / STEP times
+ when NITERS is nonzero and (1 << M) / STEP times otherwise.
+ NITERS_MAYBE_ZERO again indicates whether this last case might occur.
If FINAL_IV is nonnull, it is an SSA name that should be set to
N * STEP on exit from the loop.
@@ -1827,7 +1835,6 @@ vect_update_ivs_after_vectorizer (loop_vec_info loop_vinfo,
}
}
-
/* Return a gimple value containing the misalignment (measured in vector
elements) for the loop described by LOOP_VINFO, i.e. how many elements
it is away from a perfectly aligned address. Add any new statements
@@ -1841,7 +1848,6 @@ get_misalign_in_elems (gimple **seq, loop_vec_info loop_vinfo)
stmt_vec_info stmt_info = vinfo_for_stmt (dr_stmt);
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
- /* For speculative loops we need to align to the vector size. */
unsigned int target_align = DR_TARGET_ALIGNMENT (dr);
gcc_assert (target_align != 0);
@@ -1869,7 +1875,6 @@ get_misalign_in_elems (gimple **seq, loop_vec_info loop_vinfo)
return misalign_in_elems;
}
-
/* Function vect_gen_prolog_loop_niters
Generate the number of iterations which should be peeled as prolog for the
@@ -2038,10 +2043,7 @@ vect_update_inits_of_drs (loop_vec_info loop_vinfo, tree niters,
vect_update_init_of_dr (dr, niters, code);
}
-
-/* Function vect_prepare_for_masked_peels
-
- For the information recorded in LOOP_VINFO prepare the loop for peeling
+/* For the information recorded in LOOP_VINFO prepare the loop for peeling
by masking. This involves calculating the number of iterations to
be peeled and then aligning all memory references appropriately. */
@@ -2054,7 +2056,7 @@ vect_prepare_for_masked_peels (loop_vec_info loop_vinfo)
gcc_assert (vect_use_loop_mask_for_alignment_p (loop_vinfo));
/* From the information recorded in LOOP_VINFO get the number of iterations
- that need peeling from the loop via masking. */
+ that need to be skipped via masking. */
if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) > 0)
{
poly_int64 misalign = (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
@@ -2201,7 +2203,6 @@ vect_gen_vector_loop_niters (loop_vec_info loop_vinfo, tree niters,
poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
edge pe = loop_preheader_edge (LOOP_VINFO_LOOP (loop_vinfo));
tree log_vf = NULL_TREE;
- bool final_iter_may_be_partial = LOOP_VINFO_FULLY_MASKED_P (loop_vinfo);
/* If epilogue loop is required because of data accesses with gaps, we
subtract one iteration from the total number of iterations here for
@@ -2232,19 +2233,17 @@ vect_gen_vector_loop_niters (loop_vec_info loop_vinfo, tree niters,
(niters - vf) >> log2(vf) + 1 by using the fact that we know ratio
will be at least one. */
log_vf = build_int_cst (type, exact_log2 (const_vf));
- if (niters_no_overflow && !final_iter_may_be_partial)
+ if (niters_no_overflow)
niters_vector = fold_build2 (RSHIFT_EXPR, type, ni_minus_gap, log_vf);
else
- {
- tree sub = build_int_cst (type, final_iter_may_be_partial ? 1 : vf);
- niters_vector
- = fold_build2 (PLUS_EXPR, type,
- fold_build2 (RSHIFT_EXPR, type,
- fold_build2 (MINUS_EXPR, type,
- ni_minus_gap, sub),
- log_vf),
- build_int_cst (type, 1));
- }
+ niters_vector
+ = fold_build2 (PLUS_EXPR, type,
+ fold_build2 (RSHIFT_EXPR, type,
+ fold_build2 (MINUS_EXPR, type,
+ ni_minus_gap,
+ build_int_cst (type, vf)),
+ log_vf),
+ build_int_cst (type, 1));
step_vector = build_one_cst (type);
}
else
@@ -2698,7 +2697,7 @@ vect_do_peeling (loop_vec_info loop_vinfo, tree niters, tree nitersm1,
bool check_profitability, bool niters_no_overflow)
{
edge e, guard_e;
- tree type, guard_cond;
+ tree guard_cond;
basic_block guard_bb, guard_to;
profile_probability prob_prolog, prob_vector, prob_epilog;
int estimated_vf;
@@ -2720,7 +2719,6 @@ vect_do_peeling (loop_vec_info loop_vinfo, tree niters, tree nitersm1,
if (!prolog_peeling && !epilog_peeling)
return NULL;
- type = TREE_TYPE (niters);
prob_vector = profile_probability::guessed_always ().apply_scale (9, 10);
estimated_vf = vect_vf_for_cost (loop_vinfo);
if (estimated_vf == 2)
@@ -2747,6 +2745,7 @@ vect_do_peeling (loop_vec_info loop_vinfo, tree niters, tree nitersm1,
/* Generate the number of iterations for the prolog loop. We do this here
so that we can also get the upper bound on the number of iterations. */
+ tree type = TREE_TYPE (niters);
tree niters_prolog;
int bound_prolog = 0;
if (prolog_peeling)
@@ -2814,9 +2813,7 @@ vect_do_peeling (loop_vec_info loop_vinfo, tree niters, tree nitersm1,
first_loop = prolog;
reset_original_copy_tables ();
- /* Generate and update the number of iterations for prolog loop. */
- niters_prolog = vect_gen_prolog_loop_niters (loop_vinfo, anchor,
- &bound_prolog);
+ /* Update the number of iterations for prolog loop. */
tree step_prolog = build_one_cst (TREE_TYPE (niters_prolog));
vect_set_loop_condition (prolog, NULL, niters_prolog,
step_prolog, NULL_TREE, false);
@@ -3172,7 +3169,7 @@ vect_create_cond_for_unequal_addrs (loop_vec_info loop_vinfo, tree *cond_expr)
chain_cond_expr (cond_expr, part_cond_expr);
}
}
-
+
/* Create an expression that is true when all lower-bound conditions for
the vectorized loop are met. Chain this condition with *COND_EXPR. */
diff --git a/gcc/tree-vect-loop.c b/gcc/tree-vect-loop.c
index 91a3610a1a0..d784754c6de 100644
--- a/gcc/tree-vect-loop.c
+++ b/gcc/tree-vect-loop.c
@@ -52,9 +52,6 @@ along with GCC; see the file COPYING3. If not see
#include "tree-if-conv.h"
#include "internal-fn.h"
-/* For lang_hooks.types.type_for_mode. */
-#include "langhooks.h"
-
/* Loop Vectorization Pass.
This pass tries to vectorize loops.
@@ -989,8 +986,6 @@ vect_fixup_reduc_chain (gimple *stmt)
gcc_assert (!GROUP_FIRST_ELEMENT (vinfo_for_stmt (firstp))
&& GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)));
GROUP_SIZE (vinfo_for_stmt (firstp)) = GROUP_SIZE (vinfo_for_stmt (stmt));
- GROUP_NUM_STMTS (vinfo_for_stmt (firstp))
- = GROUP_NUM_STMTS (vinfo_for_stmt (stmt));
GROUP_FIRST_UID (vinfo_for_stmt (firstp))
= GROUP_FIRST_UID (vinfo_for_stmt (stmt));
GROUP_LAST_UID (vinfo_for_stmt (firstp))
@@ -1172,7 +1167,6 @@ _loop_vec_info::_loop_vec_info (struct loop *loop_in)
scalar_loop (NULL),
orig_loop_info (NULL),
vect_addr_base_htab (31),
- gather_scatter_htab (31),
exit_test_mask (NULL_TREE),
exit_mask (NULL_TREE),
nonspeculative_seq (NULL)
@@ -1293,7 +1287,7 @@ _loop_vec_info::~_loop_vec_info ()
}
/* Return true if we can use CMP_TYPE as the comparison type to produce
- all masks required to fully-mask LOOP_VINFO. */
+ all masks required to mask LOOP_VINFO. */
static bool
can_produce_all_loop_masks_p (loop_vec_info loop_vinfo, tree cmp_type)
@@ -1372,10 +1366,11 @@ vect_verify_full_masking (loop_vec_info loop_vinfo)
tree cmp_type = NULL_TREE;
FOR_EACH_MODE_IN_CLASS (cmp_mode_iter, MODE_INT)
{
- scalar_int_mode cmp_mode = cmp_mode_iter.require ();
- if (GET_MODE_BITSIZE (cmp_mode) >= min_ni_width)
+ unsigned int cmp_bits = GET_MODE_BITSIZE (cmp_mode_iter.require ());
+ if (cmp_bits >= min_ni_width
+ && targetm.scalar_mode_supported_p (cmp_mode_iter.require ()))
{
- tree this_type = lang_hooks.types.type_for_mode (cmp_mode, true);
+ tree this_type = build_nonstandard_integer_type (cmp_bits, true);
if (this_type
&& can_produce_all_loop_masks_p (loop_vinfo, this_type))
{
@@ -1384,7 +1379,7 @@ vect_verify_full_masking (loop_vec_info loop_vinfo)
operands to the WHILE are more likely to be reusable in
address calculations. */
cmp_type = this_type;
- if (GET_MODE_SIZE (cmp_mode) >= GET_MODE_SIZE (Pmode))
+ if (cmp_bits >= GET_MODE_BITSIZE (Pmode))
break;
}
}
@@ -2057,11 +2052,9 @@ vect_analyze_loop_operations (loop_vec_info loop_vinfo)
return true;
}
-/* Function vect_analyze_loop_costing.
-
- Analyze cost of loop. Decide if it is worth while to vectorize.
- Return 1 if definitely yes, 0 if definitely no, or -1 if it's
- worth retrying. */
+/* Analyze the cost of the loop described by LOOP_VINFO. Decide if it
+ is worthwhile to vectorize. Return 1 if definitely yes, 0 if
+ definitely no, or -1 if it's worth retrying. */
static int
vect_analyze_loop_costing (loop_vec_info loop_vinfo)
@@ -2461,10 +2454,10 @@ start_over:
{
if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
dump_printf_loc (MSG_NOTE, vect_location,
- "Using a fully-masked loop.\n");
+ "using a fully-masked loop.\n");
else
dump_printf_loc (MSG_NOTE, vect_location,
- "Not using a fully-masked loop.\n");
+ "not using a fully-masked loop.\n");
}
if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
@@ -2830,24 +2823,24 @@ vect_analyze_loop (struct loop *loop, loop_vec_info orig_loop_vinfo)
}
}
-/* Return true if the target supports strict math reductions for operation
- CODE and type TYPE. If the target supports it, store the reduction operation
- in REDUC_CODE. */
+/* Return true if the target supports in-order reductions for operation
+ CODE and type TYPE. If the target supports it, store the reduction
+ operation in *REDUC_CODE. */
+
static bool
-strict_reduction_code (tree_code code, tree type,
- tree_code *reduc_code)
+fold_left_reduction_code (tree_code code, tree type, tree_code *reduc_code)
{
switch (code)
{
case PLUS_EXPR:
- code = STRICT_REDUC_PLUS_EXPR;
+ code = FOLD_LEFT_PLUS_EXPR;
break;
default:
return false;
}
- if (!strict_reduction_support (code, type))
+ if (!target_supports_op_p (type, code, optab_vector))
return false;
*reduc_code = code;
@@ -2922,7 +2915,7 @@ neutral_op_for_slp_reduction (slp_tree slp_node, tree_code code,
stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
tree vector_type = STMT_VINFO_VECTYPE (stmt_vinfo);
tree scalar_type = TREE_TYPE (vector_type);
- struct loop *loop = (gimple_bb (stmt))->loop_father;
+ struct loop *loop = gimple_bb (stmt)->loop_father;
gcc_assert (loop);
switch (code)
@@ -3161,17 +3154,19 @@ vect_is_slp_reduction (loop_vec_info loop_info, gimple *phi,
first = GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
LOOP_VINFO_REDUCTION_CHAINS (loop_info).safe_push (first);
GROUP_SIZE (vinfo_for_stmt (first)) = size;
- GROUP_NUM_STMTS (vinfo_for_stmt (first)) = size;
GROUP_FIRST_UID (vinfo_for_stmt (first)) = first_uid;
GROUP_LAST_UID (vinfo_for_stmt (first)) = last_uid;
return true;
}
-/* Returns TRUE if we need to perform a strict math reduction for TYPE. */
+/* Returns true if we need an in-order reduction for operation CODE
+ on type TYPE. NEED_WRAPPING_INTEGRAL_OVERFLOW is true if integer
+ overflow must wrap. */
+
static bool
-needs_strict_reduction (tree type, tree_code code,
- bool need_wrapping_integral_overflow)
+needs_fold_left_reduction_p (tree type, tree_code code,
+ bool need_wrapping_integral_overflow)
{
/* CHECKME: check for !flag_finite_math_only too? */
if (SCALAR_FLOAT_TYPE_P (type))
@@ -3184,7 +3179,8 @@ needs_strict_reduction (tree type, tree_code code,
default:
return !flag_associative_math;
}
- else if (INTEGRAL_TYPE_P (type))
+
+ if (INTEGRAL_TYPE_P (type))
{
if (!operation_no_trapping_overflow (type, code))
return true;
@@ -3194,10 +3190,11 @@ needs_strict_reduction (tree type, tree_code code,
return true;
return false;
}
- else if (SAT_FIXED_POINT_TYPE_P (type))
+
+ if (SAT_FIXED_POINT_TYPE_P (type))
return true;
- else
- return false;
+
+ return false;
}
/* Function vect_is_simple_reduction
@@ -3242,9 +3239,6 @@ needs_strict_reduction (tree type, tree_code code,
if (a[i] < val)
ret_val = a[i];
- Record in DOUBLE_REDUC whether this is a double reduction.
- Record in STRICT_REDUC whether the reduction must be performed in order, i.e.
- cannot be reassociated.
*/
static gimple *
@@ -3529,9 +3523,9 @@ vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
outer-loop vectorization is safe. */
if (check_reduction
&& *v_reduc_type == TREE_CODE_REDUCTION
- && needs_strict_reduction (type, code,
- need_wrapping_integral_overflow))
- *v_reduc_type = STRICT_FP_REDUCTION;
+ && needs_fold_left_reduction_p (type, code,
+ need_wrapping_integral_overflow))
+ *v_reduc_type = FOLD_LEFT_REDUCTION;
/* Reduction is safe. We're dealing with one of the following:
1) integer arithmetic and no trapv
@@ -4327,7 +4321,7 @@ static void
vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
int ncopies)
{
- int prologue_cost = 0, epilogue_cost = 0;
+ int prologue_cost = 0, epilogue_cost = 0, inside_cost;
enum tree_code code;
optab optab;
tree vectype;
@@ -4346,13 +4340,11 @@ vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
target_cost_data = BB_VINFO_TARGET_COST_DATA (STMT_VINFO_BB_VINFO (stmt_info));
/* Condition reductions generate two reductions in the loop. */
- if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION)
+ vect_reduction_type reduction_type
+ = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info);
+ if (reduction_type == COND_REDUCTION)
ncopies *= 2;
- /* Cost of reduction op inside loop. */
- unsigned inside_cost = add_stmt_cost (target_cost_data, ncopies, vector_stmt,
- stmt_info, 0, vect_body);
-
vectype = STMT_VINFO_VECTYPE (stmt_info);
mode = TYPE_MODE (vectype);
orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
@@ -4362,14 +4354,31 @@ vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
code = gimple_assign_rhs_code (orig_stmt);
- /* Add in cost for initial definition.
- For cond reduction we have four vectors: initial index, step, initial
- result of the data reduction, initial value of the index reduction. */
- int prologue_stmts = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
- == COND_REDUCTION ? 4 : 1;
- prologue_cost += add_stmt_cost (target_cost_data, prologue_stmts,
- scalar_to_vec, stmt_info, 0,
- vect_prologue);
+ if (reduction_type == EXTRACT_LAST_REDUCTION
+ || reduction_type == FOLD_LEFT_REDUCTION)
+ {
+ /* No extra instructions needed in the prologue. */
+ prologue_cost = 0;
+
+ /* Count NCOPIES FOLD_EXTRACT_LAST operations. */
+ inside_cost = add_stmt_cost (target_cost_data, ncopies, vec_to_scalar,
+ stmt_info, 0, vect_body);
+ }
+ else
+ {
+ /* Add in cost for initial definition.
+ For cond reduction we have four vectors: initial index, step,
+ initial result of the data reduction, initial value of the index
+ reduction. */
+ int prologue_stmts = reduction_type == COND_REDUCTION ? 4 : 1;
+ prologue_cost += add_stmt_cost (target_cost_data, prologue_stmts,
+ scalar_to_vec, stmt_info, 0,
+ vect_prologue);
+
+ /* Cost of reduction op inside loop. */
+ inside_cost = add_stmt_cost (target_cost_data, ncopies, vector_stmt,
+ stmt_info, 0, vect_body);
+ }
/* Determine cost of epilogue code.
@@ -4380,10 +4389,7 @@ vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
{
if (reduc_code != ERROR_MARK)
{
- if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == STRICT_FP_REDUCTION)
- inside_cost += add_stmt_cost (target_cost_data, 1, vec_to_scalar,
- stmt_info, 0, vect_body);
- else if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION)
+ if (reduction_type == COND_REDUCTION)
{
/* An EQ stmt and an COND_EXPR stmt. */
epilogue_cost += add_stmt_cost (target_cost_data, 2,
@@ -4408,7 +4414,7 @@ vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
vect_epilogue);
}
}
- else if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION)
+ else if (reduction_type == COND_REDUCTION)
{
unsigned estimated_nunits = vect_nunits_for_cost (vectype);
/* Extraction of scalar elements. */
@@ -4422,10 +4428,12 @@ vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
scalar_stmt, stmt_info, 0,
vect_epilogue);
}
- else if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
- != COND_REDUCTION_CLASTB)
+ else if (reduction_type == EXTRACT_LAST_REDUCTION
+ || reduction_type == FOLD_LEFT_REDUCTION)
+ /* No extra instructions need in the epilogue. */
+ ;
+ else
{
- /* Enforced by vectorizable_reduction. */
int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
tree bitsize =
TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt)));
@@ -4591,6 +4599,9 @@ get_initial_def_for_reduction (gimple *stmt, tree init_val,
return vect_create_destination_var (init_val, vectype);
}
+ vect_reduction_type reduction_type
+ = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_vinfo);
+
/* In case of a nested reduction do not use an adjustment def as
that case is not supported by the epilogue generation correctly
if ncopies is not one. */
@@ -4664,8 +4675,8 @@ get_initial_def_for_reduction (gimple *stmt, tree init_val,
if (adjustment_def)
{
*adjustment_def = NULL_TREE;
- if (! REDUCTION_IS_FULL_COND_REDUCTION_P
- (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_vinfo)))
+ if (reduction_type != COND_REDUCTION
+ && reduction_type != EXTRACT_LAST_REDUCTION)
{
init_def = vect_get_vec_def_for_operand (init_val, stmt);
break;
@@ -4772,6 +4783,8 @@ get_initial_defs_for_reduction (slp_tree slp_node,
init = gimple_build_vector (&ctor_seq, vector_type, elts);
else if (neutral_op)
{
+ /* Build a vector of the neutral value and shift the
+ other elements into place. */
init = gimple_build_vector_from_val (&ctor_seq, vector_type,
neutral_op);
int k = nunits;
@@ -4789,6 +4802,9 @@ get_initial_defs_for_reduction (slp_tree slp_node,
}
else
{
+ /* First time round, duplicate ELTS to fill the
+ required number of vectors, then cherry pick the
+ appropriate result for each iteration. */
if (vec_oprnds->is_empty ())
duplicate_and_interleave (&ctor_seq, vector_type, elts,
number_of_vectors,
@@ -5403,7 +5419,7 @@ vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple *stmt,
gsi_insert_before (&exit_gsi, max_index_vec_stmt, GSI_SAME_STMT);
/* Next we compare the new vector (MAX_INDEX_VEC) full of max indexes
- with the vector (COND_REDUC_RES) of found indexes, choosing values
+ with the vector (INDUCTION_INDEX) of found indexes, choosing values
from the data vector (NEW_PHI_RESULT) for matches, 0 (ZERO_VEC)
otherwise. Only one value should match, resulting in a vector
(VEC_COND) with one data value and the rest zeros.
@@ -5592,6 +5608,10 @@ vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple *stmt,
}
else if (direct_slp_reduc)
{
+ /* Here we create one vector for each of the GROUP_SIZE results,
+ with the elements for other SLP statements replaced with the
+ neutral value. We can then do a normal reduction on each vector. */
+
/* Enforced by vectorizable_reduction. */
gcc_assert (new_phis.length () == 1);
gcc_assert (pow2p_hwi (group_size));
@@ -5599,6 +5619,9 @@ vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple *stmt,
slp_tree orig_phis_slp_node = slp_node_instance->reduc_phis;
vec<gimple *> orig_phis = SLP_TREE_SCALAR_STMTS (orig_phis_slp_node);
gimple_seq seq = NULL;
+
+ /* Build a vector {0, 1, 2, ...}, with the same number of elements
+ and the same element size as VECTYPE. */
tree index = build_index_vector (vectype, 0, 1);
tree index_type = TREE_TYPE (index);
tree index_elt_type = TREE_TYPE (index_type);
@@ -5611,8 +5634,8 @@ vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple *stmt,
build_vector_from_val (index_type, index_mask));
/* Get a neutral vector value. This is simply a splat of the neutral
- scalar value if we have one, otherwise the initial vector is itself
- a neutral value. */
+ scalar value if we have one, otherwise the initial scalar value
+ is itself a neutral value. */
tree vector_identity = NULL_TREE;
if (neutral_op)
vector_identity = gimple_build_vector_from_val (&seq, vectype,
@@ -5633,7 +5656,7 @@ vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple *stmt,
/* Calculate the equivalent of:
- sel = (index == i);
+ sel[j] = (index[j] == i);
which selects the elements of NEW_PHI_RESULT that should
be included in the result. */
@@ -6167,30 +6190,30 @@ merge_with_identity (gimple_stmt_iterator *gsi, tree mask, tree vectype,
return cond;
}
-/* Perform in-order reductions for strict FP math, as opposed to the
- tree-based method used for fast math. For SLP this only works for
- chained reductions, as non chained reductions would require changing
- the order. */
+/* Perform an in-order reduction (FOLD_LEFT_REDUCTION). STMT is the
+ statement that sets the live-out value. REDUC_DEF_STMT is the phi
+ statement. CODE is the operation performed by STMT and OPS are
+ its scalar operands. REDUC_INDEX is the index of the operand in
+ OPS that is set by REDUC_DEF_STMT. REDUC_CODE is the code that
+ implements in-order reduction and VECTYPE_IN is the type of its
+ vector input. MASKS specifies the masks that should be used to
+ control the operation in a fully-masked loop. */
static bool
-vectorized_strict_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
- gimple **vec_stmt, slp_tree slp_node,
- gimple *reduc_def_stmt,
- tree_code code, tree_code reduc_code,
- int op_type, tree ops[3], tree vectype_in,
- int reduc_index, vec_loop_masks *masks)
+vectorize_fold_left_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
+ gimple **vec_stmt, slp_tree slp_node,
+ gimple *reduc_def_stmt,
+ tree_code code, tree_code reduc_code,
+ tree ops[3], tree vectype_in,
+ int reduc_index, vec_loop_masks *masks)
{
- int i;
- int ncopies;
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
- tree def0, op0;
- tree expr = NULL_TREE;
tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
gimple *new_stmt = NULL;
- auto_vec<tree> vec_oprnds0;
+ int ncopies;
if (slp_node)
ncopies = 1;
else
@@ -6198,19 +6221,20 @@ vectorized_strict_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
gcc_assert (!nested_in_vect_loop_p (loop, stmt));
gcc_assert (ncopies == 1);
- gcc_assert (op_type == binary_op);
+ gcc_assert (TREE_CODE_LENGTH (code) == binary_op);
gcc_assert (reduc_index == (code == MINUS_EXPR ? 0 : 1));
gcc_assert (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
- == STRICT_FP_REDUCTION);
+ == FOLD_LEFT_REDUCTION);
if (slp_node)
gcc_assert (must_eq (TYPE_VECTOR_SUBPARTS (vectype_out),
TYPE_VECTOR_SUBPARTS (vectype_in)));
- op0 = ops[1 - reduc_index];
+ tree op0 = ops[1 - reduc_index];
int group_size = 1;
gimple *scalar_dest_def;
+ auto_vec<tree> vec_oprnds0;
if (slp_node)
{
vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL, slp_node);
@@ -6238,11 +6262,15 @@ vectorized_strict_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
vector_identity = build_zero_cst (vectype_out);
+ int i;
+ tree def0;
FOR_EACH_VEC_ELT (vec_oprnds0, i, def0)
{
tree mask = NULL_TREE;
if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
mask = vect_get_loop_mask (gsi, masks, vec_num, vectype_in, i);
+
+ /* Handle MINUS by adding the negative. */
if (code == MINUS_EXPR)
{
tree negated = make_ssa_name (vectype_out);
@@ -6255,25 +6283,27 @@ vectorized_strict_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
def0 = merge_with_identity (gsi, mask, vectype_out, def0,
vector_identity);
- /* On first iteration the input is simply the scalar phi result, and for
- subsequent iterations it is the output of the preceding operation. */
-
- expr = build2 (reduc_code, scalar_type, reduc_var, def0);
+ /* On the first iteration the input is simply the scalar phi
+ result, and for subsequent iterations it is the output of
+ the preceding operation. */
+ tree expr = build2 (reduc_code, scalar_type, reduc_var, def0);
/* For chained SLP reductions the output of the previous reduction
- operation serves as the input of the next. For the final statement
- the output cannot be a temporary - we reuse the original
- scalar destination of the last statement. */
+ operation serves as the input of the next. For the final statement
+ the output cannot be a temporary - we reuse the original
+ scalar destination of the last statement. */
if (i == vec_num - 1)
- reduc_var = scalar_dest;
+ reduc_var = scalar_dest;
else
- reduc_var = vect_create_destination_var (scalar_dest, NULL);
-
+ reduc_var = vect_create_destination_var (scalar_dest, NULL);
new_stmt = gimple_build_assign (reduc_var, expr);
if (i == vec_num - 1)
- {
- SSA_NAME_DEF_STMT (reduc_var) = new_stmt;
+ {
+ SSA_NAME_DEF_STMT (reduc_var) = new_stmt;
+ /* For chained SLP stmt is the first statement in the group and
+ gsi points to the last statement in the group. For non SLP stmt
+ points to the same location as gsi. */
if (scalar_dest_def == gsi_stmt (*gsi))
vect_finish_replace_stmt (scalar_dest_def, new_stmt);
else
@@ -6287,14 +6317,14 @@ vectorized_strict_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
}
}
else
- {
- reduc_var = make_ssa_name (reduc_var, new_stmt);
- gimple_assign_set_lhs (new_stmt, reduc_var);
- vect_finish_stmt_generation (stmt, new_stmt, gsi);
- }
+ {
+ reduc_var = make_ssa_name (reduc_var, new_stmt);
+ gimple_assign_set_lhs (new_stmt, reduc_var);
+ vect_finish_stmt_generation (stmt, new_stmt, gsi);
+ }
if (slp_node)
- SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
+ SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
}
if (!slp_node)
@@ -6481,7 +6511,10 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
return true;
}
- if (STMT_VINFO_REDUC_TYPE (stmt_info) == STRICT_FP_REDUCTION)
+ if (STMT_VINFO_REDUC_TYPE (stmt_info) == FOLD_LEFT_REDUCTION)
+ /* Leave the scalar phi in place. Note that checking
+ STMT_VINFO_VEC_REDUCTION_TYPE (as below) only works
+ for reductions involving a single statement. */
return true;
gimple *reduc_stmt = STMT_VINFO_REDUC_DEF (stmt_info);
@@ -6489,11 +6522,11 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
reduc_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (reduc_stmt));
if (STMT_VINFO_VEC_REDUCTION_TYPE (vinfo_for_stmt (reduc_stmt))
- == COND_REDUCTION_CLASTB)
+ == EXTRACT_LAST_REDUCTION)
+ /* Leave the scalar phi in place. */
return true;
gcc_assert (is_gimple_assign (reduc_stmt));
-
for (unsigned k = 1; k < gimple_num_ops (reduc_stmt); ++k)
{
tree op = gimple_op (reduc_stmt, k);
@@ -6711,11 +6744,11 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
directy used in stmt. */
if (reduc_index == -1)
{
- if (STMT_VINFO_REDUC_TYPE (stmt_info) == STRICT_FP_REDUCTION)
+ if (STMT_VINFO_REDUC_TYPE (stmt_info) == FOLD_LEFT_REDUCTION)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "In-order reduction chain without SLP.\n");
+ "in-order reduction chain without SLP.\n");
return false;
}
@@ -6765,19 +6798,20 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
gcc_assert (cond_reduc_dt == vect_constant_def);
STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = CONST_COND_REDUCTION;
}
- else if (direct_internal_fn_supported_p (IFN_CLASTB, vectype_in,
- OPTIMIZE_FOR_SPEED))
+ else if (direct_internal_fn_supported_p (IFN_FOLD_EXTRACT_LAST,
+ vectype_in, OPTIMIZE_FOR_SPEED))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "Optimizing condition reduction with CLASTB.\n");
- STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = COND_REDUCTION_CLASTB;
+ "optimizing condition reduction with"
+ " FOLD_EXTRACT_LAST.\n");
+ STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = EXTRACT_LAST_REDUCTION;
}
else if (cond_reduc_dt == vect_induction_def)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
- "Optimizing condition reduction based on "
+ "optimizing condition reduction based on "
"integer induction.\n");
STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
= INTEGER_INDUC_COND_REDUCTION;
@@ -6935,9 +6969,11 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
(and also the same tree-code) when generating the epilog code and
when generating the code inside the loop. */
+ vect_reduction_type reduction_type
+ = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info);
if (orig_stmt
- && (!REDUCTION_IS_COND_REDUCTION_P
- (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info))))
+ && (reduction_type == TREE_CODE_REDUCTION
+ || reduction_type == FOLD_LEFT_REDUCTION))
{
/* This is a reduction pattern: get the vectype from the type of the
reduction variable, and get the tree-code from orig_stmt. */
@@ -6956,13 +6992,12 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
/* For simple condition reductions, replace with the actual expression
we want to base our reduction around. */
- if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == CONST_COND_REDUCTION)
+ if (reduction_type == CONST_COND_REDUCTION)
{
orig_code = STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info);
gcc_assert (orig_code == MAX_EXPR || orig_code == MIN_EXPR);
}
- else if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
- == INTEGER_INDUC_COND_REDUCTION)
+ else if (reduction_type == INTEGER_INDUC_COND_REDUCTION)
orig_code = MAX_EXPR;
}
@@ -6984,17 +7019,15 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
epilog_reduc_code = ERROR_MARK;
- vect_reduction_type reduction_type
- = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info);
if (reduction_type == TREE_CODE_REDUCTION
- || reduction_type == STRICT_FP_REDUCTION
+ || reduction_type == FOLD_LEFT_REDUCTION
|| reduction_type == INTEGER_INDUC_COND_REDUCTION
|| reduction_type == CONST_COND_REDUCTION)
{
bool have_reduc_support;
- if (reduction_type == STRICT_FP_REDUCTION)
- have_reduc_support = strict_reduction_code (orig_code, vectype_out,
- &epilog_reduc_code);
+ if (reduction_type == FOLD_LEFT_REDUCTION)
+ have_reduc_support = fold_left_reduction_code (orig_code, vectype_out,
+ &epilog_reduc_code);
else
have_reduc_support
= reduction_code_for_scalar_code (orig_code, &epilog_reduc_code);
@@ -7047,7 +7080,7 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
epilog_reduc_code = REDUC_MAX_EXPR;
}
- if (reduction_type != COND_REDUCTION_CLASTB
+ if (reduction_type != EXTRACT_LAST_REDUCTION
&& epilog_reduc_code == ERROR_MARK
&& !nunits_out.is_constant ())
{
@@ -7058,7 +7091,7 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
return false;
}
- if ((double_reduc || REDUCTION_IS_COND_REDUCTION_P (reduction_type))
+ if ((double_reduc || reduction_type != TREE_CODE_REDUCTION)
&& ncopies > 1)
{
if (dump_enabled_p ())
@@ -7071,9 +7104,9 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
/* For SLP reductions, see if there is a neutral value we can use. */
tree neutral_op = NULL_TREE;
if (slp_node)
- neutral_op = neutral_op_for_slp_reduction
- (slp_node_instance->reduc_phis, code,
- GROUP_FIRST_ELEMENT (stmt_info) != NULL);
+ neutral_op
+ = neutral_op_for_slp_reduction (slp_node_instance->reduc_phis, code,
+ GROUP_FIRST_ELEMENT (stmt_info) != NULL);
/* For double reductions, and for SLP reductions with a neutral value,
we construct a variable-length initial vector by loading a vector
@@ -7086,7 +7119,7 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "Reduction on variable-length vectors requires"
+ "reduction on variable-length vectors requires"
" target support for a vector-shift-and-insert"
" operation.\n");
return false;
@@ -7109,8 +7142,8 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "Unsupported form of SLP reduction for"
- " variable-width vectors: cannot build"
+ "unsupported form of SLP reduction for"
+ " variable-length vectors: cannot build"
" initial vector.\n");
return false;
}
@@ -7121,58 +7154,45 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "Unsupported form of SLP reduction for"
- " variable-width vectors: the vector size"
+ "unsupported form of SLP reduction for"
+ " variable-length vectors: the vector size"
" is not a multiple of the number of results.\n");
return false;
}
}
- if (double_reduc && reduction_type == STRICT_FP_REDUCTION)
+ if (double_reduc && reduction_type == FOLD_LEFT_REDUCTION)
{
- /* We can't support strict math reductions of code such as this:
+ /* We can't support in-order reductions of code such as this:
+
for (int i = 0; i < n1; ++i)
for (int j = 0; j < n2; ++j)
l += a[j];
- since gcc effectively transforms the loop when vectorizing:
+ since GCC effectively transforms the loop when vectorizing:
for (int i = 0; i < n1 / VF; ++i)
for (int j = 0; j < n2; ++j)
for (int k = 0; k < VF; ++k)
l += a[j];
- The strict code could implement the second loop above exactly. The
- problem is that the second loop is already wrong because it's a
- reassociation of the first.
- */
+ which is a reassociation of the original operation. */
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "double reduction not supported for strict math\n");
+ "in-order double reduction not supported.\n");
return false;
}
- /* TODO SVE: This restriction should be relaxed once we can support
- widening, narrowing operations. */
- if (reduction_type == STRICT_FP_REDUCTION && ncopies > 1)
- {
- if (dump_enabled_p ())
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "strict reduction with ncopies > 1.\n");
- return false;
- }
-
- if (reduction_type == STRICT_FP_REDUCTION
+ if (reduction_type == FOLD_LEFT_REDUCTION
&& slp_node
&& !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
{
- /* We cannot support strict math reductions in this case because there is
+ /* We cannot in-order reductions in this case because there is
an implicit reassociation of the operations involved. */
if (dump_enabled_p ())
- dump_printf_loc
- (MSG_MISSED_OPTIMIZATION, vect_location,
- "non chained SLP reduction not supported for strict math.\n");
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "in-order unchained SLP reductions not supported.\n");
return false;
}
@@ -7282,6 +7302,11 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
return false;
}
+ if (slp_node)
+ vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
+ else
+ vec_num = 1;
+
internal_fn cond_fn = get_conditional_internal_fn (code, scalar_type);
/* In a speculative loop, the update must be predicated on the
@@ -7291,25 +7316,20 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
if (LOOP_VINFO_SPECULATIVE_EXECUTION (loop_vinfo))
masks = &LOOP_VINFO_NONSPECULATIVE_MASKS (loop_vinfo);
- if (slp_node)
- vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
- else
- vec_num = 1;
-
if (!vec_stmt) /* transformation not required. */
{
if (first_p)
vect_model_reduction_cost (stmt_info, epilog_reduc_code, ncopies);
if (loop_vinfo && LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
{
- if (reduction_type != STRICT_FP_REDUCTION
+ if (reduction_type != FOLD_LEFT_REDUCTION
&& (cond_fn == IFN_LAST
|| !direct_internal_fn_supported_p (cond_fn, vectype_in,
OPTIMIZE_FOR_SPEED)))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "Can't use a fully-masked loop because no"
+ "can't use a fully-masked loop because no"
" conditional operation is available.\n");
LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
}
@@ -7317,7 +7337,7 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "Can't use a fully-masked loop for chained"
+ "can't use a fully-masked loop for chained"
" reductions.\n");
LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
}
@@ -7346,15 +7366,15 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
&& masks == &LOOP_VINFO_NONSPECULATIVE_MASKS (loop_vinfo))
gsi = &nonspeculative_gsi;
- if (reduction_type == STRICT_FP_REDUCTION)
- return vectorized_strict_reduction
+ if (reduction_type == FOLD_LEFT_REDUCTION)
+ return vectorize_fold_left_reduction
(stmt, gsi, vec_stmt, slp_node, reduc_def_stmt, code,
- epilog_reduc_code, op_type, ops, vectype_in, reduc_index, masks);
+ epilog_reduc_code, ops, vectype_in, reduc_index, masks);
- if (reduction_type == COND_REDUCTION_CLASTB)
+ if (reduction_type == EXTRACT_LAST_REDUCTION)
{
gcc_assert (!slp_node);
- return vectorizable_condition (stmt, gsi, vec_stmt,
+ return vectorizable_condition (stmt, gsi, vec_stmt,
NULL, reduc_index, NULL);
}
@@ -8217,42 +8237,6 @@ vectorizable_live_operation (gimple *stmt,
}
}
- /* Check if required operations can be supported. */
-
- if (!direct_internal_fn_supported_p (IFN_EXTRACT_LAST, vectype,
- OPTIMIZE_FOR_SPEED))
- {
- if (LOOP_VINFO_SPECULATIVE_EXECUTION (loop_vinfo))
- {
- if (dump_enabled_p ())
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "Not vectorized: "
- "Extract last reduction not supported.\n");
- return false;
- }
-
- if (LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
- {
- if (dump_enabled_p ())
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "Can't use a fully-masked loop because "
- "the target doesn't support extract last "
- "reduction.\n");
- LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
- /* Don't return - we can still vectorize without masking. */
- }
- }
-
- if (slp_node && LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
- {
- if (dump_enabled_p ())
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "Can't use a fully-masked loop; "
- "SLP statement is live after the loop.\n");
- LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
- /* Don't return - we can still vectorize without masking. */
- }
-
if (LOOP_VINFO_SPECULATIVE_EXECUTION (loop_vinfo))
{
/* Need to construct the type because on the checking stage we don't
@@ -8264,30 +8248,23 @@ vectorizable_live_operation (gimple *stmt,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "Not vectorized: Break after not supported.\n");
+ "not vectorized: break after not supported.\n");
return false;
}
- }
-
- if (ncopies > 1)
- {
- if (LOOP_VINFO_SPECULATIVE_EXECUTION (loop_vinfo))
+ if (!direct_internal_fn_supported_p (IFN_EXTRACT_LAST, vectype,
+ OPTIMIZE_FOR_SPEED))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "Not vectorized: "
- "Multiple ncopies not supported.\n");
+ "not vectorized: extract last not supported.\n");
return false;
}
-
- if (LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
+ if (ncopies > 1)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "Can't use a fully-masked loop because"
- " ncopies is greater than 1.\n");
- LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
- /* Don't return - we can still vectorize without masking. */
+ "not vectorized: ncopies is greater than 1.\n");
+ return false;
}
}
@@ -8296,9 +8273,39 @@ vectorizable_live_operation (gimple *stmt,
/* No transformation required. */
if (LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
{
- gcc_assert (ncopies == 1 && !slp_node);
- vect_record_loop_mask (loop_vinfo, &LOOP_VINFO_MASKS (loop_vinfo),
- 1, vectype);
+ if (!direct_internal_fn_supported_p (IFN_EXTRACT_LAST, vectype,
+ OPTIMIZE_FOR_SPEED))
+ {
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "can't use a fully-masked loop because "
+ "the target doesn't support extract last "
+ "reduction.\n");
+ LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
+ }
+ else if (slp_node)
+ {
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "can't use a fully-masked loop because an "
+ "SLP statement is live after the loop.\n");
+ LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
+ }
+ else if (ncopies > 1)
+ {
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "can't use a fully-masked loop because"
+ " ncopies is greater than 1.\n");
+ LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
+ }
+ else
+ {
+ gcc_assert (ncopies == 1 && !slp_node);
+ vect_record_loop_mask (loop_vinfo,
+ &LOOP_VINFO_MASKS (loop_vinfo),
+ 1, vectype);
+ }
}
return true;
}
@@ -8350,19 +8357,16 @@ vectorizable_live_operation (gimple *stmt,
if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
|| LOOP_VINFO_SPECULATIVE_EXECUTION (loop_vinfo))
{
- tree scalar_type = TREE_TYPE (STMT_VINFO_VECTYPE (stmt_info));
- tree scalar_res = make_ssa_name (scalar_type);
tree mask;
- gimple *new_stmt;
-
if (LOOP_VINFO_SPECULATIVE_EXECUTION (loop_vinfo))
{
+ gcc_assert (ncopies == 1);
tree orig_mask = LOOP_VINFO_EXIT_MASK (loop_vinfo);
tree all_ones = build_minus_one_cst (TREE_TYPE (orig_mask));
mask = make_ssa_name (TREE_TYPE (orig_mask));
- new_stmt = gimple_build_call_internal (IFN_BREAK_AFTER, 2,
- all_ones, orig_mask);
+ gcall *new_stmt = gimple_build_call_internal (IFN_BREAK_AFTER, 2,
+ all_ones, orig_mask);
gimple_call_set_lhs (new_stmt, mask);
gimple_seq_add_stmt (&stmts, new_stmt);
}
@@ -8373,11 +8377,20 @@ vectorizable_live_operation (gimple *stmt,
1, vectype, 0);
}
- new_stmt = gimple_build_call_internal (IFN_EXTRACT_LAST, 2, vec_lhs,
- mask);
+ /* Emit:
+
+ SCALAR_RES = EXTRACT_LAST <VEC_LHS, MASK>
+
+ where VEC_LHS is the vectorized live-out result and MASK is
+ the loop mask for the final iteration. */
+ tree scalar_type = TREE_TYPE (STMT_VINFO_VECTYPE (stmt_info));
+ tree scalar_res = make_ssa_name (scalar_type);
+ gcall *new_stmt = gimple_build_call_internal (IFN_EXTRACT_LAST,
+ 2, mask, vec_lhs);
gimple_call_set_lhs (new_stmt, scalar_res);
gimple_seq_add_stmt (&stmts, new_stmt);
+ /* Convert the extracted vector element to the required scalar type. */
new_tree = gimple_convert (&stmts, lhs_type, scalar_res);
}
else
@@ -8778,24 +8791,17 @@ vect_transform_loop (loop_vec_info loop_vinfo)
&step_vector, &niters_vector_mult_vf, th,
check_profitability, niters_no_overflow);
- bool final_iter_may_be_partial = LOOP_VINFO_FULLY_MASKED_P (loop_vinfo);
if (niters_vector == NULL_TREE
&& !LOOP_VINFO_SPECULATIVE_EXECUTION (loop_vinfo))
{
gcc_assert (!LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo));
if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
- && must_eq (lowest_vf, vf)
- && !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
- {
- wide_int niters_vector_val
- = (final_iter_may_be_partial
- ? wi::udiv_ceil (wi::to_wide (LOOP_VINFO_NITERS (loop_vinfo)),
- lowest_vf)
- : wi::udiv_floor (wi::to_wide (LOOP_VINFO_NITERS (loop_vinfo)),
- lowest_vf));
+ && !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
+ && must_eq (lowest_vf, vf))
+ {
niters_vector
- = wide_int_to_tree (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)),
- niters_vector_val);
+ = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)),
+ LOOP_VINFO_INT_NITERS (loop_vinfo) / lowest_vf);
step_vector = build_one_cst (TREE_TYPE (niters));
}
else
@@ -9064,13 +9070,11 @@ vect_transform_loop (loop_vec_info loop_vinfo)
{
if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
{
- /* Remove all the stores once we've vectorized the
- whole group. */
+ /* Interleaving. If IS_STORE is TRUE, the vectorization of the
+ interleaving chain was completed - free all the stores in
+ the chain. */
gsi_next (&si);
- gimple *first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
- if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
- == GROUP_NUM_STMTS (vinfo_for_stmt (first_stmt)))
- vect_remove_stores (first_stmt);
+ vect_remove_stores (GROUP_FIRST_ELEMENT (stmt_info));
}
else
{
@@ -9092,6 +9096,25 @@ vect_transform_loop (loop_vec_info loop_vinfo)
gsi_next (&si);
}
} /* stmts in BB */
+
+ /* Stub out scalar statements that must not survive vectorization.
+ Doing this here helps with grouped statements, or statements that
+ are involved in patterns. */
+ for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
+ !gsi_end_p (gsi); gsi_next (&gsi))
+ {
+ gcall *call = dyn_cast <gcall *> (gsi_stmt (gsi));
+ if (call && gimple_call_internal_p (call, IFN_MASK_LOAD))
+ {
+ tree lhs = gimple_get_lhs (call);
+ if (!VECTOR_TYPE_P (TREE_TYPE (lhs)))
+ {
+ tree zero = build_zero_cst (TREE_TYPE (lhs));
+ gimple *new_stmt = gimple_build_assign (lhs, zero);
+ gsi_replace (&gsi, new_stmt, true);
+ }
+ }
+ }
} /* BBs in loop */
/* Provide the real definition of LOOP_VINFO_EXIT_MASK. */
@@ -9113,6 +9136,9 @@ vect_transform_loop (loop_vec_info loop_vinfo)
unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);
scale_profile_for_vect_loop (loop, assumed_vf);
+ /* True if the final iteration might not handle a full vector's
+ worth of scalar iterations. */
+ bool final_iter_may_be_partial = LOOP_VINFO_FULLY_MASKED_P (loop_vinfo);
/* The minimum number of iterations performed by the epilogue. This
is 1 when peeling for gaps because we always need a final scalar
iteration. */
diff --git a/gcc/tree-vect-patterns.c b/gcc/tree-vect-patterns.c
index 066ec48c056..10aa8d6c2f0 100644
--- a/gcc/tree-vect-patterns.c
+++ b/gcc/tree-vect-patterns.c
@@ -69,6 +69,7 @@ static gimple *vect_recog_mixed_size_cond_pattern (vec<gimple *> *,
tree *, tree *);
static gimple *vect_recog_bool_pattern (vec<gimple *> *, tree *, tree *);
static gimple *vect_recog_mask_conversion_pattern (vec<gimple *> *, tree *, tree *);
+static gimple *vect_recog_gather_scatter_pattern (vec<gimple *> *, tree *, tree *);
struct vect_recog_func
{
@@ -93,6 +94,10 @@ static vect_recog_func vect_vect_recog_func_ptrs[NUM_PATTERNS] = {
{ vect_recog_mult_pattern, "mult" },
{ vect_recog_mixed_size_cond_pattern, "mixed_size_cond" },
{ vect_recog_bool_pattern, "bool" },
+ /* This must come before mask conversion, and includes the parts
+ of mask conversion that are needed for gather and scatter
+ internal functions. */
+ { vect_recog_gather_scatter_pattern, "gather_scatter" },
{ vect_recog_mask_conversion_pattern, "mask_conversion" }
};
@@ -4100,6 +4105,207 @@ vect_recog_mask_conversion_pattern (vec<gimple *> *stmts, tree *type_in,
return pattern_stmt;
}
+/* STMT is a load or store. If the load or store is conditional, return
+ the boolean condition under which it occurs, otherwise return null. */
+
+static tree
+vect_get_load_store_mask (gimple *stmt)
+{
+ if (gassign *def_assign = dyn_cast <gassign *> (stmt))
+ {
+ gcc_assert (gimple_assign_single_p (def_assign));
+ return NULL_TREE;
+ }
+
+ if (gcall *def_call = dyn_cast <gcall *> (stmt))
+ {
+ internal_fn ifn = gimple_call_internal_fn (def_call);
+ int mask_index = internal_fn_mask_index (ifn);
+ return gimple_call_arg (def_call, mask_index);
+ }
+
+ gcc_unreachable ();
+}
+
+/* Return the scalar offset type that an internal gather/scatter function
+ should use. GS_INFO describes the gather/scatter operation. */
+
+static tree
+vect_get_gather_scatter_offset_type (gather_scatter_info *gs_info)
+{
+ tree offset_type = TREE_TYPE (gs_info->offset);
+ unsigned int element_bits = tree_to_uhwi (TYPE_SIZE (gs_info->element_type));
+
+ /* Enforced by vect_check_gather_scatter. */
+ unsigned int offset_bits = TYPE_PRECISION (offset_type);
+ gcc_assert (element_bits >= offset_bits);
+
+ /* If the offset is narrower than the elements, extend it according
+ to its sign. */
+ if (element_bits > offset_bits)
+ return build_nonstandard_integer_type (element_bits,
+ TYPE_UNSIGNED (offset_type));
+
+ return offset_type;
+}
+
+/* Return MASK if MASK is suitable for masking an operation on vectors
+ of type VECTYPE, otherwise convert it into such a form and return
+ the result. Associate any conversion statements with STMT_INFO's
+ pattern. */
+
+static tree
+vect_convert_mask_for_vectype (tree mask, tree vectype,
+ stmt_vec_info stmt_info, vec_info *vinfo)
+{
+ tree mask_type = search_type_for_mask (mask, vinfo);
+ if (mask_type)
+ {
+ tree mask_vectype = get_mask_type_for_scalar_type (mask_type);
+ if (mask_vectype
+ && may_ne (TYPE_VECTOR_SUBPARTS (vectype),
+ TYPE_VECTOR_SUBPARTS (mask_vectype)))
+ mask = build_mask_conversion (mask, vectype, stmt_info, vinfo);
+ }
+ return mask;
+}
+
+/* Return the equivalent of:
+
+ fold_convert (TYPE, VALUE)
+
+ with the expectation that the operation will be vectorized.
+ If new statements are needed, add them as pattern statements
+ to STMT_INFO. */
+
+static tree
+vect_add_conversion_to_patterm (tree type, tree value,
+ stmt_vec_info stmt_info,
+ vec_info *vinfo)
+{
+ if (useless_type_conversion_p (type, TREE_TYPE (value)))
+ return value;
+
+ tree new_value = vect_recog_temp_ssa_var (type, NULL);
+ gassign *conversion = gimple_build_assign (new_value, CONVERT_EXPR, value);
+ stmt_vec_info new_stmt_info = new_stmt_vec_info (conversion, vinfo);
+ set_vinfo_for_stmt (conversion, new_stmt_info);
+ STMT_VINFO_VECTYPE (new_stmt_info) = get_vectype_for_scalar_type (type);
+ append_pattern_def_seq (stmt_info, conversion);
+ return new_value;
+}
+
+/* Try to convert STMT into a call to a gather load or scatter store
+ internal function. Return the final statement on success and set
+ *TYPE_IN and *TYPE_OUT to the vector type being loaded or stored.
+
+ This function only handles gathers and scatters that were recognized
+ as such from the outset (indicated by STMT_VINFO_GATHER_SCATTER_P). */
+
+static gimple *
+vect_try_gather_scatter_pattern (gimple *stmt, stmt_vec_info last_stmt_info,
+ tree *type_in, tree *type_out)
+{
+ /* Currently we only support this for loop vectorization. */
+ stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+ loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (stmt_info->vinfo);
+ if (!loop_vinfo)
+ return NULL;
+
+ /* Make sure that we're looking at a gather load or scatter store. */
+ data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
+ if (!dr || !STMT_VINFO_GATHER_SCATTER_P (stmt_info))
+ return NULL;
+
+ /* Get the boolean that controls whether the load or store happens.
+ This is null if the operation is unconditional. */
+ tree mask = vect_get_load_store_mask (stmt);
+
+ /* Make sure that the target supports an appropriate internal
+ function for the gather/scatter operation. */
+ gather_scatter_info gs_info;
+ if (!vect_check_gather_scatter (stmt, loop_vinfo, &gs_info)
+ || gs_info.decl)
+ return NULL;
+
+ /* Convert the mask to the right form. */
+ tree gs_vectype = get_vectype_for_scalar_type (gs_info.element_type);
+ if (mask)
+ mask = vect_convert_mask_for_vectype (mask, gs_vectype, last_stmt_info,
+ loop_vinfo);
+
+ /* Get the invariant base and non-invariant offset, converting the
+ latter to the same width as the vector elements. */
+ tree base = gs_info.base;
+ tree offset_type = vect_get_gather_scatter_offset_type (&gs_info);
+ tree offset = vect_add_conversion_to_patterm (offset_type, gs_info.offset,
+ last_stmt_info, loop_vinfo);
+
+ /* Build the new pattern statement. */
+ tree scale = size_int (gs_info.scale);
+ gcall *pattern_stmt;
+ if (DR_IS_READ (dr))
+ {
+ if (mask != NULL)
+ pattern_stmt = gimple_build_call_internal (gs_info.ifn, 4, base,
+ offset, scale, mask);
+ else
+ pattern_stmt = gimple_build_call_internal (gs_info.ifn, 3, base,
+ offset, scale);
+ tree load_lhs = vect_recog_temp_ssa_var (gs_info.element_type, NULL);
+ gimple_call_set_lhs (pattern_stmt, load_lhs);
+ }
+ else
+ {
+ tree rhs = vect_get_store_rhs (stmt);
+ if (mask != NULL)
+ pattern_stmt = gimple_build_call_internal (IFN_MASK_SCATTER_STORE, 5,
+ base, offset, scale, rhs,
+ mask);
+ else
+ pattern_stmt = gimple_build_call_internal (IFN_SCATTER_STORE, 4,
+ base, offset, scale, rhs);
+ }
+ gimple_call_set_nothrow (pattern_stmt, true);
+
+ /* Copy across relevant vectorization info and associate DR with the
+ new pattern statement instead of the original statement. */
+ stmt_vec_info pattern_stmt_info = new_stmt_vec_info (pattern_stmt,
+ loop_vinfo);
+ set_vinfo_for_stmt (pattern_stmt, pattern_stmt_info);
+ STMT_VINFO_DATA_REF (pattern_stmt_info) = dr;
+ STMT_VINFO_DR_WRT_VEC_LOOP (pattern_stmt_info)
+ = STMT_VINFO_DR_WRT_VEC_LOOP (stmt_info);
+ STMT_VINFO_GATHER_SCATTER_P (pattern_stmt_info)
+ = STMT_VINFO_GATHER_SCATTER_P (stmt_info);
+ DR_STMT (dr) = pattern_stmt;
+
+ tree vectype = STMT_VINFO_VECTYPE (stmt_info);
+ *type_out = vectype;
+ *type_in = vectype;
+
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "gather/scatter pattern detected:\n");
+
+ return pattern_stmt;
+}
+
+/* Pattern wrapper around vect_try_gather_scatter_pattern. */
+
+static gimple *
+vect_recog_gather_scatter_pattern (vec<gimple *> *stmts, tree *type_in,
+ tree *type_out)
+{
+ gimple *last_stmt = stmts->pop ();
+ stmt_vec_info last_stmt_info = vinfo_for_stmt (last_stmt);
+ gimple *pattern_stmt = vect_try_gather_scatter_pattern (last_stmt,
+ last_stmt_info,
+ type_in, type_out);
+ if (pattern_stmt)
+ stmts->safe_push (last_stmt);
+ return pattern_stmt;
+}
/* Mark statements that are involved in a pattern. */
diff --git a/gcc/tree-vect-slp.c b/gcc/tree-vect-slp.c
index 01dfecaafb6..4b52092cc71 100644
--- a/gcc/tree-vect-slp.c
+++ b/gcc/tree-vect-slp.c
@@ -210,12 +210,13 @@ vect_get_place_in_interleaving_chain (gimple *stmt, gimple *first_stmt)
/* Check whether it is possible to load COUNT elements of type ELT_MODE
using the method implemented by duplicate_and_interleave. Return true
if so, returning the number of intermediate vectors in *NVECTORS_OUT
- and the mode of each intermediate vector in *VEC_MODE_OUT. */
+ (if nonnull) and the type of each intermediate vector in *VECTOR_TYPE_OUT
+ (if nonnull). */
bool
can_duplicate_and_interleave_p (unsigned int count, machine_mode elt_mode,
unsigned int *nvectors_out,
- machine_mode *vec_mode_out)
+ tree *vector_type_out)
{
poly_int64 elt_bytes = count * GET_MODE_SIZE (elt_mode);
poly_int64 nelts;
@@ -223,18 +224,25 @@ can_duplicate_and_interleave_p (unsigned int count, machine_mode elt_mode,
for (;;)
{
scalar_int_mode int_mode;
+ poly_int64 elt_bits = elt_bytes * BITS_PER_UNIT;
if (multiple_p (current_vector_size, elt_bytes, &nelts)
- && int_mode_for_size (elt_bytes * BITS_PER_UNIT,
- 0).exists (&int_mode))
+ && int_mode_for_size (elt_bits, 0).exists (&int_mode))
{
- machine_mode vec_mode;
- if (mode_for_vector (int_mode, nelts).exists (&vec_mode)
- && targetm.vector_mode_supported_p (vec_mode))
+ tree int_type = build_nonstandard_integer_type
+ (GET_MODE_BITSIZE (int_mode), 1);
+ tree vector_type = build_vector_type (int_type, nelts);
+ if (VECTOR_MODE_P (TYPE_MODE (vector_type))
+ && direct_internal_fn_supported_p (IFN_VEC_INTERLEAVE_LO,
+ vector_type,
+ OPTIMIZE_FOR_SPEED)
+ && direct_internal_fn_supported_p (IFN_VEC_INTERLEAVE_HI,
+ vector_type,
+ OPTIMIZE_FOR_SPEED))
{
if (nvectors_out)
*nvectors_out = nvectors;
- if (vec_mode_out)
- *vec_mode_out = vec_mode;
+ if (vector_type_out)
+ *vector_type_out = vector_type;
return true;
}
}
@@ -1832,7 +1840,7 @@ vect_analyze_slp_cost_1 (slp_instance instance, slp_tree node,
: VMAT_CONTIGUOUS);
if (DR_IS_WRITE (STMT_VINFO_DATA_REF (stmt_info)))
vect_model_store_cost (stmt_info, ncopies_for_cost,
- memory_access_type, vect_uninitialized_def,
+ memory_access_type, VLS_STORE,
node, prologue_cost_vec, body_cost_vec);
else
{
@@ -2055,7 +2063,6 @@ vect_split_slp_store_group (gimple *first_stmt, unsigned group1_size)
unsigned int first_uid = gimple_uid (first_stmt);
unsigned int last_uid = first_uid;
GROUP_SIZE (first_vinfo) = group1_size;
- GROUP_NUM_STMTS (first_vinfo) = group1_size;
gimple *stmt = first_stmt;
for (unsigned i = group1_size; i > 1; i--)
@@ -2074,12 +2081,10 @@ vect_split_slp_store_group (gimple *first_stmt, unsigned group1_size)
first_uid = last_uid = gimple_uid (group2);
GROUP_SIZE (vinfo_for_stmt (group2)) = group2_size;
- GROUP_NUM_STMTS (vinfo_for_stmt (group2)) = 0;
for (stmt = group2; stmt; stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt)))
{
first_uid = MIN (first_uid, gimple_uid (stmt));
last_uid = MAX (last_uid, gimple_uid (stmt));
- GROUP_NUM_STMTS (vinfo_for_stmt (group2)) += 1;
GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = group2;
gcc_assert (GROUP_GAP (vinfo_for_stmt (stmt)) == 1);
}
@@ -3342,10 +3347,10 @@ vect_mask_constant_operand_p (gimple *stmt, int opnum)
(3) Duplicate each ELTS'[I] into a vector of mode VM.
- (4) Use a tree of VEC_INTERLEAVE_HI/LOs to create VMs with the
+ (4) Use a tree of VEC_INTERLEAVE_LO/HIs to create VMs with the
correct byte contents.
- (5) Use VIEW_CONVERT_EXPR to cast the final VM to the required type.
+ (5) Use VIEW_CONVERT_EXPR to cast the final VMs to the required type.
We try to find the largest IM for which this sequence works, in order
to cut down on the number of interleaves. */
@@ -3359,18 +3364,11 @@ duplicate_and_interleave (gimple_seq *seq, tree vector_type, vec<tree> elts,
/* (1) Find a vector mode VM with integer elements of mode IM. */
unsigned int nvectors = 1;
- machine_mode new_vector_mode;
+ tree new_vector_type;
if (!can_duplicate_and_interleave_p (nelts, TYPE_MODE (element_type),
- &nvectors, &new_vector_mode))
+ &nvectors, &new_vector_type))
gcc_unreachable ();
- /* Get the types associated with IM and VM above. */
- unsigned int new_element_bits = GET_MODE_UNIT_BITSIZE (new_vector_mode);
- tree new_element_type
- = build_nonstandard_integer_type (new_element_bits, 1);
- tree new_vector_type
- = build_vector_type (new_element_type, GET_MODE_NUNITS (new_vector_mode));
-
/* Get a vector type that holds ELTS[0:NELTS/NELTS']. */
unsigned int partial_nelts = nelts / nvectors;
tree partial_vector_type = build_vector_type (element_type, partial_nelts);
@@ -3386,25 +3384,26 @@ duplicate_and_interleave (gimple_seq *seq, tree vector_type, vec<tree> elts,
for (unsigned int j = 0; j < partial_nelts; ++j)
partial_elts[j] = elts[i * partial_nelts + j];
tree t = gimple_build_vector (seq, partial_vector_type, partial_elts);
- t = gimple_build (seq, VIEW_CONVERT_EXPR, new_element_type, t);
+ t = gimple_build (seq, VIEW_CONVERT_EXPR,
+ TREE_TYPE (new_vector_type), t);
/* (3) Duplicate each ELTS'[I] into a vector of mode VM. */
pieces[i] = gimple_build_vector_from_val (seq, new_vector_type, t);
}
- /* (4) Use a tree of VEC_INTERLEAVE_HIs to create a single VM with the
+ /* (4) Use a tree of VEC_INTERLEAVE_LO/HIs to create a single VM with the
correct byte contents.
We need to repeat the following operation log2(nvectors) times:
- out[i * 2] = VEC_INTERLEAVE_HI (in[i], in[i + lo_start]);
- out[i * 2 + 1] = VEC_INTERLEAVE_LO (in[i], in[i + lo_start]);
+ out[i * 2] = VEC_INTERLEAVE_LO (in[i], in[i + hi_start]);
+ out[i * 2 + 1] = VEC_INTERLEAVE_HI (in[i], in[i + hi_start]);
However, if each input repeats every N elements and the VF is
- a multiple of N * 2, the LO result is the same as the HI. */
+ a multiple of N * 2, the HI result is the same as the LO. */
unsigned int in_start = 0;
unsigned int out_start = nvectors;
- unsigned int lo_start = nvectors / 2;
+ unsigned int hi_start = nvectors / 2;
/* A bound on the number of outputs needed to produce NRESULTS results
in the final iteration. */
unsigned int noutputs_bound = nvectors * nresults;
@@ -3415,7 +3414,7 @@ duplicate_and_interleave (gimple_seq *seq, tree vector_type, vec<tree> elts,
for (unsigned int i = 0; i < limit; ++i)
{
if ((i & 1) != 0
- && multiple_p (GET_MODE_NUNITS (new_vector_mode),
+ && multiple_p (TYPE_VECTOR_SUBPARTS (new_vector_type),
2 * in_repeat))
{
pieces[out_start + i] = pieces[out_start + i - 1];
@@ -3424,10 +3423,10 @@ duplicate_and_interleave (gimple_seq *seq, tree vector_type, vec<tree> elts,
tree output = make_ssa_name (new_vector_type);
tree input1 = pieces[in_start + (i / 2)];
- tree input2 = pieces[in_start + (i / 2) + lo_start];
+ tree input2 = pieces[in_start + (i / 2) + hi_start];
internal_fn fn = ((i & 1) != 0
- ? IFN_VEC_INTERLEAVE_LO
- : IFN_VEC_INTERLEAVE_HI);
+ ? IFN_VEC_INTERLEAVE_HI
+ : IFN_VEC_INTERLEAVE_LO);
gcall *call = gimple_build_call_internal (fn, 2, input1, input2);
gimple_call_set_lhs (call, output);
gimple_seq_add_stmt (seq, call);
@@ -3512,6 +3511,8 @@ vect_get_constant_vectors (tree op, slp_tree slp_node,
(s1, s2, ..., s8). We will create two vectors {s1, s2, s3, s4} and
{s5, s6, s7, s8}. */
+ /* When using duplicate_and_interleave, we just need one element for
+ each scalar statement. */
if (!TYPE_VECTOR_SUBPARTS (vector_type).is_constant (&nunits))
nunits = group_size;
@@ -3638,6 +3639,8 @@ vect_get_constant_vectors (tree op, slp_tree slp_node,
vec_cst = gimple_build_vector (&ctor_seq, vector_type, elts);
else if (neutral_op)
{
+ /* Build a vector of the neutral value and shift the
+ other elements into place. */
vec_cst = gimple_build_vector_from_val (&ctor_seq,
vector_type,
neutral_op);
diff --git a/gcc/tree-vect-stmts.c b/gcc/tree-vect-stmts.c
index f810212ca1b..7b3c73aebf2 100644
--- a/gcc/tree-vect-stmts.c
+++ b/gcc/tree-vect-stmts.c
@@ -49,20 +49,11 @@ along with GCC; see the file COPYING3. If not see
#include "builtins.h"
#include "internal-fn.h"
#include "tree-ssa-loop-niter.h"
-#include "cfghooks.h"
#include "gimple-fold.h"
/* For lang_hooks.types.type_for_mode. */
#include "langhooks.h"
-/* Says whether a statement is a load, a store of a vectorized statement
- result, or a store of an invariant value. */
-enum vec_load_store_type {
- VLS_LOAD,
- VLS_STORE,
- VLS_STORE_INVARIANT
-};
-
/* Return the vectorized type for the given statement. */
tree
@@ -119,6 +110,27 @@ record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
count, kind, stmt_info, misalign, where);
}
+/* Return a tree that represents STEP multiplied by the vectorization
+ factor. */
+
+static tree
+vect_mult_by_vf (loop_vec_info loop_vinfo, tree step)
+{
+ hash_map<tree, tree> *map = &LOOP_VINFO_VF_MULT_MAP (loop_vinfo);
+ bool existed;
+ tree &entry = map->get_or_insert (step, &existed);
+ if (!existed)
+ {
+ gimple_seq seq = NULL;
+ tree vf = LOOP_VINFO_CAP (loop_vinfo).niters;
+ vf = gimple_convert (&seq, TREE_TYPE (step), vf);
+ entry = gimple_build (&seq, MULT_EXPR, TREE_TYPE (step), vf, step);
+ edge pe = loop_preheader_edge (LOOP_VINFO_LOOP (loop_vinfo));
+ gsi_insert_seq_on_edge_immediate (pe, seq);
+ }
+ return entry;
+}
+
/* Return a variable of type ELEM_TYPE[NELEMS]. */
static tree
@@ -399,21 +411,20 @@ exist_non_indexing_operands_for_use_p (tree use, gimple *stmt)
{
if (is_gimple_call (stmt)
&& gimple_call_internal_p (stmt))
- switch (gimple_call_internal_fn (stmt))
- {
- case IFN_MASK_STORE:
- operand = gimple_call_arg (stmt, 3);
- if (operand == use)
- return true;
- /* FALLTHRU */
- case IFN_MASK_LOAD:
- operand = gimple_call_arg (stmt, 2);
- if (operand == use)
- return true;
- break;
- default:
- break;
- }
+ {
+ internal_fn ifn = gimple_call_internal_fn (stmt);
+ int mask_index = internal_fn_mask_index (ifn);
+ if (mask_index >= 0
+ && use == gimple_call_arg (stmt, mask_index))
+ return true;
+ int stored_value_index = internal_fn_stored_value_index (ifn);
+ if (stored_value_index >= 0
+ && use == gimple_call_arg (stmt, stored_value_index))
+ return true;
+ if (internal_gather_scatter_fn_p (ifn)
+ && use == gimple_call_arg (stmt, 1))
+ return true;
+ }
return false;
}
@@ -812,9 +823,9 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
if (STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo))
{
gather_scatter_info gs_info;
- if (!vect_check_gather_scatter (stmt, loop_vinfo, &gs_info, false))
+ if (!vect_check_gather_scatter (stmt, loop_vinfo, &gs_info))
gcc_unreachable ();
- if (!process_use (stmt, gs_info.u.offset, loop_vinfo, relevant,
+ if (!process_use (stmt, gs_info.offset, loop_vinfo, relevant,
&worklist, true))
return false;
}
@@ -916,24 +927,22 @@ vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
void
vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
vect_memory_access_type memory_access_type,
- enum vect_def_type dt, slp_tree slp_node,
+ vec_load_store_type vls_type, slp_tree slp_node,
stmt_vector_for_cost *prologue_cost_vec,
stmt_vector_for_cost *body_cost_vec)
{
unsigned int inside_cost = 0, prologue_cost = 0;
struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
gimple *first_stmt = STMT_VINFO_STMT (stmt_info);
+ bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);
- if (dt == vect_constant_def || dt == vect_external_def)
+ if (vls_type == VLS_STORE_INVARIANT)
prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
stmt_info, 0, vect_prologue);
- /* Scatter stores only update elements associated with STMT_INFO.
- Other grouped stores update all elements in the group at once,
+ /* Grouped stores update all elements in the group at once,
so we want the DR for the first statement. */
- if (!slp_node
- && STMT_VINFO_GROUPED_ACCESS (stmt_info)
- && memory_access_type != VMAT_GATHER_SCATTER)
+ if (!slp_node && grouped_access_p)
{
first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
@@ -1065,13 +1074,11 @@ vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
gimple *first_stmt = STMT_VINFO_STMT (stmt_info);
struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
unsigned int inside_cost = 0, prologue_cost = 0;
+ bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);
- /* Gather loads only read elements associated with STMT_INFO.
- Other grouped loads read all elements in the group at once,
+ /* Grouped loads read all elements in the group at once,
so we want the DR for the first statement. */
- if (!slp_node
- && STMT_VINFO_GROUPED_ACCESS (stmt_info)
- && memory_access_type != VMAT_GATHER_SCATTER)
+ if (!slp_node && grouped_access_p)
{
first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
@@ -1621,10 +1628,9 @@ vect_get_vec_defs (tree op0, tree op1, gimple *stmt,
}
}
-/* Function vect_finish_stmt_generation_1.
-
- Helper function called by vect_finish_replace_stmt and
- vect_finish_stmt_generation. */
+/* Helper function called by vect_finish_replace_stmt and
+ vect_finish_stmt_generation. Set the location of the new
+ statement and create a stmt_vec_info for it. */
static void
vect_finish_stmt_generation_1 (gimple *stmt, gimple *vec_stmt)
@@ -1650,14 +1656,12 @@ vect_finish_stmt_generation_1 (gimple *stmt, gimple *vec_stmt)
add_stmt_to_eh_lp (vec_stmt, lp_nr);
}
-/* Function vect_finish_replace_stmt.
-
- Replace the scalar statement STMT with a new vector statement VEC_STMT. */
+/* Replace the scalar statement STMT with a new vector statement VEC_STMT,
+ which sets the same scalar result as STMT did. */
void
vect_finish_replace_stmt (gimple *stmt, gimple *vec_stmt)
{
- gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);
gcc_assert (gimple_get_lhs (stmt) == gimple_get_lhs (vec_stmt));
gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
@@ -1703,7 +1707,6 @@ vect_finish_stmt_generation (gimple *stmt, gimple *vec_stmt,
}
}
gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
-
vect_finish_stmt_generation_1 (stmt, vec_stmt);
}
@@ -1745,28 +1748,29 @@ static tree permute_vec_elements (tree, tree, tree, gimple *,
whether the vectorizer pass has the appropriate support, as well as
whether the target does.
- IS_LOAD is true if the statement is a load and VECTYPE is the type
- of the vector being loaded or stored. MEMORY_ACCESS_TYPE says how the
- load or store is going to be implemented and GROUP_SIZE is the number of
- load or store statements in the containing group. WIDENED_OFFSET_TYPE
- is as for gather_scatter_info.
+ VLS_TYPE says whether the statement is a load or store and VECTYPE
+ is the type of the vector being loaded or stored. MEMORY_ACCESS_TYPE
+ says how the load or store is going to be implemented and GROUP_SIZE
+ is the number of load or store statements in the containing group.
+ If the access is a gather load or scatter store, GS_INFO describes
+ its arguments.
Clear LOOP_VINFO_CAN_FULLY_MASK_P if a fully-masked loop is not
supported, otherwise record the required mask types. */
static void
check_load_store_masking (loop_vec_info loop_vinfo, tree vectype,
- bool is_load, int group_size,
+ vec_load_store_type vls_type, int group_size,
vect_memory_access_type memory_access_type,
- tree widened_offset_type)
+ gather_scatter_info *gs_info)
{
- vec_loop_masks *masks = &LOOP_VINFO_MASKS (loop_vinfo);
- machine_mode vecmode = TYPE_MODE (vectype);
-
/* Invariant loads need no special support. */
if (memory_access_type == VMAT_INVARIANT)
return;
+ vec_loop_masks *masks = &LOOP_VINFO_MASKS (loop_vinfo);
+ machine_mode vecmode = TYPE_MODE (vectype);
+ bool is_load = (vls_type == VLS_LOAD);
if (memory_access_type == VMAT_LOAD_STORE_LANES)
{
if (is_load
@@ -1775,10 +1779,9 @@ check_load_store_masking (loop_vec_info loop_vinfo, tree vectype,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "Can't use a fully-masked loop because the"
+ "can't use a fully-masked loop because the"
" target doesn't have an appropriate masked"
- " %s-lanes instruction.\n",
- is_load ? "load" : "store");
+ " load/store-lanes instruction.\n");
LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
return;
}
@@ -1789,16 +1792,20 @@ check_load_store_masking (loop_vec_info loop_vinfo, tree vectype,
if (memory_access_type == VMAT_GATHER_SCATTER)
{
- if (get_gather_scatter_internal_fn (is_load, vectype,
- widened_offset_type, true)
- == IFN_LAST)
+ internal_fn ifn = (is_load
+ ? IFN_MASK_GATHER_LOAD
+ : IFN_MASK_SCATTER_STORE);
+ tree offset_type = TREE_TYPE (gs_info->offset);
+ if (!internal_gather_scatter_fn_supported_p (ifn, vectype,
+ gs_info->memory_type,
+ TYPE_SIGN (offset_type),
+ gs_info->scale))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "Can't use a fully-masked loop because the"
- " target doesn't have the appropriate masked"
- " %s.\n",
- is_load ? "gather load" : "scatter store");
+ "can't use a fully-masked loop because the"
+ " target doesn't have an appropriate masked"
+ " gather load or scatter store instruction.\n");
LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
return;
}
@@ -1814,7 +1821,7 @@ check_load_store_masking (loop_vec_info loop_vinfo, tree vectype,
scalar loop. We need more work to support other mappings. */
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "Can't use a fully-masked loop because an access"
+ "can't use a fully-masked loop because an access"
" isn't contiguous.\n");
LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
return;
@@ -1828,9 +1835,9 @@ check_load_store_masking (loop_vec_info loop_vinfo, tree vectype,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "Can't use a fully-masked loop because the target"
- " doesn't have the appropriate masked %s.\n",
- is_load ? "load" : "store");
+ "can't use a fully-masked loop because the target"
+ " doesn't have the appropriate masked load or"
+ " store.\n");
LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
return;
}
@@ -1870,1088 +1877,138 @@ prepare_load_store_mask (tree mask_type, tree loop_mask, tree vec_mask,
return and_res;
}
-/* Function vect_gen_widened_results_half
-
- Create a vector stmt whose code, type, and result variable are CODE,
- OP_TYPE, and VEC_DEST, and its arguments are VEC_OPRND0 and VEC_OPRND1.
- The new vector stmt is to be inserted at GSI. In the case that CODE is a
- CALL_EXPR, this means that a call to DECL needs to be created (DECL is a
- function-decl of a target-builtin). STMT is the original scalar stmt that
- we are vectorizing. */
-
-static gimple *
-vect_gen_widened_results_half (enum tree_code code,
- tree decl,
- tree vec_oprnd0, tree vec_oprnd1, int op_type,
- tree vec_dest, gimple_stmt_iterator *gsi,
- gimple *stmt)
-{
- gimple *new_stmt;
- tree new_temp;
-
- /* Generate half of the widened result: */
- if (code == CALL_EXPR)
- {
- /* Target specific support */
- if (op_type == binary_op)
- new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
- else
- new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
- new_temp = make_ssa_name (vec_dest, new_stmt);
- gimple_call_set_lhs (new_stmt, new_temp);
- }
- else
- {
- /* Generic support */
- gcc_assert (op_type == TREE_CODE_LENGTH (code));
- if (op_type != binary_op)
- vec_oprnd1 = NULL;
- new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1);
- new_temp = make_ssa_name (vec_dest, new_stmt);
- gimple_assign_set_lhs (new_stmt, new_temp);
- }
- vect_finish_stmt_generation (stmt, new_stmt, gsi);
-
- return new_stmt;
-}
-
-/* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
- and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
- the resulting vectors and call the function recursively. */
-
-static void
-vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
- vec<tree> *vec_oprnds1,
- gimple *stmt, tree vec_dest,
- gimple_stmt_iterator *gsi,
- enum tree_code code1,
- enum tree_code code2, tree decl1,
- tree decl2, int op_type)
-{
- int i;
- tree vop0, vop1, new_tmp1, new_tmp2;
- gimple *new_stmt1, *new_stmt2;
- vec<tree> vec_tmp = vNULL;
-
- vec_tmp.create (vec_oprnds0->length () * 2);
- FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
- {
- if (op_type == binary_op)
- vop1 = (*vec_oprnds1)[i];
- else
- vop1 = NULL_TREE;
-
- /* Generate the two halves of promotion operation. */
- new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
- op_type, vec_dest, gsi, stmt);
- new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
- op_type, vec_dest, gsi, stmt);
- if (is_gimple_call (new_stmt1))
- {
- new_tmp1 = gimple_call_lhs (new_stmt1);
- new_tmp2 = gimple_call_lhs (new_stmt2);
- }
- else
- {
- new_tmp1 = gimple_assign_lhs (new_stmt1);
- new_tmp2 = gimple_assign_lhs (new_stmt2);
- }
-
- /* Store the results for the next step. */
- vec_tmp.quick_push (new_tmp1);
- vec_tmp.quick_push (new_tmp2);
- }
-
- vec_oprnds0->release ();
- *vec_oprnds0 = vec_tmp;
-}
-
-/* Replace IFN_MASK_LOAD statement STMT with a dummy assignment, to ensure
- that it won't be expanded even when there's no following DCE pass. */
-
-static void
-replace_mask_load (gimple *stmt, gimple_stmt_iterator *gsi)
-{
- /* If this statement is part of a pattern created by the vectorizer,
- get the original statement. */
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
- if (STMT_VINFO_RELATED_STMT (stmt_info))
- {
- stmt = STMT_VINFO_RELATED_STMT (stmt_info);
- stmt_info = vinfo_for_stmt (stmt);
- }
-
- gcc_assert (gsi_stmt (*gsi) == stmt);
- tree lhs = gimple_call_lhs (stmt);
- tree zero = build_zero_cst (TREE_TYPE (lhs));
- gimple *new_stmt = gimple_build_assign (lhs, zero);
- set_vinfo_for_stmt (new_stmt, stmt_info);
- set_vinfo_for_stmt (stmt, NULL);
- STMT_VINFO_STMT (stmt_info) = new_stmt;
-
- /* If STMT was the first statement in a group, redirect all
- GROUP_FIRST_ELEMENT pointers to the new statement (which has the
- same stmt_info as the old statement). */
- if (GROUP_FIRST_ELEMENT (stmt_info) == stmt)
- {
- gimple *group_stmt = new_stmt;
- do
- {
- GROUP_FIRST_ELEMENT (vinfo_for_stmt (group_stmt)) = new_stmt;
- group_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (group_stmt));
- }
- while (group_stmt);
- }
- else if (GROUP_FIRST_ELEMENT (stmt_info))
- {
- /* Otherwise redirect the GROUP_NEXT_ELEMENT. It would be more
- efficient if these pointers were to the stmt_vec_info rather
- than the gimple statements themselves, but this is by no means
- the only quadractic loop for groups. */
- gimple *group_stmt = GROUP_FIRST_ELEMENT (stmt_info);
- while (GROUP_NEXT_ELEMENT (vinfo_for_stmt (group_stmt)) != stmt)
- group_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (group_stmt));
- GROUP_NEXT_ELEMENT (vinfo_for_stmt (group_stmt)) = new_stmt;
- }
- gsi_replace (gsi, new_stmt, true);
-}
-
-/* STMT is either a masked or unconditional store. Return the value
- being stored. */
-
-static tree
-get_store_op (gimple *stmt)
-{
- if (gimple_assign_single_p (stmt))
- return gimple_assign_rhs1 (stmt);
- if (gimple_call_internal_p (stmt, IFN_MASK_STORE))
- return gimple_call_arg (stmt, 3);
- gcc_unreachable ();
-}
+/* Determine whether we can use a gather load or scatter store to vectorize
+ strided load or store STMT by truncating the current offset to a smaller
+ width. We need to be able to construct an offset vector:
-struct wgather_info
-{
- tree_code wcode1, wcode2;
- tree off_vectype;
- int multi_step_cvt;
- vec<tree> interm_types;
-};
+ { 0, X, X*2, X*3, ... }
-#define DEFAULT_WGATHER_INFO { ERROR_MARK, ERROR_MARK, NULL_TREE, 0, vNULL }
+ without loss of precision, where X is STMT's DR_STEP.
-/* Check to see if a widening gather load described by GS_INFO is
- supported for STMT, where the width of the vector element in result
- RESTYPE is greater than that in offset vector.
+ Return true if this is possible, describing the gather load or scatter
+ store in GS_INFO. MASKED_P is true if the load or store is conditional. */
- If supported, return TRUE and fill in details about the widening
- operation in WGATHER, otherwise return FALSE. */
static bool
-widened_gather_support_p (tree restype, gather_scatter_info *gs_info,
- gimple *stmt, wgather_info *wgather)
-{
- scalar_int_mode widened_offmode
- = SCALAR_INT_TYPE_MODE (gs_info->widened_offset_type);
- unsigned int bits = GET_MODE_BITSIZE (widened_offmode);
- tree scalar_type = (TYPE_UNSIGNED (gs_info->offset_type)
- ? make_unsigned_type (bits)
- : make_signed_type (bits));
- tree woff_vectype
- = build_vector_type (scalar_type, TYPE_VECTOR_SUBPARTS (restype));
-
- if (!supportable_widening_operation
- (CONVERT_EXPR, stmt, woff_vectype, gs_info->offset_vectype,
- &wgather->wcode1, &wgather->wcode2, &wgather->multi_step_cvt,
- &wgather->interm_types))
- return false;
-
- wgather->off_vectype = woff_vectype;
- return true;
-}
-
-hashval_t
-gather_scatter_hasher::hash (const gather_scatter_indices *x)
-{
- inchash::hash h;
- h.add_int (TYPE_MODE (x->type));
- inchash::add_expr (x->step, h);
- return h.end ();
-}
-
-bool
-gather_scatter_hasher::equal (const gather_scatter_indices *x,
- const gather_scatter_indices *y)
-{
- if (!types_compatible_p (x->type, y->type))
- return false;
-
- return operand_equal_p (x->step, y->step, 0);
-}
-
-/* Function copy_gather_scatter_indices.
-
- Copy tree nodes from SRC to DEST, ensuring that we keep copies of nodes that
- are not SSA_NAME types as future calls to force_gimple_operand will overwrite
- those nodes and replace them with an SSA_NAME. */
-
-static void
-copy_gather_scatter_indices (gather_scatter_indices *dest,
- gather_scatter_indices *src)
-{
- dest->type = src->type;
- dest->step = get_copy_for_caching (src->step);
-}
-
-/* For a given scalar STMT and LOOP, return a vector of indices suitable to be
- used as the argument to a gather/scatter internal function. GATHER contains
- information about the gather/scatter operation. */
-static tree
-get_gather_scatter_indices (gimple *stmt, loop_vec_info loop_vinfo,
- gather_scatter_info *gs_info)
-{
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
-
- gcc_assert (STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info)
- == VMAT_GATHER_SCATTER);
- if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
- return vect_get_vec_def_for_operand (gs_info->u.offset, stmt);
-
- /* Perform a lookup based on the vector type and step (in bytes). */
- struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
- gather_scatter_indices info;
- info.type = gs_info->offset_vectype;
- /* FIXME: Shouldn't cache this as we have the info->u.step cached that gives
- us better chance of a lookup. */
- info.step = DR_STEP (dr);
-
- gather_scatter_indices **slot =
- LOOP_VINFO_GATHER_SCATTER_CACHE (loop_vinfo).find_slot (&info, INSERT);
- if (*slot)
- return (*slot)->indices;
-
- /* Cache the newly created set of indices. */
- gather_scatter_indices *entry = XNEW (struct gather_scatter_indices);
- copy_gather_scatter_indices (entry, &info);
-
- /* Nothing cached so we need to create a new vector series. */
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
- gimple_seq seq = NULL;
-
- tree offtype = TREE_TYPE (gs_info->offset_vectype);
- tree series_step = gs_info->u.step;
-
- tree stride;
- tree vec_stride;
- tree vf = LOOP_VINFO_CAP (loop_vinfo).niters;
-
- series_step = force_gimple_operand (series_step, &seq, true, NULL_TREE);
- series_step = gimple_convert (&seq, offtype, series_step);
- vf = gimple_convert (&seq, offtype, vf);
- stride = gimple_build (&seq, MULT_EXPR, offtype, series_step, vf);
- vec_stride = gimple_build_vector_from_val (&seq, gs_info->offset_vectype,
- stride);
-
- tree voff_first = gimple_build (&seq, VEC_SERIES_EXPR,
- gs_info->offset_vectype,
- build_int_cst (TREE_TYPE (series_step), 0),
- series_step);
-
- gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), seq);
-
- tree voff_in = make_temp_ssa_name (gs_info->offset_vectype, NULL,
- "gather_off_in");
- tree voff_out = make_temp_ssa_name (gs_info->offset_vectype, NULL,
- "gather_off_out");
-
- gphi *phi = create_phi_node (voff_in, loop->header);
- add_phi_arg (phi, voff_first, loop_preheader_edge (loop), UNKNOWN_LOCATION);
- add_phi_arg (phi, voff_out, loop_latch_edge (loop), UNKNOWN_LOCATION);
-
- gcond *orig_cond = get_loop_exit_condition (loop);
- gimple_stmt_iterator tmp_gsi = gsi_for_stmt (orig_cond);
-
- gimple *new_stmt = gimple_build_assign (voff_out, PLUS_EXPR,
- voff_in, vec_stride);
- gsi_insert_before (&tmp_gsi, new_stmt, GSI_SAME_STMT);
-
- entry->indices = voff_in;
- *slot = entry;
-
- return entry->indices;
-}
-
-/* Vectorize STMT as a scatter store. Insert the new statements
- before GSI and return the first vector statement in *VEC_STMT.
- GS_INFO describes the scatter operation. If MASK_VECTYPE is nonnull,
- STMT is an IFN_MASK_STORE and the mask should use type MASK_VECTYPE,
- otherwise STMT is a normal unconditional assignment. */
-
-static void
-do_scatter_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
- loop_vec_info loop_vinfo, gather_scatter_info *gs_info,
- tree mask_vectype)
-{
- tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE, op, src;
- tree srctype = NULL_TREE, ptrtype, idxtype = NULL_TREE, masktype;
- tree ptr, var, scale, perm_mask = NULL_TREE;
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
- edge pe = loop_preheader_edge (loop);
- gimple_seq seq;
- basic_block new_bb;
- bool builtin_scatter_p = (targetm.vectorize.builtin_scatter != NULL);
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
- tree vectype = STMT_VINFO_VECTYPE (stmt_info);
- poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
- int ncopies = vect_get_num_copies (loop_vinfo, vectype);
- enum { NARROW, NONE, WIDEN } modifier;
- poly_uint64 scatter_off_nunits
- = TYPE_VECTOR_SUBPARTS (gs_info->offset_vectype);
- gimple *new_stmt = NULL;
- stmt_vec_info prev_stmt_info;
- tree mask_op = NULL_TREE, vec_mask = NULL_TREE;
- bool masked_stmt_p = (mask_vectype != NULL_TREE);
- tree mask = masked_stmt_p ? gimple_call_arg (stmt, 2) : NULL_TREE;
- vect_def_type scatter_src_dt = vect_unknown_def_type;
-
- gcc_assert (STMT_VINFO_GATHER_SCATTER_P (stmt_info) || ncopies == 1);
-
- /* We don't want to implement masked scatter stores for the builtin case, as
- this is probably being worked on upstream. */
- gcc_assert (!builtin_scatter_p || !masked_stmt_p);
-
- if (must_eq (nunits, scatter_off_nunits))
- modifier = NONE;
- else if (must_eq (nunits * 2, scatter_off_nunits))
- {
- modifier = WIDEN;
-
- /* Currently gathers and scatters are only supported for
- fixed-length vectors. */
- unsigned int count = scatter_off_nunits.to_constant ();
- auto_vec_perm_indices sel (count);
- for (unsigned int i = 0; i < count; ++i)
- sel.quick_push (i | (count / 2));
-
- perm_mask = vect_gen_perm_mask_checked (gs_info->offset_vectype, sel);
- gcc_assert (perm_mask != NULL_TREE);
- }
- else if (must_eq (nunits, scatter_off_nunits * 2))
- {
- modifier = NARROW;
-
- /* Currently gathers and scatters are only supported for
- fixed-length vectors. */
- unsigned int count = nunits.to_constant ();
- auto_vec_perm_indices sel (count);
- for (unsigned int i = 0; i < count; ++i)
- sel.quick_push (i | (count / 2));
-
- perm_mask = vect_gen_perm_mask_checked (vectype, sel);
- gcc_assert (perm_mask != NULL_TREE);
- ncopies *= 2;
- }
- else
- gcc_unreachable ();
-
- if (builtin_scatter_p)
- {
- tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info->decl));
- tree rettype = TREE_TYPE (TREE_TYPE (gs_info->decl));
-
- ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
- masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
- idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
- srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
- tree scaletype = TREE_VALUE (arglist);
-
- gcc_checking_assert (TREE_CODE (masktype) == INTEGER_TYPE
- && TREE_CODE (rettype) == VOID_TYPE);
-
- /* Currently we support only unconditional scatter stores,
- so mask should be all ones. */
- mask = build_int_cst (masktype, -1);
- mask = vect_init_vector (stmt, mask, masktype, NULL);
-
- scale = build_int_cst (scaletype, gs_info->scale);
- }
- else
- {
- ptrtype = TREE_TYPE (gs_info->base);
- masktype = build_same_sized_truth_vector_type (gs_info->offset_vectype);
- scale = build_int_cst (size_type_node, gs_info->scale);
- }
-
- ptr = fold_convert (ptrtype, gs_info->base);
- if (!is_gimple_min_invariant (ptr))
- {
- ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
- new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
- gcc_assert (!new_bb);
- }
-
- prev_stmt_info = NULL;
- vec_loop_masks *masks = &LOOP_VINFO_MASKS (loop_vinfo);
- for (int j = 0; j < ncopies; ++j)
- {
- gcc_assert (builtin_scatter_p || j == 0);
- if (j == 0)
- {
- op = vec_oprnd0 = get_gather_scatter_indices (stmt, loop_vinfo,
- gs_info);
-
- tree scalar_src = get_store_op (stmt);
-
- src = vec_oprnd1 = vect_get_vec_def_for_operand (scalar_src, stmt);
- }
- else if (modifier != NONE && (j & 1))
- {
- if (modifier == WIDEN)
- {
- src = vec_oprnd1
- = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
- op = permute_vec_elements
- (vec_oprnd0, vec_oprnd0, perm_mask, stmt, gsi);
- }
- else if (modifier == NARROW)
- {
- src = permute_vec_elements
- (vec_oprnd1, vec_oprnd1, perm_mask, stmt, gsi);
- op = vec_oprnd0
- = vect_get_vec_def_for_stmt_copy (gs_info->offset_dt,
- vec_oprnd0);
- }
- else
- gcc_unreachable ();
- }
- else
- {
- src = vec_oprnd1
- = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
- op = vec_oprnd0
- = vect_get_vec_def_for_stmt_copy (gs_info->offset_dt, vec_oprnd0);
- }
-
- if (builtin_scatter_p
- && !useless_type_conversion_p (srctype, TREE_TYPE (src)))
- {
- gcc_assert (must_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src)),
- TYPE_VECTOR_SUBPARTS (srctype)));
- var = vect_get_new_ssa_name (srctype, vect_simple_var);
- src = build1 (VIEW_CONVERT_EXPR, srctype, src);
- new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, src);
- vect_finish_stmt_generation (stmt, new_stmt, gsi);
- src = var;
- }
-
- if (builtin_scatter_p
- && !useless_type_conversion_p (idxtype, TREE_TYPE (op)))
- {
- gcc_assert (must_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)),
- TYPE_VECTOR_SUBPARTS (idxtype)));
- var = vect_get_new_ssa_name (idxtype, vect_simple_var);
- op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
- new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
- vect_finish_stmt_generation (stmt, new_stmt, gsi);
- op = var;
- }
-
- if (masked_stmt_p)
- {
- if (j == 0)
- vec_mask = vect_get_vec_def_for_operand (mask, stmt,
- mask_vectype);
- else
- {
- gimple *def_stmt;
- enum vect_def_type dt;
- vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
- vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
- }
-
- mask_op = vec_mask;
- if (builtin_scatter_p
- && !useless_type_conversion_p (masktype, TREE_TYPE (vec_mask)))
- {
- gcc_assert (must_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op)),
- TYPE_VECTOR_SUBPARTS (masktype)));
- var = vect_get_new_ssa_name (masktype, vect_simple_var);
- mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op);
- new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_op);
- vect_finish_stmt_generation (stmt, new_stmt, gsi);
- mask_op = var;
- }
- }
-
- bool masked_loop_p = (loop_vinfo
- && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
- tree offtype = gs_info->offset_type;
- bool off_unsigned = TYPE_UNSIGNED (offtype);
-
- /* Always used signed when the offset does not need extending. */
- if (GET_MODE_BITSIZE (SCALAR_TYPE_MODE (TREE_TYPE (TREE_TYPE (op))))
- >= GET_MODE_BITSIZE (SCALAR_TYPE_MODE (ptrtype)))
- off_unsigned = false;
-
- gcall *call;
- if (builtin_scatter_p)
- call
- = gimple_build_call (gs_info->decl, 5, ptr, mask, op, src, scale);
- else if (masked_loop_p)
- {
- tree mask = vect_get_loop_mask (gsi, masks, ncopies, vectype, j);
- if (masked_stmt_p)
- mask = prepare_load_store_mask (masktype, mask, mask_op, gsi);
- call = gimple_build_call_internal
- (off_unsigned ? IFN_MASK_SCATTER_STOREU : IFN_MASK_SCATTER_STORES,
- 5, ptr, op, scale, src, mask);
- }
- else
- call = gimple_build_call_internal
- (off_unsigned ? IFN_SCATTER_STOREU : IFN_SCATTER_STORES, 4, ptr, op,
- scale, src);
- gimple_call_set_nothrow (call, true);
- new_stmt = call;
-
- vect_finish_stmt_generation (stmt, new_stmt, gsi);
-
- if (prev_stmt_info == NULL)
- STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
- else
- STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
- prev_stmt_info = vinfo_for_stmt (new_stmt);
- }
-}
-
-/* Vectorize STMT as a gather load. Insert the new statements
- before GSI and return the first vector statement in *VEC_STMT.
- GS_INFO describes the gather operation. If MASK_VECTYPE is nonnull,
- STMT is an IFN_MASK_LOAD and the mask should use type MASK_VECTYPE,
- otherwise STMT is a normal unconditional assignment. */
-
-static void
-do_gather_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
- loop_vec_info loop_vinfo, gather_scatter_info *gs_info,
- wgather_info *wgather, tree mask_vectype)
+vect_truncate_gather_scatter_offset (gimple *stmt, loop_vec_info loop_vinfo,
+ bool masked_p,
+ gather_scatter_info *gs_info)
{
- tree vec_oprnd0 = NULL_TREE, op, rettype, ptrtype, idxtype = NULL_TREE;
- tree ptr, var, scale, merge = NULL_TREE;
- tree perm_mask = NULL_TREE, prev_res = NULL_TREE, masktype;
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
- edge pe = loop_preheader_edge (loop);
- gimple_seq seq;
- basic_block new_bb;
- bool builtin_gather_p = (targetm.vectorize.builtin_gather != NULL);
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
- tree vectype = STMT_VINFO_VECTYPE (stmt_info);
- poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
- int ncopies = vect_get_num_copies (loop_vinfo, vectype);
- enum { NARROW, NONE, WIDEN } modifier;
- poly_uint64 gather_off_nunits
- = TYPE_VECTOR_SUBPARTS (gs_info->offset_vectype);
- gimple *new_stmt = NULL;
- stmt_vec_info prev_stmt_info;
- tree mask_perm_mask = NULL_TREE, mask_op = NULL_TREE, vec_mask = NULL_TREE;
- bool masked_stmt_p = (mask_vectype != NULL_TREE);
- tree mask = masked_stmt_p ? gimple_call_arg (stmt, 2) : NULL_TREE;
-
- gcc_assert (STMT_VINFO_GATHER_SCATTER_P (stmt_info) || ncopies == 1);
-
- if (builtin_gather_p)
- {
- tree arglist, srctype, scaletype;
-
- arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info->decl));
- rettype = TREE_TYPE (TREE_TYPE (gs_info->decl));
- srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
- ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
- idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
- masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
- scaletype = TREE_VALUE (arglist);
-
- gcc_checking_assert (types_compatible_p (srctype, rettype));
- gcc_checking_assert (!masked_stmt_p
- || types_compatible_p (srctype, masktype));
-
- scale = build_int_cst (scaletype, gs_info->scale);
- }
- else
- {
- rettype = vectype;
- ptrtype = TREE_TYPE (gs_info->base);
- masktype
- = build_same_sized_truth_vector_type (wgather->off_vectype
- ? wgather->off_vectype
- : gs_info->offset_vectype);
- scale = build_int_cst (size_type_node, gs_info->scale);
- }
-
- if (must_eq (nunits, gather_off_nunits))
- modifier = NONE;
- else if (wgather->off_vectype || must_eq (nunits * 2, gather_off_nunits))
- {
- modifier = WIDEN;
- if (!wgather->off_vectype)
- {
- /* Enforced when we checked the mask originally. */
- unsigned int count = gather_off_nunits.to_constant ();
- auto_vec_perm_indices sel (count);
- for (unsigned int i = 0; i < count; ++i)
- sel.quick_push (i | (count / 2));
- perm_mask
- = vect_gen_perm_mask_checked (gs_info->offset_vectype, sel);
- }
- }
- else if (must_eq (nunits, gather_off_nunits * 2))
- {
- modifier = NARROW;
-
- /* Enforced when we checked the mask originally. */
- unsigned int count = nunits.to_constant ();
- auto_vec_perm_indices sel (count);
- sel.quick_grow (count);
- for (unsigned int i = 0; i < count; ++i)
- sel[i] = i < count / 2 ? i : i + count / 2;
- perm_mask = vect_gen_perm_mask_checked (vectype, sel);
- ncopies *= 2;
-
- if (masked_stmt_p)
- {
- for (unsigned int i = 0; i < count; ++i)
- sel[i] = i | (count / 2);
- mask_perm_mask = vect_gen_perm_mask_checked (masktype, sel);
- }
- }
- else
- gcc_unreachable ();
-
- if (!masked_stmt_p && builtin_gather_p)
- {
- /* Currently we support only unconditional gather loads,
- so mask should be all ones. */
- if (TREE_CODE (masktype) == INTEGER_TYPE)
- mask = build_int_cst (masktype, -1);
- else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
- {
- mask = build_int_cst (TREE_TYPE (masktype), -1);
- mask = build_vector_from_val (masktype, mask);
- mask = vect_init_vector (stmt, mask, masktype, NULL);
- }
- else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
- {
- REAL_VALUE_TYPE r;
- long tmp[6];
- for (int j = 0; j < 6; ++j)
- tmp[j] = -1;
- real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
- mask = build_real (TREE_TYPE (masktype), r);
- mask = build_vector_from_val (masktype, mask);
- mask = vect_init_vector (stmt, mask, masktype, NULL);
- }
- else
- gcc_unreachable ();
-
- if (TREE_CODE (TREE_TYPE (rettype)) == INTEGER_TYPE)
- merge = build_int_cst (TREE_TYPE (rettype), 0);
- else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (rettype)))
- {
- REAL_VALUE_TYPE r;
- long tmp[6];
- for (int j = 0; j < 6; ++j)
- tmp[j] = 0;
- real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (rettype)));
- merge = build_real (TREE_TYPE (rettype), r);
- }
- else
- gcc_unreachable ();
- merge = build_vector_from_val (rettype, merge);
- merge = vect_init_vector (stmt, merge, rettype, NULL);
- }
-
- ptr = fold_convert (ptrtype, gs_info->base);
- if (!is_gimple_min_invariant (ptr))
- {
- ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
- new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
- gcc_assert (!new_bb);
- }
-
- prev_stmt_info = NULL;
-
- tree scalar_dest = (masked_stmt_p
- ? gimple_call_lhs (stmt)
- : gimple_assign_lhs (stmt));
- tree vec_dest = vect_create_destination_var (scalar_dest, vectype);
-
- auto_vec<tree> vec_offset_dsts;
- if (wgather->off_vectype)
- {
- vec_offset_dsts.create (wgather->multi_step_cvt + 1);
- vec_offset_dsts.quick_push (wgather->off_vectype);
-
- tree tmp_type;
- if (wgather->multi_step_cvt)
- for (int i = wgather->interm_types.length () - 1;
- wgather->interm_types.iterate (i, &tmp_type);
- i--)
- {
- tree tmp = vect_get_new_vect_var (tmp_type, vect_simple_var, NULL);
- vec_offset_dsts.quick_push (tmp);
- }
- }
-
- auto_vec<tree> vec_oprnds0;
- unsigned int k = 0;
- vec_loop_masks *masks = &LOOP_VINFO_MASKS (loop_vinfo);
- for (int j = 0; j < ncopies; ++j)
+ data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
+ tree step = DR_STEP (dr);
+ if (TREE_CODE (step) != INTEGER_CST)
{
- if (wgather->off_vectype && k < vec_oprnds0.length ())
- op = vec_oprnds0[k++];
- else if (modifier == WIDEN && (j & 1))
- op = permute_vec_elements
- (vec_oprnd0, vec_oprnd0, perm_mask, stmt, gsi);
- else
- {
- if (j == 0)
- op = vec_oprnd0 = get_gather_scatter_indices (stmt, loop_vinfo,
- gs_info);
- else
- op = vec_oprnd0
- = vect_get_vec_def_for_stmt_copy (gs_info->offset_dt,
- vec_oprnd0);
-
- if (wgather->off_vectype)
- {
- vec_oprnds0.truncate (0);
- vec_oprnds0.safe_push (vec_oprnd0);
- for (int i = wgather->multi_step_cvt; i >= 0; i--)
- vect_create_vectorized_promotion_stmts
- (&vec_oprnds0, NULL, stmt, vec_offset_dsts[i], gsi,
- wgather->wcode1, wgather->wcode2, NULL, NULL, unary_op);
- op = vec_oprnds0[0];
- k = 1;
- }
- }
-
- if (builtin_gather_p && !useless_type_conversion_p (idxtype, TREE_TYPE (op)))
- {
- gcc_assert (must_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)),
- TYPE_VECTOR_SUBPARTS (idxtype)));
- var = vect_get_new_ssa_name (idxtype, vect_simple_var);
- op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
- new_stmt
- = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
- vect_finish_stmt_generation (stmt, new_stmt, gsi);
- op = var;
- }
-
- if (masked_stmt_p && mask_perm_mask && (j & 1))
- mask_op
- = permute_vec_elements (mask_op, mask_op, mask_perm_mask, stmt, gsi);
- else if (masked_stmt_p)
- {
- if (j == 0)
- vec_mask = vect_get_vec_def_for_operand (mask, stmt, mask_vectype);
- else
- {
- gimple *def_stmt;
- enum vect_def_type dt;
- vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
- vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
- }
-
- mask_op = vec_mask;
- if (!useless_type_conversion_p (masktype, TREE_TYPE (vec_mask)))
- {
- gcc_assert (must_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op)),
- TYPE_VECTOR_SUBPARTS (masktype)));
- var = vect_get_new_ssa_name (masktype, vect_simple_var);
- mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op);
- new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_op);
- vect_finish_stmt_generation (stmt, new_stmt, gsi);
- mask_op = var;
- }
- }
-
- bool masked_loop_p = (loop_vinfo
- && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
-
- gcall *call;
- if (builtin_gather_p)
- {
- gcc_assert (!masked_loop_p);
- call = gimple_build_call
- (gs_info->decl, 5, masked_stmt_p ? mask_op : merge, ptr, op,
- masked_stmt_p ? mask_op : mask, scale);
- }
- else
- {
- internal_fn ifn = get_gather_scatter_internal_fn
- (true, rettype, TREE_TYPE (TREE_TYPE (op)),
- masked_stmt_p || masked_loop_p);
- if (ifn == IFN_LAST)
- gcc_unreachable ();
- if (masked_stmt_p || masked_loop_p)
- {
- tree mask = NULL;
- if (masked_loop_p)
- mask = vect_get_loop_mask (gsi, masks, ncopies, vectype, j);
- if (masked_stmt_p)
- mask = prepare_load_store_mask (masktype, mask, mask_op, gsi);
- call = gimple_build_call_internal (ifn, 4, ptr, op, scale, mask);
- }
- else
- call = gimple_build_call_internal (ifn, 3, ptr, op, scale);
- }
- gimple_call_set_nothrow (call, true);
- new_stmt = call;
-
- if (!useless_type_conversion_p (vectype, rettype))
- {
- gcc_assert (must_eq (TYPE_VECTOR_SUBPARTS (vectype),
- TYPE_VECTOR_SUBPARTS (rettype)));
- op = vect_get_new_ssa_name (rettype, vect_simple_var);
- gimple_call_set_lhs (call, op);
- vect_finish_stmt_generation (stmt, call, gsi);
- var = make_ssa_name (vec_dest);
- op = build1 (VIEW_CONVERT_EXPR, vectype, op);
- new_stmt
- = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
- }
- else
- {
- var = make_ssa_name (vec_dest, call);
- gimple_call_set_lhs (call, var);
- }
-
- vect_finish_stmt_generation (stmt, new_stmt, gsi);
-
- if (modifier == NARROW)
- {
- if ((j & 1) == 0)
- {
- prev_res = var;
- continue;
- }
- var = permute_vec_elements (prev_res, var, perm_mask, stmt, gsi);
- new_stmt = SSA_NAME_DEF_STMT (var);
- }
-
- if (prev_stmt_info == NULL)
- STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
- else
- STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
- prev_stmt_info = vinfo_for_stmt (new_stmt);
+ /* ??? Perhaps we could use range information here? */
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "cannot truncate variable step.\n");
+ return false;
}
- if (masked_stmt_p)
- replace_mask_load (stmt, gsi);
-}
-
-/* Function use_gather_scatters_1.
-
- Check whether there is hardware support for performing gathers/scatters to be
- used for certain strided and/or grouped accesses. HAS_MASK_P determines
- whether the operation is masked or not. If support is available return TRUE
- and fill in BASEP with the base address, OFF_VECTYPE with the offset vector
- type, SCALEP with the scale, DECLP with the builtin or internal function and
- OFFMODEP with the offset mode. Otherwise, return FALSE. */
-static bool
-use_gather_scatters_1 (stmt_vec_info stmt_info, int offmode_bits, int scale,
- gather_scatter_info *info, bool has_mask_p)
-{
- struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
+ /* Get the number of bits in an element. */
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
- bool uns = TYPE_UNSIGNED (TREE_TYPE (DR_STEP (dr)));
- tree offtype = uns
- ? make_unsigned_type (offmode_bits)
- : make_signed_type (offmode_bits);
- tree decl = NULL_TREE;
-
- if (DR_IS_READ (dr)
- ? targetm.vectorize.builtin_gather
- : targetm.vectorize.builtin_scatter)
- {
- if (DR_IS_READ (dr))
- decl = targetm.vectorize.builtin_gather (vectype, offtype, scale);
- else
- decl = targetm.vectorize.builtin_scatter (vectype, offtype, scale);
-
- if (!decl)
- return false;
- }
- else
- {
- if (offmode_bits != GET_MODE_UNIT_BITSIZE (TYPE_MODE (vectype))
- || get_gather_scatter_internal_fn (DR_IS_READ (dr), vectype,
- offtype, has_mask_p) == IFN_LAST
- || !targetm.gather_scatter_supports_scale_p
- (DR_IS_READ (dr), offmode_bits, scale))
- return false;
- }
-
- tree offset_vectype = build_vector_type (offtype,
- TYPE_VECTOR_SUBPARTS (vectype));
- if (optab_handler (vec_series_optab,
- TYPE_MODE (offset_vectype)) == CODE_FOR_nothing)
- return false;
-
- tree dr_offset = fold_convert (sizetype, DR_OFFSET (dr));
- tree dr_init = fold_convert (sizetype, DR_INIT (dr));
- tree offset = fold_build2 (PLUS_EXPR, sizetype, dr_offset, dr_init);
- tree base = fold_build_pointer_plus (DR_BASE_ADDRESS (dr), offset);
-
- info->decl = decl;
- info->base = base;
- info->offset_type = offtype;
- info->widened_offset_type = offtype;
- info->scale = scale;
- info->offset_dt = vect_constant_def;
- info->offset_vectype = offset_vectype;
- return true;
-}
-
-static tree_code
-extract_two_ops (tree step, tree *op0, tree *op1)
-{
- if (TREE_CODE (step) == SSA_NAME)
- {
- gimple *def_stmt = SSA_NAME_DEF_STMT (step);
- if (gimple_code (def_stmt) != GIMPLE_ASSIGN)
- return SSA_NAME; /* We can't walk any further back. */
-
- *op0 = gimple_assign_rhs1 (def_stmt);
- *op1 = gimple_assign_rhs2 (def_stmt);
- return gimple_assign_rhs_code (def_stmt);
- }
- else
- {
- tree_code code = TREE_CODE (step);
- extract_ops_from_tree (step, &code, op0, op1);
- return code;
- }
-}
+ scalar_mode element_mode = SCALAR_TYPE_MODE (TREE_TYPE (vectype));
+ unsigned int element_bits = GET_MODE_BITSIZE (element_mode);
-static bool
-can_use_gather_for_step (stmt_vec_info stmt_info, unsigned int scale,
- bool masked_p, gather_scatter_info *gs_info)
-{
- struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
- tree addr_type = TREE_TYPE (DR_BASE_ADDRESS (dr));
- unsigned int addr_bits = TYPE_PRECISION (addr_type);
- tree step = NULL_TREE;
-
- tree vectype = STMT_VINFO_VECTYPE (stmt_info);
- machine_mode vecmode = TYPE_MODE (vectype);
- scalar_int_mode offmode;
- if (!int_mode_for_mode (GET_MODE_INNER (vecmode)).exists (&offmode))
- return false;
+ /* Set COUNT to the upper limit on the number of elements - 1.
+ Start with the maximum vectorization factor. */
+ unsigned HOST_WIDE_INT count = vect_max_vf (loop_vinfo) - 1;
- unsigned int offset_bits = GET_MODE_BITSIZE (offmode);
-
- if (TREE_CODE (DR_STEP (dr)) == INTEGER_CST)
- {
- /* We need to test the following is true:
- DR_STEP * max-niters <= max-offset-value * scale
- for a given choice of offset width and scale. */
+ /* Try lowering COUNT to the number of scalar latch iterations. */
+ struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ widest_int max_iters;
+ if (max_loop_iterations (loop, &max_iters)
+ && max_iters < count)
+ count = max_iters.to_shwi ();
+
+ /* Try scales of 1 and the element size. */
+ int scales[] = { 1, vect_get_scalar_dr_size (dr) };
+ bool overflow_p = false;
+ for (int i = 0; i < 2; ++i)
+ {
+ int scale = scales[i];
+ widest_int factor;
+ if (!wi::multiple_of_p (wi::to_widest (step), scale, SIGNED, &factor))
+ continue;
- if (offset_bits != addr_bits)
+ /* See whether we can calculate (COUNT - 1) * STEP / SCALE
+ in OFFSET_BITS bits. */
+ widest_int range = wi::mul (count, factor, SIGNED, &overflow_p);
+ if (overflow_p)
+ continue;
+ signop sign = range >= 0 ? UNSIGNED : SIGNED;
+ if (wi::min_precision (range, sign) > element_bits)
{
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
- widest_int max_iters;
- if (!max_loop_iterations (loop, &max_iters))
- return false;
-
- widest_int lhs =
- wi::mul (max_iters, wi::abs (wi::to_widest (DR_STEP (dr))));
- widest_int max_offset_value = wi::lshift (1, offset_bits - 1) - 1;
- widest_int rhs = wi::mul (max_offset_value, scale);
-
- if (wi::gtu_p (lhs, rhs))
- return false;
+ overflow_p = true;
+ continue;
}
- /* Let step be constructed once we have decided on the offset type. */
- }
- else
- {
- step = DR_STEP (dr);
- STRIP_NOPS (step);
+ /* See whether the target supports the operation. */
+ tree memory_type = TREE_TYPE (DR_REF (dr));
+ if (!vect_gather_scatter_fn_p (DR_IS_READ (dr), masked_p, vectype,
+ memory_type, element_bits, sign, scale,
+ &gs_info->ifn, &gs_info->element_type))
+ continue;
- unsigned int step_bits = TYPE_PRECISION (TREE_TYPE (step));
- tree op0, op1;
- tree_code code = extract_two_ops (step, &op0, &op1);
- if (offset_bits != step_bits)
- {
- unsigned int required_scale = 1;
- if (code == MULT_EXPR && tree_fits_uhwi_p (op1))
- {
- required_scale = tree_to_uhwi (op1);
- step = op0;
- code = extract_two_ops (step, &op0, &op1);
- }
- if (!CONVERT_EXPR_CODE_P (code)
- || TYPE_PRECISION (TREE_TYPE (op0)) != offset_bits
- || scale != required_scale)
- return false;
- step = op0;
- }
- else if (scale != 1)
- {
- if (code != MULT_EXPR
- || !tree_fits_uhwi_p (op1)
- || (tree_to_uhwi (op1) % scale) != 0)
- return false;
- }
+ tree offset_type = build_nonstandard_integer_type (element_bits,
+ sign == UNSIGNED);
+
+ gs_info->decl = NULL_TREE;
+ /* Logically the sum of DR_BASE_ADDRESS, DR_INIT and DR_OFFSET,
+ but we don't need to store that here. */
+ gs_info->base = NULL_TREE;
+ gs_info->offset = fold_convert (offset_type, step);
+ gs_info->offset_dt = vect_unknown_def_type;
+ gs_info->offset_vectype = NULL_TREE;
+ gs_info->scale = scale;
+ gs_info->memory_type = memory_type;
+ return true;
}
- if (!use_gather_scatters_1 (stmt_info, offset_bits, scale, gs_info,
- masked_p))
- return false;
+ if (overflow_p && dump_enabled_p ())
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "truncating gather/scatter offset to %d bits"
+ " might change its value.\n", element_bits);
- if (!step)
- step = wide_int_to_tree (TREE_TYPE (gs_info->offset_vectype),
- wi::sdiv_trunc (wi::to_wide (DR_STEP (dr)),
- scale));
- gs_info->u.step = step;
- return true;
+ return false;
}
-/* Function use_gather_scatters_p.
+/* Return true if we can use gather/scatter internal functions to
+ vectorize STMT, which is a grouped or strided load or store.
+ MASKED_P is true if load or store is conditional. When returning
+ true, fill in GS_INFO with the information required to perform the
+ operation. */
- For the given scalar STMT determine if we can use gather/scatter internal
- functions for the vectorization of strided or grouped loads/stores. If SLP
- is TRUE and STMT belongs to a group the function returns FALSE. MASKED_P
- indicates whether the operation should be masked or not. If gather/scatters
- can be used return TRUE and fill in INFO with the information
- required to perform the operation, otherwise return FALSE. */
static bool
-use_gather_scatters_p (gimple *stmt, bool masked_p,
- gather_scatter_info *gs_info)
+vect_use_strided_gather_scatters_p (gimple *stmt, loop_vec_info loop_vinfo,
+ bool masked_p,
+ gather_scatter_info *gs_info)
{
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
- struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
+ if (!vect_check_gather_scatter (stmt, loop_vinfo, gs_info)
+ || gs_info->decl)
+ return vect_truncate_gather_scatter_offset (stmt, loop_vinfo,
+ masked_p, gs_info);
- bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);
- bool strided_access_p = STMT_VINFO_STRIDED_P (stmt_info);
- if (!grouped_access_p && !strided_access_p)
- return false;
+ scalar_mode element_mode = SCALAR_TYPE_MODE (gs_info->element_type);
+ unsigned int element_bits = GET_MODE_BITSIZE (element_mode);
+ tree offset_type = TREE_TYPE (gs_info->offset);
+ unsigned int offset_bits = TYPE_PRECISION (offset_type);
- tree type = TREE_TYPE (DR_REF (dr));
- HOST_WIDE_INT type_size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (type));
+ /* Enforced by vect_check_gather_scatter. */
+ gcc_assert (element_bits >= offset_bits);
- if (!can_use_gather_for_step (stmt_info, 1, masked_p, gs_info)
- && !can_use_gather_for_step (stmt_info, type_size, masked_p, gs_info))
+ /* If the elements are wider than the offset, convert the offset to the
+ same width, without changing its sign. */
+ if (element_bits > offset_bits)
{
- if (dump_enabled_p ())
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "Cannot use gather/scatter for strided/grouped"
- " access.\n");
- return false;
+ bool unsigned_p = TYPE_UNSIGNED (offset_type);
+ offset_type = build_nonstandard_integer_type (element_bits, unsigned_p);
+ gs_info->offset = fold_convert (offset_type, gs_info->offset);
}
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
- "Using gather/scatter for strided/grouped access,"
+ "using gather/scatter for strided/grouped access,"
" scale = %d\n", gs_info->scale);
return true;
@@ -3030,20 +2087,39 @@ reverse_vector (tree vec_dest, tree input, gimple *stmt,
return new_temp;
}
+/* STMT is either a masked or unconditional store. Return the value
+ being stored. */
+
+tree
+vect_get_store_rhs (gimple *stmt)
+{
+ if (gassign *assign = dyn_cast <gassign *> (stmt))
+ {
+ gcc_assert (gimple_assign_single_p (assign));
+ return gimple_assign_rhs1 (assign);
+ }
+ if (gcall *call = dyn_cast <gcall *> (stmt))
+ {
+ internal_fn ifn = gimple_call_internal_fn (call);
+ int index = internal_fn_stored_value_index (ifn);
+ gcc_assert (index >= 0);
+ return gimple_call_arg (stmt, index);
+ }
+ gcc_unreachable ();
+}
+
/* A subroutine of get_load_store_type, with a subset of the same
arguments. Handle the case where STMT is part of a grouped load
or store.
- For stores, the statements in the group are all consecutive,
- but a non-strided group may have a gap at the end, between
- loop iterations. For loads, the statements in the group might
- not be consecutive; there can be gaps between statements as well
- as at the end. */
+ For stores, the statements in the group are all consecutive
+ and there is no gap at the end. For loads, the statements in the
+ group might not be consecutive; there can be gaps between statements
+ as well as at the end. */
static bool
get_group_load_store_type (gimple *stmt, tree vectype, bool slp,
bool masked_p, vec_load_store_type vls_type,
- int ncopies,
vect_memory_access_type *memory_access_type,
gather_scatter_info *gs_info)
{
@@ -3054,7 +2130,6 @@ get_group_load_store_type (gimple *stmt, tree vectype, bool slp,
gimple *first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
data_reference *first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
unsigned int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
- unsigned int num_stmts = GROUP_NUM_STMTS (vinfo_for_stmt (first_stmt));
bool single_element_p = (stmt == first_stmt
&& !GROUP_NEXT_ELEMENT (stmt_info));
unsigned HOST_WIDE_INT gap = GROUP_GAP (vinfo_for_stmt (first_stmt));
@@ -3072,30 +2147,13 @@ get_group_load_store_type (gimple *stmt, tree vectype, bool slp,
&& !loop->inner
&& !LOOP_VINFO_SPECULATIVE_EXECUTION (loop_vinfo));
- if (vls_type != VLS_LOAD)
- {
- /* The store statements in the group are always consecutive.
- num_stmts != group_size means that (a) the stride is known at
- compile time and (b) there is a gap between one iteration of
- the group and the next. E.g.:
-
- a[i * n + 0] = ...;
- a[i * n + 1] = ...;
- ...
- a[i * n + (m - 1)] = ...;
-
- where n and m are compile-time constants and n > m.
- In this case group_size is n and num_stmts is m. */
- if (slp || STMT_VINFO_STRIDED_P (stmt_info))
- gcc_assert (num_stmts == group_size);
- else
- gcc_assert (gap == group_size - num_stmts);
- }
-
/* There can only be a gap at the end of the group if the stride is
known at compile time. */
gcc_assert (!STMT_VINFO_STRIDED_P (stmt_info) || gap == 0);
+ /* Stores can't yet have gaps. */
+ gcc_assert (slp || vls_type == VLS_LOAD || gap == 0);
+
if (slp)
{
if (STMT_VINFO_STRIDED_P (stmt_info))
@@ -3142,8 +2200,8 @@ get_group_load_store_type (gimple *stmt, tree vectype, bool slp,
but see if something more efficient is available. */
*memory_access_type = VMAT_ELEMENTWISE;
- /* If there is a gap at the end of the group then some of these
- optimizations would access excess elements in the last iteration. */
+ /* If there is a gap at the end of the group then these optimizations
+ would access excess elements in the last iteration. */
bool would_overrun_p = (gap != 0);
/* An overrun is fine if the trailing elements are smaller than the
alignment boundary B. Every vector access will be a multiple of B
@@ -3151,43 +2209,48 @@ get_group_load_store_type (gimple *stmt, tree vectype, bool slp,
same B-sized block. */
if (would_overrun_p
&& !masked_p
- && vls_type == VLS_LOAD
&& gap < (vect_known_alignment_in_bytes (first_dr)
/ vect_get_scalar_dr_size (first_dr)))
would_overrun_p = false;
- /* First try using LOAD/STORE_LANES. */
if (!STMT_VINFO_STRIDED_P (stmt_info)
&& (can_overrun_p || !would_overrun_p)
- && compare_step_with_zero (stmt) > 0
- && (vls_type == VLS_LOAD
- ? vect_load_lanes_supported (vectype, group_size, masked_p)
- : vect_store_lanes_supported (vectype, group_size, masked_p)))
+ && compare_step_with_zero (stmt) > 0)
{
- *memory_access_type = VMAT_LOAD_STORE_LANES;
- overrun_p = would_overrun_p;
+ /* First try using LOAD/STORE_LANES. */
+ if (vls_type == VLS_LOAD
+ ? vect_load_lanes_supported (vectype, group_size, masked_p)
+ : vect_store_lanes_supported (vectype, group_size, masked_p))
+ {
+ *memory_access_type = VMAT_LOAD_STORE_LANES;
+ overrun_p = would_overrun_p;
+ }
+
+ /* If that fails, try using permuting loads. */
+ if (*memory_access_type == VMAT_ELEMENTWISE
+ && (vls_type == VLS_LOAD
+ ? vect_grouped_load_supported (vectype, single_element_p,
+ group_size)
+ : vect_grouped_store_supported (vectype, group_size)))
+ {
+ *memory_access_type = VMAT_CONTIGUOUS_PERMUTE;
+ overrun_p = would_overrun_p;
+ }
}
- /* If that fails, try using gather/scatter. */
- scalar_int_mode offset_mode;
- if (ncopies == 1
- && *memory_access_type == VMAT_ELEMENTWISE
- && use_gather_scatters_p (stmt, masked_p, gs_info))
- *memory_access_type = VMAT_GATHER_SCATTER;
+ /* As a last resort, trying using a gather load or scatter store.
- /* If that fails, try using permuting loads. */
+ ??? Although the code can handle all group sizes correctly,
+ it probably isn't a win to use separate strided accesses based
+ on nearby locations. Or, even if it's a win over scalar code,
+ it might not be a win over vectorizing at a lower VF, if that
+ allows us to use contiguous accesses. */
if (*memory_access_type == VMAT_ELEMENTWISE
- && !STMT_VINFO_STRIDED_P (stmt_info)
- && (can_overrun_p || !would_overrun_p)
- && compare_step_with_zero (stmt) > 0
- && (vls_type == VLS_LOAD
- ? vect_grouped_load_supported (vectype, single_element_p,
- group_size)
- : vect_grouped_store_supported (vectype, group_size)))
- {
- *memory_access_type = VMAT_CONTIGUOUS_PERMUTE;
- overrun_p = would_overrun_p;
- }
+ && single_element_p
+ && loop_vinfo
+ && vect_use_strided_gather_scatters_p (stmt, loop_vinfo,
+ masked_p, gs_info))
+ *memory_access_type = VMAT_GATHER_SCATTER;
}
if (vls_type != VLS_LOAD && first_stmt == stmt)
@@ -3197,7 +2260,7 @@ get_group_load_store_type (gimple *stmt, tree vectype, bool slp,
gimple *next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
while (next_stmt)
{
- tree op = get_store_op (next_stmt);
+ tree op = vect_get_store_rhs (next_stmt);
gimple *def_stmt;
enum vect_def_type dt;
if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
@@ -3234,19 +2297,9 @@ get_negative_load_store_type (gimple *stmt, tree vectype,
unsigned int ncopies)
{
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
dr_alignment_support alignment_support_scheme;
- if (loop_vinfo && LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
- {
- LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
- if (dump_enabled_p ())
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "Can't use a fully-masked loop because one of the"
- " steps is negative.\n");
- }
-
if (ncopies > 1)
{
if (dump_enabled_p ())
@@ -3309,9 +2362,9 @@ get_load_store_type (gimple *stmt, tree vectype, bool slp, bool masked_p,
{
*memory_access_type = VMAT_GATHER_SCATTER;
gimple *def_stmt;
- if (!vect_check_gather_scatter (stmt, loop_vinfo, gs_info, masked_p))
+ if (!vect_check_gather_scatter (stmt, loop_vinfo, gs_info))
gcc_unreachable ();
- else if (!vect_is_simple_use (gs_info->u.offset, vinfo, &def_stmt,
+ else if (!vect_is_simple_use (gs_info->offset, vinfo, &def_stmt,
&gs_info->offset_dt,
&gs_info->offset_vectype))
{
@@ -3325,14 +2378,15 @@ get_load_store_type (gimple *stmt, tree vectype, bool slp, bool masked_p,
else if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
{
if (!get_group_load_store_type (stmt, vectype, slp, masked_p, vls_type,
- ncopies, memory_access_type, gs_info))
+ memory_access_type, gs_info))
return false;
}
else if (STMT_VINFO_STRIDED_P (stmt_info))
{
gcc_assert (!slp);
- scalar_int_mode offset_mode;
- if (ncopies == 1 && use_gather_scatters_p (stmt, masked_p, gs_info))
+ if (loop_vinfo
+ && vect_use_strided_gather_scatters_p (stmt, loop_vinfo,
+ masked_p, gs_info))
*memory_access_type = VMAT_GATHER_SCATTER;
else
*memory_access_type = VMAT_ELEMENTWISE;
@@ -3369,7 +2423,7 @@ get_load_store_type (gimple *stmt, tree vectype, bool slp, bool masked_p,
if (*memory_access_type == VMAT_ELEMENTWISE
&& !STMT_VINFO_STRIDED_P (stmt_info)
&& !(stmt == GROUP_FIRST_ELEMENT (stmt_info)
- && GROUP_NUM_STMTS (stmt_info) == 1
+ && !GROUP_NEXT_ELEMENT (stmt_info)
&& !pow2p_hwi (GROUP_SIZE (stmt_info))))
{
if (dump_enabled_p ())
@@ -3380,577 +2434,501 @@ get_load_store_type (gimple *stmt, tree vectype, bool slp, bool masked_p,
return true;
}
-/* Set up the stored values for the first copy of a vectorized store.
- GROUP_SIZE is the number of stores in the group (which is 1 for
- ungrouped stores). FIRST_STMT is the first statement in the group.
-
- On return, initialize OPERANDS to a new vector in which element I
- is the value that the first copy of group member I should store.
- The caller should free OPERANDS after use. */
-
-static void
-init_stored_values (unsigned int group_size, gimple *first_stmt,
- vec<tree> *operands)
-{
- operands->create (group_size);
- gimple *next_stmt = first_stmt;
- for (unsigned int i = 0; i < group_size; i++)
- {
- /* Since gaps are not supported for interleaved stores,
- GROUP_SIZE is the exact number of stmts in the chain.
- Therefore, NEXT_STMT can't be NULL_TREE. In case that
- there is no interleaving, GROUP_SIZE is 1, and only one
- iteration of the loop will be executed. */
- gcc_assert (next_stmt);
- tree op = get_store_op (next_stmt);
- tree vec_op = vect_get_vec_def_for_operand (op, next_stmt);
- operands->quick_push (vec_op);
- next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
- }
-}
-
-/* OPERANDS is a vector set up by init_stored_values. Update each element
- for the next copy of each statement. GROUP_SIZE and FIRST_STMT are
- as for init_stored_values. */
+/* Return true if boolean argument MASK is suitable for vectorizing
+ conditional load or store STMT. When returning true, store the
+ type of the vectorized mask in *MASK_VECTYPE_OUT. */
-static void
-advance_stored_values (unsigned int group_size, gimple *first_stmt,
- vec<tree> operands)
+static bool
+vect_check_load_store_mask (gimple *stmt, tree mask, tree *mask_vectype_out)
{
- vec_info *vinfo = vinfo_for_stmt (first_stmt)->vinfo;
- for (unsigned int i = 0; i < group_size; i++)
+ if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (mask)))
{
- tree op = operands[i];
- enum vect_def_type dt;
- gimple *def_stmt;
- vect_is_simple_use (op, vinfo, &def_stmt, &dt);
- operands[i] = vect_get_vec_def_for_stmt_copy (dt, op);
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "mask argument is not a boolean.\n");
+ return false;
}
-}
-/* Emit one copy of a vectorized LOAD_LANES for STMT. GROUP_SIZE is
- the number of vectors being loaded and VECTYPE is the type of each
- vector. AGGR_TYPE is the type that should be used to refer to the
- memory source (which contains the same number of elements as
- GROUP_SIZE copies of VECTYPE, but in a different order).
- DATAREF_PTR points to the first element that should be loaded.
- ALIAS_PTR_TYPE is the type of the accessed elements for aliasing
- purposes. MASK, if nonnull, is a mask in which element I is true
- if element I of each destination vector should be loaded. */
-
-static void
-do_load_lanes (gimple *stmt, gimple_stmt_iterator *gsi,
- unsigned int group_size, tree vectype, tree aggr_type,
- tree dataref_ptr, tree alias_ptr_type, tree mask)
-{
- tree scalar_dest = gimple_get_lhs (stmt);
- tree vec_array = create_vector_array (vectype, group_size);
-
- gcall *new_stmt;
- if (mask)
- {
- /* Emit: VEC_ARRAY = MASK_LOAD_LANES (DATAREF_PTR, ALIAS_PTR, MASK). */
- tree alias_ptr = build_int_cst (alias_ptr_type,
- TYPE_ALIGN_UNIT (TREE_TYPE (vectype)));
- new_stmt = gimple_build_call_internal (IFN_MASK_LOAD_LANES, 3,
- dataref_ptr, alias_ptr, mask);
- }
- else
+ if (TREE_CODE (mask) != SSA_NAME)
{
- /* Emit: VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]). */
- tree data_ref = create_array_ref (aggr_type, dataref_ptr,
- alias_ptr_type);
- new_stmt = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "mask argument is not an SSA name.\n");
+ return false;
}
- gimple_call_set_lhs (new_stmt, vec_array);
- gimple_call_set_nothrow (new_stmt, true);
- vect_finish_stmt_generation (stmt, new_stmt, gsi);
- /* Extract each vector into an SSA_NAME. */
- auto_vec<tree, 16> dr_chain;
- dr_chain.reserve (group_size);
- for (unsigned int i = 0; i < group_size; i++)
+ stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+ gimple *def_stmt;
+ enum vect_def_type dt;
+ tree mask_vectype;
+ if (!vect_is_simple_use (mask, stmt_info->vinfo, &def_stmt, &dt,
+ &mask_vectype))
{
- tree new_temp = read_vector_array (stmt, gsi, scalar_dest,
- vec_array, i);
- dr_chain.quick_push (new_temp);
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "mask use not simple.\n");
+ return false;
}
- /* Record the mapping between SSA_NAMEs and statements. */
- vect_record_grouped_load_vectors (stmt, dr_chain);
-}
-
-/* Emit one copy of a vectorized STORE_LANES for STMT. GROUP_SIZE is
- the number of vectors being stored and OPERANDS[I] is the value
- that group member I should store. AGGR_TYPE is the type that should
- be used to refer to the memory destination (which contains the same
- number of elements as the source vectors, but in a different order).
- DATAREF_PTR points to the first store location. ALIAS_PTR_TYPE is
- the type of the accessed elements for aliasing purposes. MASK,
- if nonnull, is a mask in which element I is true if element I of
- each source vector should be stored. */
-
-static gimple *
-do_store_lanes (gimple *stmt, gimple_stmt_iterator *gsi,
- unsigned int group_size, tree aggr_type, tree dataref_ptr,
- tree alias_ptr_type, vec<tree> operands, tree mask)
-{
- /* Combine all the vectors into an array. */
- tree vectype = TREE_TYPE (operands[0]);
- tree vec_array = create_vector_array (vectype, group_size);
- for (unsigned int i = 0; i < group_size; i++)
- write_vector_array (stmt, gsi, operands[i], vec_array, i);
+ tree vectype = STMT_VINFO_VECTYPE (stmt_info);
+ if (!mask_vectype)
+ mask_vectype = get_mask_type_for_scalar_type (TREE_TYPE (vectype));
- gcall *new_stmt;
- if (mask)
- {
- /* Emit: MASK_STORE_LANES (DATAREF_PTR, ALIAS_PTR, MASK, VEC_ARRAY). */
- tree alias_ptr = build_int_cst (alias_ptr_type,
- TYPE_ALIGN_UNIT (TREE_TYPE (vectype)));
- new_stmt = gimple_build_call_internal (IFN_MASK_STORE_LANES, 4,
- dataref_ptr, alias_ptr,
- mask, vec_array);
- }
- else
+ if (!mask_vectype || !VECTOR_BOOLEAN_TYPE_P (mask_vectype))
{
- /* Emit: MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY). */
- tree data_ref = create_array_ref (aggr_type, dataref_ptr, alias_ptr_type);
- new_stmt = gimple_build_call_internal (IFN_STORE_LANES, 1, vec_array);
- gimple_call_set_lhs (new_stmt, data_ref);
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "could not find an appropriate vector mask type.\n");
+ return false;
}
- gimple_call_set_nothrow (new_stmt, true);
- vect_finish_stmt_generation (stmt, new_stmt, gsi);
- return new_stmt;
-}
-
-/* Return the alias pointer type for the group of masked loads or
- stores starting at FIRST_STMT. */
-
-static tree
-get_masked_group_alias_ptr_type (gimple *first_stmt)
-{
- tree type, next_type;
- gimple *next_stmt;
-
- type = TREE_TYPE (gimple_call_arg (first_stmt, 1));
- next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (first_stmt));
- while (next_stmt)
+ if (may_ne (TYPE_VECTOR_SUBPARTS (mask_vectype),
+ TYPE_VECTOR_SUBPARTS (vectype)))
{
- next_type = TREE_TYPE (gimple_call_arg (next_stmt, 1));
- if (get_alias_set (type) != get_alias_set (next_type))
+ if (dump_enabled_p ())
{
- if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location,
- "conflicting alias set types.\n");
- return ptr_type_node;
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "vector mask type ");
+ dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, mask_vectype);
+ dump_printf (MSG_MISSED_OPTIMIZATION,
+ " does not match vector data type ");
+ dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, vectype);
+ dump_printf (MSG_MISSED_OPTIMIZATION, ".\n");
}
- next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
+ return false;
}
- return type;
-}
+ *mask_vectype_out = mask_vectype;
+ return true;
+}
-/* Function vectorizable_mask_load_store.
-
- Check if STMT performs a conditional load or store that can be vectorized.
- If VEC_STMT is also passed, vectorize the STMT: create a vectorized
- stmt to replace it, put it in VEC_STMT, and insert it at GSI.
- Return FALSE if not a vectorizable STMT, TRUE otherwise. */
+/* Return true if stored value RHS is suitable for vectorizing store
+ statement STMT. When returning true, store the type of the
+ vectorized store value in *RHS_VECTYPE_OUT and the type of the
+ store in *VLS_TYPE_OUT. */
static bool
-vectorizable_mask_load_store (gimple *stmt, gimple_stmt_iterator *gsi,
- gimple **vec_stmt, slp_tree slp_node)
+vect_check_store_rhs (gimple *stmt, tree rhs, tree *rhs_vectype_out,
+ vec_load_store_type *vls_type_out)
{
- tree vec_dest = NULL;
+ /* In the case this is a store from a constant make sure
+ native_encode_expr can handle it. */
+ if (CONSTANT_CLASS_P (rhs) && native_encode_expr (rhs, NULL, 64) == 0)
+ {
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "cannot encode constant as a byte sequence.\n");
+ return false;
+ }
+
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
- stmt_vec_info prev_stmt_info;
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
- bool nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
- struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
- tree vectype = STMT_VINFO_VECTYPE (stmt_info);
- tree rhs_vectype = NULL_TREE;
- tree mask_vectype;
- tree elem_type;
- tree aggr_type;
- gimple *new_stmt;
- tree dummy;
- tree dataref_ptr = NULL_TREE;
- gimple *ptr_incr;
- poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
- int ncopies;
- int i;
- bool inv_p;
- gather_scatter_info gs_info;
- vec_load_store_type vls_type;
- tree mask;
gimple *def_stmt;
enum vect_def_type dt;
- gimple *first_stmt = stmt;
- unsigned int group_size = 1;
-
- if (slp_node != NULL)
- return false;
-
- if (LOOP_VINFO_SPECULATIVE_EXECUTION (loop_vinfo))
+ tree rhs_vectype;
+ if (!vect_is_simple_use (rhs, stmt_info->vinfo, &def_stmt, &dt,
+ &rhs_vectype))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "Speculative loop mask load/stores not supported\n");
+ "use not simple.\n");
return false;
}
- ncopies = vect_get_num_copies (loop_vinfo, vectype);
- gcc_assert (ncopies >= 1);
-
- mask = gimple_call_arg (stmt, 2);
-
- if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (mask)))
- return false;
-
- /* FORNOW. This restriction should be relaxed. */
- if (nested_in_vect_loop && ncopies > 1)
+ tree vectype = STMT_VINFO_VECTYPE (stmt_info);
+ if (rhs_vectype && !useless_type_conversion_p (vectype, rhs_vectype))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "multiple types in nested loop.");
+ "incompatible vector types.\n");
return false;
}
- if (!STMT_VINFO_RELEVANT_P (stmt_info))
- return false;
-
- if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
- && ! vec_stmt)
- return false;
+ *rhs_vectype_out = rhs_vectype;
+ if (dt == vect_constant_def || dt == vect_external_def)
+ *vls_type_out = VLS_STORE_INVARIANT;
+ else
+ *vls_type_out = VLS_STORE;
+ return true;
+}
- if (!STMT_VINFO_DATA_REF (stmt_info))
- return false;
+/* Build an all-ones vector mask of type MASKTYPE while vectorizing STMT.
+ Note that we support masks with floating-point type, in which case the
+ floats are interpreted as a bitmask. */
- elem_type = TREE_TYPE (vectype);
+static tree
+vect_build_all_ones_mask (gimple *stmt, tree masktype)
+{
+ if (TREE_CODE (masktype) == INTEGER_TYPE)
+ return build_int_cst (masktype, -1);
+ else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
+ {
+ tree mask = build_int_cst (TREE_TYPE (masktype), -1);
+ mask = build_vector_from_val (masktype, mask);
+ return vect_init_vector (stmt, mask, masktype, NULL);
+ }
+ else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
+ {
+ REAL_VALUE_TYPE r;
+ long tmp[6];
+ for (int j = 0; j < 6; ++j)
+ tmp[j] = -1;
+ real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
+ tree mask = build_real (TREE_TYPE (masktype), r);
+ mask = build_vector_from_val (masktype, mask);
+ return vect_init_vector (stmt, mask, masktype, NULL);
+ }
+ gcc_unreachable ();
+}
- if (TREE_CODE (mask) != SSA_NAME)
- return false;
+/* Build an all-zero merge value of type VECTYPE while vectorizing
+ STMT as a gather load. */
- if (!vect_is_simple_use (mask, loop_vinfo, &def_stmt, &dt, &mask_vectype))
- return false;
+static tree
+vect_build_zero_merge_argument (gimple *stmt, tree vectype)
+{
+ tree merge;
+ if (TREE_CODE (TREE_TYPE (vectype)) == INTEGER_TYPE)
+ merge = build_int_cst (TREE_TYPE (vectype), 0);
+ else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (vectype)))
+ {
+ REAL_VALUE_TYPE r;
+ long tmp[6];
+ for (int j = 0; j < 6; ++j)
+ tmp[j] = 0;
+ real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (vectype)));
+ merge = build_real (TREE_TYPE (vectype), r);
+ }
+ else
+ gcc_unreachable ();
+ merge = build_vector_from_val (vectype, merge);
+ return vect_init_vector (stmt, merge, vectype, NULL);
+}
- if (!mask_vectype)
- mask_vectype = get_mask_type_for_scalar_type (TREE_TYPE (vectype));
+/* Build a gather load call while vectorizing STMT. Insert new instructions
+ before GSI and add them to VEC_STMT. GS_INFO describes the gather load
+ operation. If the load is conditional, MASK is the unvectorized
+ condition, otherwise MASK is null. */
- if (!mask_vectype || !VECTOR_BOOLEAN_TYPE_P (mask_vectype)
- || may_ne (TYPE_VECTOR_SUBPARTS (mask_vectype),
- TYPE_VECTOR_SUBPARTS (vectype)))
- return false;
+static void
+vect_build_gather_load_calls (gimple *stmt, gimple_stmt_iterator *gsi,
+ gimple **vec_stmt, gather_scatter_info *gs_info,
+ tree mask)
+{
+ stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+ loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
+ struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ tree vectype = STMT_VINFO_VECTYPE (stmt_info);
+ poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
+ int ncopies = vect_get_num_copies (loop_vinfo, vectype);
+ edge pe = loop_preheader_edge (loop);
+ enum { NARROW, NONE, WIDEN } modifier;
+ poly_uint64 gather_off_nunits
+ = TYPE_VECTOR_SUBPARTS (gs_info->offset_vectype);
- if (gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
+ tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info->decl));
+ tree rettype = TREE_TYPE (TREE_TYPE (gs_info->decl));
+ tree srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
+ tree ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
+ tree idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
+ tree masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
+ tree scaletype = TREE_VALUE (arglist);
+ gcc_checking_assert (types_compatible_p (srctype, rettype)
+ && (!mask || types_compatible_p (srctype, masktype)));
+
+ tree perm_mask = NULL_TREE;
+ tree mask_perm_mask = NULL_TREE;
+ if (must_eq (nunits, gather_off_nunits))
+ modifier = NONE;
+ else if (must_eq (nunits * 2, gather_off_nunits))
{
- tree rhs = gimple_call_arg (stmt, 3);
- if (!vect_is_simple_use (rhs, loop_vinfo, &def_stmt, &dt, &rhs_vectype))
- return false;
- if (dt == vect_constant_def || dt == vect_external_def)
- vls_type = VLS_STORE_INVARIANT;
- else
- vls_type = VLS_STORE;
- }
- else
- vls_type = VLS_LOAD;
+ modifier = WIDEN;
- if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
- {
- first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
- group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
+ /* Currently widening gathers and scatters are only supported for
+ fixed-length vectors. */
+ int count = gather_off_nunits.to_constant ();
+ auto_vec_perm_indices sel (count);
+ for (int i = 0; i < count; ++i)
+ sel.quick_push (i | (count / 2));
+
+ perm_mask = vect_gen_perm_mask_checked (gs_info->offset_vectype, sel);
}
+ else if (must_eq (nunits, gather_off_nunits * 2))
+ {
+ modifier = NARROW;
- vect_memory_access_type memory_access_type;
- if (!get_load_store_type (stmt, vectype, false, true, vls_type, ncopies,
- &memory_access_type, &gs_info))
- return false;
+ /* Currently narrowing gathers and scatters are only supported for
+ fixed-length vectors. */
+ int count = nunits.to_constant ();
+ auto_vec_perm_indices sel (count);
+ sel.quick_grow (count);
+ for (int i = 0; i < count; ++i)
+ sel[i] = i < count / 2 ? i : i + count / 2;
+ perm_mask = vect_gen_perm_mask_checked (vectype, sel);
- wgather_info wgather = DEFAULT_WGATHER_INFO;
- if (memory_access_type == VMAT_GATHER_SCATTER)
- {
- if (gs_info.decl)
- {
- tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
- tree masktype
- = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
- if (TREE_CODE (masktype) == INTEGER_TYPE)
- {
- if (dump_enabled_p ())
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "masked gather with integer mask"
- " not supported.");
- return false;
- }
- }
- else
+ ncopies *= 2;
+
+ if (mask)
{
- if (vls_type == VLS_LOAD
- && may_ne (nunits, TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype))
- && !widened_gather_support_p (vectype, &gs_info, stmt, &wgather))
- return false;
+ for (int i = 0; i < count; ++i)
+ sel[i] = i | (count / 2);
+ mask_perm_mask = vect_gen_perm_mask_checked (masktype, sel);
}
}
- else if (rhs_vectype
- && !useless_type_conversion_p (vectype, rhs_vectype))
- return false;
- else if (memory_access_type == VMAT_CONTIGUOUS)
- {
- if (!can_vec_mask_load_store_p (TYPE_MODE (vectype),
- TYPE_MODE (mask_vectype),
- vls_type == VLS_LOAD))
- return false;
- }
- else if (memory_access_type != VMAT_LOAD_STORE_LANES)
- {
- if (dump_enabled_p ())
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "unsupported access type for masked %s.\n",
- vls_type == VLS_LOAD ? "load" : "store");
- return false;
- }
+ else
+ gcc_unreachable ();
- if (!vec_stmt) /* transformation not required. */
- {
- STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
- if (loop_vinfo
- && LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
- check_load_store_masking (loop_vinfo, vectype, vls_type == VLS_LOAD,
- group_size, memory_access_type,
- gs_info.widened_offset_type);
+ tree vec_dest = vect_create_destination_var (gimple_get_lhs (stmt),
+ vectype);
- STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
- if (vls_type == VLS_LOAD)
- vect_model_load_cost (stmt_info, ncopies, memory_access_type,
- NULL, NULL, NULL);
- else
- vect_model_store_cost (stmt_info, ncopies, memory_access_type,
- dt, NULL, NULL, NULL);
- return true;
+ tree ptr = fold_convert (ptrtype, gs_info->base);
+ if (!is_gimple_min_invariant (ptr))
+ {
+ gimple_seq seq;
+ ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
+ basic_block new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
+ gcc_assert (!new_bb);
}
- gcc_assert (memory_access_type == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
- /* Transform. */
-
- bool masked_loop_p = loop_vinfo && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo);
+ tree scale = build_int_cst (scaletype, gs_info->scale);
- if (vls_type != VLS_LOAD && STMT_VINFO_GROUPED_ACCESS (stmt_info))
- GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
+ tree vec_oprnd0 = NULL_TREE;
+ tree vec_mask = NULL_TREE;
+ tree src_op = NULL_TREE;
+ tree mask_op = NULL_TREE;
+ tree prev_res = NULL_TREE;
+ stmt_vec_info prev_stmt_info = NULL;
- if (memory_access_type == VMAT_GATHER_SCATTER)
+ if (!mask)
{
- if (vls_type == VLS_LOAD)
- do_gather_load (stmt, gsi, vec_stmt, loop_vinfo, &gs_info,
- &wgather, mask_vectype);
- else
- do_scatter_store (stmt, gsi, vec_stmt, loop_vinfo, &gs_info,
- mask_vectype);
- return true;
+ src_op = vect_build_zero_merge_argument (stmt, rettype);
+ mask_op = vect_build_all_ones_mask (stmt, masktype);
}
- if (memory_access_type == VMAT_LOAD_STORE_LANES)
- aggr_type = build_array_type_nelts (elem_type, group_size * nunits);
- else
- aggr_type = vectype;
-
- vec_loop_masks *masks = &LOOP_VINFO_MASKS (loop_vinfo);
- if (vls_type != VLS_LOAD)
+ for (int j = 0; j < ncopies; ++j)
{
- /* Vectorize the whole group when we reach the final statement.
- Replace all other statements with an empty sequence. */
- if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
- && (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
- < GROUP_NUM_STMTS (vinfo_for_stmt (first_stmt))))
+ tree op, var;
+ gimple *new_stmt;
+ if (modifier == WIDEN && (j & 1))
+ op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
+ perm_mask, stmt, gsi);
+ else if (j == 0)
+ op = vec_oprnd0
+ = vect_get_vec_def_for_operand (gs_info->offset, stmt);
+ else
+ op = vec_oprnd0
+ = vect_get_vec_def_for_stmt_copy (gs_info->offset_dt, vec_oprnd0);
+
+ if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
{
- *vec_stmt = NULL;
- return true;
+ gcc_assert (must_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)),
+ TYPE_VECTOR_SUBPARTS (idxtype)));
+ var = vect_get_new_ssa_name (idxtype, vect_simple_var);
+ op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
+ new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
+ vect_finish_stmt_generation (stmt, new_stmt, gsi);
+ op = var;
}
- auto_vec<tree, 16> operands;
- tree vec_rhs = NULL_TREE, vec_mask = NULL_TREE;
- prev_stmt_info = NULL;
- LOOP_VINFO_HAS_MASK_STORE (loop_vinfo) = true;
- for (i = 0; i < ncopies; i++)
+ if (mask)
{
- unsigned align, misalign;
-
- if (i == 0)
- {
- init_stored_values (group_size, first_stmt, &operands);
- vec_rhs = operands[0];
- vec_mask = vect_get_vec_def_for_operand (mask, stmt,
- mask_vectype);
- /* We should have caught mismatched types earlier. */
- gcc_assert (useless_type_conversion_p (vectype,
- TREE_TYPE (vec_rhs)));
- dataref_ptr = vect_create_data_ref_ptr (first_stmt, aggr_type,
- group_size,
- NULL, NULL_TREE, &dummy,
- gsi, &ptr_incr, false,
- &inv_p);
- gcc_assert (!inv_p);
- }
+ if (mask_perm_mask && (j & 1))
+ mask_op = permute_vec_elements (mask_op, mask_op,
+ mask_perm_mask, stmt, gsi);
else
{
- advance_stored_values (group_size, first_stmt, operands);
- vec_rhs = operands[0];
- vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
- vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
- dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr,
- gsi, first_stmt,
- TYPE_SIZE_UNIT (aggr_type));
- }
-
- tree mask = NULL;
- if (masked_loop_p)
- {
- gcc_assert (!slp_node);
- mask = vect_get_loop_mask (gsi, masks, ncopies, vectype, i);
- }
- mask = prepare_load_store_mask (mask_vectype, mask, vec_mask, gsi);
+ if (j == 0)
+ vec_mask = vect_get_vec_def_for_operand (mask, stmt);
+ else
+ {
+ gimple *def_stmt;
+ enum vect_def_type dt;
+ vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
+ vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
+ }
- if (memory_access_type == VMAT_LOAD_STORE_LANES)
- {
- tree ref_type = get_masked_group_alias_ptr_type (first_stmt);
- new_stmt = do_store_lanes (stmt, gsi, group_size, aggr_type,
- dataref_ptr, ref_type, operands,
- mask);
- }
- else
- {
- /* Without this the mask calculated above would be
- incorrect. */
- gcc_assert (group_size == 1);
- align = DR_TARGET_ALIGNMENT (dr);
- if (aligned_access_p (dr))
- misalign = 0;
- else if (DR_MISALIGNMENT (dr) == -1)
+ mask_op = vec_mask;
+ if (!useless_type_conversion_p (masktype, TREE_TYPE (vec_mask)))
{
- align = TYPE_ALIGN_UNIT (elem_type);
- misalign = 0;
+ gcc_assert
+ (must_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op)),
+ TYPE_VECTOR_SUBPARTS (masktype)));
+ var = vect_get_new_ssa_name (masktype, vect_simple_var);
+ mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op);
+ new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR,
+ mask_op);
+ vect_finish_stmt_generation (stmt, new_stmt, gsi);
+ mask_op = var;
}
- else
- misalign = DR_MISALIGNMENT (dr);
- set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
- misalign);
- tree ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)),
- misalign
- ? least_bit_hwi (misalign)
- : align);
- gcall *call
- = gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr,
- ptr, mask, vec_rhs);
- gimple_call_set_nothrow (call, true);
- new_stmt = call;
- vect_finish_stmt_generation (stmt, new_stmt, gsi);
}
- if (i == 0)
- STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
- else
- STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
- prev_stmt_info = vinfo_for_stmt (new_stmt);
+ src_op = mask_op;
}
- }
- else
- {
- /* Vectorize the whole group when we reach the first statement.
- For later statements we just need to return the cached
- replacement. */
- if (group_size > 1
- && STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt)))
+
+ new_stmt = gimple_build_call (gs_info->decl, 5, src_op, ptr, op,
+ mask_op, scale);
+
+ if (!useless_type_conversion_p (vectype, rettype))
{
- *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
- replace_mask_load (stmt, gsi);
- return true;
+ gcc_assert (must_eq (TYPE_VECTOR_SUBPARTS (vectype),
+ TYPE_VECTOR_SUBPARTS (rettype)));
+ op = vect_get_new_ssa_name (rettype, vect_simple_var);
+ gimple_call_set_lhs (new_stmt, op);
+ vect_finish_stmt_generation (stmt, new_stmt, gsi);
+ var = make_ssa_name (vec_dest);
+ op = build1 (VIEW_CONVERT_EXPR, vectype, op);
+ new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
}
-
- tree vec_mask = NULL_TREE;
- prev_stmt_info = NULL;
- if (memory_access_type == VMAT_LOAD_STORE_LANES)
- vec_dest = NULL_TREE;
else
- vec_dest = vect_create_destination_var (gimple_call_lhs (stmt),
- vectype);
- for (i = 0; i < ncopies; i++)
{
- unsigned align, misalign;
-
- if (i == 0)
- {
- gcc_assert (mask == gimple_call_arg (first_stmt, 2));
- vec_mask = vect_get_vec_def_for_operand (mask, stmt,
- mask_vectype);
- dataref_ptr = vect_create_data_ref_ptr (first_stmt, aggr_type,
- group_size, NULL,
- NULL_TREE, &dummy,
- gsi, &ptr_incr, false,
- &inv_p);
- gcc_assert (!inv_p);
- }
- else
- {
- vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
- vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
- dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr,
- gsi, first_stmt,
- TYPE_SIZE_UNIT (aggr_type));
- }
+ var = make_ssa_name (vec_dest, new_stmt);
+ gimple_call_set_lhs (new_stmt, var);
+ }
- tree mask = NULL;
- if (masked_loop_p)
- {
- gcc_assert (!slp_node);
- mask = vect_get_loop_mask (gsi, masks, ncopies, vectype, i);
- }
- mask = prepare_load_store_mask (mask_vectype, mask, vec_mask, gsi);
+ vect_finish_stmt_generation (stmt, new_stmt, gsi);
- if (memory_access_type == VMAT_LOAD_STORE_LANES)
- {
- tree ref_type = get_masked_group_alias_ptr_type (first_stmt);
- do_load_lanes (stmt, gsi, group_size, vectype,
- aggr_type, dataref_ptr, ref_type, mask);
- *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
- }
- else
+ if (modifier == NARROW)
+ {
+ if ((j & 1) == 0)
{
- /* Without this the mask calculated above would be incorrect. */
- gcc_assert (group_size == 1);
- align = DR_TARGET_ALIGNMENT (dr);
- if (aligned_access_p (dr))
- misalign = 0;
- else if (DR_MISALIGNMENT (dr) == -1)
- {
- align = TYPE_ALIGN_UNIT (elem_type);
- misalign = 0;
- }
- else
- misalign = DR_MISALIGNMENT (dr);
- set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
- misalign);
- tree ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)),
- misalign
- ? least_bit_hwi (misalign)
- : align);
- gcall *call
- = gimple_build_call_internal (IFN_MASK_LOAD, 3, dataref_ptr,
- ptr, mask);
- gimple_call_set_lhs (call, make_ssa_name (vec_dest));
- gimple_call_set_nothrow (call, true);
- vect_finish_stmt_generation (stmt, call, gsi);
- if (i == 0)
- STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = call;
- else
- STMT_VINFO_RELATED_STMT (prev_stmt_info) = call;
- prev_stmt_info = vinfo_for_stmt (call);
+ prev_res = var;
+ continue;
}
+ var = permute_vec_elements (prev_res, var, perm_mask, stmt, gsi);
+ new_stmt = SSA_NAME_DEF_STMT (var);
}
- replace_mask_load (stmt, gsi);
+ if (prev_stmt_info == NULL)
+ STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
+ else
+ STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
+ prev_stmt_info = vinfo_for_stmt (new_stmt);
+ }
+}
+
+/* Prepare the base and offset in GS_INFO for vectorization.
+ Set *DATAREF_PTR to the loop-invariant base address and *VEC_OFFSET
+ to the vectorized offset argument for the first copy of STMT. STMT
+ is the statement described by GS_INFO and LOOP is the containing loop. */
+
+static void
+vect_get_gather_scatter_ops (struct loop *loop, gimple *stmt,
+ gather_scatter_info *gs_info,
+ tree *dataref_ptr, tree *vec_offset)
+{
+ gimple_seq stmts = NULL;
+ *dataref_ptr = force_gimple_operand (gs_info->base, &stmts, true, NULL_TREE);
+ if (stmts != NULL)
+ {
+ basic_block new_bb;
+ edge pe = loop_preheader_edge (loop);
+ new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
+ gcc_assert (!new_bb);
}
+ tree offset_type = TREE_TYPE (gs_info->offset);
+ tree offset_vectype = get_vectype_for_scalar_type (offset_type);
+ *vec_offset = vect_get_vec_def_for_operand (gs_info->offset, stmt,
+ offset_vectype);
+}
- return true;
+/* Prepare to implement a grouped or strided load or store using
+ the gather load or scatter store operation described by GS_INFO.
+ STMT is the load or store statement.
+
+ Set *DATAREF_BUMP to the amount that should be added to the base
+ address after each copy of the vectorized statement. Set *VEC_OFFSET
+ to an invariant offset vector in which element I has the value
+ I * DR_STEP / SCALE. */
+
+static void
+vect_get_strided_load_store_ops (gimple *stmt, loop_vec_info loop_vinfo,
+ gather_scatter_info *gs_info,
+ tree *iv_step, tree *dataref_bump,
+ tree *vec_offset)
+{
+ stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+ struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
+ struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ tree vectype = STMT_VINFO_VECTYPE (stmt_info);
+ gimple_seq stmts;
+
+ tree bump = size_binop (MULT_EXPR,
+ fold_convert (sizetype, DR_STEP (dr)),
+ size_int (TYPE_VECTOR_SUBPARTS (vectype)));
+ *dataref_bump = force_gimple_operand (bump, &stmts, true, NULL_TREE);
+ if (stmts)
+ gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
+
+ if (use_capped_vf (loop_vinfo))
+ *iv_step = vect_mult_by_vf (loop_vinfo,
+ fold_convert (sizetype, DR_STEP (dr)));
+ else
+ *iv_step = *dataref_bump;
+
+ /* The offset given in GS_INFO can have pointer type, so use the element
+ type of the vector instead. */
+ tree offset_type = TREE_TYPE (gs_info->offset);
+ tree offset_vectype = get_vectype_for_scalar_type (offset_type);
+ offset_type = TREE_TYPE (offset_vectype);
+
+ /* Calculate X = DR_STEP / SCALE and convert it to the appropriate type. */
+ tree step = size_binop (EXACT_DIV_EXPR, DR_STEP (dr),
+ ssize_int (gs_info->scale));
+ step = fold_convert (offset_type, step);
+ step = force_gimple_operand (step, &stmts, true, NULL_TREE);
+
+ /* Create {0, X, X*2, X*3, ...}. */
+ *vec_offset = gimple_build (&stmts, VEC_SERIES_EXPR, offset_vectype,
+ build_zero_cst (offset_type), step);
+ if (stmts)
+ gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
+}
+
+/* Return the amount that should be added to a vector pointer to move
+ to the next or previous copy of AGGR_TYPE. DR is the data reference
+ being vectorized and MEMORY_ACCESS_TYPE describes the type of
+ vectorization. */
+
+static void
+vect_get_data_ptr_increment (loop_vec_info loop_vinfo, data_reference *dr,
+ tree aggr_type, unsigned int group_size,
+ vect_memory_access_type memory_access_type,
+ tree *iv_step, tree *bump)
+{
+ if (memory_access_type == VMAT_INVARIANT)
+ {
+ *iv_step = *bump = size_zero_node;
+ return;
+ }
+
+ *bump = TYPE_SIZE_UNIT (aggr_type);
+ tree step = vect_dr_behavior (dr)->step;
+ if (tree_int_cst_sgn (step) == -1)
+ *bump = fold_build1 (NEGATE_EXPR, TREE_TYPE (*bump), *bump);
+
+ if (loop_vinfo && use_capped_vf (loop_vinfo))
+ {
+ tree elt_type = TREE_TYPE (DR_REF (dr));
+ tree bytes_per_iter = size_binop (MULT_EXPR, TYPE_SIZE_UNIT (elt_type),
+ size_int (group_size));
+ *iv_step = vect_mult_by_vf (loop_vinfo, bytes_per_iter);
+ }
+ else if (loop_vinfo && LOOP_VINFO_FIRSTFAULTING_EXECUTION (loop_vinfo))
+ {
+ gimple_seq seq = NULL;
+ tree elt_type = TREE_TYPE (DR_REF (dr));
+ tree bytes_per_iter = size_binop (MULT_EXPR, TYPE_SIZE_UNIT (elt_type),
+ size_int (group_size));
+ *iv_step = gimple_build (&seq, MULT_EXPR, sizetype,
+ LOOP_VINFO_NONFAULTING (loop_vinfo).niters,
+ bytes_per_iter);
+
+ gimple_stmt_iterator incr_gsi;
+ bool insert_after;
+ standard_iv_increment_position (LOOP_VINFO_LOOP (loop_vinfo),
+ &incr_gsi, &insert_after);
+ gsi_insert_seq_before (&incr_gsi, seq, GSI_SAME_STMT);
+ }
+ else
+ *iv_step = *bump;
}
/* Check and perform vectorization of BUILT_IN_BSWAP{16,32,64}. */
@@ -4142,16 +3120,11 @@ vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
if (!stmt)
return false;
- combined_fn cfn = gimple_call_combined_fn (stmt);
- switch (cfn)
- {
- case CFN_MASK_LOAD:
- case CFN_MASK_STORE:
- return vectorizable_mask_load_store (stmt, gsi, vec_stmt, slp_node);
-
- default:
- break;
- }
+ if (gimple_call_internal_p (stmt)
+ && (internal_load_fn_p (gimple_call_internal_fn (stmt))
+ || internal_store_fn_p (gimple_call_internal_fn (stmt))))
+ /* Handled by vectorizable_load and vectorizable_store. */
+ return false;
if (gimple_call_lhs (stmt) == NULL_TREE
|| TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
@@ -4173,6 +3146,7 @@ vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
return false;
/* Ignore the argument of IFN_GOMP_SIMD_LANE, it is magic. */
+ combined_fn cfn = gimple_call_combined_fn (stmt);
if (cfn == CFN_GOMP_SIMD_LANE)
{
nargs = 0;
@@ -5388,6 +4362,53 @@ vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
return true;
}
+
+/* Function vect_gen_widened_results_half
+
+ Create a vector stmt whose code, type, number of arguments, and result
+ variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are
+ VEC_OPRND0 and VEC_OPRND1. The new vector stmt is to be inserted at BSI.
+ In the case that CODE is a CALL_EXPR, this means that a call to DECL
+ needs to be created (DECL is a function-decl of a target-builtin).
+ STMT is the original scalar stmt that we are vectorizing. */
+
+static gimple *
+vect_gen_widened_results_half (enum tree_code code,
+ tree decl,
+ tree vec_oprnd0, tree vec_oprnd1, int op_type,
+ tree vec_dest, gimple_stmt_iterator *gsi,
+ gimple *stmt)
+{
+ gimple *new_stmt;
+ tree new_temp;
+
+ /* Generate half of the widened result: */
+ if (code == CALL_EXPR)
+ {
+ /* Target specific support */
+ if (op_type == binary_op)
+ new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
+ else
+ new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
+ new_temp = make_ssa_name (vec_dest, new_stmt);
+ gimple_call_set_lhs (new_stmt, new_temp);
+ }
+ else
+ {
+ /* Generic support */
+ gcc_assert (op_type == TREE_CODE_LENGTH (code));
+ if (op_type != binary_op)
+ vec_oprnd1 = NULL;
+ new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1);
+ new_temp = make_ssa_name (vec_dest, new_stmt);
+ gimple_assign_set_lhs (new_stmt, new_temp);
+ }
+ vect_finish_stmt_generation (stmt, new_stmt, gsi);
+
+ return new_stmt;
+}
+
+
/* Get vectorized definitions for loop-based vectorization. For the first
operand we call vect_get_vec_def_for_operand() (with OPRND containing
scalar operand), and for the rest we get a copy with
@@ -5493,6 +4514,58 @@ vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
vec_dsts.quick_push (vec_dest);
}
+
+/* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
+ and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
+ the resulting vectors and call the function recursively. */
+
+static void
+vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
+ vec<tree> *vec_oprnds1,
+ gimple *stmt, tree vec_dest,
+ gimple_stmt_iterator *gsi,
+ enum tree_code code1,
+ enum tree_code code2, tree decl1,
+ tree decl2, int op_type)
+{
+ int i;
+ tree vop0, vop1, new_tmp1, new_tmp2;
+ gimple *new_stmt1, *new_stmt2;
+ vec<tree> vec_tmp = vNULL;
+
+ vec_tmp.create (vec_oprnds0->length () * 2);
+ FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
+ {
+ if (op_type == binary_op)
+ vop1 = (*vec_oprnds1)[i];
+ else
+ vop1 = NULL_TREE;
+
+ /* Generate the two halves of promotion operation. */
+ new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
+ op_type, vec_dest, gsi, stmt);
+ new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
+ op_type, vec_dest, gsi, stmt);
+ if (is_gimple_call (new_stmt1))
+ {
+ new_tmp1 = gimple_call_lhs (new_stmt1);
+ new_tmp2 = gimple_call_lhs (new_stmt2);
+ }
+ else
+ {
+ new_tmp1 = gimple_assign_lhs (new_stmt1);
+ new_tmp2 = gimple_assign_lhs (new_stmt2);
+ }
+
+ /* Store the results for the next step. */
+ vec_tmp.quick_push (new_tmp1);
+ vec_tmp.quick_push (new_tmp2);
+ }
+
+ vec_oprnds0->release ();
+ *vec_oprnds0 = vec_tmp;
+}
+
/* Pack the masks in MASKS to a single mask and return it. Insert any
new statements before GSI. Leave MASKS with just the returned value
on exit. */
@@ -6741,8 +5814,9 @@ vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
return false;
code = gimple_assign_rhs_code (stmt);
- /* Mask out operations that mix scalar and vector input operands. */
- if (code == STRICT_REDUC_PLUS_EXPR)
+
+ /* Ignore operations that mix scalar and vector input operands. */
+ if (code == FOLD_LEFT_PLUS_EXPR)
return false;
/* For pointer addition, we should use the normal plus for
@@ -7111,7 +6185,6 @@ static bool
vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
slp_tree slp_node)
{
- tree scalar_dest;
tree data_ref;
tree op;
tree vec_oprnd = NULL_TREE;
@@ -7145,6 +6218,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
vec_info *vinfo = stmt_info->vinfo;
tree aggr_type;
gather_scatter_info gs_info;
+ enum vect_def_type scatter_src_dt = vect_unknown_def_type;
gimple *new_stmt;
poly_uint64 vf;
vec_load_store_type vls_type;
@@ -7159,28 +6233,55 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
/* Is vectorizable store? */
- if (!is_gimple_assign (stmt))
- return false;
+ tree mask = NULL_TREE, mask_vectype = NULL_TREE;
+ if (is_gimple_assign (stmt))
+ {
+ tree scalar_dest = gimple_assign_lhs (stmt);
+ if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
+ && is_pattern_stmt_p (stmt_info))
+ scalar_dest = TREE_OPERAND (scalar_dest, 0);
+ if (TREE_CODE (scalar_dest) != ARRAY_REF
+ && TREE_CODE (scalar_dest) != BIT_FIELD_REF
+ && TREE_CODE (scalar_dest) != INDIRECT_REF
+ && TREE_CODE (scalar_dest) != COMPONENT_REF
+ && TREE_CODE (scalar_dest) != IMAGPART_EXPR
+ && TREE_CODE (scalar_dest) != REALPART_EXPR
+ && TREE_CODE (scalar_dest) != MEM_REF)
+ return false;
+ }
+ else
+ {
+ gcall *call = dyn_cast <gcall *> (stmt);
+ if (!call || !gimple_call_internal_p (call))
+ return false;
- scalar_dest = gimple_assign_lhs (stmt);
- if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
- && is_pattern_stmt_p (stmt_info))
- scalar_dest = TREE_OPERAND (scalar_dest, 0);
- if (TREE_CODE (scalar_dest) != ARRAY_REF
- && TREE_CODE (scalar_dest) != BIT_FIELD_REF
- && TREE_CODE (scalar_dest) != INDIRECT_REF
- && TREE_CODE (scalar_dest) != COMPONENT_REF
- && TREE_CODE (scalar_dest) != IMAGPART_EXPR
- && TREE_CODE (scalar_dest) != REALPART_EXPR
- && TREE_CODE (scalar_dest) != MEM_REF)
- return false;
+ internal_fn ifn = gimple_call_internal_fn (call);
+ if (!internal_store_fn_p (ifn))
+ return false;
+
+ if (slp_node != NULL)
+ {
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "SLP of masked stores not supported.\n");
+ return false;
+ }
+
+ int mask_index = internal_fn_mask_index (ifn);
+ if (mask_index >= 0)
+ {
+ mask = gimple_call_arg (call, mask_index);
+ if (!vect_check_load_store_mask (stmt, mask, &mask_vectype))
+ return false;
+ }
+ }
+
+ op = vect_get_store_rhs (stmt);
/* Cannot have hybrid store SLP -- that would mean storing to the
same location twice. */
gcc_assert (slp == PURE_SLP_STMT (stmt_info));
- gcc_assert (gimple_assign_single_p (stmt));
-
tree vectype = STMT_VINFO_VECTYPE (stmt_info), rhs_vectype = NULL_TREE;
poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
@@ -7212,58 +6313,53 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
return false;
}
- op = gimple_assign_rhs1 (stmt);
-
- /* In the case this is a store from a constant make sure
- native_encode_expr can handle it. */
- if (CONSTANT_CLASS_P (op) && native_encode_expr (op, NULL, 64) == 0)
- return false;
-
- if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt, &rhs_vectype))
- {
- if (dump_enabled_p ())
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "use not simple.\n");
- return false;
- }
-
- if (dt == vect_constant_def || dt == vect_external_def)
- vls_type = VLS_STORE_INVARIANT;
- else
- vls_type = VLS_STORE;
-
- if (rhs_vectype && !useless_type_conversion_p (vectype, rhs_vectype))
+ if (!vect_check_store_rhs (stmt, op, &rhs_vectype, &vls_type))
return false;
elem_type = TREE_TYPE (vectype);
vec_mode = TYPE_MODE (vectype);
- /* FORNOW. In some cases can vectorize even if data-type not supported
- (e.g. - array initialization with 0). */
- if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
- return false;
-
if (!STMT_VINFO_DATA_REF (stmt_info))
return false;
vect_memory_access_type memory_access_type;
- if (!get_load_store_type (stmt, vectype, slp, false, vls_type, ncopies,
+ if (!get_load_store_type (stmt, vectype, slp, mask, vls_type, ncopies,
&memory_access_type, &gs_info))
return false;
- if (memory_access_type == VMAT_GATHER_SCATTER)
+ if (mask)
+ {
+ if (memory_access_type == VMAT_CONTIGUOUS)
+ {
+ if (!VECTOR_MODE_P (vec_mode)
+ || !can_vec_mask_load_store_p (vec_mode,
+ TYPE_MODE (mask_vectype), false))
+ return false;
+ }
+ else if (memory_access_type != VMAT_LOAD_STORE_LANES
+ && memory_access_type != VMAT_GATHER_SCATTER)
+ {
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "unsupported access type for masked store.\n");
+ return false;
+ }
+ }
+ else
{
- if (!gs_info.decl
- && may_ne (nunits, TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype)))
+ /* FORNOW. In some cases can vectorize even if data-type not supported
+ (e.g. - array initialization with 0). */
+ if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
return false;
}
- grouped_store = STMT_VINFO_GROUPED_ACCESS (stmt_info);
+ grouped_store = (STMT_VINFO_GROUPED_ACCESS (stmt_info)
+ && memory_access_type != VMAT_GATHER_SCATTER);
if (grouped_store)
{
first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
- group_size = GROUP_NUM_STMTS (vinfo_for_stmt (first_stmt));
+ group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
}
else
{
@@ -7278,15 +6374,14 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
if (loop_vinfo
&& LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
- check_load_store_masking (loop_vinfo, vectype, false, group_size,
- memory_access_type,
- gs_info.widened_offset_type);
+ check_load_store_masking (loop_vinfo, vectype, vls_type, group_size,
+ memory_access_type, &gs_info);
STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
/* The SLP costs are calculated during SLP analysis. */
if (!PURE_SLP_STMT (stmt_info))
- vect_model_store_cost (stmt_info, ncopies, memory_access_type, dt,
- NULL, NULL, NULL);
+ vect_model_store_cost (stmt_info, ncopies, memory_access_type,
+ vls_type, NULL, NULL, NULL);
return true;
}
gcc_assert (memory_access_type == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
@@ -7295,15 +6390,159 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
ensure_base_align (dr);
- if (grouped_store)
- GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
-
- if (memory_access_type == VMAT_GATHER_SCATTER)
+ if (memory_access_type == VMAT_GATHER_SCATTER && gs_info.decl)
{
- do_scatter_store (stmt, gsi, vec_stmt, loop_vinfo, &gs_info, NULL_TREE);
+ tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE, src;
+ tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
+ tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
+ tree ptr, mask, var, scale, perm_mask = NULL_TREE;
+ edge pe = loop_preheader_edge (loop);
+ gimple_seq seq;
+ basic_block new_bb;
+ enum { NARROW, NONE, WIDEN } modifier;
+ poly_uint64 scatter_off_nunits
+ = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype);
+
+ if (must_eq (nunits, scatter_off_nunits))
+ modifier = NONE;
+ else if (must_eq (nunits * 2, scatter_off_nunits))
+ {
+ modifier = WIDEN;
+
+ /* Currently gathers and scatters are only supported for
+ fixed-length vectors. */
+ unsigned int count = scatter_off_nunits.to_constant ();
+ auto_vec_perm_indices sel (count);
+ for (i = 0; i < (unsigned int) count; ++i)
+ sel.quick_push (i | (count / 2));
+
+ perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype, sel);
+ gcc_assert (perm_mask != NULL_TREE);
+ }
+ else if (must_eq (nunits, scatter_off_nunits * 2))
+ {
+ modifier = NARROW;
+
+ /* Currently gathers and scatters are only supported for
+ fixed-length vectors. */
+ unsigned int count = nunits.to_constant ();
+ auto_vec_perm_indices sel (count);
+ for (i = 0; i < (unsigned int) count; ++i)
+ sel.quick_push (i | (count / 2));
+
+ perm_mask = vect_gen_perm_mask_checked (vectype, sel);
+ gcc_assert (perm_mask != NULL_TREE);
+ ncopies *= 2;
+ }
+ else
+ gcc_unreachable ();
+
+ rettype = TREE_TYPE (TREE_TYPE (gs_info.decl));
+ ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
+ masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
+ idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
+ srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
+ scaletype = TREE_VALUE (arglist);
+
+ gcc_checking_assert (TREE_CODE (masktype) == INTEGER_TYPE
+ && TREE_CODE (rettype) == VOID_TYPE);
+
+ ptr = fold_convert (ptrtype, gs_info.base);
+ if (!is_gimple_min_invariant (ptr))
+ {
+ ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
+ new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
+ gcc_assert (!new_bb);
+ }
+
+ /* Currently we support only unconditional scatter stores,
+ so mask should be all ones. */
+ mask = build_int_cst (masktype, -1);
+ mask = vect_init_vector (stmt, mask, masktype, NULL);
+
+ scale = build_int_cst (scaletype, gs_info.scale);
+
+ prev_stmt_info = NULL;
+ for (j = 0; j < ncopies; ++j)
+ {
+ if (j == 0)
+ {
+ src = vec_oprnd1
+ = vect_get_vec_def_for_operand (op, stmt);
+ op = vec_oprnd0
+ = vect_get_vec_def_for_operand (gs_info.offset, stmt);
+ }
+ else if (modifier != NONE && (j & 1))
+ {
+ if (modifier == WIDEN)
+ {
+ src = vec_oprnd1
+ = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
+ op = permute_vec_elements (vec_oprnd0, vec_oprnd0, perm_mask,
+ stmt, gsi);
+ }
+ else if (modifier == NARROW)
+ {
+ src = permute_vec_elements (vec_oprnd1, vec_oprnd1, perm_mask,
+ stmt, gsi);
+ op = vec_oprnd0
+ = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt,
+ vec_oprnd0);
+ }
+ else
+ gcc_unreachable ();
+ }
+ else
+ {
+ src = vec_oprnd1
+ = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
+ op = vec_oprnd0
+ = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt,
+ vec_oprnd0);
+ }
+
+ if (!useless_type_conversion_p (srctype, TREE_TYPE (src)))
+ {
+ gcc_assert (must_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src)),
+ TYPE_VECTOR_SUBPARTS (srctype)));
+ var = vect_get_new_ssa_name (srctype, vect_simple_var);
+ src = build1 (VIEW_CONVERT_EXPR, srctype, src);
+ new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, src);
+ vect_finish_stmt_generation (stmt, new_stmt, gsi);
+ src = var;
+ }
+
+ if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
+ {
+ gcc_assert (must_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)),
+ TYPE_VECTOR_SUBPARTS (idxtype)));
+ var = vect_get_new_ssa_name (idxtype, vect_simple_var);
+ op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
+ new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
+ vect_finish_stmt_generation (stmt, new_stmt, gsi);
+ op = var;
+ }
+
+ new_stmt
+ = gimple_build_call (gs_info.decl, 5, ptr, mask, op, src, scale);
+
+ vect_finish_stmt_generation (stmt, new_stmt, gsi);
+
+ if (prev_stmt_info == NULL)
+ STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
+ else
+ STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
+ prev_stmt_info = vinfo_for_stmt (new_stmt);
+ }
return true;
}
+ if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
+ {
+ gimple *group_stmt = GROUP_FIRST_ELEMENT (stmt_info);
+ GROUP_STORE_COUNT (vinfo_for_stmt (group_stmt))++;
+ }
+
if (grouped_store)
{
/* FORNOW */
@@ -7312,7 +6551,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
/* We vectorize all the stmts of the interleaving group when we
reach the last stmt in the group. */
if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
- < GROUP_NUM_STMTS (vinfo_for_stmt (first_stmt))
+ < GROUP_SIZE (vinfo_for_stmt (first_stmt))
&& !slp)
{
*vec_stmt = NULL;
@@ -7328,7 +6567,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
gcc_assert (GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt)) == first_stmt);
first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
- op = gimple_assign_rhs1 (first_stmt);
+ op = vect_get_store_rhs (first_stmt);
}
else
/* VEC_NUM is the number of vect stmts to be created for this
@@ -7493,7 +6732,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
for (j = 0; j < ncopies; j++)
{
- /* We've set op and dt above, from gimple_assign_rhs1(stmt),
+ /* We've set op and dt above, from vect_get_store_rhs,
and first_stmt == stmt. */
if (j == 0)
{
@@ -7505,8 +6744,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
}
else
{
- gcc_assert (gimple_assign_single_p (next_stmt));
- op = gimple_assign_rhs1 (next_stmt);
+ op = vect_get_store_rhs (next_stmt);
vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
}
}
@@ -7593,9 +6831,12 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
gcc_assert (alignment_support_scheme);
bool masked_loop_p = (loop_vinfo && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
- /* Targets with store-lane instructions or support for fully-masked loops
- must not require explicit realignment. */
- gcc_assert ((memory_access_type != VMAT_LOAD_STORE_LANES && !masked_loop_p)
+ /* Targets with store-lane instructions must not require explicit
+ realignment. vect_supportable_dr_alignment always returns either
+ dr_aligned or dr_unaligned_supported for masked operations. */
+ gcc_assert ((memory_access_type != VMAT_LOAD_STORE_LANES
+ && !mask
+ && !masked_loop_p)
|| alignment_support_scheme == dr_aligned
|| alignment_support_scheme == dr_unaligned_supported);
@@ -7603,10 +6844,32 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
|| memory_access_type == VMAT_CONTIGUOUS_REVERSE)
offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
- if (memory_access_type == VMAT_LOAD_STORE_LANES)
- aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
+ tree bump, iv_step;
+ tree vec_offset = NULL_TREE;
+ if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
+ {
+ aggr_type = NULL_TREE;
+ iv_step = NULL_TREE;
+ bump = NULL_TREE;
+ }
+ else if (memory_access_type == VMAT_GATHER_SCATTER)
+ {
+ aggr_type = elem_type;
+ vect_get_strided_load_store_ops (stmt, loop_vinfo, &gs_info,
+ &iv_step, &bump, &vec_offset);
+ }
else
- aggr_type = vectype;
+ {
+ if (memory_access_type == VMAT_LOAD_STORE_LANES)
+ aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
+ else
+ aggr_type = vectype;
+ vect_get_data_ptr_increment (loop_vinfo, dr, aggr_type, group_size,
+ memory_access_type, &iv_step, &bump);
+ }
+
+ if (mask)
+ LOOP_VINFO_HAS_MASK_STORE (loop_vinfo) = true;
/* In case the vectorization factor (VF) is bigger than the number
of elements that we can fit in a vectype (nunits), we have to generate
@@ -7648,6 +6911,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
*/
prev_stmt_info = NULL;
+ tree vec_mask = NULL_TREE;
vec_loop_masks *masks = &LOOP_VINFO_MASKS (loop_vinfo);
for (j = 0; j < ncopies; j++)
{
@@ -7663,21 +6927,34 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
vec_oprnd = vec_oprnds[0];
}
else
- {
- /* For interleaved stores we collect vectorized defs
- for all the stores in the group in DR_CHAIN and OPRNDS.
- DR_CHAIN is then used as an input to
- vect_permute_store_chain(), and OPRNDS as an input to
- vect_get_vec_def_for_stmt_copy() for the next copy.
-
- If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN
- and OPRNDS are of size 1. */
- init_stored_values (group_size, first_stmt, &oprnds);
- dr_chain.safe_splice (oprnds);
- vec_oprnd = oprnds[0];
+ {
+ /* For interleaved stores we collect vectorized defs for all the
+ stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
+ used as an input to vect_permute_store_chain(), and OPRNDS as
+ an input to vect_get_vec_def_for_stmt_copy() for the next copy.
+
+ If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
+ OPRNDS are of size 1. */
+ next_stmt = first_stmt;
+ for (i = 0; i < group_size; i++)
+ {
+ /* Since gaps are not supported for interleaved stores,
+ GROUP_SIZE is the exact number of stmts in the chain.
+ Therefore, NEXT_STMT can't be NULL_TREE. In case that
+ there is no interleaving, GROUP_SIZE is 1, and only one
+ iteration of the loop will be executed. */
+ op = vect_get_store_rhs (next_stmt);
+ vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
+ dr_chain.quick_push (vec_oprnd);
+ oprnds.quick_push (vec_oprnd);
+ next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
+ }
+ if (mask)
+ vec_mask = vect_get_vec_def_for_operand (mask, stmt,
+ mask_vectype);
}
- /* We should have caught mismatched types earlier. */
+ /* We should have catched mismatched types earlier. */
gcc_assert (useless_type_conversion_p (vectype,
TREE_TYPE (vec_oprnd)));
bool simd_lane_access_p
@@ -7694,12 +6971,19 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
dataref_offset = build_int_cst (ref_type, 0);
inv_p = false;
}
+ else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
+ {
+ vect_get_gather_scatter_ops (loop, stmt, &gs_info,
+ &dataref_ptr, &vec_offset);
+ inv_p = false;
+ }
else
dataref_ptr
- = vect_create_data_ref_ptr (first_stmt, aggr_type, group_size,
+ = vect_create_data_ref_ptr (first_stmt, aggr_type,
simd_lane_access_p ? loop : NULL,
offset, &dummy, gsi, &ptr_incr,
- simd_lane_access_p, &inv_p);
+ simd_lane_access_p, &inv_p,
+ NULL_TREE, iv_step);
gcc_assert (bb_vinfo || !inv_p);
}
else
@@ -7711,26 +6995,77 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
next copy.
If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
OPRNDS are of size 1. */
- advance_stored_values (group_size, first_stmt, oprnds);
- dr_chain.truncate (0);
- dr_chain.splice (oprnds);
- vec_oprnd = oprnds[0];
+ for (i = 0; i < group_size; i++)
+ {
+ op = oprnds[i];
+ vect_is_simple_use (op, vinfo, &def_stmt, &dt);
+ vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
+ dr_chain[i] = vec_oprnd;
+ oprnds[i] = vec_oprnd;
+ }
+ if (mask)
+ {
+ vect_is_simple_use (vec_mask, vinfo, &def_stmt, &dt);
+ vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
+ }
if (dataref_offset)
dataref_offset
- = int_const_binop (PLUS_EXPR, dataref_offset,
- TYPE_SIZE_UNIT (aggr_type));
+ = int_const_binop (PLUS_EXPR, dataref_offset, bump);
+ else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
+ {
+ gimple *def_stmt;
+ vect_def_type dt;
+ vect_is_simple_use (vec_offset, loop_vinfo, &def_stmt, &dt);
+ vec_offset = vect_get_vec_def_for_stmt_copy (dt, vec_offset);
+ }
else
dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
- TYPE_SIZE_UNIT (aggr_type));
+ bump);
}
- tree mask = NULL;
if (memory_access_type == VMAT_LOAD_STORE_LANES)
{
+ tree vec_array;
+
+ /* Combine all the vectors into an array. */
+ vec_array = create_vector_array (vectype, vec_num);
+ for (i = 0; i < vec_num; i++)
+ {
+ vec_oprnd = dr_chain[i];
+ write_vector_array (stmt, gsi, vec_oprnd, vec_array, i);
+ }
+
+ tree final_mask = NULL;
if (masked_loop_p)
- mask = vect_get_loop_mask (gsi, masks, ncopies, vectype, j);
- new_stmt = do_store_lanes (stmt, gsi, vec_num, aggr_type,
- dataref_ptr, ref_type, dr_chain, mask);
+ final_mask = vect_get_loop_mask (gsi, masks, ncopies, vectype, j);
+ if (vec_mask)
+ final_mask = prepare_load_store_mask (mask_vectype, final_mask,
+ vec_mask, gsi);
+
+ gcall *call;
+ if (final_mask)
+ {
+ /* Emit:
+ MASK_STORE_LANES (DATAREF_PTR, ALIAS_PTR, VEC_MASK,
+ VEC_ARRAY). */
+ unsigned int align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype));
+ tree alias_ptr = build_int_cst (ref_type, align);
+ call = gimple_build_call_internal (IFN_MASK_STORE_LANES, 4,
+ dataref_ptr, alias_ptr,
+ final_mask, vec_array);
+ }
+ else
+ {
+ /* Emit:
+ MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY). */
+ data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type);
+ call = gimple_build_call_internal (IFN_STORE_LANES, 1,
+ vec_array);
+ gimple_call_set_lhs (call, data_ref);
+ }
+ gimple_call_set_nothrow (call, true);
+ new_stmt = call;
+ vect_finish_stmt_generation (stmt, new_stmt, gsi);
}
else
{
@@ -7749,14 +7084,36 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
{
unsigned align, misalign;
+ tree final_mask = NULL_TREE;
if (masked_loop_p)
- mask = vect_get_loop_mask (gsi, masks, vec_num * ncopies,
- vectype, vec_num * j + i);
+ final_mask = vect_get_loop_mask (gsi, masks, vec_num * ncopies,
+ vectype, vec_num * j + i);
+ if (vec_mask)
+ final_mask = prepare_load_store_mask (mask_vectype, final_mask,
+ vec_mask, gsi);
+
+ if (memory_access_type == VMAT_GATHER_SCATTER)
+ {
+ tree scale = size_int (gs_info.scale);
+ gcall *call;
+ if (masked_loop_p)
+ call = gimple_build_call_internal
+ (IFN_MASK_SCATTER_STORE, 5, dataref_ptr, vec_offset,
+ scale, vec_oprnd, final_mask);
+ else
+ call = gimple_build_call_internal
+ (IFN_SCATTER_STORE, 4, dataref_ptr, vec_offset,
+ scale, vec_oprnd);
+ gimple_call_set_nothrow (call, true);
+ new_stmt = call;
+ vect_finish_stmt_generation (stmt, new_stmt, gsi);
+ break;
+ }
if (i > 0)
/* Bump the vector pointer. */
dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
- stmt, NULL_TREE);
+ stmt, bump);
if (slp)
vec_oprnd = vec_oprnds[i];
@@ -7765,11 +7122,6 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
vect_permute_store_chain(). */
vec_oprnd = result_chain[i];
- data_ref = fold_build2 (MEM_REF, vectype,
- dataref_ptr,
- dataref_offset
- ? dataref_offset
- : build_int_cst (ref_type, 0));
align = DR_TARGET_ALIGNMENT (first_dr);
if (aligned_access_p (first_dr))
misalign = 0;
@@ -7788,33 +7140,40 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
{
tree perm_dest
- = vect_create_destination_var (gimple_assign_rhs1 (stmt),
+ = vect_create_destination_var (vect_get_store_rhs (stmt),
vectype);
vec_oprnd = reverse_vector (perm_dest, vec_oprnd, stmt, gsi);
}
- tree offset_arg = (dataref_offset
- ? dataref_offset
- : build_int_cst (ref_type, 0));
- if (misalign)
- align = least_bit_hwi (misalign);
-
/* Arguments are ready. Create the new vector stmt. */
- if (masked_loop_p)
+ if (final_mask)
{
+ align = least_bit_hwi (misalign | align);
tree ptr = build_int_cst (ref_type, align);
- gcall *call = gimple_build_call_internal
- (IFN_MASK_STORE, 4, dataref_ptr, ptr, mask, vec_oprnd);
+ gcall *call
+ = gimple_build_call_internal (IFN_MASK_STORE, 4,
+ dataref_ptr, ptr,
+ final_mask, vec_oprnd);
gimple_call_set_nothrow (call, true);
new_stmt = call;
}
else
{
- data_ref = fold_build2 (MEM_REF, TREE_TYPE (vec_oprnd),
- dataref_ptr, offset_arg);
- if (align < TYPE_ALIGN_UNIT (vectype))
+ data_ref = fold_build2 (MEM_REF, vectype,
+ dataref_ptr,
+ dataref_offset
+ ? dataref_offset
+ : build_int_cst (ref_type, 0));
+ if (aligned_access_p (first_dr))
+ ;
+ else if (DR_MISALIGNMENT (first_dr) == -1)
+ TREE_TYPE (data_ref)
+ = build_aligned_type (TREE_TYPE (data_ref),
+ align * BITS_PER_UNIT);
+ else
TREE_TYPE (data_ref)
- = build_aligned_type (TREE_TYPE (data_ref), align);
+ = build_aligned_type (TREE_TYPE (data_ref),
+ TYPE_ALIGN (elem_type));
new_stmt = gimple_build_assign (data_ref, vec_oprnd);
}
vect_finish_stmt_generation (stmt, new_stmt, gsi);
@@ -8007,7 +7366,6 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
int vec_num;
bool slp = (slp_node != NULL);
bool slp_perm = false;
- enum tree_code code;
bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
poly_uint64 vf;
tree aggr_type;
@@ -8023,24 +7381,62 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
&& ! vec_stmt)
return false;
- /* Is vectorizable load? */
- if (!is_gimple_assign (stmt))
- return false;
+ tree mask = NULL_TREE, mask_vectype = NULL_TREE;
+ if (is_gimple_assign (stmt))
+ {
+ scalar_dest = gimple_assign_lhs (stmt);
+ if (TREE_CODE (scalar_dest) != SSA_NAME)
+ return false;
- scalar_dest = gimple_assign_lhs (stmt);
- if (TREE_CODE (scalar_dest) != SSA_NAME)
- return false;
+ tree_code code = gimple_assign_rhs_code (stmt);
+ if (code != ARRAY_REF
+ && code != BIT_FIELD_REF
+ && code != INDIRECT_REF
+ && code != COMPONENT_REF
+ && code != IMAGPART_EXPR
+ && code != REALPART_EXPR
+ && code != MEM_REF
+ && TREE_CODE_CLASS (code) != tcc_declaration)
+ return false;
+ }
+ else
+ {
+ gcall *call = dyn_cast <gcall *> (stmt);
+ if (!call || !gimple_call_internal_p (call))
+ return false;
- code = gimple_assign_rhs_code (stmt);
- if (code != ARRAY_REF
- && code != BIT_FIELD_REF
- && code != INDIRECT_REF
- && code != COMPONENT_REF
- && code != IMAGPART_EXPR
- && code != REALPART_EXPR
- && code != MEM_REF
- && TREE_CODE_CLASS (code) != tcc_declaration)
- return false;
+ internal_fn ifn = gimple_call_internal_fn (call);
+ if (!internal_load_fn_p (ifn))
+ return false;
+
+ scalar_dest = gimple_call_lhs (call);
+ if (!scalar_dest)
+ return false;
+
+ if (slp_node != NULL)
+ {
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "SLP of masked loads not supported.\n");
+ return false;
+ }
+
+ if (LOOP_VINFO_SPECULATIVE_EXECUTION (loop_vinfo))
+ {
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "speculative mask loads not supported\n");
+ return false;
+ }
+
+ int mask_index = internal_fn_mask_index (ifn);
+ if (mask_index >= 0)
+ {
+ mask = gimple_call_arg (call, mask_index);
+ if (!vect_check_load_store_mask (stmt, mask, &mask_vectype))
+ return false;
+ }
+ }
if (!STMT_VINFO_DATA_REF (stmt_info))
return false;
@@ -8162,17 +7558,42 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
group_size = 1;
vect_memory_access_type memory_access_type;
- if (!get_load_store_type (stmt, vectype, slp, false, VLS_LOAD, ncopies,
+ if (!get_load_store_type (stmt, vectype, slp, mask, VLS_LOAD, ncopies,
&memory_access_type, &gs_info))
return false;
- wgather_info wgather = DEFAULT_WGATHER_INFO;
- if (memory_access_type == VMAT_GATHER_SCATTER)
+ if (mask)
{
- if (!gs_info.decl
- && may_ne (nunits, TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype))
- && !widened_gather_support_p (vectype, &gs_info, stmt, &wgather))
- return false;
+ if (memory_access_type == VMAT_CONTIGUOUS)
+ {
+ machine_mode vec_mode = TYPE_MODE (vectype);
+ if (!VECTOR_MODE_P (vec_mode)
+ || !can_vec_mask_load_store_p (vec_mode,
+ TYPE_MODE (mask_vectype), true))
+ return false;
+ }
+ else if (memory_access_type == VMAT_GATHER_SCATTER && gs_info.decl)
+ {
+ tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
+ tree masktype
+ = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
+ if (TREE_CODE (masktype) == INTEGER_TYPE)
+ {
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "masked gather with integer mask not"
+ " supported.");
+ return false;
+ }
+ }
+ else if (memory_access_type != VMAT_LOAD_STORE_LANES
+ && memory_access_type != VMAT_GATHER_SCATTER)
+ {
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "unsupported access type for masked load.\n");
+ return false;
+ }
}
if (firstfaulting_p && memory_access_type != VMAT_CONTIGUOUS)
@@ -8193,9 +7614,8 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
if (loop_vinfo
&& LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
- check_load_store_masking (loop_vinfo, vectype, true, group_size,
- memory_access_type,
- gs_info.widened_offset_type);
+ check_load_store_masking (loop_vinfo, vectype, VLS_LOAD, group_size,
+ memory_access_type, &gs_info);
STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
/* The SLP costs are calculated during SLP analysis. */
@@ -8217,10 +7637,9 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
ensure_base_align (dr);
- if (memory_access_type == VMAT_GATHER_SCATTER)
+ if (memory_access_type == VMAT_GATHER_SCATTER && gs_info.decl)
{
- do_gather_load (stmt, gsi, vec_stmt, loop_vinfo, &gs_info,
- &wgather, NULL_TREE);
+ vect_build_gather_load_calls (stmt, gsi, vec_stmt, &gs_info, mask);
return true;
}
@@ -8446,6 +7865,9 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
return true;
}
+ if (memory_access_type == VMAT_GATHER_SCATTER)
+ grouped_load = false;
+
if (grouped_load)
{
first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
@@ -8523,9 +7945,12 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
gcc_assert (alignment_support_scheme);
bool masked_loop_p = (loop_vinfo && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
- /* Targets with load-lane instructions or support for fully-masked loops
- must not require explicit realignment. */
- gcc_assert ((memory_access_type != VMAT_LOAD_STORE_LANES && !masked_loop_p)
+ /* Targets with store-lane instructions must not require explicit
+ realignment. vect_supportable_dr_alignment always returns either
+ dr_aligned or dr_unaligned_supported for masked operations. */
+ gcc_assert ((memory_access_type != VMAT_LOAD_STORE_LANES
+ && !mask
+ && !masked_loop_p)
|| alignment_support_scheme == dr_aligned
|| alignment_support_scheme == dr_unaligned_supported);
@@ -8660,11 +8085,31 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
- if (memory_access_type == VMAT_LOAD_STORE_LANES)
- aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
+ tree bump, iv_step;
+ tree vec_offset = NULL_TREE;
+ if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
+ {
+ aggr_type = NULL_TREE;
+ iv_step = NULL_TREE;
+ bump = NULL_TREE;
+ }
+ else if (memory_access_type == VMAT_GATHER_SCATTER)
+ {
+ aggr_type = elem_type;
+ vect_get_strided_load_store_ops (stmt, loop_vinfo, &gs_info,
+ &iv_step, &bump, &vec_offset);
+ }
else
- aggr_type = vectype;
+ {
+ if (memory_access_type == VMAT_LOAD_STORE_LANES)
+ aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
+ else
+ aggr_type = vectype;
+ vect_get_data_ptr_increment (loop_vinfo, dr, aggr_type, group_size,
+ memory_access_type, &iv_step, &bump);
+ }
+ tree vec_mask = NULL_TREE;
prev_stmt_info = NULL;
poly_uint64 group_elt = 0;
for (j = 0; j < ncopies; j++)
@@ -8693,9 +8138,9 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
{
dataref_ptr
= vect_create_data_ref_ptr (first_stmt_for_drptr, aggr_type,
- 0, at_loop, offset, &dummy, gsi,
+ at_loop, offset, &dummy, gsi,
&ptr_incr, simd_lane_access_p,
- &inv_p, byte_offset);
+ &inv_p, byte_offset, iv_step);
/* Adjust the pointer by the difference to first_stmt. */
data_reference_p ptrdr
= STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt_for_drptr));
@@ -8706,46 +8151,115 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
stmt, diff);
}
+ else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
+ {
+ vect_get_gather_scatter_ops (loop, stmt, &gs_info,
+ &dataref_ptr, &vec_offset);
+ inv_p = false;
+ }
else
dataref_ptr
- = vect_create_data_ref_ptr (first_stmt, aggr_type,
- group_size, at_loop,
+ = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
offset, &dummy, gsi, &ptr_incr,
simd_lane_access_p, &inv_p,
- byte_offset);
+ byte_offset, iv_step);
+ if (mask)
+ vec_mask = vect_get_vec_def_for_operand (mask, stmt,
+ mask_vectype);
}
- else if (dataref_offset)
- dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset,
- TYPE_SIZE_UNIT (aggr_type));
else
- dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
- TYPE_SIZE_UNIT (aggr_type));
+ {
+ if (dataref_offset)
+ dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset,
+ bump);
+ else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
+ {
+ gimple *def_stmt;
+ vect_def_type dt;
+ vect_is_simple_use (vec_offset, loop_vinfo, &def_stmt, &dt);
+ vec_offset = vect_get_vec_def_for_stmt_copy (dt, vec_offset);
+ }
+ else
+ dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
+ stmt, bump);
+ if (mask)
+ {
+ gimple *def_stmt;
+ vect_def_type dt;
+ vect_is_simple_use (vec_mask, vinfo, &def_stmt, &dt);
+ vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
+ }
+ }
if (grouped_load || slp_perm)
dr_chain.create (vec_num);
- tree mask = NULL;
-
if (memory_access_type == VMAT_LOAD_STORE_LANES)
{
+ tree vec_array;
+
+ vec_array = create_vector_array (vectype, vec_num);
+
+ tree final_mask = NULL_TREE;
if (masked_loop_p)
- mask = vect_get_load_mask (loop_vinfo, gsi, ncopies, vectype, j);
- do_load_lanes (stmt, gsi, group_size, vectype, aggr_type,
- dataref_ptr, ref_type, mask);
+ final_mask = vect_get_load_mask (loop_vinfo, gsi, ncopies,
+ vectype, j);
+ if (vec_mask)
+ final_mask = prepare_load_store_mask (mask_vectype, final_mask,
+ vec_mask, gsi);
+
+ gcall *call;
+ if (final_mask)
+ {
+ /* Emit:
+ VEC_ARRAY = MASK_LOAD_LANES (DATAREF_PTR, ALIAS_PTR,
+ VEC_MASK). */
+ unsigned int align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype));
+ tree alias_ptr = build_int_cst (ref_type, align);
+ call = gimple_build_call_internal (IFN_MASK_LOAD_LANES, 3,
+ dataref_ptr, alias_ptr,
+ final_mask);
+ }
+ else
+ {
+ /* Emit:
+ VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]). */
+ data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type);
+ call = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
+ }
+ gimple_call_set_lhs (call, vec_array);
+ gimple_call_set_nothrow (call, true);
+ new_stmt = call;
+ vect_finish_stmt_generation (stmt, new_stmt, gsi);
+
+ /* Extract each vector into an SSA_NAME. */
+ for (i = 0; i < vec_num; i++)
+ {
+ new_temp = read_vector_array (stmt, gsi, scalar_dest,
+ vec_array, i);
+ dr_chain.quick_push (new_temp);
+ }
+
+ /* Record the mapping between SSA_NAMEs and statements. */
+ vect_record_grouped_load_vectors (stmt, dr_chain);
}
else
{
for (i = 0; i < vec_num; i++)
{
+ tree final_mask = NULL_TREE;
if (masked_loop_p
&& memory_access_type != VMAT_INVARIANT)
- mask = vect_get_load_mask (loop_vinfo, gsi, vec_num * ncopies,
- vectype, vec_num * j + i);
+ final_mask = vect_get_load_mask (loop_vinfo, gsi,
+ vec_num * ncopies,
+ vectype, vec_num * j + i);
+ if (vec_mask)
+ final_mask = prepare_load_store_mask (mask_vectype, final_mask,
+ vec_mask, gsi);
+
if (i > 0)
dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
- stmt, NULL_TREE);
-
- vec_dest = vect_create_destination_var (scalar_dest, vectype);
+ stmt, bump);
/* 2. Create the vector-load in the loop. */
switch (alignment_support_scheme)
@@ -8755,9 +8269,23 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
{
unsigned int align, misalign;
- tree offset_arg = (dataref_offset
- ? dataref_offset
- : build_int_cst (ref_type, 0));
+ if (memory_access_type == VMAT_GATHER_SCATTER)
+ {
+ tree scale = size_int (gs_info.scale);
+ gcall *call;
+ if (masked_loop_p)
+ call = gimple_build_call_internal
+ (IFN_MASK_GATHER_LOAD, 4, dataref_ptr,
+ vec_offset, scale, final_mask);
+ else
+ call = gimple_build_call_internal
+ (IFN_GATHER_LOAD, 3, dataref_ptr,
+ vec_offset, scale);
+ gimple_call_set_nothrow (call, true);
+ new_stmt = call;
+ data_ref = NULL_TREE;
+ break;
+ }
if (alignment_support_scheme == dr_aligned)
{
@@ -8779,19 +8307,21 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
&& TREE_CODE (dataref_ptr) == SSA_NAME)
set_ptr_info_alignment (get_ptr_info (dataref_ptr),
align, misalign);
- if (misalign)
- align = least_bit_hwi (misalign);
- if (mask)
+
+ if (final_mask)
{
/* At present we always start a first-faulting
load at the first element. */
gcc_assert (!firstfaulting_p);
+ align = least_bit_hwi (misalign | align);
tree ptr = build_int_cst (ref_type, align);
- gcall *call = gimple_build_call_internal
- (IFN_MASK_LOAD, 3, dataref_ptr, ptr, mask);
- gimple_call_set_lhs (call, vec_dest);
+ gcall *call
+ = gimple_build_call_internal (IFN_MASK_LOAD, 3,
+ dataref_ptr, ptr,
+ final_mask);
gimple_call_set_nothrow (call, true);
new_stmt = call;
+ data_ref = NULL_TREE;
}
else if (firstfaulting_p)
{
@@ -8804,12 +8334,21 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
}
else
{
- data_ref = fold_build2 (MEM_REF, vectype, dataref_ptr,
- offset_arg);
- if (align < TYPE_ALIGN_UNIT (vectype))
+ data_ref
+ = fold_build2 (MEM_REF, vectype, dataref_ptr,
+ dataref_offset
+ ? dataref_offset
+ : build_int_cst (ref_type, 0));
+ if (alignment_support_scheme == dr_aligned)
+ ;
+ else if (DR_MISALIGNMENT (first_dr) == -1)
TREE_TYPE (data_ref)
- = build_aligned_type (TREE_TYPE (data_ref), align);
- new_stmt = gimple_build_assign (vec_dest, data_ref);
+ = build_aligned_type (TREE_TYPE (data_ref),
+ align * BITS_PER_UNIT);
+ else
+ TREE_TYPE (data_ref)
+ = build_aligned_type (TREE_TYPE (data_ref),
+ TYPE_ALIGN (elem_type));
}
break;
}
@@ -8839,10 +8378,10 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
data_ref
= build2 (MEM_REF, vectype, ptr,
build_int_cst (ref_type, 0));
- tree vec_dest2
- = vect_create_destination_var (scalar_dest, vectype);
- new_stmt = gimple_build_assign (vec_dest2, data_ref);
- new_temp = make_ssa_name (vec_dest2, new_stmt);
+ vec_dest = vect_create_destination_var (scalar_dest,
+ vectype);
+ new_stmt = gimple_build_assign (vec_dest, data_ref);
+ new_temp = make_ssa_name (vec_dest, new_stmt);
gimple_assign_set_lhs (new_stmt, new_temp);
gimple_set_vdef (new_stmt, gimple_vdef (stmt));
gimple_set_vuse (new_stmt, gimple_vuse (stmt));
@@ -8863,7 +8402,6 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
data_ref
= build2 (MEM_REF, vectype, ptr,
build_int_cst (ref_type, 0));
- new_stmt = gimple_build_assign (vec_dest, data_ref);
break;
}
case dr_explicit_realign_optimized:
@@ -8876,23 +8414,22 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
new_stmt = gimple_build_assign
(new_temp, BIT_AND_EXPR, dataref_ptr,
build_int_cst (TREE_TYPE (dataref_ptr),
- -(HOST_WIDE_INT) align));
+ -(HOST_WIDE_INT) align));
vect_finish_stmt_generation (stmt, new_stmt, gsi);
data_ref
= build2 (MEM_REF, vectype, new_temp,
build_int_cst (ref_type, 0));
- new_stmt = gimple_build_assign (vec_dest, data_ref);
break;
}
default:
gcc_unreachable ();
}
-
+ vec_dest = vect_create_destination_var (scalar_dest, vectype);
+ /* DATA_REF is null if we've already built the statement. */
+ if (data_ref)
+ new_stmt = gimple_build_assign (vec_dest, data_ref);
new_temp = make_ssa_name (vec_dest, new_stmt);
- if (is_gimple_call (new_stmt))
- gimple_call_set_lhs (new_stmt, new_temp);
- else
- gimple_assign_set_lhs (new_stmt, new_temp);
+ gimple_set_lhs (new_stmt, new_temp);
vect_finish_stmt_generation (stmt, new_stmt, gsi);
/* 3. Handle explicit realignment if necessary/supported.
@@ -9166,7 +8703,7 @@ vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi,
vect_reduction_type reduction_type
= STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info);
- if (!REDUCTION_IS_COND_REDUCTION_P (reduction_type))
+ if (reduction_type == TREE_CODE_REDUCTION)
{
if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
return false;
@@ -9325,7 +8862,7 @@ vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi,
/* Handle def. */
scalar_dest = gimple_assign_lhs (stmt);
- if (reduction_type != COND_REDUCTION_CLASTB)
+ if (reduction_type != EXTRACT_LAST_REDUCTION)
vec_dest = vect_create_destination_var (scalar_dest, vectype);
/* Handle cond expr. */
@@ -9465,7 +9002,7 @@ vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi,
}
}
}
- if (reduction_type == COND_REDUCTION_CLASTB)
+ if (reduction_type == EXTRACT_LAST_REDUCTION)
{
if (!is_gimple_val (vec_compare))
{
@@ -9477,7 +9014,8 @@ vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi,
}
gcc_assert (reduc_index == 2);
new_stmt = gimple_build_call_internal
- (IFN_CLASTB, 3, vec_compare, else_clause, vec_then_clause);
+ (IFN_FOLD_EXTRACT_LAST, 3, else_clause, vec_compare,
+ vec_then_clause);
gimple_call_set_lhs (new_stmt, scalar_dest);
SSA_NAME_DEF_STMT (scalar_dest) = new_stmt;
if (stmt == gsi_stmt (*gsi))
@@ -10217,17 +9755,18 @@ vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi,
case store_vec_info_type:
done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
gcc_assert (done);
- if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node
- && STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) != VMAT_GATHER_SCATTER)
+ if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
{
/* In case of interleaving, the whole chain is vectorized when the
last store in the chain is reached. Store stmts before the last
one are skipped, and there vec_stmt_info shouldn't be freed
meanwhile. */
*grouped_store = true;
- if (STMT_VINFO_VEC_STMT (stmt_info))
+ stmt_vec_info group_info
+ = vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info));
+ if (GROUP_STORE_COUNT (group_info) == GROUP_SIZE (group_info))
is_store = true;
- }
+ }
else
is_store = true;
break;
@@ -10245,19 +9784,6 @@ vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi,
case call_vec_info_type:
done = vectorizable_call (stmt, gsi, &vec_stmt, slp_node);
stmt = gsi_stmt (*gsi);
- if (gimple_call_internal_p (stmt, IFN_MASK_STORE))
- {
- gcc_assert (!slp_node);
- /* As with normal stores, we vectorize the whole group when
- we reach the last call in the group. The other calls are
- in the group are left with a null VEC_STMT. */
- if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
- && (STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info)
- != VMAT_GATHER_SCATTER))
- *grouped_store = true;
- if (STMT_VINFO_VEC_STMT (stmt_info))
- is_store = true;
- }
break;
case call_simd_clone_vec_info_type:
@@ -10408,7 +9934,6 @@ new_stmt_vec_info (gimple *stmt, vec_info *vinfo)
GROUP_FIRST_ELEMENT (res) = NULL;
GROUP_NEXT_ELEMENT (res) = NULL;
GROUP_SIZE (res) = 0;
- GROUP_NUM_STMTS (res) = 0;
GROUP_FIRST_UID (res) = 0;
GROUP_LAST_UID (res) = 0;
GROUP_STORE_COUNT (res) = 0;
diff --git a/gcc/tree-vectorizer.h b/gcc/tree-vectorizer.h
index 69e2c3ef9c7..96d1567cae9 100644
--- a/gcc/tree-vectorizer.h
+++ b/gcc/tree-vectorizer.h
@@ -65,20 +65,25 @@ enum vect_def_type {
/* Define type of reduction. */
enum vect_reduction_type {
TREE_CODE_REDUCTION,
- STRICT_FP_REDUCTION,
COND_REDUCTION,
INTEGER_INDUC_COND_REDUCTION,
CONST_COND_REDUCTION,
- COND_REDUCTION_CLASTB
-};
-/* Any type of condition reduction. */
-#define REDUCTION_IS_COND_REDUCTION_P(R) \
- ((R) != TREE_CODE_REDUCTION && (R) != STRICT_FP_REDUCTION)
+ /* Retain a scalar phi and use a FOLD_EXTRACT_LAST within the loop
+ to implement:
+
+ for (int i = 0; i < VF; ++i)
+ res = cond[i] ? val[i] : res; */
+ EXTRACT_LAST_REDUCTION,
-/* Any standard condition reduction. */
-#define REDUCTION_IS_FULL_COND_REDUCTION_P(R) (R == COND_REDUCTION \
- || R == COND_REDUCTION_CLASTB)
+ /* Use a folding reduction within the loop to implement:
+
+ for (int i = 0; i < VF; ++i)
+ res = res OP val[i];
+
+ (with no reassocation). */
+ FOLD_LEFT_REDUCTION
+};
#define VECTORIZABLE_CYCLE_DEF(D) (((D) == vect_reduction_def) \
|| ((D) == vect_double_reduction_def) \
@@ -256,27 +261,6 @@ struct vect_addr_base_hasher : free_ptr_hash <vect_addr_base_info>
static bool equal (const vect_addr_base_info *, const vect_addr_base_info *);
};
-struct gather_scatter_indices
-{
- /* Map from. */
- tree type;
- tree step;
-
- /* Map to. */
- tree indices;
-};
-
-/* Gather/scatter hashtable helpers. */
-
-struct gather_scatter_hasher : free_ptr_hash <gather_scatter_indices>
-{
- typedef gather_scatter_indices *value_type;
- typedef gather_scatter_indices *compare_type;
- static hashval_t hash (const gather_scatter_indices *);
- static bool equal (const gather_scatter_indices *,
- const gather_scatter_indices *);
-};
-
/* In general, we can divide the vector statements in a vectorized loop
into related groups ("rgroups") and say that for each rgroup there is
some nS such that the rgroup operates on nS values from one scalar
@@ -566,9 +550,6 @@ typedef struct _loop_vec_info : public vec_info {
/* A hash table used for caching vector base addresses. */
hash_table<vect_addr_base_hasher> vect_addr_base_htab;
- /* A hash table used for caching gather/scatter indices. */
- hash_table<gather_scatter_hasher> gather_scatter_htab;
-
/* A map from X to a precomputed gimple_val containing
CAPPED_VECTORIZATION_FACTOR * X. */
hash_map<tree, tree> vf_mult_map;
@@ -645,7 +626,6 @@ typedef struct _loop_vec_info : public vec_info {
#define LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST(L) (L)->single_scalar_iteration_cost
#define LOOP_VINFO_ORIG_LOOP_INFO(L) (L)->orig_loop_info
#define LOOP_VINFO_ADDR_CACHE(L) (L)->vect_addr_base_htab
-#define LOOP_VINFO_GATHER_SCATTER_CACHE(L) (L)->gather_scatter_htab
#define LOOP_VINFO_VF_MULT_MAP(L) (L)->vf_mult_map
#define LOOP_VINFO_SPECULATIVE_EXECUTION(L) (L)->speculative_execution
#define LOOP_VINFO_EXIT_TEST_MASK(L) (L)->exit_test_mask
@@ -785,6 +765,14 @@ enum slp_vect_type {
hybrid
};
+/* Says whether a statement is a load, a store of a vectorized statement
+ result, or a store of an invariant value. */
+enum vec_load_store_type {
+ VLS_LOAD,
+ VLS_STORE,
+ VLS_STORE_INVARIANT
+};
+
/* Describes how we're going to vectorize an individual load or store,
or a group of loads or stores. */
enum vect_memory_access_type {
@@ -962,9 +950,6 @@ typedef struct _stmt_vec_info {
/* The number of scalar stmt references from active SLP instances. */
unsigned int num_slp_uses;
- /* Number of real statements in a group. */
- unsigned int num_stmts;
-
/* For GROUP_FIRST_ELEMENT statements, these fields give the UIDs of
the first and last statements in the group, otherwise both are
equal to the statement's UID. */
@@ -974,30 +959,19 @@ typedef struct _stmt_vec_info {
/* Information about a gather/scatter call. */
struct gather_scatter_info {
- /* The FUNCTION_DECL for the built-in gather/scatter function. */
+ /* The internal function to use for the gather/scatter operation,
+ or IFN_LAST if a built-in function should be used instead. */
+ internal_fn ifn;
+
+ /* The FUNCTION_DECL for the built-in gather/scatter function,
+ or null if an internal function should be used instead. */
tree decl;
/* The loop-invariant base value. */
tree base;
- union
- {
- /* If the offset needs to be vectorized, this is the original
- original scalar offset, which is a non-loop-invariant SSA_NAME. */
- tree offset;
-
- /* If the offset should be [0, STEP, STEP*2, ...], then this is
- the step value. */
- tree step;
- } u;
-
- /* The type of the scalar offset. If OFFSET is nonnull then this
- is TREE_TYPE (OFFSET). */
- tree offset_type;
-
- /* The type to which OFFSET_TYPE must be widened, or OFFSET_TYPE
- itself if no widening is necessary. */
- tree widened_offset_type;
+ /* The original scalar offset, which is a non-loop-invariant SSA_NAME. */
+ tree offset;
/* Each offset element should be multiplied by this amount before
being added to the base. */
@@ -1008,6 +982,12 @@ struct gather_scatter_info {
/* The type of the vectorized offset. */
tree offset_vectype;
+
+ /* The type of the scalar elements after loading or before storing. */
+ tree element_type;
+
+ /* The type of the scalar elements being loaded or stored. */
+ tree memory_type;
};
/* Access Functions. */
@@ -1065,7 +1045,6 @@ STMT_VINFO_BB_VINFO (stmt_vec_info stmt_vinfo)
#define STMT_VINFO_GROUP_STORE_COUNT(S) (S)->store_count
#define STMT_VINFO_GROUP_GAP(S) (S)->gap
#define STMT_VINFO_GROUP_SAME_DR_STMT(S) (S)->same_dr_stmt
-#define STMT_VINFO_GROUP_NUM_STMTS(S) (S)->num_stmts
#define STMT_VINFO_GROUP_FIRST_UID(S) (S)->first_uid
#define STMT_VINFO_GROUP_LAST_UID(S) (S)->last_uid
#define STMT_VINFO_GROUPED_ACCESS(S) ((S)->first_element != NULL && (S)->data_ref_info)
@@ -1082,7 +1061,6 @@ STMT_VINFO_BB_VINFO (stmt_vec_info stmt_vinfo)
#define GROUP_STORE_COUNT(S) (S)->store_count
#define GROUP_GAP(S) (S)->gap
#define GROUP_SAME_DR_STMT(S) (S)->same_dr_stmt
-#define GROUP_NUM_STMTS(S) (S)->num_stmts
#define GROUP_FIRST_UID(S) (S)->first_uid
#define GROUP_LAST_UID(S) (S)->last_uid
@@ -1583,7 +1561,7 @@ extern void vect_model_simple_cost (stmt_vec_info, int, enum vect_def_type *,
int, stmt_vector_for_cost *,
stmt_vector_for_cost *);
extern void vect_model_store_cost (stmt_vec_info, int, vect_memory_access_type,
- enum vect_def_type, slp_tree,
+ vec_load_store_type, slp_tree,
stmt_vector_for_cost *,
stmt_vector_for_cost *);
extern void vect_model_load_cost (stmt_vec_info, int, vect_memory_access_type,
@@ -1596,6 +1574,7 @@ extern void vect_finish_replace_stmt (gimple *, gimple *);
extern void vect_finish_stmt_generation (gimple *, gimple *,
gimple_stmt_iterator *);
extern bool vect_mark_stmts_to_be_vectorized (loop_vec_info);
+extern tree vect_get_store_rhs (gimple *);
extern tree vect_get_vec_def_for_operand_1 (gimple *, enum vect_def_type);
extern tree vect_get_vec_def_for_operand (tree, gimple *, tree = NULL);
extern void vect_get_vec_defs (tree, tree, gimple *, vec<tree> *,
@@ -1638,15 +1617,16 @@ extern bool vect_verify_datarefs_alignment (loop_vec_info);
extern bool vect_slp_analyze_and_verify_instance_alignment (slp_instance);
extern bool vect_analyze_data_ref_accesses (vec_info *);
extern bool vect_prune_runtime_alias_test_list (loop_vec_info);
+extern bool vect_gather_scatter_fn_p (bool, bool, tree, tree, unsigned int,
+ signop, int, internal_fn *, tree *);
extern bool vect_check_gather_scatter (gimple *, loop_vec_info,
- gather_scatter_info *, bool);
+ gather_scatter_info *);
extern bool vect_analyze_data_refs (vec_info *, poly_uint64 *);
extern void vect_record_base_alignments (vec_info *);
-extern tree vect_create_data_ref_ptr (gimple *, tree, unsigned int,
- struct loop *, tree,
+extern tree vect_create_data_ref_ptr (gimple *, tree, struct loop *, tree,
tree *, gimple_stmt_iterator *,
gimple **, bool, bool *,
- tree = NULL_TREE);
+ tree = NULL_TREE, tree = NULL_TREE);
extern tree bump_vector_ptr (tree, gimple *, gimple_stmt_iterator *, gimple *,
tree);
extern tree vect_create_destination_var (tree, tree);
@@ -1721,7 +1701,7 @@ extern gimple *vect_find_last_scalar_stmt_in_slp (slp_tree);
extern bool is_simple_and_all_uses_invariant (gimple *, loop_vec_info);
extern bool can_duplicate_and_interleave_p (unsigned int, machine_mode,
unsigned int * = NULL,
- machine_mode * = NULL);
+ tree * = NULL);
extern void duplicate_and_interleave (gimple_seq *, tree, vec<tree>,
unsigned int, vec<tree> &);
@@ -1730,7 +1710,7 @@ extern void duplicate_and_interleave (gimple_seq *, tree, vec<tree>,
Additional pattern recognition functions can (and will) be added
in the future. */
typedef gimple *(* vect_recog_func_ptr) (vec<gimple *> *, tree *, tree *);
-#define NUM_PATTERNS 14
+#define NUM_PATTERNS 15
void vect_pattern_recog (vec_info *);
/* In tree-vectorizer.c. */
diff --git a/gcc/tree-vrp.c b/gcc/tree-vrp.c
index f229c8983bf..b35033a4c2c 100644
--- a/gcc/tree-vrp.c
+++ b/gcc/tree-vrp.c
@@ -64,12 +64,7 @@ along with GCC; see the file COPYING3. If not see
#include "tree-cfgcleanup.h"
#include "stringpool.h"
#include "attribs.h"
-
-#define VR_INITIALIZER { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL }
-
-/* Allocation pools for tree-vrp allocations. */
-static object_allocator<value_range> vrp_value_range_pool ("Tree VRP value ranges");
-static bitmap_obstack vrp_equiv_obstack;
+#include "vr-values.h"
/* Set of SSA names found live during the RPO traversal of the function
for still active basic-blocks. */
@@ -84,28 +79,6 @@ live_on_edge (edge e, tree name)
&& bitmap_bit_p (live[e->dest->index], SSA_NAME_VERSION (name)));
}
-/* Local functions. */
-static int compare_values (tree val1, tree val2);
-static int compare_values_warnv (tree val1, tree val2, bool *);
-static tree vrp_evaluate_conditional_warnv_with_ops (enum tree_code,
- tree, tree, bool, bool *,
- bool *);
-
-struct assert_info
-{
- /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */
- enum tree_code comp_code;
-
- /* Name to register the assert for. */
- tree name;
-
- /* Value being compared against. */
- tree val;
-
- /* Expression to compare. */
- tree expr;
-};
-
/* Location information for ASSERT_EXPRs. Each instance of this
structure describes an ASSERT_EXPR for an SSA name. Since a single
SSA name may have more than one assertion associated with it, these
@@ -145,29 +118,13 @@ static bitmap need_assert_for;
ASSERT_EXPRs for SSA name N_I should be inserted. */
static assert_locus **asserts_for;
-/* Value range array. After propagation, VR_VALUE[I] holds the range
- of values that SSA name N_I may take. */
-static unsigned num_vr_values;
-static value_range **vr_value;
-static bool values_propagated;
-
-/* For a PHI node which sets SSA name N_I, VR_COUNTS[I] holds the
- number of executable edges we saw the last time we visited the
- node. */
-static int *vr_phi_edge_counts;
-
-struct switch_update {
- gswitch *stmt;
- tree vec;
-};
-
-static vec<edge> to_remove_edges;
-static vec<switch_update> to_update_switch_stmts;
+vec<edge> to_remove_edges;
+vec<switch_update> to_update_switch_stmts;
/* Return the maximum value for TYPE. */
-static inline tree
+tree
vrp_val_max (const_tree type)
{
if (!INTEGRAL_TYPE_P (type))
@@ -178,7 +135,7 @@ vrp_val_max (const_tree type)
/* Return the minimum value for TYPE. */
-static inline tree
+tree
vrp_val_min (const_tree type)
{
if (!INTEGRAL_TYPE_P (type))
@@ -192,7 +149,7 @@ vrp_val_min (const_tree type)
C typedefs and Ada subtypes can produce types whose TYPE_MAX_VALUE
is not == to the integer constant with the same value in the type. */
-static inline bool
+bool
vrp_val_is_max (const_tree val)
{
tree type_max = vrp_val_max (TREE_TYPE (val));
@@ -203,7 +160,7 @@ vrp_val_is_max (const_tree val)
/* Return whether VAL is equal to the minimum value of its type. */
-static inline bool
+bool
vrp_val_is_min (const_tree val)
{
tree type_min = vrp_val_min (TREE_TYPE (val));
@@ -224,10 +181,9 @@ set_value_range_to_undefined (value_range *vr)
bitmap_clear (vr->equiv);
}
-
/* Set value range VR to VR_VARYING. */
-static inline void
+void
set_value_range_to_varying (value_range *vr)
{
vr->type = VR_VARYING;
@@ -236,10 +192,9 @@ set_value_range_to_varying (value_range *vr)
bitmap_clear (vr->equiv);
}
-
/* Set value range VR to {T, MIN, MAX, EQUIV}. */
-static void
+void
set_value_range (value_range *vr, enum value_range_type t, tree min,
tree max, bitmap equiv)
{
@@ -272,10 +227,13 @@ set_value_range (value_range *vr, enum value_range_type t, tree min,
vr->max = max;
/* Since updating the equivalence set involves deep copying the
- bitmaps, only do it if absolutely necessary. */
+ bitmaps, only do it if absolutely necessary.
+
+ All equivalence bitmaps are allocated from the same obstack. So
+ we can use the obstack associated with EQUIV to allocate vr->equiv. */
if (vr->equiv == NULL
&& equiv != NULL)
- vr->equiv = BITMAP_ALLOC (&vrp_equiv_obstack);
+ vr->equiv = BITMAP_ALLOC (equiv->obstack);
if (equiv != vr->equiv)
{
@@ -296,7 +254,7 @@ set_value_range (value_range *vr, enum value_range_type t, tree min,
This routine exists to ease canonicalization in the case where we
extract ranges from var + CST op limit. */
-static void
+void
set_and_canonicalize_value_range (value_range *vr, enum value_range_type t,
tree min, tree max, bitmap equiv)
{
@@ -404,7 +362,7 @@ set_and_canonicalize_value_range (value_range *vr, enum value_range_type t,
/* Copy value range FROM into value range TO. */
-static inline void
+void
copy_value_range (value_range *to, value_range *from)
{
set_value_range (to, from->type, from->min, from->max, from->equiv);
@@ -414,7 +372,7 @@ copy_value_range (value_range *to, value_range *from)
with values we get from statements, and exists to clear the
TREE_OVERFLOW flag. */
-static inline void
+void
set_value_range_to_value (value_range *vr, tree val, bitmap equiv)
{
gcc_assert (is_gimple_min_invariant (val));
@@ -423,18 +381,9 @@ set_value_range_to_value (value_range *vr, tree val, bitmap equiv)
set_value_range (vr, VR_RANGE, val, val, equiv);
}
-/* Set value range VR to a non-negative range of type TYPE. */
-
-static inline void
-set_value_range_to_nonnegative (value_range *vr, tree type)
-{
- tree zero = build_int_cst (type, 0);
- set_value_range (vr, VR_RANGE, zero, vrp_val_max (type), vr->equiv);
-}
-
/* Set value range VR to a non-NULL range of type TYPE. */
-static inline void
+void
set_value_range_to_nonnull (value_range *vr, tree type)
{
tree zero = build_int_cst (type, 0);
@@ -444,27 +393,13 @@ set_value_range_to_nonnull (value_range *vr, tree type)
/* Set value range VR to a NULL range of type TYPE. */
-static inline void
+void
set_value_range_to_null (value_range *vr, tree type)
{
set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv);
}
-/* Set value range VR to a range of a truthvalue of type TYPE. */
-
-static inline void
-set_value_range_to_truthvalue (value_range *vr, tree type)
-{
- if (TYPE_PRECISION (type) == 1)
- set_value_range_to_varying (vr);
- else
- set_value_range (vr, VR_RANGE,
- build_int_cst (type, 0), build_int_cst (type, 1),
- vr->equiv);
-}
-
-
/* If abs (min) < abs (max), set VR to [-max, max], if
abs (min) >= abs (max), set VR to [-min, min]. */
@@ -500,103 +435,9 @@ abs_extent_range (value_range *vr, tree min, tree max)
set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
}
-
-/* Return value range information for VAR.
-
- If we have no values ranges recorded (ie, VRP is not running), then
- return NULL. Otherwise create an empty range if none existed for VAR. */
-
-static value_range *
-get_value_range (const_tree var)
-{
- static const value_range vr_const_varying
- = { VR_VARYING, NULL_TREE, NULL_TREE, NULL };
- value_range *vr;
- tree sym;
- unsigned ver = SSA_NAME_VERSION (var);
-
- /* If we have no recorded ranges, then return NULL. */
- if (! vr_value)
- return NULL;
-
- /* If we query the range for a new SSA name return an unmodifiable VARYING.
- We should get here at most from the substitute-and-fold stage which
- will never try to change values. */
- if (ver >= num_vr_values)
- return CONST_CAST (value_range *, &vr_const_varying);
-
- vr = vr_value[ver];
- if (vr)
- return vr;
-
- /* After propagation finished do not allocate new value-ranges. */
- if (values_propagated)
- return CONST_CAST (value_range *, &vr_const_varying);
-
- /* Create a default value range. */
- vr_value[ver] = vr = vrp_value_range_pool.allocate ();
- memset (vr, 0, sizeof (*vr));
-
- /* Defer allocating the equivalence set. */
- vr->equiv = NULL;
-
- /* If VAR is a default definition of a parameter, the variable can
- take any value in VAR's type. */
- if (SSA_NAME_IS_DEFAULT_DEF (var))
- {
- sym = SSA_NAME_VAR (var);
- if (TREE_CODE (sym) == PARM_DECL)
- {
- /* Try to use the "nonnull" attribute to create ~[0, 0]
- anti-ranges for pointers. Note that this is only valid with
- default definitions of PARM_DECLs. */
- if (POINTER_TYPE_P (TREE_TYPE (sym))
- && (nonnull_arg_p (sym)
- || get_ptr_nonnull (var)))
- set_value_range_to_nonnull (vr, TREE_TYPE (sym));
- else if (INTEGRAL_TYPE_P (TREE_TYPE (sym)))
- {
- wide_int min, max;
- value_range_type rtype = get_range_info (var, &min, &max);
- if (rtype == VR_RANGE || rtype == VR_ANTI_RANGE)
- set_value_range (vr, rtype,
- wide_int_to_tree (TREE_TYPE (var), min),
- wide_int_to_tree (TREE_TYPE (var), max),
- NULL);
- else
- set_value_range_to_varying (vr);
- }
- else
- set_value_range_to_varying (vr);
- }
- else if (TREE_CODE (sym) == RESULT_DECL
- && DECL_BY_REFERENCE (sym))
- set_value_range_to_nonnull (vr, TREE_TYPE (sym));
- }
-
- return vr;
-}
-
-/* Set value-ranges of all SSA names defined by STMT to varying. */
-
-static void
-set_defs_to_varying (gimple *stmt)
-{
- ssa_op_iter i;
- tree def;
- FOR_EACH_SSA_TREE_OPERAND (def, stmt, i, SSA_OP_DEF)
- {
- value_range *vr = get_value_range (def);
- /* Avoid writing to vr_const_varying get_value_range may return. */
- if (vr->type != VR_VARYING)
- set_value_range_to_varying (vr);
- }
-}
-
-
/* Return true, if VAL1 and VAL2 are equal values for VRP purposes. */
-static inline bool
+bool
vrp_operand_equal_p (const_tree val1, const_tree val2)
{
if (val1 == val2)
@@ -608,7 +449,7 @@ vrp_operand_equal_p (const_tree val1, const_tree val2)
/* Return true, if the bitmaps B1 and B2 are equal. */
-static inline bool
+bool
vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
{
return (b1 == b2
@@ -618,92 +459,9 @@ vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
&& bitmap_equal_p (b1, b2)));
}
-/* Update the value range and equivalence set for variable VAR to
- NEW_VR. Return true if NEW_VR is different from VAR's previous
- value.
-
- NOTE: This function assumes that NEW_VR is a temporary value range
- object created for the sole purpose of updating VAR's range. The
- storage used by the equivalence set from NEW_VR will be freed by
- this function. Do not call update_value_range when NEW_VR
- is the range object associated with another SSA name. */
-
-static inline bool
-update_value_range (const_tree var, value_range *new_vr)
-{
- value_range *old_vr;
- bool is_new;
-
- /* If there is a value-range on the SSA name from earlier analysis
- factor that in. */
- if (INTEGRAL_TYPE_P (TREE_TYPE (var)))
- {
- wide_int min, max;
- value_range_type rtype = get_range_info (var, &min, &max);
- if (rtype == VR_RANGE || rtype == VR_ANTI_RANGE)
- {
- tree nr_min, nr_max;
- nr_min = wide_int_to_tree (TREE_TYPE (var), min);
- nr_max = wide_int_to_tree (TREE_TYPE (var), max);
- value_range nr = VR_INITIALIZER;
- set_and_canonicalize_value_range (&nr, rtype, nr_min, nr_max, NULL);
- vrp_intersect_ranges (new_vr, &nr);
- }
- }
-
- /* Update the value range, if necessary. */
- old_vr = get_value_range (var);
- is_new = old_vr->type != new_vr->type
- || !vrp_operand_equal_p (old_vr->min, new_vr->min)
- || !vrp_operand_equal_p (old_vr->max, new_vr->max)
- || !vrp_bitmap_equal_p (old_vr->equiv, new_vr->equiv);
-
- if (is_new)
- {
- /* Do not allow transitions up the lattice. The following
- is slightly more awkward than just new_vr->type < old_vr->type
- because VR_RANGE and VR_ANTI_RANGE need to be considered
- the same. We may not have is_new when transitioning to
- UNDEFINED. If old_vr->type is VARYING, we shouldn't be
- called. */
- if (new_vr->type == VR_UNDEFINED)
- {
- BITMAP_FREE (new_vr->equiv);
- set_value_range_to_varying (old_vr);
- set_value_range_to_varying (new_vr);
- return true;
- }
- else
- set_value_range (old_vr, new_vr->type, new_vr->min, new_vr->max,
- new_vr->equiv);
- }
-
- BITMAP_FREE (new_vr->equiv);
-
- return is_new;
-}
-
-
-/* Add VAR and VAR's equivalence set to EQUIV. This is the central
- point where equivalence processing can be turned on/off. */
-
-static void
-add_equivalence (bitmap *equiv, const_tree var)
-{
- unsigned ver = SSA_NAME_VERSION (var);
- value_range *vr = get_value_range (var);
-
- if (*equiv == NULL)
- *equiv = BITMAP_ALLOC (&vrp_equiv_obstack);
- bitmap_set_bit (*equiv, ver);
- if (vr && vr->equiv)
- bitmap_ior_into (*equiv, vr->equiv);
-}
-
-
/* Return true if VR is ~[0, 0]. */
-static inline bool
+bool
range_is_nonnull (value_range *vr)
{
return vr->type == VR_ANTI_RANGE
@@ -725,7 +483,7 @@ range_is_null (value_range *vr)
/* Return true if max and min of VR are INTEGER_CST. It's not necessary
a singleton. */
-static inline bool
+bool
range_int_cst_p (value_range *vr)
{
return (vr->type == VR_RANGE
@@ -735,7 +493,7 @@ range_int_cst_p (value_range *vr)
/* Return true if VR is a INTEGER_CST singleton. */
-static inline bool
+bool
range_int_cst_singleton_p (value_range *vr)
{
return (range_int_cst_p (vr)
@@ -744,7 +502,7 @@ range_int_cst_singleton_p (value_range *vr)
/* Return true if value range VR involves at least one symbol. */
-static inline bool
+bool
symbolic_range_p (value_range *vr)
{
return (!is_gimple_min_invariant (vr->min)
@@ -755,7 +513,7 @@ symbolic_range_p (value_range *vr)
otherwise. We only handle additive operations and set NEG to true if the
symbol is negated and INV to the invariant part, if any. */
-static tree
+tree
get_single_symbol (tree t, bool *neg, tree *inv)
{
bool neg_;
@@ -824,161 +582,11 @@ build_symbolic_expr (tree type, tree sym, bool neg, tree inv)
return build2 (pointer_p ? POINTER_PLUS_EXPR : PLUS_EXPR, type, t, inv);
}
-/* Return true if value range VR involves exactly one symbol SYM. */
-
-static bool
-symbolic_range_based_on_p (value_range *vr, const_tree sym)
-{
- bool neg, min_has_symbol, max_has_symbol;
- tree inv;
-
- if (is_gimple_min_invariant (vr->min))
- min_has_symbol = false;
- else if (get_single_symbol (vr->min, &neg, &inv) == sym)
- min_has_symbol = true;
- else
- return false;
-
- if (is_gimple_min_invariant (vr->max))
- max_has_symbol = false;
- else if (get_single_symbol (vr->max, &neg, &inv) == sym)
- max_has_symbol = true;
- else
- return false;
-
- return (min_has_symbol || max_has_symbol);
-}
-
-/* Return true if the result of assignment STMT is know to be non-zero. */
-
-static bool
-gimple_assign_nonzero_p (gimple *stmt)
-{
- enum tree_code code = gimple_assign_rhs_code (stmt);
- bool strict_overflow_p;
- switch (get_gimple_rhs_class (code))
- {
- case GIMPLE_UNARY_RHS:
- return tree_unary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
- gimple_expr_type (stmt),
- gimple_assign_rhs1 (stmt),
- &strict_overflow_p);
- case GIMPLE_BINARY_RHS:
- return tree_binary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
- gimple_expr_type (stmt),
- gimple_assign_rhs1 (stmt),
- gimple_assign_rhs2 (stmt),
- &strict_overflow_p);
- case GIMPLE_TERNARY_RHS:
- return false;
- case GIMPLE_SINGLE_RHS:
- return tree_single_nonzero_warnv_p (gimple_assign_rhs1 (stmt),
- &strict_overflow_p);
- case GIMPLE_INVALID_RHS:
- gcc_unreachable ();
- default:
- gcc_unreachable ();
- }
-}
-
-/* Return true if STMT is known to compute a non-zero value. */
-
-static bool
-gimple_stmt_nonzero_p (gimple *stmt)
-{
- switch (gimple_code (stmt))
- {
- case GIMPLE_ASSIGN:
- return gimple_assign_nonzero_p (stmt);
- case GIMPLE_CALL:
- {
- tree fndecl = gimple_call_fndecl (stmt);
- if (!fndecl) return false;
- if (flag_delete_null_pointer_checks && !flag_check_new
- && DECL_IS_OPERATOR_NEW (fndecl)
- && !TREE_NOTHROW (fndecl))
- return true;
- /* References are always non-NULL. */
- if (flag_delete_null_pointer_checks
- && TREE_CODE (TREE_TYPE (fndecl)) == REFERENCE_TYPE)
- return true;
- if (flag_delete_null_pointer_checks &&
- lookup_attribute ("returns_nonnull",
- TYPE_ATTRIBUTES (gimple_call_fntype (stmt))))
- return true;
-
- gcall *call_stmt = as_a<gcall *> (stmt);
- unsigned rf = gimple_call_return_flags (call_stmt);
- if (rf & ERF_RETURNS_ARG)
- {
- unsigned argnum = rf & ERF_RETURN_ARG_MASK;
- if (argnum < gimple_call_num_args (call_stmt))
- {
- tree arg = gimple_call_arg (call_stmt, argnum);
- if (SSA_VAR_P (arg)
- && infer_nonnull_range_by_attribute (stmt, arg))
- return true;
- }
- }
- return gimple_alloca_call_p (stmt);
- }
- default:
- gcc_unreachable ();
- }
-}
-
-/* Like tree_expr_nonzero_p, but this function uses value ranges
- obtained so far. */
-
-static bool
-vrp_stmt_computes_nonzero (gimple *stmt)
-{
- if (gimple_stmt_nonzero_p (stmt))
- return true;
-
- /* If we have an expression of the form &X->a, then the expression
- is nonnull if X is nonnull. */
- if (is_gimple_assign (stmt)
- && gimple_assign_rhs_code (stmt) == ADDR_EXPR)
- {
- tree expr = gimple_assign_rhs1 (stmt);
- tree base = get_base_address (TREE_OPERAND (expr, 0));
-
- if (base != NULL_TREE
- && TREE_CODE (base) == MEM_REF
- && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
- {
- value_range *vr = get_value_range (TREE_OPERAND (base, 0));
- if (range_is_nonnull (vr))
- return true;
- }
- }
-
- return false;
-}
-
-/* Returns true if EXPR is a valid value (as expected by compare_values) --
- a gimple invariant, or SSA_NAME +- CST. */
-
-static bool
-valid_value_p (tree expr)
-{
- if (TREE_CODE (expr) == SSA_NAME)
- return true;
-
- if (TREE_CODE (expr) == PLUS_EXPR
- || TREE_CODE (expr) == MINUS_EXPR)
- return (TREE_CODE (TREE_OPERAND (expr, 0)) == SSA_NAME
- && TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST);
-
- return is_gimple_min_invariant (expr);
-}
-
/* Return
1 if VAL < VAL2
0 if !(VAL < VAL2)
-2 if those are incomparable. */
-static inline int
+int
operand_less_p (tree val, tree val2)
{
/* LT is folded faster than GE and others. Inline the common case. */
@@ -1020,7 +628,7 @@ operand_less_p (tree val, tree val2)
true if the return value is only valid if we assume that signed
overflow is undefined. */
-static int
+int
compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
{
if (val1 == val2)
@@ -1174,7 +782,7 @@ compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
/* Compare values like compare_values_warnv. */
-static int
+int
compare_values (tree val1, tree val2)
{
bool sop;
@@ -1189,7 +797,7 @@ compare_values (tree val1, tree val2)
Benchmark compile/20001226-1.c compilation time after changing this
function. */
-static inline int
+int
value_inside_range (tree val, tree min, tree max)
{
int cmp1, cmp2;
@@ -1232,7 +840,7 @@ value_ranges_intersect_p (value_range *vr0, value_range *vr1)
/* Return 1 if [MIN, MAX] includes the value zero, 0 if it does not
include the value zero, -2 if we cannot tell. */
-static inline int
+int
range_includes_zero_p (tree min, tree max)
{
tree zero = build_int_cst (TREE_TYPE (min), 0);
@@ -1259,7 +867,7 @@ value_range_nonnegative_p (value_range *vr)
/* If *VR has a value rante that is a single constant value return that,
otherwise return NULL_TREE. */
-static tree
+tree
value_range_constant_singleton (value_range *vr)
{
if (vr->type == VR_RANGE
@@ -1270,355 +878,6 @@ value_range_constant_singleton (value_range *vr)
return NULL_TREE;
}
-/* If OP has a value range with a single constant value return that,
- otherwise return NULL_TREE. This returns OP itself if OP is a
- constant. */
-
-static tree
-op_with_constant_singleton_value_range (tree op)
-{
- if (is_gimple_min_invariant (op))
- return op;
-
- if (TREE_CODE (op) != SSA_NAME)
- return NULL_TREE;
-
- return value_range_constant_singleton (get_value_range (op));
-}
-
-/* Return true if op is in a boolean [0, 1] value-range. */
-
-static bool
-op_with_boolean_value_range_p (tree op)
-{
- value_range *vr;
-
- if (TYPE_PRECISION (TREE_TYPE (op)) == 1)
- return true;
-
- if (integer_zerop (op)
- || integer_onep (op))
- return true;
-
- if (TREE_CODE (op) != SSA_NAME)
- return false;
-
- vr = get_value_range (op);
- return (vr->type == VR_RANGE
- && integer_zerop (vr->min)
- && integer_onep (vr->max));
-}
-
-/* Extract value range information for VAR when (OP COND_CODE LIMIT) is
- true and store it in *VR_P. */
-
-static void
-extract_range_for_var_from_comparison_expr (tree var, enum tree_code cond_code,
- tree op, tree limit,
- value_range *vr_p)
-{
- tree min, max, type;
- value_range *limit_vr;
- type = TREE_TYPE (var);
- gcc_assert (limit != var);
-
- /* For pointer arithmetic, we only keep track of pointer equality
- and inequality. */
- if (POINTER_TYPE_P (type) && cond_code != NE_EXPR && cond_code != EQ_EXPR)
- {
- set_value_range_to_varying (vr_p);
- return;
- }
-
- /* If LIMIT is another SSA name and LIMIT has a range of its own,
- try to use LIMIT's range to avoid creating symbolic ranges
- unnecessarily. */
- limit_vr = (TREE_CODE (limit) == SSA_NAME) ? get_value_range (limit) : NULL;
-
- /* LIMIT's range is only interesting if it has any useful information. */
- if (! limit_vr
- || limit_vr->type == VR_UNDEFINED
- || limit_vr->type == VR_VARYING
- || (symbolic_range_p (limit_vr)
- && ! (limit_vr->type == VR_RANGE
- && (limit_vr->min == limit_vr->max
- || operand_equal_p (limit_vr->min, limit_vr->max, 0)))))
- limit_vr = NULL;
-
- /* Initially, the new range has the same set of equivalences of
- VAR's range. This will be revised before returning the final
- value. Since assertions may be chained via mutually exclusive
- predicates, we will need to trim the set of equivalences before
- we are done. */
- gcc_assert (vr_p->equiv == NULL);
- add_equivalence (&vr_p->equiv, var);
-
- /* Extract a new range based on the asserted comparison for VAR and
- LIMIT's value range. Notice that if LIMIT has an anti-range, we
- will only use it for equality comparisons (EQ_EXPR). For any
- other kind of assertion, we cannot derive a range from LIMIT's
- anti-range that can be used to describe the new range. For
- instance, ASSERT_EXPR <x_2, x_2 <= b_4>. If b_4 is ~[2, 10],
- then b_4 takes on the ranges [-INF, 1] and [11, +INF]. There is
- no single range for x_2 that could describe LE_EXPR, so we might
- as well build the range [b_4, +INF] for it.
- One special case we handle is extracting a range from a
- range test encoded as (unsigned)var + CST <= limit. */
- if (TREE_CODE (op) == NOP_EXPR
- || TREE_CODE (op) == PLUS_EXPR)
- {
- if (TREE_CODE (op) == PLUS_EXPR)
- {
- min = fold_build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (op, 1)),
- TREE_OPERAND (op, 1));
- max = int_const_binop (PLUS_EXPR, limit, min);
- op = TREE_OPERAND (op, 0);
- }
- else
- {
- min = build_int_cst (TREE_TYPE (var), 0);
- max = limit;
- }
-
- /* Make sure to not set TREE_OVERFLOW on the final type
- conversion. We are willingly interpreting large positive
- unsigned values as negative signed values here. */
- min = force_fit_type (TREE_TYPE (var), wi::to_widest (min), 0, false);
- max = force_fit_type (TREE_TYPE (var), wi::to_widest (max), 0, false);
-
- /* We can transform a max, min range to an anti-range or
- vice-versa. Use set_and_canonicalize_value_range which does
- this for us. */
- if (cond_code == LE_EXPR)
- set_and_canonicalize_value_range (vr_p, VR_RANGE,
- min, max, vr_p->equiv);
- else if (cond_code == GT_EXPR)
- set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
- min, max, vr_p->equiv);
- else
- gcc_unreachable ();
- }
- else if (cond_code == EQ_EXPR)
- {
- enum value_range_type range_type;
-
- if (limit_vr)
- {
- range_type = limit_vr->type;
- min = limit_vr->min;
- max = limit_vr->max;
- }
- else
- {
- range_type = VR_RANGE;
- min = limit;
- max = limit;
- }
-
- set_value_range (vr_p, range_type, min, max, vr_p->equiv);
-
- /* When asserting the equality VAR == LIMIT and LIMIT is another
- SSA name, the new range will also inherit the equivalence set
- from LIMIT. */
- if (TREE_CODE (limit) == SSA_NAME)
- add_equivalence (&vr_p->equiv, limit);
- }
- else if (cond_code == NE_EXPR)
- {
- /* As described above, when LIMIT's range is an anti-range and
- this assertion is an inequality (NE_EXPR), then we cannot
- derive anything from the anti-range. For instance, if
- LIMIT's range was ~[0, 0], the assertion 'VAR != LIMIT' does
- not imply that VAR's range is [0, 0]. So, in the case of
- anti-ranges, we just assert the inequality using LIMIT and
- not its anti-range.
-
- If LIMIT_VR is a range, we can only use it to build a new
- anti-range if LIMIT_VR is a single-valued range. For
- instance, if LIMIT_VR is [0, 1], the predicate
- VAR != [0, 1] does not mean that VAR's range is ~[0, 1].
- Rather, it means that for value 0 VAR should be ~[0, 0]
- and for value 1, VAR should be ~[1, 1]. We cannot
- represent these ranges.
-
- The only situation in which we can build a valid
- anti-range is when LIMIT_VR is a single-valued range
- (i.e., LIMIT_VR->MIN == LIMIT_VR->MAX). In that case,
- build the anti-range ~[LIMIT_VR->MIN, LIMIT_VR->MAX]. */
- if (limit_vr
- && limit_vr->type == VR_RANGE
- && compare_values (limit_vr->min, limit_vr->max) == 0)
- {
- min = limit_vr->min;
- max = limit_vr->max;
- }
- else
- {
- /* In any other case, we cannot use LIMIT's range to build a
- valid anti-range. */
- min = max = limit;
- }
-
- /* If MIN and MAX cover the whole range for their type, then
- just use the original LIMIT. */
- if (INTEGRAL_TYPE_P (type)
- && vrp_val_is_min (min)
- && vrp_val_is_max (max))
- min = max = limit;
-
- set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
- min, max, vr_p->equiv);
- }
- else if (cond_code == LE_EXPR || cond_code == LT_EXPR)
- {
- min = TYPE_MIN_VALUE (type);
-
- if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
- max = limit;
- else
- {
- /* If LIMIT_VR is of the form [N1, N2], we need to build the
- range [MIN, N2] for LE_EXPR and [MIN, N2 - 1] for
- LT_EXPR. */
- max = limit_vr->max;
- }
-
- /* If the maximum value forces us to be out of bounds, simply punt.
- It would be pointless to try and do anything more since this
- all should be optimized away above us. */
- if (cond_code == LT_EXPR
- && compare_values (max, min) == 0)
- set_value_range_to_varying (vr_p);
- else
- {
- /* For LT_EXPR, we create the range [MIN, MAX - 1]. */
- if (cond_code == LT_EXPR)
- {
- if (TYPE_PRECISION (TREE_TYPE (max)) == 1
- && !TYPE_UNSIGNED (TREE_TYPE (max)))
- max = fold_build2 (PLUS_EXPR, TREE_TYPE (max), max,
- build_int_cst (TREE_TYPE (max), -1));
- else
- max = fold_build2 (MINUS_EXPR, TREE_TYPE (max), max,
- build_int_cst (TREE_TYPE (max), 1));
- /* Signal to compare_values_warnv this expr doesn't overflow. */
- if (EXPR_P (max))
- TREE_NO_WARNING (max) = 1;
- }
-
- set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
- }
- }
- else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
- {
- max = TYPE_MAX_VALUE (type);
-
- if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
- min = limit;
- else
- {
- /* If LIMIT_VR is of the form [N1, N2], we need to build the
- range [N1, MAX] for GE_EXPR and [N1 + 1, MAX] for
- GT_EXPR. */
- min = limit_vr->min;
- }
-
- /* If the minimum value forces us to be out of bounds, simply punt.
- It would be pointless to try and do anything more since this
- all should be optimized away above us. */
- if (cond_code == GT_EXPR
- && compare_values (min, max) == 0)
- set_value_range_to_varying (vr_p);
- else
- {
- /* For GT_EXPR, we create the range [MIN + 1, MAX]. */
- if (cond_code == GT_EXPR)
- {
- if (TYPE_PRECISION (TREE_TYPE (min)) == 1
- && !TYPE_UNSIGNED (TREE_TYPE (min)))
- min = fold_build2 (MINUS_EXPR, TREE_TYPE (min), min,
- build_int_cst (TREE_TYPE (min), -1));
- else
- min = fold_build2 (PLUS_EXPR, TREE_TYPE (min), min,
- build_int_cst (TREE_TYPE (min), 1));
- /* Signal to compare_values_warnv this expr doesn't overflow. */
- if (EXPR_P (min))
- TREE_NO_WARNING (min) = 1;
- }
-
- set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
- }
- }
- else
- gcc_unreachable ();
-
- /* Finally intersect the new range with what we already know about var. */
- vrp_intersect_ranges (vr_p, get_value_range (var));
-}
-
-/* Extract value range information from an ASSERT_EXPR EXPR and store
- it in *VR_P. */
-
-static void
-extract_range_from_assert (value_range *vr_p, tree expr)
-{
- tree var = ASSERT_EXPR_VAR (expr);
- tree cond = ASSERT_EXPR_COND (expr);
- tree limit, op;
- enum tree_code cond_code;
- gcc_assert (COMPARISON_CLASS_P (cond));
-
- /* Find VAR in the ASSERT_EXPR conditional. */
- if (var == TREE_OPERAND (cond, 0)
- || TREE_CODE (TREE_OPERAND (cond, 0)) == PLUS_EXPR
- || TREE_CODE (TREE_OPERAND (cond, 0)) == NOP_EXPR)
- {
- /* If the predicate is of the form VAR COMP LIMIT, then we just
- take LIMIT from the RHS and use the same comparison code. */
- cond_code = TREE_CODE (cond);
- limit = TREE_OPERAND (cond, 1);
- op = TREE_OPERAND (cond, 0);
- }
- else
- {
- /* If the predicate is of the form LIMIT COMP VAR, then we need
- to flip around the comparison code to create the proper range
- for VAR. */
- cond_code = swap_tree_comparison (TREE_CODE (cond));
- limit = TREE_OPERAND (cond, 0);
- op = TREE_OPERAND (cond, 1);
- }
- extract_range_for_var_from_comparison_expr (var, cond_code, op,
- limit, vr_p);
-}
-
-/* Extract range information from SSA name VAR and store it in VR. If
- VAR has an interesting range, use it. Otherwise, create the
- range [VAR, VAR] and return it. This is useful in situations where
- we may have conditionals testing values of VARYING names. For
- instance,
-
- x_3 = y_5;
- if (x_3 > y_5)
- ...
-
- Even if y_5 is deemed VARYING, we can determine that x_3 > y_5 is
- always false. */
-
-static void
-extract_range_from_ssa_name (value_range *vr, tree var)
-{
- value_range *var_vr = get_value_range (var);
-
- if (var_vr->type != VR_VARYING)
- copy_value_range (vr, var_vr);
- else
- set_value_range (vr, VR_RANGE, var, var, NULL);
-
- add_equivalence (&vr->equiv, var);
-}
-
-
/* Wrapper around int_const_binop. Return true if we can compute the
result; i.e. if the operation doesn't overflow or if the overflow is
undefined. In the latter case (if the operation overflows and
@@ -1752,7 +1011,7 @@ vrp_int_const_binop (enum tree_code code, tree val1, tree val2, wide_int *res)
bitmask if some bit is set, it means for all numbers in the range
the bit is 1, otherwise it might be 0 or 1. */
-static bool
+bool
zero_nonzero_bits_from_vr (const tree expr_type,
value_range *vr,
wide_int *may_be_nonzero,
@@ -1942,7 +1201,7 @@ extract_range_from_multiplicative_op_1 (value_range *vr,
the ranges of each of its operands *VR0 and *VR1 with resulting
type EXPR_TYPE. The resulting range is stored in *VR. */
-static void
+void
extract_range_from_binary_expr_1 (value_range *vr,
enum tree_code code, tree expr_type,
value_range *vr0_, value_range *vr1_)
@@ -3037,105 +2296,6 @@ extract_range_from_binary_expr_1 (value_range *vr,
set_value_range (vr, type, min, max, NULL);
}
-/* Extract range information from a binary expression OP0 CODE OP1 based on
- the ranges of each of its operands with resulting type EXPR_TYPE.
- The resulting range is stored in *VR. */
-
-static void
-extract_range_from_binary_expr (value_range *vr,
- enum tree_code code,
- tree expr_type, tree op0, tree op1)
-{
- value_range vr0 = VR_INITIALIZER;
- value_range vr1 = VR_INITIALIZER;
-
- /* Get value ranges for each operand. For constant operands, create
- a new value range with the operand to simplify processing. */
- if (TREE_CODE (op0) == SSA_NAME)
- vr0 = *(get_value_range (op0));
- else if (is_gimple_min_invariant (op0))
- set_value_range_to_value (&vr0, op0, NULL);
- else
- set_value_range_to_varying (&vr0);
-
- if (TREE_CODE (op1) == SSA_NAME)
- vr1 = *(get_value_range (op1));
- else if (is_gimple_min_invariant (op1))
- set_value_range_to_value (&vr1, op1, NULL);
- else
- set_value_range_to_varying (&vr1);
-
- extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &vr1);
-
- /* Try harder for PLUS and MINUS if the range of one operand is symbolic
- and based on the other operand, for example if it was deduced from a
- symbolic comparison. When a bound of the range of the first operand
- is invariant, we set the corresponding bound of the new range to INF
- in order to avoid recursing on the range of the second operand. */
- if (vr->type == VR_VARYING
- && (code == PLUS_EXPR || code == MINUS_EXPR)
- && TREE_CODE (op1) == SSA_NAME
- && vr0.type == VR_RANGE
- && symbolic_range_based_on_p (&vr0, op1))
- {
- const bool minus_p = (code == MINUS_EXPR);
- value_range n_vr1 = VR_INITIALIZER;
-
- /* Try with VR0 and [-INF, OP1]. */
- if (is_gimple_min_invariant (minus_p ? vr0.max : vr0.min))
- set_value_range (&n_vr1, VR_RANGE, vrp_val_min (expr_type), op1, NULL);
-
- /* Try with VR0 and [OP1, +INF]. */
- else if (is_gimple_min_invariant (minus_p ? vr0.min : vr0.max))
- set_value_range (&n_vr1, VR_RANGE, op1, vrp_val_max (expr_type), NULL);
-
- /* Try with VR0 and [OP1, OP1]. */
- else
- set_value_range (&n_vr1, VR_RANGE, op1, op1, NULL);
-
- extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &n_vr1);
- }
-
- if (vr->type == VR_VARYING
- && (code == PLUS_EXPR || code == MINUS_EXPR)
- && TREE_CODE (op0) == SSA_NAME
- && vr1.type == VR_RANGE
- && symbolic_range_based_on_p (&vr1, op0))
- {
- const bool minus_p = (code == MINUS_EXPR);
- value_range n_vr0 = VR_INITIALIZER;
-
- /* Try with [-INF, OP0] and VR1. */
- if (is_gimple_min_invariant (minus_p ? vr1.max : vr1.min))
- set_value_range (&n_vr0, VR_RANGE, vrp_val_min (expr_type), op0, NULL);
-
- /* Try with [OP0, +INF] and VR1. */
- else if (is_gimple_min_invariant (minus_p ? vr1.min : vr1.max))
- set_value_range (&n_vr0, VR_RANGE, op0, vrp_val_max (expr_type), NULL);
-
- /* Try with [OP0, OP0] and VR1. */
- else
- set_value_range (&n_vr0, VR_RANGE, op0, op0, NULL);
-
- extract_range_from_binary_expr_1 (vr, code, expr_type, &n_vr0, &vr1);
- }
-
- /* If we didn't derive a range for MINUS_EXPR, and
- op1's range is ~[op0,op0] or vice-versa, then we
- can derive a non-null range. This happens often for
- pointer subtraction. */
- if (vr->type == VR_VARYING
- && code == MINUS_EXPR
- && TREE_CODE (op0) == SSA_NAME
- && ((vr0.type == VR_ANTI_RANGE
- && vr0.min == op1
- && vr0.min == vr0.max)
- || (vr1.type == VR_ANTI_RANGE
- && vr1.min == op0
- && vr1.min == vr1.max)))
- set_value_range_to_nonnull (vr, TREE_TYPE (op0));
-}
-
/* Extract range information from a unary operation CODE based on
the range of its operand *VR0 with type OP0_TYPE with resulting type TYPE.
The resulting range is stored in *VR. */
@@ -3383,1040 +2543,11 @@ extract_range_from_unary_expr (value_range *vr,
return;
}
-
-/* Extract range information from a unary expression CODE OP0 based on
- the range of its operand with resulting type TYPE.
- The resulting range is stored in *VR. */
-
-static void
-extract_range_from_unary_expr (value_range *vr, enum tree_code code,
- tree type, tree op0)
-{
- value_range vr0 = VR_INITIALIZER;
-
- /* Get value ranges for the operand. For constant operands, create
- a new value range with the operand to simplify processing. */
- if (TREE_CODE (op0) == SSA_NAME)
- vr0 = *(get_value_range (op0));
- else if (is_gimple_min_invariant (op0))
- set_value_range_to_value (&vr0, op0, NULL);
- else
- set_value_range_to_varying (&vr0);
-
- extract_range_from_unary_expr (vr, code, type, &vr0, TREE_TYPE (op0));
-}
-
-
-/* Extract range information from a conditional expression STMT based on
- the ranges of each of its operands and the expression code. */
-
-static void
-extract_range_from_cond_expr (value_range *vr, gassign *stmt)
-{
- tree op0, op1;
- value_range vr0 = VR_INITIALIZER;
- value_range vr1 = VR_INITIALIZER;
-
- /* Get value ranges for each operand. For constant operands, create
- a new value range with the operand to simplify processing. */
- op0 = gimple_assign_rhs2 (stmt);
- if (TREE_CODE (op0) == SSA_NAME)
- vr0 = *(get_value_range (op0));
- else if (is_gimple_min_invariant (op0))
- set_value_range_to_value (&vr0, op0, NULL);
- else
- set_value_range_to_varying (&vr0);
-
- op1 = gimple_assign_rhs3 (stmt);
- if (TREE_CODE (op1) == SSA_NAME)
- vr1 = *(get_value_range (op1));
- else if (is_gimple_min_invariant (op1))
- set_value_range_to_value (&vr1, op1, NULL);
- else
- set_value_range_to_varying (&vr1);
-
- /* The resulting value range is the union of the operand ranges */
- copy_value_range (vr, &vr0);
- vrp_meet (vr, &vr1);
-}
-
-
-/* Extract range information from a comparison expression EXPR based
- on the range of its operand and the expression code. */
-
-static void
-extract_range_from_comparison (value_range *vr, enum tree_code code,
- tree type, tree op0, tree op1)
-{
- bool sop;
- tree val;
-
- val = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, false, &sop,
- NULL);
- if (val)
- {
- /* Since this expression was found on the RHS of an assignment,
- its type may be different from _Bool. Convert VAL to EXPR's
- type. */
- val = fold_convert (type, val);
- if (is_gimple_min_invariant (val))
- set_value_range_to_value (vr, val, vr->equiv);
- else
- set_value_range (vr, VR_RANGE, val, val, vr->equiv);
- }
- else
- /* The result of a comparison is always true or false. */
- set_value_range_to_truthvalue (vr, type);
-}
-
-/* Helper function for simplify_internal_call_using_ranges and
- extract_range_basic. Return true if OP0 SUBCODE OP1 for
- SUBCODE {PLUS,MINUS,MULT}_EXPR is known to never overflow or
- always overflow. Set *OVF to true if it is known to always
- overflow. */
-
-static bool
-check_for_binary_op_overflow (enum tree_code subcode, tree type,
- tree op0, tree op1, bool *ovf)
-{
- value_range vr0 = VR_INITIALIZER;
- value_range vr1 = VR_INITIALIZER;
- if (TREE_CODE (op0) == SSA_NAME)
- vr0 = *get_value_range (op0);
- else if (TREE_CODE (op0) == INTEGER_CST)
- set_value_range_to_value (&vr0, op0, NULL);
- else
- set_value_range_to_varying (&vr0);
-
- if (TREE_CODE (op1) == SSA_NAME)
- vr1 = *get_value_range (op1);
- else if (TREE_CODE (op1) == INTEGER_CST)
- set_value_range_to_value (&vr1, op1, NULL);
- else
- set_value_range_to_varying (&vr1);
-
- if (!range_int_cst_p (&vr0)
- || TREE_OVERFLOW (vr0.min)
- || TREE_OVERFLOW (vr0.max))
- {
- vr0.min = vrp_val_min (TREE_TYPE (op0));
- vr0.max = vrp_val_max (TREE_TYPE (op0));
- }
- if (!range_int_cst_p (&vr1)
- || TREE_OVERFLOW (vr1.min)
- || TREE_OVERFLOW (vr1.max))
- {
- vr1.min = vrp_val_min (TREE_TYPE (op1));
- vr1.max = vrp_val_max (TREE_TYPE (op1));
- }
- *ovf = arith_overflowed_p (subcode, type, vr0.min,
- subcode == MINUS_EXPR ? vr1.max : vr1.min);
- if (arith_overflowed_p (subcode, type, vr0.max,
- subcode == MINUS_EXPR ? vr1.min : vr1.max) != *ovf)
- return false;
- if (subcode == MULT_EXPR)
- {
- if (arith_overflowed_p (subcode, type, vr0.min, vr1.max) != *ovf
- || arith_overflowed_p (subcode, type, vr0.max, vr1.min) != *ovf)
- return false;
- }
- if (*ovf)
- {
- /* So far we found that there is an overflow on the boundaries.
- That doesn't prove that there is an overflow even for all values
- in between the boundaries. For that compute widest_int range
- of the result and see if it doesn't overlap the range of
- type. */
- widest_int wmin, wmax;
- widest_int w[4];
- int i;
- w[0] = wi::to_widest (vr0.min);
- w[1] = wi::to_widest (vr0.max);
- w[2] = wi::to_widest (vr1.min);
- w[3] = wi::to_widest (vr1.max);
- for (i = 0; i < 4; i++)
- {
- widest_int wt;
- switch (subcode)
- {
- case PLUS_EXPR:
- wt = wi::add (w[i & 1], w[2 + (i & 2) / 2]);
- break;
- case MINUS_EXPR:
- wt = wi::sub (w[i & 1], w[2 + (i & 2) / 2]);
- break;
- case MULT_EXPR:
- wt = wi::mul (w[i & 1], w[2 + (i & 2) / 2]);
- break;
- default:
- gcc_unreachable ();
- }
- if (i == 0)
- {
- wmin = wt;
- wmax = wt;
- }
- else
- {
- wmin = wi::smin (wmin, wt);
- wmax = wi::smax (wmax, wt);
- }
- }
- /* The result of op0 CODE op1 is known to be in range
- [wmin, wmax]. */
- widest_int wtmin = wi::to_widest (vrp_val_min (type));
- widest_int wtmax = wi::to_widest (vrp_val_max (type));
- /* If all values in [wmin, wmax] are smaller than
- [wtmin, wtmax] or all are larger than [wtmin, wtmax],
- the arithmetic operation will always overflow. */
- if (wmax < wtmin || wmin > wtmax)
- return true;
- return false;
- }
- return true;
-}
-
-/* Try to derive a nonnegative or nonzero range out of STMT relying
- primarily on generic routines in fold in conjunction with range data.
- Store the result in *VR */
-
-static void
-extract_range_basic (value_range *vr, gimple *stmt)
-{
- bool sop;
- tree type = gimple_expr_type (stmt);
-
- if (is_gimple_call (stmt))
- {
- tree arg;
- int mini, maxi, zerov = 0, prec;
- enum tree_code subcode = ERROR_MARK;
- combined_fn cfn = gimple_call_combined_fn (stmt);
- scalar_int_mode mode;
-
- switch (cfn)
- {
- case CFN_BUILT_IN_CONSTANT_P:
- /* If the call is __builtin_constant_p and the argument is a
- function parameter resolve it to false. This avoids bogus
- array bound warnings.
- ??? We could do this as early as inlining is finished. */
- arg = gimple_call_arg (stmt, 0);
- if (TREE_CODE (arg) == SSA_NAME
- && SSA_NAME_IS_DEFAULT_DEF (arg)
- && TREE_CODE (SSA_NAME_VAR (arg)) == PARM_DECL
- && cfun->after_inlining)
- {
- set_value_range_to_null (vr, type);
- return;
- }
- break;
- /* Both __builtin_ffs* and __builtin_popcount return
- [0, prec]. */
- CASE_CFN_FFS:
- CASE_CFN_POPCOUNT:
- arg = gimple_call_arg (stmt, 0);
- prec = TYPE_PRECISION (TREE_TYPE (arg));
- mini = 0;
- maxi = prec;
- if (TREE_CODE (arg) == SSA_NAME)
- {
- value_range *vr0 = get_value_range (arg);
- /* If arg is non-zero, then ffs or popcount
- are non-zero. */
- if ((vr0->type == VR_RANGE
- && range_includes_zero_p (vr0->min, vr0->max) == 0)
- || (vr0->type == VR_ANTI_RANGE
- && range_includes_zero_p (vr0->min, vr0->max) == 1))
- mini = 1;
- /* If some high bits are known to be zero,
- we can decrease the maximum. */
- if (vr0->type == VR_RANGE
- && TREE_CODE (vr0->max) == INTEGER_CST
- && !operand_less_p (vr0->min,
- build_zero_cst (TREE_TYPE (vr0->min))))
- maxi = tree_floor_log2 (vr0->max) + 1;
- }
- goto bitop_builtin;
- /* __builtin_parity* returns [0, 1]. */
- CASE_CFN_PARITY:
- mini = 0;
- maxi = 1;
- goto bitop_builtin;
- /* __builtin_c[lt]z* return [0, prec-1], except for
- when the argument is 0, but that is undefined behavior.
- On many targets where the CLZ RTL or optab value is defined
- for 0 the value is prec, so include that in the range
- by default. */
- CASE_CFN_CLZ:
- arg = gimple_call_arg (stmt, 0);
- prec = TYPE_PRECISION (TREE_TYPE (arg));
- mini = 0;
- maxi = prec;
- mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (arg));
- if (optab_handler (clz_optab, mode) != CODE_FOR_nothing
- && CLZ_DEFINED_VALUE_AT_ZERO (mode, zerov)
- /* Handle only the single common value. */
- && zerov != prec)
- /* Magic value to give up, unless vr0 proves
- arg is non-zero. */
- mini = -2;
- if (TREE_CODE (arg) == SSA_NAME)
- {
- value_range *vr0 = get_value_range (arg);
- /* From clz of VR_RANGE minimum we can compute
- result maximum. */
- if (vr0->type == VR_RANGE
- && TREE_CODE (vr0->min) == INTEGER_CST)
- {
- maxi = prec - 1 - tree_floor_log2 (vr0->min);
- if (maxi != prec)
- mini = 0;
- }
- else if (vr0->type == VR_ANTI_RANGE
- && integer_zerop (vr0->min))
- {
- maxi = prec - 1;
- mini = 0;
- }
- if (mini == -2)
- break;
- /* From clz of VR_RANGE maximum we can compute
- result minimum. */
- if (vr0->type == VR_RANGE
- && TREE_CODE (vr0->max) == INTEGER_CST)
- {
- mini = prec - 1 - tree_floor_log2 (vr0->max);
- if (mini == prec)
- break;
- }
- }
- if (mini == -2)
- break;
- goto bitop_builtin;
- /* __builtin_ctz* return [0, prec-1], except for
- when the argument is 0, but that is undefined behavior.
- If there is a ctz optab for this mode and
- CTZ_DEFINED_VALUE_AT_ZERO, include that in the range,
- otherwise just assume 0 won't be seen. */
- CASE_CFN_CTZ:
- arg = gimple_call_arg (stmt, 0);
- prec = TYPE_PRECISION (TREE_TYPE (arg));
- mini = 0;
- maxi = prec - 1;
- mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (arg));
- if (optab_handler (ctz_optab, mode) != CODE_FOR_nothing
- && CTZ_DEFINED_VALUE_AT_ZERO (mode, zerov))
- {
- /* Handle only the two common values. */
- if (zerov == -1)
- mini = -1;
- else if (zerov == prec)
- maxi = prec;
- else
- /* Magic value to give up, unless vr0 proves
- arg is non-zero. */
- mini = -2;
- }
- if (TREE_CODE (arg) == SSA_NAME)
- {
- value_range *vr0 = get_value_range (arg);
- /* If arg is non-zero, then use [0, prec - 1]. */
- if ((vr0->type == VR_RANGE
- && integer_nonzerop (vr0->min))
- || (vr0->type == VR_ANTI_RANGE
- && integer_zerop (vr0->min)))
- {
- mini = 0;
- maxi = prec - 1;
- }
- /* If some high bits are known to be zero,
- we can decrease the result maximum. */
- if (vr0->type == VR_RANGE
- && TREE_CODE (vr0->max) == INTEGER_CST)
- {
- maxi = tree_floor_log2 (vr0->max);
- /* For vr0 [0, 0] give up. */
- if (maxi == -1)
- break;
- }
- }
- if (mini == -2)
- break;
- goto bitop_builtin;
- /* __builtin_clrsb* returns [0, prec-1]. */
- CASE_CFN_CLRSB:
- arg = gimple_call_arg (stmt, 0);
- prec = TYPE_PRECISION (TREE_TYPE (arg));
- mini = 0;
- maxi = prec - 1;
- goto bitop_builtin;
- bitop_builtin:
- set_value_range (vr, VR_RANGE, build_int_cst (type, mini),
- build_int_cst (type, maxi), NULL);
- return;
- case CFN_UBSAN_CHECK_ADD:
- subcode = PLUS_EXPR;
- break;
- case CFN_UBSAN_CHECK_SUB:
- subcode = MINUS_EXPR;
- break;
- case CFN_UBSAN_CHECK_MUL:
- subcode = MULT_EXPR;
- break;
- case CFN_GOACC_DIM_SIZE:
- case CFN_GOACC_DIM_POS:
- /* Optimizing these two internal functions helps the loop
- optimizer eliminate outer comparisons. Size is [1,N]
- and pos is [0,N-1]. */
- {
- bool is_pos = cfn == CFN_GOACC_DIM_POS;
- int axis = oacc_get_ifn_dim_arg (stmt);
- int size = oacc_get_fn_dim_size (current_function_decl, axis);
-
- if (!size)
- /* If it's dynamic, the backend might know a hardware
- limitation. */
- size = targetm.goacc.dim_limit (axis);
-
- tree type = TREE_TYPE (gimple_call_lhs (stmt));
- set_value_range (vr, VR_RANGE,
- build_int_cst (type, is_pos ? 0 : 1),
- size ? build_int_cst (type, size - is_pos)
- : vrp_val_max (type), NULL);
- }
- return;
- case CFN_BUILT_IN_STRLEN:
- if (tree lhs = gimple_call_lhs (stmt))
- if (ptrdiff_type_node
- && (TYPE_PRECISION (ptrdiff_type_node)
- == TYPE_PRECISION (TREE_TYPE (lhs))))
- {
- tree type = TREE_TYPE (lhs);
- tree max = vrp_val_max (ptrdiff_type_node);
- wide_int wmax = wi::to_wide (max, TYPE_PRECISION (TREE_TYPE (max)));
- tree range_min = build_zero_cst (type);
- tree range_max = wide_int_to_tree (type, wmax - 1);
- set_value_range (vr, VR_RANGE, range_min, range_max, NULL);
- return;
- }
- break;
- default:
- break;
- }
- if (subcode != ERROR_MARK)
- {
- bool saved_flag_wrapv = flag_wrapv;
- /* Pretend the arithmetics is wrapping. If there is
- any overflow, we'll complain, but will actually do
- wrapping operation. */
- flag_wrapv = 1;
- extract_range_from_binary_expr (vr, subcode, type,
- gimple_call_arg (stmt, 0),
- gimple_call_arg (stmt, 1));
- flag_wrapv = saved_flag_wrapv;
-
- /* If for both arguments vrp_valueize returned non-NULL,
- this should have been already folded and if not, it
- wasn't folded because of overflow. Avoid removing the
- UBSAN_CHECK_* calls in that case. */
- if (vr->type == VR_RANGE
- && (vr->min == vr->max
- || operand_equal_p (vr->min, vr->max, 0)))
- set_value_range_to_varying (vr);
- return;
- }
- }
- /* Handle extraction of the two results (result of arithmetics and
- a flag whether arithmetics overflowed) from {ADD,SUB,MUL}_OVERFLOW
- internal function. Similarly from ATOMIC_COMPARE_EXCHANGE. */
- else if (is_gimple_assign (stmt)
- && (gimple_assign_rhs_code (stmt) == REALPART_EXPR
- || gimple_assign_rhs_code (stmt) == IMAGPART_EXPR)
- && INTEGRAL_TYPE_P (type))
- {
- enum tree_code code = gimple_assign_rhs_code (stmt);
- tree op = gimple_assign_rhs1 (stmt);
- if (TREE_CODE (op) == code && TREE_CODE (TREE_OPERAND (op, 0)) == SSA_NAME)
- {
- gimple *g = SSA_NAME_DEF_STMT (TREE_OPERAND (op, 0));
- if (is_gimple_call (g) && gimple_call_internal_p (g))
- {
- enum tree_code subcode = ERROR_MARK;
- switch (gimple_call_internal_fn (g))
- {
- case IFN_ADD_OVERFLOW:
- subcode = PLUS_EXPR;
- break;
- case IFN_SUB_OVERFLOW:
- subcode = MINUS_EXPR;
- break;
- case IFN_MUL_OVERFLOW:
- subcode = MULT_EXPR;
- break;
- case IFN_ATOMIC_COMPARE_EXCHANGE:
- if (code == IMAGPART_EXPR)
- {
- /* This is the boolean return value whether compare and
- exchange changed anything or not. */
- set_value_range (vr, VR_RANGE, build_int_cst (type, 0),
- build_int_cst (type, 1), NULL);
- return;
- }
- break;
- default:
- break;
- }
- if (subcode != ERROR_MARK)
- {
- tree op0 = gimple_call_arg (g, 0);
- tree op1 = gimple_call_arg (g, 1);
- if (code == IMAGPART_EXPR)
- {
- bool ovf = false;
- if (check_for_binary_op_overflow (subcode, type,
- op0, op1, &ovf))
- set_value_range_to_value (vr,
- build_int_cst (type, ovf),
- NULL);
- else if (TYPE_PRECISION (type) == 1
- && !TYPE_UNSIGNED (type))
- set_value_range_to_varying (vr);
- else
- set_value_range (vr, VR_RANGE, build_int_cst (type, 0),
- build_int_cst (type, 1), NULL);
- }
- else if (types_compatible_p (type, TREE_TYPE (op0))
- && types_compatible_p (type, TREE_TYPE (op1)))
- {
- bool saved_flag_wrapv = flag_wrapv;
- /* Pretend the arithmetics is wrapping. If there is
- any overflow, IMAGPART_EXPR will be set. */
- flag_wrapv = 1;
- extract_range_from_binary_expr (vr, subcode, type,
- op0, op1);
- flag_wrapv = saved_flag_wrapv;
- }
- else
- {
- value_range vr0 = VR_INITIALIZER;
- value_range vr1 = VR_INITIALIZER;
- bool saved_flag_wrapv = flag_wrapv;
- /* Pretend the arithmetics is wrapping. If there is
- any overflow, IMAGPART_EXPR will be set. */
- flag_wrapv = 1;
- extract_range_from_unary_expr (&vr0, NOP_EXPR,
- type, op0);
- extract_range_from_unary_expr (&vr1, NOP_EXPR,
- type, op1);
- extract_range_from_binary_expr_1 (vr, subcode, type,
- &vr0, &vr1);
- flag_wrapv = saved_flag_wrapv;
- }
- return;
- }
- }
- }
- }
- if (INTEGRAL_TYPE_P (type)
- && gimple_stmt_nonnegative_warnv_p (stmt, &sop))
- set_value_range_to_nonnegative (vr, type);
- else if (vrp_stmt_computes_nonzero (stmt))
- set_value_range_to_nonnull (vr, type);
- else
- set_value_range_to_varying (vr);
-}
-
-
-/* Try to compute a useful range out of assignment STMT and store it
- in *VR. */
-
-static void
-extract_range_from_assignment (value_range *vr, gassign *stmt)
-{
- enum tree_code code = gimple_assign_rhs_code (stmt);
-
- if (code == ASSERT_EXPR)
- extract_range_from_assert (vr, gimple_assign_rhs1 (stmt));
- else if (code == SSA_NAME)
- extract_range_from_ssa_name (vr, gimple_assign_rhs1 (stmt));
- else if (TREE_CODE_CLASS (code) == tcc_binary)
- extract_range_from_binary_expr (vr, gimple_assign_rhs_code (stmt),
- gimple_expr_type (stmt),
- gimple_assign_rhs1 (stmt),
- gimple_assign_rhs2 (stmt));
- else if (TREE_CODE_CLASS (code) == tcc_unary)
- extract_range_from_unary_expr (vr, gimple_assign_rhs_code (stmt),
- gimple_expr_type (stmt),
- gimple_assign_rhs1 (stmt));
- else if (code == COND_EXPR)
- extract_range_from_cond_expr (vr, stmt);
- else if (TREE_CODE_CLASS (code) == tcc_comparison)
- extract_range_from_comparison (vr, gimple_assign_rhs_code (stmt),
- gimple_expr_type (stmt),
- gimple_assign_rhs1 (stmt),
- gimple_assign_rhs2 (stmt));
- else if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS
- && is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
- set_value_range_to_value (vr, gimple_assign_rhs1 (stmt), NULL);
- else
- set_value_range_to_varying (vr);
-
- if (vr->type == VR_VARYING)
- extract_range_basic (vr, stmt);
-}
-
-/* Given a range VR, a LOOP and a variable VAR, determine whether it
- would be profitable to adjust VR using scalar evolution information
- for VAR. If so, update VR with the new limits. */
-
-static void
-adjust_range_with_scev (value_range *vr, struct loop *loop,
- gimple *stmt, tree var)
-{
- tree init, step, chrec, tmin, tmax, min, max, type, tem;
- enum ev_direction dir;
-
- /* TODO. Don't adjust anti-ranges. An anti-range may provide
- better opportunities than a regular range, but I'm not sure. */
- if (vr->type == VR_ANTI_RANGE)
- return;
-
- chrec = instantiate_parameters (loop, analyze_scalar_evolution (loop, var));
-
- /* Like in PR19590, scev can return a constant function. */
- if (is_gimple_min_invariant (chrec))
- {
- set_value_range_to_value (vr, chrec, vr->equiv);
- return;
- }
-
- if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
- return;
-
- init = initial_condition_in_loop_num (chrec, loop->num);
- tem = op_with_constant_singleton_value_range (init);
- if (tem)
- init = tem;
- step = evolution_part_in_loop_num (chrec, loop->num);
- tem = op_with_constant_singleton_value_range (step);
- if (tem)
- step = tem;
-
- /* If STEP is symbolic, we can't know whether INIT will be the
- minimum or maximum value in the range. Also, unless INIT is
- a simple expression, compare_values and possibly other functions
- in tree-vrp won't be able to handle it. */
- if (step == NULL_TREE
- || !is_gimple_min_invariant (step)
- || !valid_value_p (init))
- return;
-
- dir = scev_direction (chrec);
- if (/* Do not adjust ranges if we do not know whether the iv increases
- or decreases, ... */
- dir == EV_DIR_UNKNOWN
- /* ... or if it may wrap. */
- || scev_probably_wraps_p (NULL_TREE, init, step, stmt,
- get_chrec_loop (chrec), true))
- return;
-
- type = TREE_TYPE (var);
- if (POINTER_TYPE_P (type) || !TYPE_MIN_VALUE (type))
- tmin = lower_bound_in_type (type, type);
- else
- tmin = TYPE_MIN_VALUE (type);
- if (POINTER_TYPE_P (type) || !TYPE_MAX_VALUE (type))
- tmax = upper_bound_in_type (type, type);
- else
- tmax = TYPE_MAX_VALUE (type);
-
- /* Try to use estimated number of iterations for the loop to constrain the
- final value in the evolution. */
- if (TREE_CODE (step) == INTEGER_CST
- && is_gimple_val (init)
- && (TREE_CODE (init) != SSA_NAME
- || get_value_range (init)->type == VR_RANGE))
- {
- widest_int nit;
-
- /* We are only entering here for loop header PHI nodes, so using
- the number of latch executions is the correct thing to use. */
- if (max_loop_iterations (loop, &nit))
- {
- value_range maxvr = VR_INITIALIZER;
- signop sgn = TYPE_SIGN (TREE_TYPE (step));
- bool overflow;
-
- widest_int wtmp = wi::mul (wi::to_widest (step), nit, sgn,
- &overflow);
- /* If the multiplication overflowed we can't do a meaningful
- adjustment. Likewise if the result doesn't fit in the type
- of the induction variable. For a signed type we have to
- check whether the result has the expected signedness which
- is that of the step as number of iterations is unsigned. */
- if (!overflow
- && wi::fits_to_tree_p (wtmp, TREE_TYPE (init))
- && (sgn == UNSIGNED
- || wi::gts_p (wtmp, 0) == wi::gts_p (wi::to_wide (step), 0)))
- {
- tem = wide_int_to_tree (TREE_TYPE (init), wtmp);
- extract_range_from_binary_expr (&maxvr, PLUS_EXPR,
- TREE_TYPE (init), init, tem);
- /* Likewise if the addition did. */
- if (maxvr.type == VR_RANGE)
- {
- value_range initvr = VR_INITIALIZER;
-
- if (TREE_CODE (init) == SSA_NAME)
- initvr = *(get_value_range (init));
- else if (is_gimple_min_invariant (init))
- set_value_range_to_value (&initvr, init, NULL);
- else
- return;
-
- /* Check if init + nit * step overflows. Though we checked
- scev {init, step}_loop doesn't wrap, it is not enough
- because the loop may exit immediately. Overflow could
- happen in the plus expression in this case. */
- if ((dir == EV_DIR_DECREASES
- && compare_values (maxvr.min, initvr.min) != -1)
- || (dir == EV_DIR_GROWS
- && compare_values (maxvr.max, initvr.max) != 1))
- return;
-
- tmin = maxvr.min;
- tmax = maxvr.max;
- }
- }
- }
- }
-
- if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
- {
- min = tmin;
- max = tmax;
-
- /* For VARYING or UNDEFINED ranges, just about anything we get
- from scalar evolutions should be better. */
-
- if (dir == EV_DIR_DECREASES)
- max = init;
- else
- min = init;
- }
- else if (vr->type == VR_RANGE)
- {
- min = vr->min;
- max = vr->max;
-
- if (dir == EV_DIR_DECREASES)
- {
- /* INIT is the maximum value. If INIT is lower than VR->MAX
- but no smaller than VR->MIN, set VR->MAX to INIT. */
- if (compare_values (init, max) == -1)
- max = init;
-
- /* According to the loop information, the variable does not
- overflow. */
- if (compare_values (min, tmin) == -1)
- min = tmin;
-
- }
- else
- {
- /* If INIT is bigger than VR->MIN, set VR->MIN to INIT. */
- if (compare_values (init, min) == 1)
- min = init;
-
- if (compare_values (tmax, max) == -1)
- max = tmax;
- }
- }
- else
- return;
-
- /* If we just created an invalid range with the minimum
- greater than the maximum, we fail conservatively.
- This should happen only in unreachable
- parts of code, or for invalid programs. */
- if (compare_values (min, max) == 1)
- return;
-
- /* Even for valid range info, sometimes overflow flag will leak in.
- As GIMPLE IL should have no constants with TREE_OVERFLOW set, we
- drop them. */
- if (TREE_OVERFLOW_P (min))
- min = drop_tree_overflow (min);
- if (TREE_OVERFLOW_P (max))
- max = drop_tree_overflow (max);
-
- set_value_range (vr, VR_RANGE, min, max, vr->equiv);
-}
-
-
-/* Given two numeric value ranges VR0, VR1 and a comparison code COMP:
-
- - Return BOOLEAN_TRUE_NODE if VR0 COMP VR1 always returns true for
- all the values in the ranges.
-
- - Return BOOLEAN_FALSE_NODE if the comparison always returns false.
-
- - Return NULL_TREE if it is not always possible to determine the
- value of the comparison.
-
- Also set *STRICT_OVERFLOW_P to indicate whether comparision evaluation
- assumed signed overflow is undefined. */
-
-
-static tree
-compare_ranges (enum tree_code comp, value_range *vr0, value_range *vr1,
- bool *strict_overflow_p)
-{
- /* VARYING or UNDEFINED ranges cannot be compared. */
- if (vr0->type == VR_VARYING
- || vr0->type == VR_UNDEFINED
- || vr1->type == VR_VARYING
- || vr1->type == VR_UNDEFINED)
- return NULL_TREE;
-
- /* Anti-ranges need to be handled separately. */
- if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
- {
- /* If both are anti-ranges, then we cannot compute any
- comparison. */
- if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
- return NULL_TREE;
-
- /* These comparisons are never statically computable. */
- if (comp == GT_EXPR
- || comp == GE_EXPR
- || comp == LT_EXPR
- || comp == LE_EXPR)
- return NULL_TREE;
-
- /* Equality can be computed only between a range and an
- anti-range. ~[VAL1, VAL2] == [VAL1, VAL2] is always false. */
- if (vr0->type == VR_RANGE)
- {
- /* To simplify processing, make VR0 the anti-range. */
- value_range *tmp = vr0;
- vr0 = vr1;
- vr1 = tmp;
- }
-
- gcc_assert (comp == NE_EXPR || comp == EQ_EXPR);
-
- if (compare_values_warnv (vr0->min, vr1->min, strict_overflow_p) == 0
- && compare_values_warnv (vr0->max, vr1->max, strict_overflow_p) == 0)
- return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
-
- return NULL_TREE;
- }
-
- /* Simplify processing. If COMP is GT_EXPR or GE_EXPR, switch the
- operands around and change the comparison code. */
- if (comp == GT_EXPR || comp == GE_EXPR)
- {
- comp = (comp == GT_EXPR) ? LT_EXPR : LE_EXPR;
- std::swap (vr0, vr1);
- }
-
- if (comp == EQ_EXPR)
- {
- /* Equality may only be computed if both ranges represent
- exactly one value. */
- if (compare_values_warnv (vr0->min, vr0->max, strict_overflow_p) == 0
- && compare_values_warnv (vr1->min, vr1->max, strict_overflow_p) == 0)
- {
- int cmp_min = compare_values_warnv (vr0->min, vr1->min,
- strict_overflow_p);
- int cmp_max = compare_values_warnv (vr0->max, vr1->max,
- strict_overflow_p);
- if (cmp_min == 0 && cmp_max == 0)
- return boolean_true_node;
- else if (cmp_min != -2 && cmp_max != -2)
- return boolean_false_node;
- }
- /* If [V0_MIN, V1_MAX] < [V1_MIN, V1_MAX] then V0 != V1. */
- else if (compare_values_warnv (vr0->min, vr1->max,
- strict_overflow_p) == 1
- || compare_values_warnv (vr1->min, vr0->max,
- strict_overflow_p) == 1)
- return boolean_false_node;
-
- return NULL_TREE;
- }
- else if (comp == NE_EXPR)
- {
- int cmp1, cmp2;
-
- /* If VR0 is completely to the left or completely to the right
- of VR1, they are always different. Notice that we need to
- make sure that both comparisons yield similar results to
- avoid comparing values that cannot be compared at
- compile-time. */
- cmp1 = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
- cmp2 = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
- if ((cmp1 == -1 && cmp2 == -1) || (cmp1 == 1 && cmp2 == 1))
- return boolean_true_node;
-
- /* If VR0 and VR1 represent a single value and are identical,
- return false. */
- else if (compare_values_warnv (vr0->min, vr0->max,
- strict_overflow_p) == 0
- && compare_values_warnv (vr1->min, vr1->max,
- strict_overflow_p) == 0
- && compare_values_warnv (vr0->min, vr1->min,
- strict_overflow_p) == 0
- && compare_values_warnv (vr0->max, vr1->max,
- strict_overflow_p) == 0)
- return boolean_false_node;
-
- /* Otherwise, they may or may not be different. */
- else
- return NULL_TREE;
- }
- else if (comp == LT_EXPR || comp == LE_EXPR)
- {
- int tst;
-
- /* If VR0 is to the left of VR1, return true. */
- tst = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
- if ((comp == LT_EXPR && tst == -1)
- || (comp == LE_EXPR && (tst == -1 || tst == 0)))
- return boolean_true_node;
-
- /* If VR0 is to the right of VR1, return false. */
- tst = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
- if ((comp == LT_EXPR && (tst == 0 || tst == 1))
- || (comp == LE_EXPR && tst == 1))
- return boolean_false_node;
-
- /* Otherwise, we don't know. */
- return NULL_TREE;
- }
-
- gcc_unreachable ();
-}
-
-
-/* Given a value range VR, a value VAL and a comparison code COMP, return
- BOOLEAN_TRUE_NODE if VR COMP VAL always returns true for all the
- values in VR. Return BOOLEAN_FALSE_NODE if the comparison
- always returns false. Return NULL_TREE if it is not always
- possible to determine the value of the comparison. Also set
- *STRICT_OVERFLOW_P to indicate whether comparision evaluation
- assumed signed overflow is undefined. */
-
-static tree
-compare_range_with_value (enum tree_code comp, value_range *vr, tree val,
- bool *strict_overflow_p)
-{
- if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
- return NULL_TREE;
-
- /* Anti-ranges need to be handled separately. */
- if (vr->type == VR_ANTI_RANGE)
- {
- /* For anti-ranges, the only predicates that we can compute at
- compile time are equality and inequality. */
- if (comp == GT_EXPR
- || comp == GE_EXPR
- || comp == LT_EXPR
- || comp == LE_EXPR)
- return NULL_TREE;
-
- /* ~[VAL_1, VAL_2] OP VAL is known if VAL_1 <= VAL <= VAL_2. */
- if (value_inside_range (val, vr->min, vr->max) == 1)
- return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
-
- return NULL_TREE;
- }
-
- if (comp == EQ_EXPR)
- {
- /* EQ_EXPR may only be computed if VR represents exactly
- one value. */
- if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0)
- {
- int cmp = compare_values_warnv (vr->min, val, strict_overflow_p);
- if (cmp == 0)
- return boolean_true_node;
- else if (cmp == -1 || cmp == 1 || cmp == 2)
- return boolean_false_node;
- }
- else if (compare_values_warnv (val, vr->min, strict_overflow_p) == -1
- || compare_values_warnv (vr->max, val, strict_overflow_p) == -1)
- return boolean_false_node;
-
- return NULL_TREE;
- }
- else if (comp == NE_EXPR)
- {
- /* If VAL is not inside VR, then they are always different. */
- if (compare_values_warnv (vr->max, val, strict_overflow_p) == -1
- || compare_values_warnv (vr->min, val, strict_overflow_p) == 1)
- return boolean_true_node;
-
- /* If VR represents exactly one value equal to VAL, then return
- false. */
- if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0
- && compare_values_warnv (vr->min, val, strict_overflow_p) == 0)
- return boolean_false_node;
-
- /* Otherwise, they may or may not be different. */
- return NULL_TREE;
- }
- else if (comp == LT_EXPR || comp == LE_EXPR)
- {
- int tst;
-
- /* If VR is to the left of VAL, return true. */
- tst = compare_values_warnv (vr->max, val, strict_overflow_p);
- if ((comp == LT_EXPR && tst == -1)
- || (comp == LE_EXPR && (tst == -1 || tst == 0)))
- return boolean_true_node;
-
- /* If VR is to the right of VAL, return false. */
- tst = compare_values_warnv (vr->min, val, strict_overflow_p);
- if ((comp == LT_EXPR && (tst == 0 || tst == 1))
- || (comp == LE_EXPR && tst == 1))
- return boolean_false_node;
-
- /* Otherwise, we don't know. */
- return NULL_TREE;
- }
- else if (comp == GT_EXPR || comp == GE_EXPR)
- {
- int tst;
-
- /* If VR is to the right of VAL, return true. */
- tst = compare_values_warnv (vr->min, val, strict_overflow_p);
- if ((comp == GT_EXPR && tst == 1)
- || (comp == GE_EXPR && (tst == 0 || tst == 1)))
- return boolean_true_node;
-
- /* If VR is to the left of VAL, return false. */
- tst = compare_values_warnv (vr->max, val, strict_overflow_p);
- if ((comp == GT_EXPR && (tst == -1 || tst == 0))
- || (comp == GE_EXPR && tst == -1))
- return boolean_false_node;
-
- /* Otherwise, we don't know. */
- return NULL_TREE;
- }
-
- gcc_unreachable ();
-}
-
-
/* Debugging dumps. */
void dump_value_range (FILE *, const value_range *);
void debug_value_range (value_range *);
void dump_all_value_ranges (FILE *);
-void debug_all_value_ranges (void);
void dump_vr_equiv (FILE *, bitmap);
void debug_vr_equiv (bitmap);
@@ -4487,37 +2618,6 @@ debug_value_range (value_range *vr)
}
-/* Dump value ranges of all SSA_NAMEs to FILE. */
-
-void
-dump_all_value_ranges (FILE *file)
-{
- size_t i;
-
- for (i = 0; i < num_vr_values; i++)
- {
- if (vr_value[i])
- {
- print_generic_expr (file, ssa_name (i));
- fprintf (file, ": ");
- dump_value_range (file, vr_value[i]);
- fprintf (file, "\n");
- }
- }
-
- fprintf (file, "\n");
-}
-
-
-/* Dump all value ranges to stderr. */
-
-DEBUG_FUNCTION void
-debug_all_value_ranges (void)
-{
- dump_all_value_ranges (stderr);
-}
-
-
/* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
create a new SSA name N and return the assertion assignment
'N = ASSERT_EXPR <V, V OP W>'. */
@@ -4565,7 +2665,7 @@ fp_predicate (gimple *stmt)
describes the inferred range. Return true if a range could be
inferred. */
-static bool
+bool
infer_value_range (gimple *stmt, tree op, tree_code *comp_code_p, tree *val_p)
{
*val_p = NULL_TREE;
@@ -5000,7 +3100,7 @@ overflow_comparison_p_1 (enum tree_code code, tree op0, tree op1,
{ADD,SUB}_OVERFLOW sequences later in the optimizer pipeline. But
the alternate range representation is often useful within VRP. */
-static bool
+bool
overflow_comparison_p (tree_code code, tree name, tree val,
bool use_equiv_p, tree *new_cst)
{
@@ -5737,7 +3837,7 @@ is_masked_range_test (tree name, tree valt, enum tree_code cond_code,
the condition COND contributing to the conditional jump pointed to by
SI. */
-static void
+void
register_edge_assert_for (tree name, edge e,
enum tree_code cond_code, tree cond_op0,
tree cond_op1, vec<assert_info> &asserts)
@@ -6649,6 +4749,34 @@ insert_range_assertions (void)
BITMAP_FREE (need_assert_for);
}
+class vrp_prop : public ssa_propagation_engine
+{
+ public:
+ enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) FINAL OVERRIDE;
+ enum ssa_prop_result visit_phi (gphi *) FINAL OVERRIDE;
+
+ void vrp_initialize (void);
+ void vrp_finalize (bool);
+ void check_all_array_refs (void);
+ void check_array_ref (location_t, tree, bool);
+ void search_for_addr_array (tree, location_t);
+
+ class vr_values vr_values;
+ /* Temporary delegator to minimize code churn. */
+ value_range *get_value_range (const_tree op)
+ { return vr_values.get_value_range (op); }
+ void set_defs_to_varying (gimple *stmt)
+ { return vr_values.set_defs_to_varying (stmt); }
+ void extract_range_from_stmt (gimple *stmt, edge *taken_edge_p,
+ tree *output_p, value_range *vr)
+ { vr_values.extract_range_from_stmt (stmt, taken_edge_p, output_p, vr); }
+ bool update_value_range (const_tree op, value_range *vr)
+ { return vr_values.update_value_range (op, vr); }
+ void extract_range_basic (value_range *vr, gimple *stmt)
+ { vr_values.extract_range_basic (vr, stmt); }
+ void extract_range_from_phi_node (gphi *phi, value_range *vr)
+ { vr_values.extract_range_from_phi_node (phi, vr); }
+};
/* Checks one ARRAY_REF in REF, located at LOCUS. Ignores flexible arrays
and "struct" hacks. If VRP can determine that the
array subscript is a constant, check if it is outside valid
@@ -6656,8 +4784,9 @@ insert_range_assertions (void)
non-overlapping with valid range.
IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside a ADDR_EXPR. */
-static void
-check_array_ref (location_t location, tree ref, bool ignore_off_by_one)
+void
+vrp_prop::check_array_ref (location_t location, tree ref,
+ bool ignore_off_by_one)
{
value_range *vr = NULL;
tree low_sub, up_sub;
@@ -6749,8 +4878,8 @@ check_array_ref (location_t location, tree ref, bool ignore_off_by_one)
/* Searches if the expr T, located at LOCATION computes
address of an ARRAY_REF, and call check_array_ref on it. */
-static void
-search_for_addr_array (tree t, location_t location)
+void
+vrp_prop::search_for_addr_array (tree t, location_t location)
{
/* Check each ARRAY_REFs in the reference chain. */
do
@@ -6837,12 +4966,13 @@ check_array_bounds (tree *tp, int *walk_subtree, void *data)
*walk_subtree = TRUE;
+ vrp_prop *vrp_prop = (class vrp_prop *)wi->info;
if (TREE_CODE (t) == ARRAY_REF)
- check_array_ref (location, t, false /*ignore_off_by_one*/);
+ vrp_prop->check_array_ref (location, t, false /*ignore_off_by_one*/);
else if (TREE_CODE (t) == ADDR_EXPR)
{
- search_for_addr_array (t, location);
+ vrp_prop->search_for_addr_array (t, location);
*walk_subtree = FALSE;
}
@@ -6852,8 +4982,8 @@ check_array_bounds (tree *tp, int *walk_subtree, void *data)
/* Walk over all statements of all reachable BBs and call check_array_bounds
on them. */
-static void
-check_all_array_refs (void)
+void
+vrp_prop::check_all_array_refs ()
{
basic_block bb;
gimple_stmt_iterator si;
@@ -6880,6 +5010,8 @@ check_all_array_refs (void)
memset (&wi, 0, sizeof (wi));
+ wi.info = this;
+
walk_gimple_op (gsi_stmt (si),
check_array_bounds,
&wi);
@@ -7079,10 +5211,9 @@ remove_range_assertions (void)
}
}
-
/* Return true if STMT is interesting for VRP. */
-static bool
+bool
stmt_interesting_for_vrp (gimple *stmt)
{
if (gimple_code (stmt) == GIMPLE_PHI)
@@ -7128,22 +5259,10 @@ stmt_interesting_for_vrp (gimple *stmt)
return false;
}
-/* Initialize VRP lattice. */
-
-static void
-vrp_initialize_lattice ()
-{
- values_propagated = false;
- num_vr_values = num_ssa_names;
- vr_value = XCNEWVEC (value_range *, num_vr_values);
- vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names);
- bitmap_obstack_initialize (&vrp_equiv_obstack);
-}
-
/* Initialization required by ssa_propagate engine. */
-static void
-vrp_initialize ()
+void
+vrp_prop::vrp_initialize ()
{
basic_block bb;
@@ -7184,595 +5303,6 @@ vrp_initialize ()
}
}
-/* Return the singleton value-range for NAME or NAME. */
-
-static inline tree
-vrp_valueize (tree name)
-{
- if (TREE_CODE (name) == SSA_NAME)
- {
- value_range *vr = get_value_range (name);
- if (vr->type == VR_RANGE
- && (TREE_CODE (vr->min) == SSA_NAME
- || is_gimple_min_invariant (vr->min))
- && vrp_operand_equal_p (vr->min, vr->max))
- return vr->min;
- }
- return name;
-}
-
-/* Return the singleton value-range for NAME if that is a constant
- but signal to not follow SSA edges. */
-
-static inline tree
-vrp_valueize_1 (tree name)
-{
- if (TREE_CODE (name) == SSA_NAME)
- {
- /* If the definition may be simulated again we cannot follow
- this SSA edge as the SSA propagator does not necessarily
- re-visit the use. */
- gimple *def_stmt = SSA_NAME_DEF_STMT (name);
- if (!gimple_nop_p (def_stmt)
- && prop_simulate_again_p (def_stmt))
- return NULL_TREE;
- value_range *vr = get_value_range (name);
- if (range_int_cst_singleton_p (vr))
- return vr->min;
- }
- return name;
-}
-
-/* Visit assignment STMT. If it produces an interesting range, record
- the range in VR and set LHS to OUTPUT_P. */
-
-static void
-vrp_visit_assignment_or_call (gimple *stmt, tree *output_p, value_range *vr)
-{
- tree lhs;
- enum gimple_code code = gimple_code (stmt);
- lhs = gimple_get_lhs (stmt);
- *output_p = NULL_TREE;
-
- /* We only keep track of ranges in integral and pointer types. */
- if (TREE_CODE (lhs) == SSA_NAME
- && ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
- /* It is valid to have NULL MIN/MAX values on a type. See
- build_range_type. */
- && TYPE_MIN_VALUE (TREE_TYPE (lhs))
- && TYPE_MAX_VALUE (TREE_TYPE (lhs)))
- || POINTER_TYPE_P (TREE_TYPE (lhs))))
- {
- *output_p = lhs;
-
- /* Try folding the statement to a constant first. */
- tree tem = gimple_fold_stmt_to_constant_1 (stmt, vrp_valueize,
- vrp_valueize_1);
- if (tem)
- {
- if (TREE_CODE (tem) == SSA_NAME
- && (SSA_NAME_IS_DEFAULT_DEF (tem)
- || ! prop_simulate_again_p (SSA_NAME_DEF_STMT (tem))))
- {
- extract_range_from_ssa_name (vr, tem);
- return;
- }
- else if (is_gimple_min_invariant (tem))
- {
- set_value_range_to_value (vr, tem, NULL);
- return;
- }
- }
- /* Then dispatch to value-range extracting functions. */
- if (code == GIMPLE_CALL)
- extract_range_basic (vr, stmt);
- else
- extract_range_from_assignment (vr, as_a <gassign *> (stmt));
- }
-}
-
-/* Helper that gets the value range of the SSA_NAME with version I
- or a symbolic range containing the SSA_NAME only if the value range
- is varying or undefined. */
-
-static inline value_range
-get_vr_for_comparison (int i)
-{
- value_range vr = *get_value_range (ssa_name (i));
-
- /* If name N_i does not have a valid range, use N_i as its own
- range. This allows us to compare against names that may
- have N_i in their ranges. */
- if (vr.type == VR_VARYING || vr.type == VR_UNDEFINED)
- {
- vr.type = VR_RANGE;
- vr.min = ssa_name (i);
- vr.max = ssa_name (i);
- }
-
- return vr;
-}
-
-/* Compare all the value ranges for names equivalent to VAR with VAL
- using comparison code COMP. Return the same value returned by
- compare_range_with_value, including the setting of
- *STRICT_OVERFLOW_P. */
-
-static tree
-compare_name_with_value (enum tree_code comp, tree var, tree val,
- bool *strict_overflow_p, bool use_equiv_p)
-{
- bitmap_iterator bi;
- unsigned i;
- bitmap e;
- tree retval, t;
- int used_strict_overflow;
- bool sop;
- value_range equiv_vr;
-
- /* Get the set of equivalences for VAR. */
- e = get_value_range (var)->equiv;
-
- /* Start at -1. Set it to 0 if we do a comparison without relying
- on overflow, or 1 if all comparisons rely on overflow. */
- used_strict_overflow = -1;
-
- /* Compare vars' value range with val. */
- equiv_vr = get_vr_for_comparison (SSA_NAME_VERSION (var));
- sop = false;
- retval = compare_range_with_value (comp, &equiv_vr, val, &sop);
- if (retval)
- used_strict_overflow = sop ? 1 : 0;
-
- /* If the equiv set is empty we have done all work we need to do. */
- if (e == NULL)
- {
- if (retval
- && used_strict_overflow > 0)
- *strict_overflow_p = true;
- return retval;
- }
-
- EXECUTE_IF_SET_IN_BITMAP (e, 0, i, bi)
- {
- tree name = ssa_name (i);
- if (! name)
- continue;
-
- if (! use_equiv_p
- && ! SSA_NAME_IS_DEFAULT_DEF (name)
- && prop_simulate_again_p (SSA_NAME_DEF_STMT (name)))
- continue;
-
- equiv_vr = get_vr_for_comparison (i);
- sop = false;
- t = compare_range_with_value (comp, &equiv_vr, val, &sop);
- if (t)
- {
- /* If we get different answers from different members
- of the equivalence set this check must be in a dead
- code region. Folding it to a trap representation
- would be correct here. For now just return don't-know. */
- if (retval != NULL
- && t != retval)
- {
- retval = NULL_TREE;
- break;
- }
- retval = t;
-
- if (!sop)
- used_strict_overflow = 0;
- else if (used_strict_overflow < 0)
- used_strict_overflow = 1;
- }
- }
-
- if (retval
- && used_strict_overflow > 0)
- *strict_overflow_p = true;
-
- return retval;
-}
-
-
-/* Given a comparison code COMP and names N1 and N2, compare all the
- ranges equivalent to N1 against all the ranges equivalent to N2
- to determine the value of N1 COMP N2. Return the same value
- returned by compare_ranges. Set *STRICT_OVERFLOW_P to indicate
- whether we relied on undefined signed overflow in the comparison. */
-
-
-static tree
-compare_names (enum tree_code comp, tree n1, tree n2,
- bool *strict_overflow_p)
-{
- tree t, retval;
- bitmap e1, e2;
- bitmap_iterator bi1, bi2;
- unsigned i1, i2;
- int used_strict_overflow;
- static bitmap_obstack *s_obstack = NULL;
- static bitmap s_e1 = NULL, s_e2 = NULL;
-
- /* Compare the ranges of every name equivalent to N1 against the
- ranges of every name equivalent to N2. */
- e1 = get_value_range (n1)->equiv;
- e2 = get_value_range (n2)->equiv;
-
- /* Use the fake bitmaps if e1 or e2 are not available. */
- if (s_obstack == NULL)
- {
- s_obstack = XNEW (bitmap_obstack);
- bitmap_obstack_initialize (s_obstack);
- s_e1 = BITMAP_ALLOC (s_obstack);
- s_e2 = BITMAP_ALLOC (s_obstack);
- }
- if (e1 == NULL)
- e1 = s_e1;
- if (e2 == NULL)
- e2 = s_e2;
-
- /* Add N1 and N2 to their own set of equivalences to avoid
- duplicating the body of the loop just to check N1 and N2
- ranges. */
- bitmap_set_bit (e1, SSA_NAME_VERSION (n1));
- bitmap_set_bit (e2, SSA_NAME_VERSION (n2));
-
- /* If the equivalence sets have a common intersection, then the two
- names can be compared without checking their ranges. */
- if (bitmap_intersect_p (e1, e2))
- {
- bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
- bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
-
- return (comp == EQ_EXPR || comp == GE_EXPR || comp == LE_EXPR)
- ? boolean_true_node
- : boolean_false_node;
- }
-
- /* Start at -1. Set it to 0 if we do a comparison without relying
- on overflow, or 1 if all comparisons rely on overflow. */
- used_strict_overflow = -1;
-
- /* Otherwise, compare all the equivalent ranges. First, add N1 and
- N2 to their own set of equivalences to avoid duplicating the body
- of the loop just to check N1 and N2 ranges. */
- EXECUTE_IF_SET_IN_BITMAP (e1, 0, i1, bi1)
- {
- if (! ssa_name (i1))
- continue;
-
- value_range vr1 = get_vr_for_comparison (i1);
-
- t = retval = NULL_TREE;
- EXECUTE_IF_SET_IN_BITMAP (e2, 0, i2, bi2)
- {
- if (! ssa_name (i2))
- continue;
-
- bool sop = false;
-
- value_range vr2 = get_vr_for_comparison (i2);
-
- t = compare_ranges (comp, &vr1, &vr2, &sop);
- if (t)
- {
- /* If we get different answers from different members
- of the equivalence set this check must be in a dead
- code region. Folding it to a trap representation
- would be correct here. For now just return don't-know. */
- if (retval != NULL
- && t != retval)
- {
- bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
- bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
- return NULL_TREE;
- }
- retval = t;
-
- if (!sop)
- used_strict_overflow = 0;
- else if (used_strict_overflow < 0)
- used_strict_overflow = 1;
- }
- }
-
- if (retval)
- {
- bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
- bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
- if (used_strict_overflow > 0)
- *strict_overflow_p = true;
- return retval;
- }
- }
-
- /* None of the equivalent ranges are useful in computing this
- comparison. */
- bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
- bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
- return NULL_TREE;
-}
-
-/* Helper function for vrp_evaluate_conditional_warnv & other
- optimizers. */
-
-static tree
-vrp_evaluate_conditional_warnv_with_ops_using_ranges (enum tree_code code,
- tree op0, tree op1,
- bool * strict_overflow_p)
-{
- value_range *vr0, *vr1;
-
- vr0 = (TREE_CODE (op0) == SSA_NAME) ? get_value_range (op0) : NULL;
- vr1 = (TREE_CODE (op1) == SSA_NAME) ? get_value_range (op1) : NULL;
-
- tree res = NULL_TREE;
- if (vr0 && vr1)
- res = compare_ranges (code, vr0, vr1, strict_overflow_p);
- if (!res && vr0)
- res = compare_range_with_value (code, vr0, op1, strict_overflow_p);
- if (!res && vr1)
- res = (compare_range_with_value
- (swap_tree_comparison (code), vr1, op0, strict_overflow_p));
- return res;
-}
-
-/* Helper function for vrp_evaluate_conditional_warnv. */
-
-static tree
-vrp_evaluate_conditional_warnv_with_ops (enum tree_code code, tree op0,
- tree op1, bool use_equiv_p,
- bool *strict_overflow_p, bool *only_ranges)
-{
- tree ret;
- if (only_ranges)
- *only_ranges = true;
-
- /* We only deal with integral and pointer types. */
- if (!INTEGRAL_TYPE_P (TREE_TYPE (op0))
- && !POINTER_TYPE_P (TREE_TYPE (op0)))
- return NULL_TREE;
-
- /* If OP0 CODE OP1 is an overflow comparison, if it can be expressed
- as a simple equality test, then prefer that over its current form
- for evaluation.
-
- An overflow test which collapses to an equality test can always be
- expressed as a comparison of one argument against zero. Overflow
- occurs when the chosen argument is zero and does not occur if the
- chosen argument is not zero. */
- tree x;
- if (overflow_comparison_p (code, op0, op1, use_equiv_p, &x))
- {
- wide_int max = wi::max_value (TYPE_PRECISION (TREE_TYPE (op0)), UNSIGNED);
- /* B = A - 1; if (A < B) -> B = A - 1; if (A == 0)
- B = A - 1; if (A > B) -> B = A - 1; if (A != 0)
- B = A + 1; if (B < A) -> B = A + 1; if (B == 0)
- B = A + 1; if (B > A) -> B = A + 1; if (B != 0) */
- if (integer_zerop (x))
- {
- op1 = x;
- code = (code == LT_EXPR || code == LE_EXPR) ? EQ_EXPR : NE_EXPR;
- }
- /* B = A + 1; if (A > B) -> B = A + 1; if (B == 0)
- B = A + 1; if (A < B) -> B = A + 1; if (B != 0)
- B = A - 1; if (B > A) -> B = A - 1; if (A == 0)
- B = A - 1; if (B < A) -> B = A - 1; if (A != 0) */
- else if (wi::to_wide (x) == max - 1)
- {
- op0 = op1;
- op1 = wide_int_to_tree (TREE_TYPE (op0), 0);
- code = (code == GT_EXPR || code == GE_EXPR) ? EQ_EXPR : NE_EXPR;
- }
- }
-
- if ((ret = vrp_evaluate_conditional_warnv_with_ops_using_ranges
- (code, op0, op1, strict_overflow_p)))
- return ret;
- if (only_ranges)
- *only_ranges = false;
- /* Do not use compare_names during propagation, it's quadratic. */
- if (TREE_CODE (op0) == SSA_NAME && TREE_CODE (op1) == SSA_NAME
- && use_equiv_p)
- return compare_names (code, op0, op1, strict_overflow_p);
- else if (TREE_CODE (op0) == SSA_NAME)
- return compare_name_with_value (code, op0, op1,
- strict_overflow_p, use_equiv_p);
- else if (TREE_CODE (op1) == SSA_NAME)
- return compare_name_with_value (swap_tree_comparison (code), op1, op0,
- strict_overflow_p, use_equiv_p);
- return NULL_TREE;
-}
-
-/* Given (CODE OP0 OP1) within STMT, try to simplify it based on value range
- information. Return NULL if the conditional can not be evaluated.
- The ranges of all the names equivalent with the operands in COND
- will be used when trying to compute the value. If the result is
- based on undefined signed overflow, issue a warning if
- appropriate. */
-
-static tree
-vrp_evaluate_conditional (tree_code code, tree op0, tree op1, gimple *stmt)
-{
- bool sop;
- tree ret;
- bool only_ranges;
-
- /* Some passes and foldings leak constants with overflow flag set
- into the IL. Avoid doing wrong things with these and bail out. */
- if ((TREE_CODE (op0) == INTEGER_CST
- && TREE_OVERFLOW (op0))
- || (TREE_CODE (op1) == INTEGER_CST
- && TREE_OVERFLOW (op1)))
- return NULL_TREE;
-
- sop = false;
- ret = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, true, &sop,
- &only_ranges);
-
- if (ret && sop)
- {
- enum warn_strict_overflow_code wc;
- const char* warnmsg;
-
- if (is_gimple_min_invariant (ret))
- {
- wc = WARN_STRICT_OVERFLOW_CONDITIONAL;
- warnmsg = G_("assuming signed overflow does not occur when "
- "simplifying conditional to constant");
- }
- else
- {
- wc = WARN_STRICT_OVERFLOW_COMPARISON;
- warnmsg = G_("assuming signed overflow does not occur when "
- "simplifying conditional");
- }
-
- if (issue_strict_overflow_warning (wc))
- {
- location_t location;
-
- if (!gimple_has_location (stmt))
- location = input_location;
- else
- location = gimple_location (stmt);
- warning_at (location, OPT_Wstrict_overflow, "%s", warnmsg);
- }
- }
-
- if (warn_type_limits
- && ret && only_ranges
- && TREE_CODE_CLASS (code) == tcc_comparison
- && TREE_CODE (op0) == SSA_NAME)
- {
- /* If the comparison is being folded and the operand on the LHS
- is being compared against a constant value that is outside of
- the natural range of OP0's type, then the predicate will
- always fold regardless of the value of OP0. If -Wtype-limits
- was specified, emit a warning. */
- tree type = TREE_TYPE (op0);
- value_range *vr0 = get_value_range (op0);
-
- if (vr0->type == VR_RANGE
- && INTEGRAL_TYPE_P (type)
- && vrp_val_is_min (vr0->min)
- && vrp_val_is_max (vr0->max)
- && is_gimple_min_invariant (op1))
- {
- location_t location;
-
- if (!gimple_has_location (stmt))
- location = input_location;
- else
- location = gimple_location (stmt);
-
- warning_at (location, OPT_Wtype_limits,
- integer_zerop (ret)
- ? G_("comparison always false "
- "due to limited range of data type")
- : G_("comparison always true "
- "due to limited range of data type"));
- }
- }
-
- return ret;
-}
-
-
-/* Visit conditional statement STMT. If we can determine which edge
- will be taken out of STMT's basic block, record it in
- *TAKEN_EDGE_P. Otherwise, set *TAKEN_EDGE_P to NULL. */
-
-static void
-vrp_visit_cond_stmt (gcond *stmt, edge *taken_edge_p)
-{
- tree val;
-
- *taken_edge_p = NULL;
-
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- tree use;
- ssa_op_iter i;
-
- fprintf (dump_file, "\nVisiting conditional with predicate: ");
- print_gimple_stmt (dump_file, stmt, 0);
- fprintf (dump_file, "\nWith known ranges\n");
-
- FOR_EACH_SSA_TREE_OPERAND (use, stmt, i, SSA_OP_USE)
- {
- fprintf (dump_file, "\t");
- print_generic_expr (dump_file, use);
- fprintf (dump_file, ": ");
- dump_value_range (dump_file, vr_value[SSA_NAME_VERSION (use)]);
- }
-
- fprintf (dump_file, "\n");
- }
-
- /* Compute the value of the predicate COND by checking the known
- ranges of each of its operands.
-
- Note that we cannot evaluate all the equivalent ranges here
- because those ranges may not yet be final and with the current
- propagation strategy, we cannot determine when the value ranges
- of the names in the equivalence set have changed.
-
- For instance, given the following code fragment
-
- i_5 = PHI <8, i_13>
- ...
- i_14 = ASSERT_EXPR <i_5, i_5 != 0>
- if (i_14 == 1)
- ...
-
- Assume that on the first visit to i_14, i_5 has the temporary
- range [8, 8] because the second argument to the PHI function is
- not yet executable. We derive the range ~[0, 0] for i_14 and the
- equivalence set { i_5 }. So, when we visit 'if (i_14 == 1)' for
- the first time, since i_14 is equivalent to the range [8, 8], we
- determine that the predicate is always false.
-
- On the next round of propagation, i_13 is determined to be
- VARYING, which causes i_5 to drop down to VARYING. So, another
- visit to i_14 is scheduled. In this second visit, we compute the
- exact same range and equivalence set for i_14, namely ~[0, 0] and
- { i_5 }. But we did not have the previous range for i_5
- registered, so vrp_visit_assignment thinks that the range for
- i_14 has not changed. Therefore, the predicate 'if (i_14 == 1)'
- is not visited again, which stops propagation from visiting
- statements in the THEN clause of that if().
-
- To properly fix this we would need to keep the previous range
- value for the names in the equivalence set. This way we would've
- discovered that from one visit to the other i_5 changed from
- range [8, 8] to VR_VARYING.
-
- However, fixing this apparent limitation may not be worth the
- additional checking. Testing on several code bases (GCC, DLV,
- MICO, TRAMP3D and SPEC2000) showed that doing this results in
- 4 more predicates folded in SPEC. */
-
- bool sop;
- val = vrp_evaluate_conditional_warnv_with_ops (gimple_cond_code (stmt),
- gimple_cond_lhs (stmt),
- gimple_cond_rhs (stmt),
- false, &sop, NULL);
- if (val)
- *taken_edge_p = find_taken_edge (gimple_bb (stmt), val);
-
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "\nPredicate evaluates to: ");
- if (val == NULL_TREE)
- fprintf (dump_file, "DON'T KNOW\n");
- else
- print_generic_stmt (dump_file, val);
- }
-}
-
/* Searches the case label vector VEC for the index *IDX of the CASE_LABEL
that includes the value VAL. The search is restricted to the range
[START_IDX, n - 1] where n is the size of VEC.
@@ -7786,7 +5316,7 @@ vrp_visit_cond_stmt (gcond *stmt, edge *taken_edge_p)
If VAL is larger than any CASE_LABEL, n is placed on IDX and false is
returned. */
-static bool
+bool
find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx)
{
size_t n = gimple_switch_num_labels (stmt);
@@ -7836,7 +5366,7 @@ find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx)
then MAX_IDX < MIN_IDX.
Returns true if the default label is not needed. */
-static bool
+bool
find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
size_t *max_idx)
{
@@ -7887,208 +5417,6 @@ find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
}
}
-/* Searches the case label vector VEC for the ranges of CASE_LABELs that are
- used in range VR. The indices are placed in MIN_IDX1, MAX_IDX, MIN_IDX2 and
- MAX_IDX2. If the ranges of CASE_LABELs are empty then MAX_IDX1 < MIN_IDX1.
- Returns true if the default label is not needed. */
-
-static bool
-find_case_label_ranges (gswitch *stmt, value_range *vr, size_t *min_idx1,
- size_t *max_idx1, size_t *min_idx2,
- size_t *max_idx2)
-{
- size_t i, j, k, l;
- unsigned int n = gimple_switch_num_labels (stmt);
- bool take_default;
- tree case_low, case_high;
- tree min = vr->min, max = vr->max;
-
- gcc_checking_assert (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE);
-
- take_default = !find_case_label_range (stmt, min, max, &i, &j);
-
- /* Set second range to emtpy. */
- *min_idx2 = 1;
- *max_idx2 = 0;
-
- if (vr->type == VR_RANGE)
- {
- *min_idx1 = i;
- *max_idx1 = j;
- return !take_default;
- }
-
- /* Set first range to all case labels. */
- *min_idx1 = 1;
- *max_idx1 = n - 1;
-
- if (i > j)
- return false;
-
- /* Make sure all the values of case labels [i , j] are contained in
- range [MIN, MAX]. */
- case_low = CASE_LOW (gimple_switch_label (stmt, i));
- case_high = CASE_HIGH (gimple_switch_label (stmt, j));
- if (tree_int_cst_compare (case_low, min) < 0)
- i += 1;
- if (case_high != NULL_TREE
- && tree_int_cst_compare (max, case_high) < 0)
- j -= 1;
-
- if (i > j)
- return false;
-
- /* If the range spans case labels [i, j], the corresponding anti-range spans
- the labels [1, i - 1] and [j + 1, n - 1]. */
- k = j + 1;
- l = n - 1;
- if (k > l)
- {
- k = 1;
- l = 0;
- }
-
- j = i - 1;
- i = 1;
- if (i > j)
- {
- i = k;
- j = l;
- k = 1;
- l = 0;
- }
-
- *min_idx1 = i;
- *max_idx1 = j;
- *min_idx2 = k;
- *max_idx2 = l;
- return false;
-}
-
-/* Visit switch statement STMT. If we can determine which edge
- will be taken out of STMT's basic block, record it in
- *TAKEN_EDGE_P. Otherwise, *TAKEN_EDGE_P set to NULL. */
-
-static void
-vrp_visit_switch_stmt (gswitch *stmt, edge *taken_edge_p)
-{
- tree op, val;
- value_range *vr;
- size_t i = 0, j = 0, k, l;
- bool take_default;
-
- *taken_edge_p = NULL;
- op = gimple_switch_index (stmt);
- if (TREE_CODE (op) != SSA_NAME)
- return;
-
- vr = get_value_range (op);
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "\nVisiting switch expression with operand ");
- print_generic_expr (dump_file, op);
- fprintf (dump_file, " with known range ");
- dump_value_range (dump_file, vr);
- fprintf (dump_file, "\n");
- }
-
- if ((vr->type != VR_RANGE
- && vr->type != VR_ANTI_RANGE)
- || symbolic_range_p (vr))
- return;
-
- /* Find the single edge that is taken from the switch expression. */
- take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);
-
- /* Check if the range spans no CASE_LABEL. If so, we only reach the default
- label */
- if (j < i)
- {
- gcc_assert (take_default);
- val = gimple_switch_default_label (stmt);
- }
- else
- {
- /* Check if labels with index i to j and maybe the default label
- are all reaching the same label. */
-
- val = gimple_switch_label (stmt, i);
- if (take_default
- && CASE_LABEL (gimple_switch_default_label (stmt))
- != CASE_LABEL (val))
- {
- if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file, " not a single destination for this "
- "range\n");
- return;
- }
- for (++i; i <= j; ++i)
- {
- if (CASE_LABEL (gimple_switch_label (stmt, i)) != CASE_LABEL (val))
- {
- if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file, " not a single destination for this "
- "range\n");
- return;
- }
- }
- for (; k <= l; ++k)
- {
- if (CASE_LABEL (gimple_switch_label (stmt, k)) != CASE_LABEL (val))
- {
- if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file, " not a single destination for this "
- "range\n");
- return;
- }
- }
- }
-
- *taken_edge_p = find_edge (gimple_bb (stmt),
- label_to_block (CASE_LABEL (val)));
-
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, " will take edge to ");
- print_generic_stmt (dump_file, CASE_LABEL (val));
- }
-}
-
-
-/* Evaluate statement STMT. If the statement produces a useful range,
- set VR and corepsponding OUTPUT_P.
-
- If STMT is a conditional branch and we can determine its truth
- value, the taken edge is recorded in *TAKEN_EDGE_P. */
-
-static void
-extract_range_from_stmt (gimple *stmt, edge *taken_edge_p,
- tree *output_p, value_range *vr)
-{
-
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "\nVisiting statement:\n");
- print_gimple_stmt (dump_file, stmt, 0, dump_flags);
- }
-
- if (!stmt_interesting_for_vrp (stmt))
- gcc_assert (stmt_ends_bb_p (stmt));
- else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
- vrp_visit_assignment_or_call (stmt, output_p, vr);
- else if (gimple_code (stmt) == GIMPLE_COND)
- vrp_visit_cond_stmt (as_a <gcond *> (stmt), taken_edge_p);
- else if (gimple_code (stmt) == GIMPLE_SWITCH)
- vrp_visit_switch_stmt (as_a <gswitch *> (stmt), taken_edge_p);
-}
-
-class vrp_prop : public ssa_propagation_engine
-{
- public:
- enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) FINAL OVERRIDE;
- enum ssa_prop_result visit_phi (gphi *) FINAL OVERRIDE;
-};
-
/* Evaluate statement STMT. If the statement produces a useful range,
return SSA_PROP_INTERESTING and record the SSA name with the
interesting range into *OUTPUT_P.
@@ -8847,7 +6175,9 @@ vrp_intersect_ranges_1 (value_range *vr0, value_range *vr1)
bitmap_ior_into (vr0->equiv, vr1->equiv);
else if (vr1->equiv && !vr0->equiv)
{
- vr0->equiv = BITMAP_ALLOC (&vrp_equiv_obstack);
+ /* All equivalence bitmaps are allocated from the same obstack. So
+ we can use the obstack associated with VR to allocate vr0->equiv. */
+ vr0->equiv = BITMAP_ALLOC (vr1->equiv->obstack);
bitmap_copy (vr0->equiv, vr1->equiv);
}
}
@@ -8972,222 +6302,6 @@ vrp_meet (value_range *vr0, const value_range *vr1)
/* Visit all arguments for PHI node PHI that flow through executable
edges. If a valid value range can be derived from all the incoming
- value ranges, set a new range in VR_RESULT. */
-
-static void
-extract_range_from_phi_node (gphi *phi, value_range *vr_result)
-{
- size_t i;
- tree lhs = PHI_RESULT (phi);
- value_range *lhs_vr = get_value_range (lhs);
- bool first = true;
- int edges, old_edges;
- struct loop *l;
-
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "\nVisiting PHI node: ");
- print_gimple_stmt (dump_file, phi, 0, dump_flags);
- }
-
- bool may_simulate_backedge_again = false;
- edges = 0;
- for (i = 0; i < gimple_phi_num_args (phi); i++)
- {
- edge e = gimple_phi_arg_edge (phi, i);
-
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file,
- " Argument #%d (%d -> %d %sexecutable)\n",
- (int) i, e->src->index, e->dest->index,
- (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
- }
-
- if (e->flags & EDGE_EXECUTABLE)
- {
- tree arg = PHI_ARG_DEF (phi, i);
- value_range vr_arg;
-
- ++edges;
-
- if (TREE_CODE (arg) == SSA_NAME)
- {
- /* See if we are eventually going to change one of the args. */
- gimple *def_stmt = SSA_NAME_DEF_STMT (arg);
- if (! gimple_nop_p (def_stmt)
- && prop_simulate_again_p (def_stmt)
- && e->flags & EDGE_DFS_BACK)
- may_simulate_backedge_again = true;
-
- vr_arg = *(get_value_range (arg));
- /* Do not allow equivalences or symbolic ranges to leak in from
- backedges. That creates invalid equivalencies.
- See PR53465 and PR54767. */
- if (e->flags & EDGE_DFS_BACK)
- {
- if (vr_arg.type == VR_RANGE
- || vr_arg.type == VR_ANTI_RANGE)
- {
- vr_arg.equiv = NULL;
- if (symbolic_range_p (&vr_arg))
- {
- vr_arg.type = VR_VARYING;
- vr_arg.min = NULL_TREE;
- vr_arg.max = NULL_TREE;
- }
- }
- }
- else
- {
- /* If the non-backedge arguments range is VR_VARYING then
- we can still try recording a simple equivalence. */
- if (vr_arg.type == VR_VARYING)
- {
- vr_arg.type = VR_RANGE;
- vr_arg.min = arg;
- vr_arg.max = arg;
- vr_arg.equiv = NULL;
- }
- }
- }
- else
- {
- if (TREE_OVERFLOW_P (arg))
- arg = drop_tree_overflow (arg);
-
- vr_arg.type = VR_RANGE;
- vr_arg.min = arg;
- vr_arg.max = arg;
- vr_arg.equiv = NULL;
- }
-
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "\t");
- print_generic_expr (dump_file, arg, dump_flags);
- fprintf (dump_file, ": ");
- dump_value_range (dump_file, &vr_arg);
- fprintf (dump_file, "\n");
- }
-
- if (first)
- copy_value_range (vr_result, &vr_arg);
- else
- vrp_meet (vr_result, &vr_arg);
- first = false;
-
- if (vr_result->type == VR_VARYING)
- break;
- }
- }
-
- if (vr_result->type == VR_VARYING)
- goto varying;
- else if (vr_result->type == VR_UNDEFINED)
- goto update_range;
-
- old_edges = vr_phi_edge_counts[SSA_NAME_VERSION (lhs)];
- vr_phi_edge_counts[SSA_NAME_VERSION (lhs)] = edges;
-
- /* To prevent infinite iterations in the algorithm, derive ranges
- when the new value is slightly bigger or smaller than the
- previous one. We don't do this if we have seen a new executable
- edge; this helps us avoid an infinity for conditionals
- which are not in a loop. If the old value-range was VR_UNDEFINED
- use the updated range and iterate one more time. If we will not
- simulate this PHI again via the backedge allow us to iterate. */
- if (edges > 0
- && gimple_phi_num_args (phi) > 1
- && edges == old_edges
- && lhs_vr->type != VR_UNDEFINED
- && may_simulate_backedge_again)
- {
- /* Compare old and new ranges, fall back to varying if the
- values are not comparable. */
- int cmp_min = compare_values (lhs_vr->min, vr_result->min);
- if (cmp_min == -2)
- goto varying;
- int cmp_max = compare_values (lhs_vr->max, vr_result->max);
- if (cmp_max == -2)
- goto varying;
-
- /* For non VR_RANGE or for pointers fall back to varying if
- the range changed. */
- if ((lhs_vr->type != VR_RANGE || vr_result->type != VR_RANGE
- || POINTER_TYPE_P (TREE_TYPE (lhs)))
- && (cmp_min != 0 || cmp_max != 0))
- goto varying;
-
- /* If the new minimum is larger than the previous one
- retain the old value. If the new minimum value is smaller
- than the previous one and not -INF go all the way to -INF + 1.
- In the first case, to avoid infinite bouncing between different
- minimums, and in the other case to avoid iterating millions of
- times to reach -INF. Going to -INF + 1 also lets the following
- iteration compute whether there will be any overflow, at the
- expense of one additional iteration. */
- if (cmp_min < 0)
- vr_result->min = lhs_vr->min;
- else if (cmp_min > 0
- && !vrp_val_is_min (vr_result->min))
- vr_result->min
- = int_const_binop (PLUS_EXPR,
- vrp_val_min (TREE_TYPE (vr_result->min)),
- build_int_cst (TREE_TYPE (vr_result->min), 1));
-
- /* Similarly for the maximum value. */
- if (cmp_max > 0)
- vr_result->max = lhs_vr->max;
- else if (cmp_max < 0
- && !vrp_val_is_max (vr_result->max))
- vr_result->max
- = int_const_binop (MINUS_EXPR,
- vrp_val_max (TREE_TYPE (vr_result->min)),
- build_int_cst (TREE_TYPE (vr_result->min), 1));
-
- /* If we dropped either bound to +-INF then if this is a loop
- PHI node SCEV may known more about its value-range. */
- if (cmp_min > 0 || cmp_min < 0
- || cmp_max < 0 || cmp_max > 0)
- goto scev_check;
-
- goto infinite_check;
- }
-
- goto update_range;
-
-varying:
- set_value_range_to_varying (vr_result);
-
-scev_check:
- /* If this is a loop PHI node SCEV may known more about its value-range.
- scev_check can be reached from two paths, one is a fall through from above
- "varying" label, the other is direct goto from code block which tries to
- avoid infinite simulation. */
- if ((l = loop_containing_stmt (phi))
- && l->header == gimple_bb (phi))
- adjust_range_with_scev (vr_result, l, phi, lhs);
-
-infinite_check:
- /* If we will end up with a (-INF, +INF) range, set it to
- VARYING. Same if the previous max value was invalid for
- the type and we end up with vr_result.min > vr_result.max. */
- if ((vr_result->type == VR_RANGE || vr_result->type == VR_ANTI_RANGE)
- && !((vrp_val_is_max (vr_result->max) && vrp_val_is_min (vr_result->min))
- || compare_values (vr_result->min, vr_result->max) > 0))
- ;
- else
- set_value_range_to_varying (vr_result);
-
- /* If the new range is different than the previous value, keep
- iterating. */
-update_range:
- return;
-}
-
-/* Visit all arguments for PHI node PHI that flow through executable
- edges. If a valid value range can be derived from all the incoming
value ranges, set a new range for the LHS of PHI. */
enum ssa_prop_result
@@ -9217,1259 +6331,31 @@ vrp_prop::visit_phi (gphi *phi)
return SSA_PROP_NOT_INTERESTING;
}
-/* Simplify boolean operations if the source is known
- to be already a boolean. */
-static bool
-simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
-{
- enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
- tree lhs, op0, op1;
- bool need_conversion;
-
- /* We handle only !=/== case here. */
- gcc_assert (rhs_code == EQ_EXPR || rhs_code == NE_EXPR);
-
- op0 = gimple_assign_rhs1 (stmt);
- if (!op_with_boolean_value_range_p (op0))
- return false;
-
- op1 = gimple_assign_rhs2 (stmt);
- if (!op_with_boolean_value_range_p (op1))
- return false;
-
- /* Reduce number of cases to handle to NE_EXPR. As there is no
- BIT_XNOR_EXPR we cannot replace A == B with a single statement. */
- if (rhs_code == EQ_EXPR)
- {
- if (TREE_CODE (op1) == INTEGER_CST)
- op1 = int_const_binop (BIT_XOR_EXPR, op1,
- build_int_cst (TREE_TYPE (op1), 1));
- else
- return false;
- }
-
- lhs = gimple_assign_lhs (stmt);
- need_conversion
- = !useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (op0));
-
- /* Make sure to not sign-extend a 1-bit 1 when converting the result. */
- if (need_conversion
- && !TYPE_UNSIGNED (TREE_TYPE (op0))
- && TYPE_PRECISION (TREE_TYPE (op0)) == 1
- && TYPE_PRECISION (TREE_TYPE (lhs)) > 1)
- return false;
-
- /* For A != 0 we can substitute A itself. */
- if (integer_zerop (op1))
- gimple_assign_set_rhs_with_ops (gsi,
- need_conversion
- ? NOP_EXPR : TREE_CODE (op0), op0);
- /* For A != B we substitute A ^ B. Either with conversion. */
- else if (need_conversion)
- {
- tree tem = make_ssa_name (TREE_TYPE (op0));
- gassign *newop
- = gimple_build_assign (tem, BIT_XOR_EXPR, op0, op1);
- gsi_insert_before (gsi, newop, GSI_SAME_STMT);
- if (INTEGRAL_TYPE_P (TREE_TYPE (tem))
- && TYPE_PRECISION (TREE_TYPE (tem)) > 1)
- set_range_info (tem, VR_RANGE,
- wi::zero (TYPE_PRECISION (TREE_TYPE (tem))),
- wi::one (TYPE_PRECISION (TREE_TYPE (tem))));
- gimple_assign_set_rhs_with_ops (gsi, NOP_EXPR, tem);
- }
- /* Or without. */
- else
- gimple_assign_set_rhs_with_ops (gsi, BIT_XOR_EXPR, op0, op1);
- update_stmt (gsi_stmt (*gsi));
- fold_stmt (gsi, follow_single_use_edges);
-
- return true;
-}
-
-/* Simplify a division or modulo operator to a right shift or bitwise and
- if the first operand is unsigned or is greater than zero and the second
- operand is an exact power of two. For TRUNC_MOD_EXPR op0 % op1 with
- constant op1 (op1min = op1) or with op1 in [op1min, op1max] range,
- optimize it into just op0 if op0's range is known to be a subset of
- [-op1min + 1, op1min - 1] for signed and [0, op1min - 1] for unsigned
- modulo. */
-
-static bool
-simplify_div_or_mod_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
-{
- enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
- tree val = NULL;
- tree op0 = gimple_assign_rhs1 (stmt);
- tree op1 = gimple_assign_rhs2 (stmt);
- tree op0min = NULL_TREE, op0max = NULL_TREE;
- tree op1min = op1;
- value_range *vr = NULL;
-
- if (TREE_CODE (op0) == INTEGER_CST)
- {
- op0min = op0;
- op0max = op0;
- }
- else
- {
- vr = get_value_range (op0);
- if (range_int_cst_p (vr))
- {
- op0min = vr->min;
- op0max = vr->max;
- }
- }
-
- if (rhs_code == TRUNC_MOD_EXPR
- && TREE_CODE (op1) == SSA_NAME)
- {
- value_range *vr1 = get_value_range (op1);
- if (range_int_cst_p (vr1))
- op1min = vr1->min;
- }
- if (rhs_code == TRUNC_MOD_EXPR
- && TREE_CODE (op1min) == INTEGER_CST
- && tree_int_cst_sgn (op1min) == 1
- && op0max
- && tree_int_cst_lt (op0max, op1min))
- {
- if (TYPE_UNSIGNED (TREE_TYPE (op0))
- || tree_int_cst_sgn (op0min) >= 0
- || tree_int_cst_lt (fold_unary (NEGATE_EXPR, TREE_TYPE (op1min), op1min),
- op0min))
- {
- /* If op0 already has the range op0 % op1 has,
- then TRUNC_MOD_EXPR won't change anything. */
- gimple_assign_set_rhs_from_tree (gsi, op0);
- return true;
- }
- }
-
- if (TREE_CODE (op0) != SSA_NAME)
- return false;
-
- if (!integer_pow2p (op1))
- {
- /* X % -Y can be only optimized into X % Y either if
- X is not INT_MIN, or Y is not -1. Fold it now, as after
- remove_range_assertions the range info might be not available
- anymore. */
- if (rhs_code == TRUNC_MOD_EXPR
- && fold_stmt (gsi, follow_single_use_edges))
- return true;
- return false;
- }
-
- if (TYPE_UNSIGNED (TREE_TYPE (op0)))
- val = integer_one_node;
- else
- {
- bool sop = false;
-
- val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);
-
- if (val
- && sop
- && integer_onep (val)
- && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
- {
- location_t location;
-
- if (!gimple_has_location (stmt))
- location = input_location;
- else
- location = gimple_location (stmt);
- warning_at (location, OPT_Wstrict_overflow,
- "assuming signed overflow does not occur when "
- "simplifying %</%> or %<%%%> to %<>>%> or %<&%>");
- }
- }
-
- if (val && integer_onep (val))
- {
- tree t;
-
- if (rhs_code == TRUNC_DIV_EXPR)
- {
- t = build_int_cst (integer_type_node, tree_log2 (op1));
- gimple_assign_set_rhs_code (stmt, RSHIFT_EXPR);
- gimple_assign_set_rhs1 (stmt, op0);
- gimple_assign_set_rhs2 (stmt, t);
- }
- else
- {
- t = build_int_cst (TREE_TYPE (op1), 1);
- t = int_const_binop (MINUS_EXPR, op1, t);
- t = fold_convert (TREE_TYPE (op0), t);
-
- gimple_assign_set_rhs_code (stmt, BIT_AND_EXPR);
- gimple_assign_set_rhs1 (stmt, op0);
- gimple_assign_set_rhs2 (stmt, t);
- }
-
- update_stmt (stmt);
- fold_stmt (gsi, follow_single_use_edges);
- return true;
- }
-
- return false;
-}
-
-/* Simplify a min or max if the ranges of the two operands are
- disjoint. Return true if we do simplify. */
-
-static bool
-simplify_min_or_max_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
-{
- tree op0 = gimple_assign_rhs1 (stmt);
- tree op1 = gimple_assign_rhs2 (stmt);
- bool sop = false;
- tree val;
-
- val = (vrp_evaluate_conditional_warnv_with_ops_using_ranges
- (LE_EXPR, op0, op1, &sop));
- if (!val)
- {
- sop = false;
- val = (vrp_evaluate_conditional_warnv_with_ops_using_ranges
- (LT_EXPR, op0, op1, &sop));
- }
-
- if (val)
- {
- if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
- {
- location_t location;
-
- if (!gimple_has_location (stmt))
- location = input_location;
- else
- location = gimple_location (stmt);
- warning_at (location, OPT_Wstrict_overflow,
- "assuming signed overflow does not occur when "
- "simplifying %<min/max (X,Y)%> to %<X%> or %<Y%>");
- }
-
- /* VAL == TRUE -> OP0 < or <= op1
- VAL == FALSE -> OP0 > or >= op1. */
- tree res = ((gimple_assign_rhs_code (stmt) == MAX_EXPR)
- == integer_zerop (val)) ? op0 : op1;
- gimple_assign_set_rhs_from_tree (gsi, res);
- return true;
- }
-
- return false;
-}
-
-/* If the operand to an ABS_EXPR is >= 0, then eliminate the
- ABS_EXPR. If the operand is <= 0, then simplify the
- ABS_EXPR into a NEGATE_EXPR. */
-
-static bool
-simplify_abs_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
-{
- tree op = gimple_assign_rhs1 (stmt);
- value_range *vr = get_value_range (op);
-
- if (vr)
- {
- tree val = NULL;
- bool sop = false;
-
- val = compare_range_with_value (LE_EXPR, vr, integer_zero_node, &sop);
- if (!val)
- {
- /* The range is neither <= 0 nor > 0. Now see if it is
- either < 0 or >= 0. */
- sop = false;
- val = compare_range_with_value (LT_EXPR, vr, integer_zero_node,
- &sop);
- }
-
- if (val)
- {
- if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
- {
- location_t location;
-
- if (!gimple_has_location (stmt))
- location = input_location;
- else
- location = gimple_location (stmt);
- warning_at (location, OPT_Wstrict_overflow,
- "assuming signed overflow does not occur when "
- "simplifying %<abs (X)%> to %<X%> or %<-X%>");
- }
-
- gimple_assign_set_rhs1 (stmt, op);
- if (integer_zerop (val))
- gimple_assign_set_rhs_code (stmt, SSA_NAME);
- else
- gimple_assign_set_rhs_code (stmt, NEGATE_EXPR);
- update_stmt (stmt);
- fold_stmt (gsi, follow_single_use_edges);
- return true;
- }
- }
-
- return false;
-}
-
-/* Optimize away redundant BIT_AND_EXPR and BIT_IOR_EXPR.
- If all the bits that are being cleared by & are already
- known to be zero from VR, or all the bits that are being
- set by | are already known to be one from VR, the bit
- operation is redundant. */
-
-static bool
-simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
-{
- tree op0 = gimple_assign_rhs1 (stmt);
- tree op1 = gimple_assign_rhs2 (stmt);
- tree op = NULL_TREE;
- value_range vr0 = VR_INITIALIZER;
- value_range vr1 = VR_INITIALIZER;
- wide_int may_be_nonzero0, may_be_nonzero1;
- wide_int must_be_nonzero0, must_be_nonzero1;
- wide_int mask;
-
- if (TREE_CODE (op0) == SSA_NAME)
- vr0 = *(get_value_range (op0));
- else if (is_gimple_min_invariant (op0))
- set_value_range_to_value (&vr0, op0, NULL);
- else
- return false;
-
- if (TREE_CODE (op1) == SSA_NAME)
- vr1 = *(get_value_range (op1));
- else if (is_gimple_min_invariant (op1))
- set_value_range_to_value (&vr1, op1, NULL);
- else
- return false;
-
- if (!zero_nonzero_bits_from_vr (TREE_TYPE (op0), &vr0, &may_be_nonzero0,
- &must_be_nonzero0))
- return false;
- if (!zero_nonzero_bits_from_vr (TREE_TYPE (op1), &vr1, &may_be_nonzero1,
- &must_be_nonzero1))
- return false;
-
- switch (gimple_assign_rhs_code (stmt))
- {
- case BIT_AND_EXPR:
- mask = wi::bit_and_not (may_be_nonzero0, must_be_nonzero1);
- if (mask == 0)
- {
- op = op0;
- break;
- }
- mask = wi::bit_and_not (may_be_nonzero1, must_be_nonzero0);
- if (mask == 0)
- {
- op = op1;
- break;
- }
- break;
- case BIT_IOR_EXPR:
- mask = wi::bit_and_not (may_be_nonzero0, must_be_nonzero1);
- if (mask == 0)
- {
- op = op1;
- break;
- }
- mask = wi::bit_and_not (may_be_nonzero1, must_be_nonzero0);
- if (mask == 0)
- {
- op = op0;
- break;
- }
- break;
- default:
- gcc_unreachable ();
- }
-
- if (op == NULL_TREE)
- return false;
-
- gimple_assign_set_rhs_with_ops (gsi, TREE_CODE (op), op);
- update_stmt (gsi_stmt (*gsi));
- return true;
-}
-
-/* We are comparing trees OP0 and OP1 using COND_CODE. OP0 has
- a known value range VR.
-
- If there is one and only one value which will satisfy the
- conditional, then return that value. Else return NULL.
-
- If signed overflow must be undefined for the value to satisfy
- the conditional, then set *STRICT_OVERFLOW_P to true. */
-
-static tree
-test_for_singularity (enum tree_code cond_code, tree op0,
- tree op1, value_range *vr)
-{
- tree min = NULL;
- tree max = NULL;
-
- /* Extract minimum/maximum values which satisfy the conditional as it was
- written. */
- if (cond_code == LE_EXPR || cond_code == LT_EXPR)
- {
- min = TYPE_MIN_VALUE (TREE_TYPE (op0));
-
- max = op1;
- if (cond_code == LT_EXPR)
- {
- tree one = build_int_cst (TREE_TYPE (op0), 1);
- max = fold_build2 (MINUS_EXPR, TREE_TYPE (op0), max, one);
- /* Signal to compare_values_warnv this expr doesn't overflow. */
- if (EXPR_P (max))
- TREE_NO_WARNING (max) = 1;
- }
- }
- else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
- {
- max = TYPE_MAX_VALUE (TREE_TYPE (op0));
-
- min = op1;
- if (cond_code == GT_EXPR)
- {
- tree one = build_int_cst (TREE_TYPE (op0), 1);
- min = fold_build2 (PLUS_EXPR, TREE_TYPE (op0), min, one);
- /* Signal to compare_values_warnv this expr doesn't overflow. */
- if (EXPR_P (min))
- TREE_NO_WARNING (min) = 1;
- }
- }
-
- /* Now refine the minimum and maximum values using any
- value range information we have for op0. */
- if (min && max)
- {
- if (compare_values (vr->min, min) == 1)
- min = vr->min;
- if (compare_values (vr->max, max) == -1)
- max = vr->max;
-
- /* If the new min/max values have converged to a single value,
- then there is only one value which can satisfy the condition,
- return that value. */
- if (operand_equal_p (min, max, 0) && is_gimple_min_invariant (min))
- return min;
- }
- return NULL;
-}
-
-/* Return whether the value range *VR fits in an integer type specified
- by PRECISION and UNSIGNED_P. */
-
-static bool
-range_fits_type_p (value_range *vr, unsigned dest_precision, signop dest_sgn)
-{
- tree src_type;
- unsigned src_precision;
- widest_int tem;
- signop src_sgn;
-
- /* We can only handle integral and pointer types. */
- src_type = TREE_TYPE (vr->min);
- if (!INTEGRAL_TYPE_P (src_type)
- && !POINTER_TYPE_P (src_type))
- return false;
-
- /* An extension is fine unless VR is SIGNED and dest_sgn is UNSIGNED,
- and so is an identity transform. */
- src_precision = TYPE_PRECISION (TREE_TYPE (vr->min));
- src_sgn = TYPE_SIGN (src_type);
- if ((src_precision < dest_precision
- && !(dest_sgn == UNSIGNED && src_sgn == SIGNED))
- || (src_precision == dest_precision && src_sgn == dest_sgn))
- return true;
-
- /* Now we can only handle ranges with constant bounds. */
- if (vr->type != VR_RANGE
- || TREE_CODE (vr->min) != INTEGER_CST
- || TREE_CODE (vr->max) != INTEGER_CST)
- return false;
-
- /* For sign changes, the MSB of the wide_int has to be clear.
- An unsigned value with its MSB set cannot be represented by
- a signed wide_int, while a negative value cannot be represented
- by an unsigned wide_int. */
- if (src_sgn != dest_sgn
- && (wi::lts_p (wi::to_wide (vr->min), 0)
- || wi::lts_p (wi::to_wide (vr->max), 0)))
- return false;
-
- /* Then we can perform the conversion on both ends and compare
- the result for equality. */
- tem = wi::ext (wi::to_widest (vr->min), dest_precision, dest_sgn);
- if (tem != wi::to_widest (vr->min))
- return false;
- tem = wi::ext (wi::to_widest (vr->max), dest_precision, dest_sgn);
- if (tem != wi::to_widest (vr->max))
- return false;
-
- return true;
-}
-
-/* Simplify a conditional using a relational operator to an equality
- test if the range information indicates only one value can satisfy
- the original conditional. */
-
-static bool
-simplify_cond_using_ranges_1 (gcond *stmt)
-{
- tree op0 = gimple_cond_lhs (stmt);
- tree op1 = gimple_cond_rhs (stmt);
- enum tree_code cond_code = gimple_cond_code (stmt);
-
- if (cond_code != NE_EXPR
- && cond_code != EQ_EXPR
- && TREE_CODE (op0) == SSA_NAME
- && INTEGRAL_TYPE_P (TREE_TYPE (op0))
- && is_gimple_min_invariant (op1))
- {
- value_range *vr = get_value_range (op0);
-
- /* If we have range information for OP0, then we might be
- able to simplify this conditional. */
- if (vr->type == VR_RANGE)
- {
- tree new_tree = test_for_singularity (cond_code, op0, op1, vr);
- if (new_tree)
- {
- if (dump_file)
- {
- fprintf (dump_file, "Simplified relational ");
- print_gimple_stmt (dump_file, stmt, 0);
- fprintf (dump_file, " into ");
- }
-
- gimple_cond_set_code (stmt, EQ_EXPR);
- gimple_cond_set_lhs (stmt, op0);
- gimple_cond_set_rhs (stmt, new_tree);
-
- update_stmt (stmt);
-
- if (dump_file)
- {
- print_gimple_stmt (dump_file, stmt, 0);
- fprintf (dump_file, "\n");
- }
-
- return true;
- }
-
- /* Try again after inverting the condition. We only deal
- with integral types here, so no need to worry about
- issues with inverting FP comparisons. */
- new_tree = test_for_singularity
- (invert_tree_comparison (cond_code, false),
- op0, op1, vr);
- if (new_tree)
- {
- if (dump_file)
- {
- fprintf (dump_file, "Simplified relational ");
- print_gimple_stmt (dump_file, stmt, 0);
- fprintf (dump_file, " into ");
- }
-
- gimple_cond_set_code (stmt, NE_EXPR);
- gimple_cond_set_lhs (stmt, op0);
- gimple_cond_set_rhs (stmt, new_tree);
-
- update_stmt (stmt);
-
- if (dump_file)
- {
- print_gimple_stmt (dump_file, stmt, 0);
- fprintf (dump_file, "\n");
- }
-
- return true;
- }
- }
- }
- return false;
-}
-
-/* STMT is a conditional at the end of a basic block.
-
- If the conditional is of the form SSA_NAME op constant and the SSA_NAME
- was set via a type conversion, try to replace the SSA_NAME with the RHS
- of the type conversion. Doing so makes the conversion dead which helps
- subsequent passes. */
-
-static void
-simplify_cond_using_ranges_2 (gcond *stmt)
-{
- tree op0 = gimple_cond_lhs (stmt);
- tree op1 = gimple_cond_rhs (stmt);
-
- /* If we have a comparison of an SSA_NAME (OP0) against a constant,
- see if OP0 was set by a type conversion where the source of
- the conversion is another SSA_NAME with a range that fits
- into the range of OP0's type.
-
- If so, the conversion is redundant as the earlier SSA_NAME can be
- used for the comparison directly if we just massage the constant in the
- comparison. */
- if (TREE_CODE (op0) == SSA_NAME
- && TREE_CODE (op1) == INTEGER_CST)
- {
- gimple *def_stmt = SSA_NAME_DEF_STMT (op0);
- tree innerop;
-
- if (!is_gimple_assign (def_stmt)
- || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
- return;
-
- innerop = gimple_assign_rhs1 (def_stmt);
-
- if (TREE_CODE (innerop) == SSA_NAME
- && !POINTER_TYPE_P (TREE_TYPE (innerop))
- && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (innerop)
- && desired_pro_or_demotion_p (TREE_TYPE (innerop), TREE_TYPE (op0)))
- {
- value_range *vr = get_value_range (innerop);
-
- if (range_int_cst_p (vr)
- && range_fits_type_p (vr,
- TYPE_PRECISION (TREE_TYPE (op0)),
- TYPE_SIGN (TREE_TYPE (op0)))
- && int_fits_type_p (op1, TREE_TYPE (innerop)))
- {
- tree newconst = fold_convert (TREE_TYPE (innerop), op1);
- gimple_cond_set_lhs (stmt, innerop);
- gimple_cond_set_rhs (stmt, newconst);
- update_stmt (stmt);
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "Folded into: ");
- print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
- fprintf (dump_file, "\n");
- }
- }
- }
- }
-}
-
-/* Simplify a switch statement using the value range of the switch
- argument. */
-
-static bool
-simplify_switch_using_ranges (gswitch *stmt)
-{
- tree op = gimple_switch_index (stmt);
- value_range *vr = NULL;
- bool take_default;
- edge e;
- edge_iterator ei;
- size_t i = 0, j = 0, n, n2;
- tree vec2;
- switch_update su;
- size_t k = 1, l = 0;
-
- if (TREE_CODE (op) == SSA_NAME)
- {
- vr = get_value_range (op);
-
- /* We can only handle integer ranges. */
- if ((vr->type != VR_RANGE
- && vr->type != VR_ANTI_RANGE)
- || symbolic_range_p (vr))
- return false;
-
- /* Find case label for min/max of the value range. */
- take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);
- }
- else if (TREE_CODE (op) == INTEGER_CST)
- {
- take_default = !find_case_label_index (stmt, 1, op, &i);
- if (take_default)
- {
- i = 1;
- j = 0;
- }
- else
- {
- j = i;
- }
- }
- else
- return false;
-
- n = gimple_switch_num_labels (stmt);
-
- /* We can truncate the case label ranges that partially overlap with OP's
- value range. */
- size_t min_idx = 1, max_idx = 0;
- if (vr != NULL)
- find_case_label_range (stmt, vr->min, vr->max, &min_idx, &max_idx);
- if (min_idx <= max_idx)
- {
- tree min_label = gimple_switch_label (stmt, min_idx);
- tree max_label = gimple_switch_label (stmt, max_idx);
-
- /* Avoid changing the type of the case labels when truncating. */
- tree case_label_type = TREE_TYPE (CASE_LOW (min_label));
- tree vr_min = fold_convert (case_label_type, vr->min);
- tree vr_max = fold_convert (case_label_type, vr->max);
-
- if (vr->type == VR_RANGE)
- {
- /* If OP's value range is [2,8] and the low label range is
- 0 ... 3, truncate the label's range to 2 .. 3. */
- if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) < 0
- && CASE_HIGH (min_label) != NULL_TREE
- && tree_int_cst_compare (CASE_HIGH (min_label), vr_min) >= 0)
- CASE_LOW (min_label) = vr_min;
-
- /* If OP's value range is [2,8] and the high label range is
- 7 ... 10, truncate the label's range to 7 .. 8. */
- if (tree_int_cst_compare (CASE_LOW (max_label), vr_max) <= 0
- && CASE_HIGH (max_label) != NULL_TREE
- && tree_int_cst_compare (CASE_HIGH (max_label), vr_max) > 0)
- CASE_HIGH (max_label) = vr_max;
- }
- else if (vr->type == VR_ANTI_RANGE)
- {
- tree one_cst = build_one_cst (case_label_type);
-
- if (min_label == max_label)
- {
- /* If OP's value range is ~[7,8] and the label's range is
- 7 ... 10, truncate the label's range to 9 ... 10. */
- if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) == 0
- && CASE_HIGH (min_label) != NULL_TREE
- && tree_int_cst_compare (CASE_HIGH (min_label), vr_max) > 0)
- CASE_LOW (min_label)
- = int_const_binop (PLUS_EXPR, vr_max, one_cst);
-
- /* If OP's value range is ~[7,8] and the label's range is
- 5 ... 8, truncate the label's range to 5 ... 6. */
- if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) < 0
- && CASE_HIGH (min_label) != NULL_TREE
- && tree_int_cst_compare (CASE_HIGH (min_label), vr_max) == 0)
- CASE_HIGH (min_label)
- = int_const_binop (MINUS_EXPR, vr_min, one_cst);
- }
- else
- {
- /* If OP's value range is ~[2,8] and the low label range is
- 0 ... 3, truncate the label's range to 0 ... 1. */
- if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) < 0
- && CASE_HIGH (min_label) != NULL_TREE
- && tree_int_cst_compare (CASE_HIGH (min_label), vr_min) >= 0)
- CASE_HIGH (min_label)
- = int_const_binop (MINUS_EXPR, vr_min, one_cst);
-
- /* If OP's value range is ~[2,8] and the high label range is
- 7 ... 10, truncate the label's range to 9 ... 10. */
- if (tree_int_cst_compare (CASE_LOW (max_label), vr_max) <= 0
- && CASE_HIGH (max_label) != NULL_TREE
- && tree_int_cst_compare (CASE_HIGH (max_label), vr_max) > 0)
- CASE_LOW (max_label)
- = int_const_binop (PLUS_EXPR, vr_max, one_cst);
- }
- }
-
- /* Canonicalize singleton case ranges. */
- if (tree_int_cst_equal (CASE_LOW (min_label), CASE_HIGH (min_label)))
- CASE_HIGH (min_label) = NULL_TREE;
- if (tree_int_cst_equal (CASE_LOW (max_label), CASE_HIGH (max_label)))
- CASE_HIGH (max_label) = NULL_TREE;
- }
-
- /* We can also eliminate case labels that lie completely outside OP's value
- range. */
-
- /* Bail out if this is just all edges taken. */
- if (i == 1
- && j == n - 1
- && take_default)
- return false;
-
- /* Build a new vector of taken case labels. */
- vec2 = make_tree_vec (j - i + 1 + l - k + 1 + (int)take_default);
- n2 = 0;
-
- /* Add the default edge, if necessary. */
- if (take_default)
- TREE_VEC_ELT (vec2, n2++) = gimple_switch_default_label (stmt);
-
- for (; i <= j; ++i, ++n2)
- TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, i);
-
- for (; k <= l; ++k, ++n2)
- TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, k);
-
- /* Mark needed edges. */
- for (i = 0; i < n2; ++i)
- {
- e = find_edge (gimple_bb (stmt),
- label_to_block (CASE_LABEL (TREE_VEC_ELT (vec2, i))));
- e->aux = (void *)-1;
- }
-
- /* Queue not needed edges for later removal. */
- FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
- {
- if (e->aux == (void *)-1)
- {
- e->aux = NULL;
- continue;
- }
-
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "removing unreachable case label\n");
- }
- to_remove_edges.safe_push (e);
- e->flags &= ~EDGE_EXECUTABLE;
- }
-
- /* And queue an update for the stmt. */
- su.stmt = stmt;
- su.vec = vec2;
- to_update_switch_stmts.safe_push (su);
- return false;
-}
-
-/* Simplify an integral conversion from an SSA name in STMT. */
-
-static bool
-simplify_conversion_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
-{
- tree innerop, middleop, finaltype;
- gimple *def_stmt;
- signop inner_sgn, middle_sgn, final_sgn;
- unsigned inner_prec, middle_prec, final_prec;
- widest_int innermin, innermed, innermax, middlemin, middlemed, middlemax;
-
- finaltype = TREE_TYPE (gimple_assign_lhs (stmt));
- if (!INTEGRAL_TYPE_P (finaltype))
- return false;
- middleop = gimple_assign_rhs1 (stmt);
- def_stmt = SSA_NAME_DEF_STMT (middleop);
- if (!is_gimple_assign (def_stmt)
- || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
- return false;
- innerop = gimple_assign_rhs1 (def_stmt);
- if (TREE_CODE (innerop) != SSA_NAME
- || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (innerop))
- return false;
-
- /* Get the value-range of the inner operand. Use get_range_info in
- case innerop was created during substitute-and-fold. */
- wide_int imin, imax;
- if (!INTEGRAL_TYPE_P (TREE_TYPE (innerop))
- || get_range_info (innerop, &imin, &imax) != VR_RANGE)
- return false;
- innermin = widest_int::from (imin, TYPE_SIGN (TREE_TYPE (innerop)));
- innermax = widest_int::from (imax, TYPE_SIGN (TREE_TYPE (innerop)));
-
- /* Simulate the conversion chain to check if the result is equal if
- the middle conversion is removed. */
- inner_prec = TYPE_PRECISION (TREE_TYPE (innerop));
- middle_prec = TYPE_PRECISION (TREE_TYPE (middleop));
- final_prec = TYPE_PRECISION (finaltype);
-
- /* If the first conversion is not injective, the second must not
- be widening. */
- if (wi::gtu_p (innermax - innermin,
- wi::mask <widest_int> (middle_prec, false))
- && middle_prec < final_prec)
- return false;
- /* We also want a medium value so that we can track the effect that
- narrowing conversions with sign change have. */
- inner_sgn = TYPE_SIGN (TREE_TYPE (innerop));
- if (inner_sgn == UNSIGNED)
- innermed = wi::shifted_mask <widest_int> (1, inner_prec - 1, false);
- else
- innermed = 0;
- if (wi::cmp (innermin, innermed, inner_sgn) >= 0
- || wi::cmp (innermed, innermax, inner_sgn) >= 0)
- innermed = innermin;
-
- middle_sgn = TYPE_SIGN (TREE_TYPE (middleop));
- middlemin = wi::ext (innermin, middle_prec, middle_sgn);
- middlemed = wi::ext (innermed, middle_prec, middle_sgn);
- middlemax = wi::ext (innermax, middle_prec, middle_sgn);
-
- /* Require that the final conversion applied to both the original
- and the intermediate range produces the same result. */
- final_sgn = TYPE_SIGN (finaltype);
- if (wi::ext (middlemin, final_prec, final_sgn)
- != wi::ext (innermin, final_prec, final_sgn)
- || wi::ext (middlemed, final_prec, final_sgn)
- != wi::ext (innermed, final_prec, final_sgn)
- || wi::ext (middlemax, final_prec, final_sgn)
- != wi::ext (innermax, final_prec, final_sgn))
- return false;
-
- gimple_assign_set_rhs1 (stmt, innerop);
- fold_stmt (gsi, follow_single_use_edges);
- return true;
-}
-
-/* Simplify a conversion from integral SSA name to float in STMT. */
-
-static bool
-simplify_float_conversion_using_ranges (gimple_stmt_iterator *gsi,
- gimple *stmt)
-{
- tree rhs1 = gimple_assign_rhs1 (stmt);
- value_range *vr = get_value_range (rhs1);
- scalar_float_mode fltmode
- = SCALAR_FLOAT_TYPE_MODE (TREE_TYPE (gimple_assign_lhs (stmt)));
- scalar_int_mode mode;
- tree tem;
- gassign *conv;
-
- /* We can only handle constant ranges. */
- if (vr->type != VR_RANGE
- || TREE_CODE (vr->min) != INTEGER_CST
- || TREE_CODE (vr->max) != INTEGER_CST)
- return false;
-
- /* First check if we can use a signed type in place of an unsigned. */
- scalar_int_mode rhs_mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (rhs1));
- if (TYPE_UNSIGNED (TREE_TYPE (rhs1))
- && can_float_p (fltmode, rhs_mode, 0) != CODE_FOR_nothing
- && range_fits_type_p (vr, TYPE_PRECISION (TREE_TYPE (rhs1)), SIGNED))
- mode = rhs_mode;
- /* If we can do the conversion in the current input mode do nothing. */
- else if (can_float_p (fltmode, rhs_mode,
- TYPE_UNSIGNED (TREE_TYPE (rhs1))) != CODE_FOR_nothing)
- return false;
- /* Otherwise search for a mode we can use, starting from the narrowest
- integer mode available. */
- else
- {
- mode = NARROWEST_INT_MODE;
- for (;;)
- {
- /* If we cannot do a signed conversion to float from mode
- or if the value-range does not fit in the signed type
- try with a wider mode. */
- if (can_float_p (fltmode, mode, 0) != CODE_FOR_nothing
- && range_fits_type_p (vr, GET_MODE_PRECISION (mode), SIGNED))
- break;
-
- /* But do not widen the input. Instead leave that to the
- optabs expansion code. */
- if (!GET_MODE_WIDER_MODE (mode).exists (&mode)
- || GET_MODE_PRECISION (mode) > TYPE_PRECISION (TREE_TYPE (rhs1)))
- return false;
- }
- }
-
- /* It works, insert a truncation or sign-change before the
- float conversion. */
- tem = make_ssa_name (build_nonstandard_integer_type
- (GET_MODE_PRECISION (mode), 0));
- conv = gimple_build_assign (tem, NOP_EXPR, rhs1);
- gsi_insert_before (gsi, conv, GSI_SAME_STMT);
- gimple_assign_set_rhs1 (stmt, tem);
- fold_stmt (gsi, follow_single_use_edges);
-
- return true;
-}
-
-/* Simplify an internal fn call using ranges if possible. */
-
-static bool
-simplify_internal_call_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
-{
- enum tree_code subcode;
- bool is_ubsan = false;
- bool ovf = false;
- switch (gimple_call_internal_fn (stmt))
- {
- case IFN_UBSAN_CHECK_ADD:
- subcode = PLUS_EXPR;
- is_ubsan = true;
- break;
- case IFN_UBSAN_CHECK_SUB:
- subcode = MINUS_EXPR;
- is_ubsan = true;
- break;
- case IFN_UBSAN_CHECK_MUL:
- subcode = MULT_EXPR;
- is_ubsan = true;
- break;
- case IFN_ADD_OVERFLOW:
- subcode = PLUS_EXPR;
- break;
- case IFN_SUB_OVERFLOW:
- subcode = MINUS_EXPR;
- break;
- case IFN_MUL_OVERFLOW:
- subcode = MULT_EXPR;
- break;
- default:
- return false;
- }
-
- tree op0 = gimple_call_arg (stmt, 0);
- tree op1 = gimple_call_arg (stmt, 1);
- tree type;
- if (is_ubsan)
- {
- type = TREE_TYPE (op0);
- if (VECTOR_TYPE_P (type))
- return false;
- }
- else if (gimple_call_lhs (stmt) == NULL_TREE)
- return false;
- else
- type = TREE_TYPE (TREE_TYPE (gimple_call_lhs (stmt)));
- if (!check_for_binary_op_overflow (subcode, type, op0, op1, &ovf)
- || (is_ubsan && ovf))
- return false;
-
- gimple *g;
- location_t loc = gimple_location (stmt);
- if (is_ubsan)
- g = gimple_build_assign (gimple_call_lhs (stmt), subcode, op0, op1);
- else
- {
- int prec = TYPE_PRECISION (type);
- tree utype = type;
- if (ovf
- || !useless_type_conversion_p (type, TREE_TYPE (op0))
- || !useless_type_conversion_p (type, TREE_TYPE (op1)))
- utype = build_nonstandard_integer_type (prec, 1);
- if (TREE_CODE (op0) == INTEGER_CST)
- op0 = fold_convert (utype, op0);
- else if (!useless_type_conversion_p (utype, TREE_TYPE (op0)))
- {
- g = gimple_build_assign (make_ssa_name (utype), NOP_EXPR, op0);
- gimple_set_location (g, loc);
- gsi_insert_before (gsi, g, GSI_SAME_STMT);
- op0 = gimple_assign_lhs (g);
- }
- if (TREE_CODE (op1) == INTEGER_CST)
- op1 = fold_convert (utype, op1);
- else if (!useless_type_conversion_p (utype, TREE_TYPE (op1)))
- {
- g = gimple_build_assign (make_ssa_name (utype), NOP_EXPR, op1);
- gimple_set_location (g, loc);
- gsi_insert_before (gsi, g, GSI_SAME_STMT);
- op1 = gimple_assign_lhs (g);
- }
- g = gimple_build_assign (make_ssa_name (utype), subcode, op0, op1);
- gimple_set_location (g, loc);
- gsi_insert_before (gsi, g, GSI_SAME_STMT);
- if (utype != type)
- {
- g = gimple_build_assign (make_ssa_name (type), NOP_EXPR,
- gimple_assign_lhs (g));
- gimple_set_location (g, loc);
- gsi_insert_before (gsi, g, GSI_SAME_STMT);
- }
- g = gimple_build_assign (gimple_call_lhs (stmt), COMPLEX_EXPR,
- gimple_assign_lhs (g),
- build_int_cst (type, ovf));
- }
- gimple_set_location (g, loc);
- gsi_replace (gsi, g, false);
- return true;
-}
-
-/* Return true if VAR is a two-valued variable. Set a and b with the
- two-values when it is true. Return false otherwise. */
-
-static bool
-two_valued_val_range_p (tree var, tree *a, tree *b)
-{
- value_range *vr = get_value_range (var);
- if ((vr->type != VR_RANGE
- && vr->type != VR_ANTI_RANGE)
- || TREE_CODE (vr->min) != INTEGER_CST
- || TREE_CODE (vr->max) != INTEGER_CST)
- return false;
-
- if (vr->type == VR_RANGE
- && wi::to_wide (vr->max) - wi::to_wide (vr->min) == 1)
- {
- *a = vr->min;
- *b = vr->max;
- return true;
- }
-
- /* ~[TYPE_MIN + 1, TYPE_MAX - 1] */
- if (vr->type == VR_ANTI_RANGE
- && (wi::to_wide (vr->min)
- - wi::to_wide (vrp_val_min (TREE_TYPE (var)))) == 1
- && (wi::to_wide (vrp_val_max (TREE_TYPE (var)))
- - wi::to_wide (vr->max)) == 1)
- {
- *a = vrp_val_min (TREE_TYPE (var));
- *b = vrp_val_max (TREE_TYPE (var));
- return true;
- }
-
- return false;
-}
-
-/* Simplify STMT using ranges if possible. */
-
-static bool
-simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
+class vrp_folder : public substitute_and_fold_engine
{
- gimple *stmt = gsi_stmt (*gsi);
- if (is_gimple_assign (stmt))
- {
- enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
- tree rhs1 = gimple_assign_rhs1 (stmt);
- tree rhs2 = gimple_assign_rhs2 (stmt);
- tree lhs = gimple_assign_lhs (stmt);
- tree val1 = NULL_TREE, val2 = NULL_TREE;
- use_operand_p use_p;
- gimple *use_stmt;
-
- /* Convert:
- LHS = CST BINOP VAR
- Where VAR is two-valued and LHS is used in GIMPLE_COND only
- To:
- LHS = VAR == VAL1 ? (CST BINOP VAL1) : (CST BINOP VAL2)
-
- Also handles:
- LHS = VAR BINOP CST
- Where VAR is two-valued and LHS is used in GIMPLE_COND only
- To:
- LHS = VAR == VAL1 ? (VAL1 BINOP CST) : (VAL2 BINOP CST) */
-
- if (TREE_CODE_CLASS (rhs_code) == tcc_binary
- && INTEGRAL_TYPE_P (TREE_TYPE (lhs))
- && ((TREE_CODE (rhs1) == INTEGER_CST
- && TREE_CODE (rhs2) == SSA_NAME)
- || (TREE_CODE (rhs2) == INTEGER_CST
- && TREE_CODE (rhs1) == SSA_NAME))
- && single_imm_use (lhs, &use_p, &use_stmt)
- && gimple_code (use_stmt) == GIMPLE_COND)
-
- {
- tree new_rhs1 = NULL_TREE;
- tree new_rhs2 = NULL_TREE;
- tree cmp_var = NULL_TREE;
-
- if (TREE_CODE (rhs2) == SSA_NAME
- && two_valued_val_range_p (rhs2, &val1, &val2))
- {
- /* Optimize RHS1 OP [VAL1, VAL2]. */
- new_rhs1 = int_const_binop (rhs_code, rhs1, val1);
- new_rhs2 = int_const_binop (rhs_code, rhs1, val2);
- cmp_var = rhs2;
- }
- else if (TREE_CODE (rhs1) == SSA_NAME
- && two_valued_val_range_p (rhs1, &val1, &val2))
- {
- /* Optimize [VAL1, VAL2] OP RHS2. */
- new_rhs1 = int_const_binop (rhs_code, val1, rhs2);
- new_rhs2 = int_const_binop (rhs_code, val2, rhs2);
- cmp_var = rhs1;
- }
-
- /* If we could not find two-vals or the optimzation is invalid as
- in divide by zero, new_rhs1 / new_rhs will be NULL_TREE. */
- if (new_rhs1 && new_rhs2)
- {
- tree cond = build2 (EQ_EXPR, boolean_type_node, cmp_var, val1);
- gimple_assign_set_rhs_with_ops (gsi,
- COND_EXPR, cond,
- new_rhs1,
- new_rhs2);
- update_stmt (gsi_stmt (*gsi));
- fold_stmt (gsi, follow_single_use_edges);
- return true;
- }
- }
-
- switch (rhs_code)
- {
- case EQ_EXPR:
- case NE_EXPR:
- /* Transform EQ_EXPR, NE_EXPR into BIT_XOR_EXPR or identity
- if the RHS is zero or one, and the LHS are known to be boolean
- values. */
- if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
- return simplify_truth_ops_using_ranges (gsi, stmt);
- break;
-
- /* Transform TRUNC_DIV_EXPR and TRUNC_MOD_EXPR into RSHIFT_EXPR
- and BIT_AND_EXPR respectively if the first operand is greater
- than zero and the second operand is an exact power of two.
- Also optimize TRUNC_MOD_EXPR away if the second operand is
- constant and the first operand already has the right value
- range. */
- case TRUNC_DIV_EXPR:
- case TRUNC_MOD_EXPR:
- if ((TREE_CODE (rhs1) == SSA_NAME
- || TREE_CODE (rhs1) == INTEGER_CST)
- && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
- return simplify_div_or_mod_using_ranges (gsi, stmt);
- break;
-
- /* Transform ABS (X) into X or -X as appropriate. */
- case ABS_EXPR:
- if (TREE_CODE (rhs1) == SSA_NAME
- && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
- return simplify_abs_using_ranges (gsi, stmt);
- break;
-
- case BIT_AND_EXPR:
- case BIT_IOR_EXPR:
- /* Optimize away BIT_AND_EXPR and BIT_IOR_EXPR
- if all the bits being cleared are already cleared or
- all the bits being set are already set. */
- if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
- return simplify_bit_ops_using_ranges (gsi, stmt);
- break;
-
- CASE_CONVERT:
- if (TREE_CODE (rhs1) == SSA_NAME
- && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
- return simplify_conversion_using_ranges (gsi, stmt);
- break;
-
- case FLOAT_EXPR:
- if (TREE_CODE (rhs1) == SSA_NAME
- && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
- return simplify_float_conversion_using_ranges (gsi, stmt);
- break;
-
- case MIN_EXPR:
- case MAX_EXPR:
- return simplify_min_or_max_using_ranges (gsi, stmt);
-
- default:
- break;
- }
- }
- else if (gimple_code (stmt) == GIMPLE_COND)
- return simplify_cond_using_ranges_1 (as_a <gcond *> (stmt));
- else if (gimple_code (stmt) == GIMPLE_SWITCH)
- return simplify_switch_using_ranges (as_a <gswitch *> (stmt));
- else if (is_gimple_call (stmt)
- && gimple_call_internal_p (stmt))
- return simplify_internal_call_using_ranges (gsi, stmt);
-
- return false;
-}
+ public:
+ tree get_value (tree) FINAL OVERRIDE;
+ bool fold_stmt (gimple_stmt_iterator *) FINAL OVERRIDE;
+ bool fold_predicate_in (gimple_stmt_iterator *);
+
+ class vr_values *vr_values;
+
+ /* Delegators. */
+ tree vrp_evaluate_conditional (tree_code code, tree op0,
+ tree op1, gimple *stmt)
+ { return vr_values->vrp_evaluate_conditional (code, op0, op1, stmt); }
+ bool simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
+ { return vr_values->simplify_stmt_using_ranges (gsi); }
+ tree op_with_constant_singleton_value_range (tree op)
+ { return vr_values->op_with_constant_singleton_value_range (op); }
+};
/* If the statement pointed by SI has a predicate whose value can be
computed using the value range information computed by VRP, compute
its value and return true. Otherwise, return false. */
-static bool
-fold_predicate_in (gimple_stmt_iterator *si)
+bool
+vrp_folder::fold_predicate_in (gimple_stmt_iterator *si)
{
bool assignment_p = false;
tree val;
@@ -10526,13 +6412,6 @@ fold_predicate_in (gimple_stmt_iterator *si)
return false;
}
-class vrp_folder : public substitute_and_fold_engine
-{
- public:
- tree get_value (tree) FINAL OVERRIDE;
- bool fold_stmt (gimple_stmt_iterator *) FINAL OVERRIDE;
-};
-
/* Callback for substitute_and_fold folding the stmt at *SI. */
bool
@@ -10583,6 +6462,9 @@ lhs_of_dominating_assert (tree op, basic_block bb, gimple *stmt)
return op;
}
+/* A hack. */
+static class vr_values *x_vr_values;
+
/* A trivial wrapper so that we can present the generic jump threading
code with a simple API for simplifying statements. STMT is the
statement we want to simplify, WITHIN_STMT provides the location
@@ -10598,6 +6480,7 @@ simplify_stmt_for_jump_threading (gimple *stmt, gimple *within_stmt,
if (cached_lhs && is_gimple_min_invariant (cached_lhs))
return cached_lhs;
+ vr_values *vr_values = x_vr_values;
if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
{
tree op0 = gimple_cond_lhs (cond_stmt);
@@ -10606,8 +6489,8 @@ simplify_stmt_for_jump_threading (gimple *stmt, gimple *within_stmt,
tree op1 = gimple_cond_rhs (cond_stmt);
op1 = lhs_of_dominating_assert (op1, bb, stmt);
- return vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
- op0, op1, within_stmt);
+ return vr_values->vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
+ op0, op1, within_stmt);
}
/* We simplify a switch statement by trying to determine which case label
@@ -10621,7 +6504,7 @@ simplify_stmt_for_jump_threading (gimple *stmt, gimple *within_stmt,
op = lhs_of_dominating_assert (op, bb, stmt);
- value_range *vr = get_value_range (op);
+ value_range *vr = vr_values->get_value_range (op);
if ((vr->type != VR_RANGE && vr->type != VR_ANTI_RANGE)
|| symbolic_range_p (vr))
return NULL_TREE;
@@ -10682,7 +6565,7 @@ simplify_stmt_for_jump_threading (gimple *stmt, gimple *within_stmt,
&& (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
|| POINTER_TYPE_P (TREE_TYPE (lhs))))
{
- extract_range_from_assignment (&new_vr, assign_stmt);
+ vr_values->extract_range_from_assignment (&new_vr, assign_stmt);
if (range_int_cst_singleton_p (&new_vr))
return new_vr.min;
}
@@ -10705,11 +6588,14 @@ public:
virtual edge before_dom_children (basic_block);
virtual void after_dom_children (basic_block);
+ class vr_values *vr_values;
+
private:
class const_and_copies *m_const_and_copies;
class avail_exprs_stack *m_avail_exprs_stack;
gcond *m_dummy_cond;
+
};
/* Called before processing dominator children of BB. We want to look
@@ -10762,9 +6648,11 @@ vrp_dom_walker::after_dom_children (basic_block bb)
integer_zero_node, integer_zero_node,
NULL, NULL);
+ x_vr_values = vr_values;
thread_outgoing_edges (bb, m_dummy_cond, m_const_and_copies,
m_avail_exprs_stack,
simplify_stmt_for_jump_threading);
+ x_vr_values = NULL;
m_avail_exprs_stack->pop_to_marker ();
m_const_and_copies->pop_to_marker ();
@@ -10791,7 +6679,7 @@ vrp_dom_walker::after_dom_children (basic_block bb)
for later realization. */
static void
-identify_jump_threads (void)
+identify_jump_threads (class vr_values *vr_values)
{
int i;
edge e;
@@ -10823,6 +6711,7 @@ identify_jump_threads (void)
= new class avail_exprs_stack (avail_exprs);
vrp_dom_walker walker (CDI_DOMINATORS, equiv_stack, avail_exprs_stack);
+ walker.vr_values = vr_values;
walker.walk (cfun->cfg->x_entry_block_ptr);
/* Clear EDGE_IGNORE. */
@@ -10837,572 +6726,57 @@ identify_jump_threads (void)
delete avail_exprs_stack;
}
-/* Free VRP lattice. */
-
-static void
-vrp_free_lattice ()
-{
- /* Free allocated memory. */
- free (vr_value);
- free (vr_phi_edge_counts);
- bitmap_obstack_release (&vrp_equiv_obstack);
- vrp_value_range_pool.release ();
-
- /* So that we can distinguish between VRP data being available
- and not available. */
- vr_value = NULL;
- vr_phi_edge_counts = NULL;
-}
-
/* Traverse all the blocks folding conditionals with known ranges. */
-static void
-vrp_finalize (bool warn_array_bounds_p)
+void
+vrp_prop::vrp_finalize (bool warn_array_bounds_p)
{
size_t i;
- values_propagated = true;
+ vr_values.values_propagated = true;
if (dump_file)
{
fprintf (dump_file, "\nValue ranges after VRP:\n\n");
- dump_all_value_ranges (dump_file);
+ vr_values.dump_all_value_ranges (dump_file);
fprintf (dump_file, "\n");
}
/* Set value range to non pointer SSA_NAMEs. */
- for (i = 0; i < num_vr_values; i++)
- if (vr_value[i])
- {
- tree name = ssa_name (i);
+ for (i = 0; i < num_ssa_names; i++)
+ {
+ tree name = ssa_name (i);
+ if (!name)
+ continue;
- if (!name
- || (vr_value[i]->type == VR_VARYING)
- || (vr_value[i]->type == VR_UNDEFINED)
- || (TREE_CODE (vr_value[i]->min) != INTEGER_CST)
- || (TREE_CODE (vr_value[i]->max) != INTEGER_CST))
- continue;
+ value_range *vr = get_value_range (name);
+ if (!name
+ || (vr->type == VR_VARYING)
+ || (vr->type == VR_UNDEFINED)
+ || (TREE_CODE (vr->min) != INTEGER_CST)
+ || (TREE_CODE (vr->max) != INTEGER_CST))
+ continue;
- if (POINTER_TYPE_P (TREE_TYPE (name))
- && ((vr_value[i]->type == VR_RANGE
- && range_includes_zero_p (vr_value[i]->min,
- vr_value[i]->max) == 0)
- || (vr_value[i]->type == VR_ANTI_RANGE
- && range_includes_zero_p (vr_value[i]->min,
- vr_value[i]->max) == 1)))
- set_ptr_nonnull (name);
- else if (!POINTER_TYPE_P (TREE_TYPE (name)))
- set_range_info (name, vr_value[i]->type,
- wi::to_wide (vr_value[i]->min),
- wi::to_wide (vr_value[i]->max));
- }
+ if (POINTER_TYPE_P (TREE_TYPE (name))
+ && ((vr->type == VR_RANGE
+ && range_includes_zero_p (vr->min, vr->max) == 0)
+ || (vr->type == VR_ANTI_RANGE
+ && range_includes_zero_p (vr->min, vr->max) == 1)))
+ set_ptr_nonnull (name);
+ else if (!POINTER_TYPE_P (TREE_TYPE (name)))
+ set_range_info (name, vr->type,
+ wi::to_wide (vr->min),
+ wi::to_wide (vr->max));
+ }
class vrp_folder vrp_folder;
+ vrp_folder.vr_values = &vr_values;
vrp_folder.substitute_and_fold ();
if (warn_array_bounds && warn_array_bounds_p)
check_all_array_refs ();
}
-/* evrp_dom_walker visits the basic blocks in the dominance order and set
- the Value Ranges (VR) for SSA_NAMEs in the scope. Use this VR to
- discover more VRs. */
-
-class evrp_dom_walker : public dom_walker
-{
-public:
- evrp_dom_walker ()
- : dom_walker (CDI_DOMINATORS), stack (10)
- {
- need_eh_cleanup = BITMAP_ALLOC (NULL);
- }
- ~evrp_dom_walker ()
- {
- BITMAP_FREE (need_eh_cleanup);
- }
- virtual edge before_dom_children (basic_block);
- virtual void after_dom_children (basic_block);
- void push_value_range (tree var, value_range *vr);
- value_range *pop_value_range (tree var);
- value_range *try_find_new_range (tree, tree op, tree_code code, tree limit);
-
- /* Cond_stack holds the old VR. */
- auto_vec<std::pair <tree, value_range*> > stack;
- bitmap need_eh_cleanup;
- auto_vec<gimple *> stmts_to_fixup;
- auto_vec<gimple *> stmts_to_remove;
-};
-
-/* Find new range for NAME such that (OP CODE LIMIT) is true. */
-
-value_range *
-evrp_dom_walker::try_find_new_range (tree name,
- tree op, tree_code code, tree limit)
-{
- value_range vr = VR_INITIALIZER;
- value_range *old_vr = get_value_range (name);
-
- /* Discover VR when condition is true. */
- extract_range_for_var_from_comparison_expr (name, code, op,
- limit, &vr);
- /* If we found any usable VR, set the VR to ssa_name and create a
- PUSH old value in the stack with the old VR. */
- if (vr.type == VR_RANGE || vr.type == VR_ANTI_RANGE)
- {
- if (old_vr->type == vr.type
- && vrp_operand_equal_p (old_vr->min, vr.min)
- && vrp_operand_equal_p (old_vr->max, vr.max))
- return NULL;
- value_range *new_vr = vrp_value_range_pool.allocate ();
- *new_vr = vr;
- return new_vr;
- }
- return NULL;
-}
-
-/* See if there is any new scope is entered with new VR and set that VR to
- ssa_name before visiting the statements in the scope. */
-
-edge
-evrp_dom_walker::before_dom_children (basic_block bb)
-{
- if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file, "Visiting BB%d\n", bb->index);
-
- stack.safe_push (std::make_pair (NULL_TREE, (value_range *)NULL));
-
- edge pred_e = single_pred_edge_ignoring_loop_edges (bb, false);
- if (pred_e)
- {
- gimple *stmt = last_stmt (pred_e->src);
- tree op0 = NULL_TREE;
-
- if (stmt
- && gimple_code (stmt) == GIMPLE_COND
- && (op0 = gimple_cond_lhs (stmt))
- && TREE_CODE (op0) == SSA_NAME
- && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)))
- || POINTER_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)))))
- {
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "Visiting controlling predicate ");
- print_gimple_stmt (dump_file, stmt, 0);
- }
- /* Entering a new scope. Try to see if we can find a VR
- here. */
- tree op1 = gimple_cond_rhs (stmt);
- if (TREE_OVERFLOW_P (op1))
- op1 = drop_tree_overflow (op1);
- tree_code code = gimple_cond_code (stmt);
-
- auto_vec<assert_info, 8> asserts;
- register_edge_assert_for (op0, pred_e, code, op0, op1, asserts);
- if (TREE_CODE (op1) == SSA_NAME)
- register_edge_assert_for (op1, pred_e, code, op0, op1, asserts);
-
- auto_vec<std::pair<tree, value_range *>, 8> vrs;
- for (unsigned i = 0; i < asserts.length (); ++i)
- {
- value_range *vr = try_find_new_range (asserts[i].name,
- asserts[i].expr,
- asserts[i].comp_code,
- asserts[i].val);
- if (vr)
- vrs.safe_push (std::make_pair (asserts[i].name, vr));
- }
- /* Push updated ranges only after finding all of them to avoid
- ordering issues that can lead to worse ranges. */
- for (unsigned i = 0; i < vrs.length (); ++i)
- push_value_range (vrs[i].first, vrs[i].second);
- }
- }
-
- /* Visit PHI stmts and discover any new VRs possible. */
- bool has_unvisited_preds = false;
- edge_iterator ei;
- edge e;
- FOR_EACH_EDGE (e, ei, bb->preds)
- if (e->flags & EDGE_EXECUTABLE
- && !(e->src->flags & BB_VISITED))
- {
- has_unvisited_preds = true;
- break;
- }
-
- for (gphi_iterator gpi = gsi_start_phis (bb);
- !gsi_end_p (gpi); gsi_next (&gpi))
- {
- gphi *phi = gpi.phi ();
- tree lhs = PHI_RESULT (phi);
- if (virtual_operand_p (lhs))
- continue;
- value_range vr_result = VR_INITIALIZER;
- bool interesting = stmt_interesting_for_vrp (phi);
- if (interesting && dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "Visiting PHI node ");
- print_gimple_stmt (dump_file, phi, 0);
- }
- if (!has_unvisited_preds
- && interesting)
- extract_range_from_phi_node (phi, &vr_result);
- else
- {
- set_value_range_to_varying (&vr_result);
- /* When we have an unvisited executable predecessor we can't
- use PHI arg ranges which may be still UNDEFINED but have
- to use VARYING for them. But we can still resort to
- SCEV for loop header PHIs. */
- struct loop *l;
- if (interesting
- && (l = loop_containing_stmt (phi))
- && l->header == gimple_bb (phi))
- adjust_range_with_scev (&vr_result, l, phi, lhs);
- }
- update_value_range (lhs, &vr_result);
-
- /* Mark PHIs whose lhs we fully propagate for removal. */
- tree val = op_with_constant_singleton_value_range (lhs);
- if (val && may_propagate_copy (lhs, val))
- {
- stmts_to_remove.safe_push (phi);
- continue;
- }
-
- /* Set the SSA with the value range. */
- if (INTEGRAL_TYPE_P (TREE_TYPE (lhs)))
- {
- if ((vr_result.type == VR_RANGE
- || vr_result.type == VR_ANTI_RANGE)
- && (TREE_CODE (vr_result.min) == INTEGER_CST)
- && (TREE_CODE (vr_result.max) == INTEGER_CST))
- set_range_info (lhs, vr_result.type,
- wi::to_wide (vr_result.min),
- wi::to_wide (vr_result.max));
- }
- else if (POINTER_TYPE_P (TREE_TYPE (lhs))
- && ((vr_result.type == VR_RANGE
- && range_includes_zero_p (vr_result.min,
- vr_result.max) == 0)
- || (vr_result.type == VR_ANTI_RANGE
- && range_includes_zero_p (vr_result.min,
- vr_result.max) == 1)))
- set_ptr_nonnull (lhs);
- }
-
- edge taken_edge = NULL;
-
- /* Visit all other stmts and discover any new VRs possible. */
- for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
- !gsi_end_p (gsi); gsi_next (&gsi))
- {
- gimple *stmt = gsi_stmt (gsi);
- tree output = NULL_TREE;
- gimple *old_stmt = stmt;
- bool was_noreturn = (is_gimple_call (stmt)
- && gimple_call_noreturn_p (stmt));
-
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "Visiting stmt ");
- print_gimple_stmt (dump_file, stmt, 0);
- }
-
- if (gcond *cond = dyn_cast <gcond *> (stmt))
- {
- vrp_visit_cond_stmt (cond, &taken_edge);
- if (taken_edge)
- {
- if (taken_edge->flags & EDGE_TRUE_VALUE)
- gimple_cond_make_true (cond);
- else if (taken_edge->flags & EDGE_FALSE_VALUE)
- gimple_cond_make_false (cond);
- else
- gcc_unreachable ();
- update_stmt (stmt);
- }
- }
- else if (stmt_interesting_for_vrp (stmt))
- {
- edge taken_edge;
- value_range vr = VR_INITIALIZER;
- extract_range_from_stmt (stmt, &taken_edge, &output, &vr);
- if (output
- && (vr.type == VR_RANGE || vr.type == VR_ANTI_RANGE))
- {
- update_value_range (output, &vr);
- vr = *get_value_range (output);
-
- /* Mark stmts whose output we fully propagate for removal. */
- tree val;
- if ((val = op_with_constant_singleton_value_range (output))
- && may_propagate_copy (output, val)
- && !stmt_could_throw_p (stmt)
- && !gimple_has_side_effects (stmt))
- {
- stmts_to_remove.safe_push (stmt);
- continue;
- }
-
- /* Set the SSA with the value range. */
- if (INTEGRAL_TYPE_P (TREE_TYPE (output)))
- {
- if ((vr.type == VR_RANGE
- || vr.type == VR_ANTI_RANGE)
- && (TREE_CODE (vr.min) == INTEGER_CST)
- && (TREE_CODE (vr.max) == INTEGER_CST))
- set_range_info (output, vr.type,
- wi::to_wide (vr.min),
- wi::to_wide (vr.max));
- }
- else if (POINTER_TYPE_P (TREE_TYPE (output))
- && ((vr.type == VR_RANGE
- && range_includes_zero_p (vr.min,
- vr.max) == 0)
- || (vr.type == VR_ANTI_RANGE
- && range_includes_zero_p (vr.min,
- vr.max) == 1)))
- set_ptr_nonnull (output);
- }
- else
- set_defs_to_varying (stmt);
- }
- else
- set_defs_to_varying (stmt);
-
- /* See if we can derive a range for any of STMT's operands. */
- tree op;
- ssa_op_iter i;
- FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
- {
- tree value;
- enum tree_code comp_code;
-
- /* If OP is used in such a way that we can infer a value
- range for it, and we don't find a previous assertion for
- it, create a new assertion location node for OP. */
- if (infer_value_range (stmt, op, &comp_code, &value))
- {
- /* If we are able to infer a nonzero value range for OP,
- then walk backwards through the use-def chain to see if OP
- was set via a typecast.
- If so, then we can also infer a nonzero value range
- for the operand of the NOP_EXPR. */
- if (comp_code == NE_EXPR && integer_zerop (value))
- {
- tree t = op;
- gimple *def_stmt = SSA_NAME_DEF_STMT (t);
- while (is_gimple_assign (def_stmt)
- && CONVERT_EXPR_CODE_P
- (gimple_assign_rhs_code (def_stmt))
- && TREE_CODE
- (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
- && POINTER_TYPE_P
- (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
- {
- t = gimple_assign_rhs1 (def_stmt);
- def_stmt = SSA_NAME_DEF_STMT (t);
-
- /* Add VR when (T COMP_CODE value) condition is
- true. */
- value_range *op_range
- = try_find_new_range (t, t, comp_code, value);
- if (op_range)
- push_value_range (t, op_range);
- }
- }
- /* Add VR when (OP COMP_CODE value) condition is true. */
- value_range *op_range = try_find_new_range (op, op,
- comp_code, value);
- if (op_range)
- push_value_range (op, op_range);
- }
- }
-
- /* Try folding stmts with the VR discovered. */
- class vrp_folder vrp_folder;
- bool did_replace = vrp_folder.replace_uses_in (stmt);
- if (fold_stmt (&gsi, follow_single_use_edges)
- || did_replace)
- {
- stmt = gsi_stmt (gsi);
- update_stmt (stmt);
- did_replace = true;
- }
-
- if (did_replace)
- {
- /* If we cleaned up EH information from the statement,
- remove EH edges. */
- if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
- bitmap_set_bit (need_eh_cleanup, bb->index);
-
- /* If we turned a not noreturn call into a noreturn one
- schedule it for fixup. */
- if (!was_noreturn
- && is_gimple_call (stmt)
- && gimple_call_noreturn_p (stmt))
- stmts_to_fixup.safe_push (stmt);
-
- if (gimple_assign_single_p (stmt))
- {
- tree rhs = gimple_assign_rhs1 (stmt);
- if (TREE_CODE (rhs) == ADDR_EXPR)
- recompute_tree_invariant_for_addr_expr (rhs);
- }
- }
- }
-
- /* Visit BB successor PHI nodes and replace PHI args. */
- FOR_EACH_EDGE (e, ei, bb->succs)
- {
- for (gphi_iterator gpi = gsi_start_phis (e->dest);
- !gsi_end_p (gpi); gsi_next (&gpi))
- {
- gphi *phi = gpi.phi ();
- use_operand_p use_p = PHI_ARG_DEF_PTR_FROM_EDGE (phi, e);
- tree arg = USE_FROM_PTR (use_p);
- if (TREE_CODE (arg) != SSA_NAME
- || virtual_operand_p (arg))
- continue;
- tree val = op_with_constant_singleton_value_range (arg);
- if (val && may_propagate_copy (arg, val))
- propagate_value (use_p, val);
- }
- }
-
- bb->flags |= BB_VISITED;
-
- return taken_edge;
-}
-
-/* Restore/pop VRs valid only for BB when we leave BB. */
-
-void
-evrp_dom_walker::after_dom_children (basic_block bb ATTRIBUTE_UNUSED)
-{
- gcc_checking_assert (!stack.is_empty ());
- while (stack.last ().first != NULL_TREE)
- pop_value_range (stack.last ().first);
- stack.pop ();
-}
-
-/* Push the Value Range of VAR to the stack and update it with new VR. */
-
-void
-evrp_dom_walker::push_value_range (tree var, value_range *vr)
-{
- if (SSA_NAME_VERSION (var) >= num_vr_values)
- return;
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "pushing new range for ");
- print_generic_expr (dump_file, var);
- fprintf (dump_file, ": ");
- dump_value_range (dump_file, vr);
- fprintf (dump_file, "\n");
- }
- stack.safe_push (std::make_pair (var, get_value_range (var)));
- vr_value[SSA_NAME_VERSION (var)] = vr;
-}
-
-/* Pop the Value Range from the vrp_stack and update VAR with it. */
-
-value_range *
-evrp_dom_walker::pop_value_range (tree var)
-{
- value_range *vr = stack.last ().second;
- gcc_checking_assert (var == stack.last ().first);
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "popping range for ");
- print_generic_expr (dump_file, var);
- fprintf (dump_file, ", restoring ");
- dump_value_range (dump_file, vr);
- fprintf (dump_file, "\n");
- }
- vr_value[SSA_NAME_VERSION (var)] = vr;
- stack.pop ();
- return vr;
-}
-
-
-/* Main entry point for the early vrp pass which is a simplified non-iterative
- version of vrp where basic blocks are visited in dominance order. Value
- ranges discovered in early vrp will also be used by ipa-vrp. */
-
-static unsigned int
-execute_early_vrp ()
-{
- edge e;
- edge_iterator ei;
- basic_block bb;
-
- loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
- rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
- scev_initialize ();
- calculate_dominance_info (CDI_DOMINATORS);
- FOR_EACH_BB_FN (bb, cfun)
- {
- bb->flags &= ~BB_VISITED;
- FOR_EACH_EDGE (e, ei, bb->preds)
- e->flags |= EDGE_EXECUTABLE;
- }
- vrp_initialize_lattice ();
-
- /* Walk stmts in dominance order and propagate VRP. */
- evrp_dom_walker walker;
- walker.walk (ENTRY_BLOCK_PTR_FOR_FN (cfun));
-
- if (dump_file)
- {
- fprintf (dump_file, "\nValue ranges after Early VRP:\n\n");
- dump_all_value_ranges (dump_file);
- fprintf (dump_file, "\n");
- }
-
- /* Remove stmts in reverse order to make debug stmt creation possible. */
- while (! walker.stmts_to_remove.is_empty ())
- {
- gimple *stmt = walker.stmts_to_remove.pop ();
- if (dump_file && dump_flags & TDF_DETAILS)
- {
- fprintf (dump_file, "Removing dead stmt ");
- print_gimple_stmt (dump_file, stmt, 0);
- fprintf (dump_file, "\n");
- }
- gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
- if (gimple_code (stmt) == GIMPLE_PHI)
- remove_phi_node (&gsi, true);
- else
- {
- unlink_stmt_vdef (stmt);
- gsi_remove (&gsi, true);
- release_defs (stmt);
- }
- }
-
- if (!bitmap_empty_p (walker.need_eh_cleanup))
- gimple_purge_all_dead_eh_edges (walker.need_eh_cleanup);
-
- /* Fixup stmts that became noreturn calls. This may require splitting
- blocks and thus isn't possible during the dominator walk. Do this
- in reverse order so we don't inadvertedly remove a stmt we want to
- fixup by visiting a dominating now noreturn call first. */
- while (!walker.stmts_to_fixup.is_empty ())
- {
- gimple *stmt = walker.stmts_to_fixup.pop ();
- fixup_noreturn_call (stmt);
- }
-
- vrp_free_lattice ();
- scev_finalize ();
- loop_optimizer_finalize ();
- return 0;
-}
-
-
/* Main entry point to VRP (Value Range Propagation). This pass is
loosely based on J. R. C. Patterson, ``Accurate Static Branch
Prediction by Value Range Propagation,'' in SIGPLAN Conference on
@@ -11470,15 +6844,14 @@ execute_vrp (bool warn_array_bounds_p)
/* For visiting PHI nodes we need EDGE_DFS_BACK computed. */
mark_dfs_back_edges ();
- vrp_initialize_lattice ();
- vrp_initialize ();
class vrp_prop vrp_prop;
+ vrp_prop.vrp_initialize ();
vrp_prop.ssa_propagate ();
- vrp_finalize (warn_array_bounds_p);
+ vrp_prop.vrp_finalize (warn_array_bounds_p);
/* We must identify jump threading opportunities before we release
the datastructures built by VRP. */
- identify_jump_threads ();
+ identify_jump_threads (&vrp_prop.vr_values);
/* A comparison of an SSA_NAME against a constant where the SSA_NAME
was set by a type conversion can often be rewritten to use the
@@ -11492,11 +6865,9 @@ execute_vrp (bool warn_array_bounds_p)
{
gimple *last = last_stmt (bb);
if (last && gimple_code (last) == GIMPLE_COND)
- simplify_cond_using_ranges_2 (as_a <gcond *> (last));
+ vrp_prop.vr_values.simplify_cond_using_ranges_2 (as_a <gcond *> (last));
}
- vrp_free_lattice ();
-
free_numbers_of_iterations_estimates (cfun);
/* ASSERT_EXPRs must be removed before finalizing jump threads
@@ -11599,44 +6970,3 @@ make_pass_vrp (gcc::context *ctxt)
{
return new pass_vrp (ctxt);
}
-
-namespace {
-
-const pass_data pass_data_early_vrp =
-{
- GIMPLE_PASS, /* type */
- "evrp", /* name */
- OPTGROUP_NONE, /* optinfo_flags */
- TV_TREE_EARLY_VRP, /* tv_id */
- PROP_ssa, /* properties_required */
- 0, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- ( TODO_cleanup_cfg | TODO_update_ssa | TODO_verify_all ),
-};
-
-class pass_early_vrp : public gimple_opt_pass
-{
-public:
- pass_early_vrp (gcc::context *ctxt)
- : gimple_opt_pass (pass_data_early_vrp, ctxt)
- {}
-
- /* opt_pass methods: */
- opt_pass * clone () { return new pass_early_vrp (m_ctxt); }
- virtual bool gate (function *)
- {
- return flag_tree_vrp != 0;
- }
- virtual unsigned int execute (function *)
- { return execute_early_vrp (); }
-
-}; // class pass_vrp
-} // anon namespace
-
-gimple_opt_pass *
-make_pass_early_vrp (gcc::context *ctxt)
-{
- return new pass_early_vrp (ctxt);
-}
-
diff --git a/gcc/tree-vrp.h b/gcc/tree-vrp.h
index f84403a0f83..a0f72db2627 100644
--- a/gcc/tree-vrp.h
+++ b/gcc/tree-vrp.h
@@ -20,7 +20,7 @@ along with GCC; see the file COPYING3. If not see
#ifndef GCC_TREE_VRP_H
#define GCC_TREE_VRP_H
-/* Type of value ranges. See value_range_d In tree-vrp.c for a
+/* Type of value ranges. See value_range below for a
description of these types. */
enum value_range_type { VR_UNDEFINED, VR_RANGE,
VR_ANTI_RANGE, VR_VARYING, VR_LAST };
@@ -60,4 +60,70 @@ extern void extract_range_from_unary_expr (value_range *vr,
value_range *vr0_,
tree op0_type);
+extern bool vrp_operand_equal_p (const_tree, const_tree);
+
+struct assert_info
+{
+ /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */
+ enum tree_code comp_code;
+
+ /* Name to register the assert for. */
+ tree name;
+
+ /* Value being compared against. */
+ tree val;
+
+ /* Expression to compare. */
+ tree expr;
+};
+
+extern void register_edge_assert_for (tree, edge, enum tree_code,
+ tree, tree, vec<assert_info> &);
+extern bool stmt_interesting_for_vrp (gimple *);
+extern void set_value_range_to_varying (value_range *);
+extern int range_includes_zero_p (tree, tree);
+extern bool infer_value_range (gimple *, tree, tree_code *, tree *);
+
+extern void set_value_range_to_nonnull (value_range *, tree);
+extern void set_value_range (value_range *, enum value_range_type, tree,
+ tree, bitmap);
+extern void set_and_canonicalize_value_range (value_range *,
+ enum value_range_type,
+ tree, tree, bitmap);
+extern bool vrp_bitmap_equal_p (const_bitmap, const_bitmap);
+extern bool range_is_nonnull (value_range *);
+extern tree value_range_constant_singleton (value_range *);
+extern bool symbolic_range_p (value_range *);
+extern int compare_values (tree, tree);
+extern int compare_values_warnv (tree, tree, bool *);
+extern bool vrp_val_is_min (const_tree);
+extern bool vrp_val_is_max (const_tree);
+extern void copy_value_range (value_range *, value_range *);
+extern void set_value_range_to_value (value_range *, tree, bitmap);
+extern void extract_range_from_binary_expr_1 (value_range *, enum tree_code,
+ tree, value_range *,
+ value_range *);
+extern tree vrp_val_min (const_tree);
+extern tree vrp_val_max (const_tree);
+extern void set_value_range_to_null (value_range *, tree);
+extern bool range_int_cst_p (value_range *);
+extern int operand_less_p (tree, tree);
+extern bool find_case_label_range (gswitch *, tree, tree, size_t *, size_t *);
+extern bool find_case_label_index (gswitch *, size_t, tree, size_t *);
+extern bool zero_nonzero_bits_from_vr (const tree, value_range *,
+ wide_int *, wide_int *);
+extern bool overflow_comparison_p (tree_code, tree, tree, bool, tree *);
+extern bool range_int_cst_singleton_p (value_range *);
+extern int value_inside_range (tree, tree, tree);
+extern tree get_single_symbol (tree, bool *, tree *);
+
+
+struct switch_update {
+ gswitch *stmt;
+ tree vec;
+};
+
+extern vec<edge> to_remove_edges;
+extern vec<switch_update> to_update_switch_stmts;
+
#endif /* GCC_TREE_VRP_H */
diff --git a/gcc/tree.c b/gcc/tree.c
index e5ee29e49ce..7e1ccfd9cbe 100644
--- a/gcc/tree.c
+++ b/gcc/tree.c
@@ -2976,13 +2976,9 @@ really_constant_p (const_tree exp)
/* Return true if T holds a polynomial pointer difference, storing it in
*VALUE if so. A true return means that T's precision is no greater
than 64 bits, which is the largest address space we support, so *VALUE
- never loses precision. However, the signedness of the result is
- somewhat arbitrary, since if B lives near the end of a 64-bit address
- range and A lives near the beginning, B - A is a large positive value
- outside the range of int64_t. A - B is likewise a large negative value
- outside the range of int64_t. All the pointer difference really
- gives is a raw pointer-sized bitstring that can be added to the first
- pointer value to get the second. */
+ never loses precision. However, the signedness of the result does
+ not necessarily match the signedness of T: sometimes an unsigned type
+ like sizetype is used to encode a value that is actually negative. */
bool
ptrdiff_tree_p (const_tree t, poly_int64_pod *value)
diff --git a/gcc/tree.def b/gcc/tree.def
index 051ecd4897e..a87e3fd6102 100644
--- a/gcc/tree.def
+++ b/gcc/tree.def
@@ -1302,7 +1302,7 @@ DEFTREECODE (REDUC_AND_EXPR, "reduc_and_expr", tcc_unary, 1)
DEFTREECODE (REDUC_IOR_EXPR, "reduc_ior_expr", tcc_unary, 1)
DEFTREECODE (REDUC_XOR_EXPR, "reduc_xor_expr", tcc_unary, 1)
-DEFTREECODE (STRICT_REDUC_PLUS_EXPR, "strict_reduc_plus_expr", tcc_binary, 2)
+DEFTREECODE (FOLD_LEFT_PLUS_EXPR, "fold_left_plus_expr", tcc_binary, 2)
/* Widening dot-product.
The first two arguments are of type t1.
diff --git a/gcc/tree.h b/gcc/tree.h
index a73928fa3ee..dcf3b7a97da 100644
--- a/gcc/tree.h
+++ b/gcc/tree.h
@@ -5000,6 +5000,13 @@ struct tree_decl_map_cache_hasher : ggc_cache_ptr_hash<tree_decl_map>
#define tree_vec_map_hash tree_decl_map_hash
#define tree_vec_map_marked_p tree_map_base_marked_p
+/* A hash_map of two trees for use with GTY((cache)). Garbage collection for
+ such a map will not mark keys, and will mark values if the key is already
+ marked. */
+struct tree_cache_traits
+ : simple_cache_map_traits<default_hash_traits<tree>, tree> { };
+typedef hash_map<tree,tree,tree_cache_traits> tree_cache_map;
+
/* Initialize the abstract argument list iterator object ITER with the
arguments from CALL_EXPR node EXP. */
static inline void
diff --git a/gcc/ubsan.c b/gcc/ubsan.c
index 3a0584271a3..30cff128570 100644
--- a/gcc/ubsan.c
+++ b/gcc/ubsan.c
@@ -675,12 +675,10 @@ ubsan_create_edge (gimple *stmt)
{
gcall *call_stmt = dyn_cast <gcall *> (stmt);
basic_block bb = gimple_bb (stmt);
- int freq = compute_call_stmt_bb_frequency (current_function_decl, bb);
cgraph_node *node = cgraph_node::get (current_function_decl);
tree decl = gimple_call_fndecl (call_stmt);
if (decl)
- node->create_edge (cgraph_node::get_create (decl), call_stmt, bb->count,
- freq);
+ node->create_edge (cgraph_node::get_create (decl), call_stmt, bb->count);
}
/* Expand the UBSAN_BOUNDS special builtin function. */
diff --git a/gcc/value-prof.c b/gcc/value-prof.c
index 85de3189f83..354279a6712 100644
--- a/gcc/value-prof.c
+++ b/gcc/value-prof.c
@@ -673,11 +673,6 @@ gimple_value_profile_transformations (void)
}
}
- if (changed)
- {
- counts_to_freqs ();
- }
-
return changed;
}
diff --git a/gcc/vr-values.c b/gcc/vr-values.c
new file mode 100644
index 00000000000..d4434ded75d
--- /dev/null
+++ b/gcc/vr-values.c
@@ -0,0 +1,4183 @@
+/* Support routines for Value Range Propagation (VRP).
+ Copyright (C) 2005-2017 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "backend.h"
+#include "insn-codes.h"
+#include "tree.h"
+#include "gimple.h"
+#include "ssa.h"
+#include "optabs-tree.h"
+#include "gimple-pretty-print.h"
+#include "diagnostic-core.h"
+#include "flags.h"
+#include "fold-const.h"
+#include "calls.h"
+#include "cfganal.h"
+#include "gimple-fold.h"
+#include "gimple-iterator.h"
+#include "tree-cfg.h"
+#include "tree-ssa-loop-niter.h"
+#include "tree-ssa-loop.h"
+#include "intl.h"
+#include "cfgloop.h"
+#include "tree-scalar-evolution.h"
+#include "tree-ssa-propagate.h"
+#include "tree-chrec.h"
+#include "omp-general.h"
+#include "case-cfn-macros.h"
+#include "alloc-pool.h"
+#include "attribs.h"
+#include "vr-values.h"
+
+/* Set value range VR to a non-negative range of type TYPE. */
+
+static inline void
+set_value_range_to_nonnegative (value_range *vr, tree type)
+{
+ tree zero = build_int_cst (type, 0);
+ set_value_range (vr, VR_RANGE, zero, vrp_val_max (type), vr->equiv);
+}
+
+/* Set value range VR to a range of a truthvalue of type TYPE. */
+
+static inline void
+set_value_range_to_truthvalue (value_range *vr, tree type)
+{
+ if (TYPE_PRECISION (type) == 1)
+ set_value_range_to_varying (vr);
+ else
+ set_value_range (vr, VR_RANGE,
+ build_int_cst (type, 0), build_int_cst (type, 1),
+ vr->equiv);
+}
+
+
+/* Return value range information for VAR.
+
+ If we have no values ranges recorded (ie, VRP is not running), then
+ return NULL. Otherwise create an empty range if none existed for VAR. */
+
+value_range *
+vr_values::get_value_range (const_tree var)
+{
+ static const value_range vr_const_varying
+ = { VR_VARYING, NULL_TREE, NULL_TREE, NULL };
+ value_range *vr;
+ tree sym;
+ unsigned ver = SSA_NAME_VERSION (var);
+
+ /* If we have no recorded ranges, then return NULL. */
+ if (! vr_value)
+ return NULL;
+
+ /* If we query the range for a new SSA name return an unmodifiable VARYING.
+ We should get here at most from the substitute-and-fold stage which
+ will never try to change values. */
+ if (ver >= num_vr_values)
+ return CONST_CAST (value_range *, &vr_const_varying);
+
+ vr = vr_value[ver];
+ if (vr)
+ return vr;
+
+ /* After propagation finished do not allocate new value-ranges. */
+ if (values_propagated)
+ return CONST_CAST (value_range *, &vr_const_varying);
+
+ /* Create a default value range. */
+ vr_value[ver] = vr = vrp_value_range_pool.allocate ();
+ memset (vr, 0, sizeof (*vr));
+
+ /* Defer allocating the equivalence set. */
+ vr->equiv = NULL;
+
+ /* If VAR is a default definition of a parameter, the variable can
+ take any value in VAR's type. */
+ if (SSA_NAME_IS_DEFAULT_DEF (var))
+ {
+ sym = SSA_NAME_VAR (var);
+ if (TREE_CODE (sym) == PARM_DECL)
+ {
+ /* Try to use the "nonnull" attribute to create ~[0, 0]
+ anti-ranges for pointers. Note that this is only valid with
+ default definitions of PARM_DECLs. */
+ if (POINTER_TYPE_P (TREE_TYPE (sym))
+ && (nonnull_arg_p (sym)
+ || get_ptr_nonnull (var)))
+ set_value_range_to_nonnull (vr, TREE_TYPE (sym));
+ else if (INTEGRAL_TYPE_P (TREE_TYPE (sym)))
+ {
+ wide_int min, max;
+ value_range_type rtype = get_range_info (var, &min, &max);
+ if (rtype == VR_RANGE || rtype == VR_ANTI_RANGE)
+ set_value_range (vr, rtype,
+ wide_int_to_tree (TREE_TYPE (var), min),
+ wide_int_to_tree (TREE_TYPE (var), max),
+ NULL);
+ else
+ set_value_range_to_varying (vr);
+ }
+ else
+ set_value_range_to_varying (vr);
+ }
+ else if (TREE_CODE (sym) == RESULT_DECL
+ && DECL_BY_REFERENCE (sym))
+ set_value_range_to_nonnull (vr, TREE_TYPE (sym));
+ }
+
+ return vr;
+}
+
+/* Set value-ranges of all SSA names defined by STMT to varying. */
+
+void
+vr_values::set_defs_to_varying (gimple *stmt)
+{
+ ssa_op_iter i;
+ tree def;
+ FOR_EACH_SSA_TREE_OPERAND (def, stmt, i, SSA_OP_DEF)
+ {
+ value_range *vr = get_value_range (def);
+ /* Avoid writing to vr_const_varying get_value_range may return. */
+ if (vr->type != VR_VARYING)
+ set_value_range_to_varying (vr);
+ }
+}
+
+/* Update the value range and equivalence set for variable VAR to
+ NEW_VR. Return true if NEW_VR is different from VAR's previous
+ value.
+
+ NOTE: This function assumes that NEW_VR is a temporary value range
+ object created for the sole purpose of updating VAR's range. The
+ storage used by the equivalence set from NEW_VR will be freed by
+ this function. Do not call update_value_range when NEW_VR
+ is the range object associated with another SSA name. */
+
+bool
+vr_values::update_value_range (const_tree var, value_range *new_vr)
+{
+ value_range *old_vr;
+ bool is_new;
+
+ /* If there is a value-range on the SSA name from earlier analysis
+ factor that in. */
+ if (INTEGRAL_TYPE_P (TREE_TYPE (var)))
+ {
+ wide_int min, max;
+ value_range_type rtype = get_range_info (var, &min, &max);
+ if (rtype == VR_RANGE || rtype == VR_ANTI_RANGE)
+ {
+ tree nr_min, nr_max;
+ nr_min = wide_int_to_tree (TREE_TYPE (var), min);
+ nr_max = wide_int_to_tree (TREE_TYPE (var), max);
+ value_range nr = VR_INITIALIZER;
+ set_and_canonicalize_value_range (&nr, rtype, nr_min, nr_max, NULL);
+ vrp_intersect_ranges (new_vr, &nr);
+ }
+ }
+
+ /* Update the value range, if necessary. */
+ old_vr = get_value_range (var);
+ is_new = old_vr->type != new_vr->type
+ || !vrp_operand_equal_p (old_vr->min, new_vr->min)
+ || !vrp_operand_equal_p (old_vr->max, new_vr->max)
+ || !vrp_bitmap_equal_p (old_vr->equiv, new_vr->equiv);
+
+ if (is_new)
+ {
+ /* Do not allow transitions up the lattice. The following
+ is slightly more awkward than just new_vr->type < old_vr->type
+ because VR_RANGE and VR_ANTI_RANGE need to be considered
+ the same. We may not have is_new when transitioning to
+ UNDEFINED. If old_vr->type is VARYING, we shouldn't be
+ called. */
+ if (new_vr->type == VR_UNDEFINED)
+ {
+ BITMAP_FREE (new_vr->equiv);
+ set_value_range_to_varying (old_vr);
+ set_value_range_to_varying (new_vr);
+ return true;
+ }
+ else
+ set_value_range (old_vr, new_vr->type, new_vr->min, new_vr->max,
+ new_vr->equiv);
+ }
+
+ BITMAP_FREE (new_vr->equiv);
+
+ return is_new;
+}
+
+
+/* Add VAR and VAR's equivalence set to EQUIV. This is the central
+ point where equivalence processing can be turned on/off. */
+
+void
+vr_values::add_equivalence (bitmap *equiv, const_tree var)
+{
+ unsigned ver = SSA_NAME_VERSION (var);
+ value_range *vr = get_value_range (var);
+
+ if (*equiv == NULL)
+ *equiv = BITMAP_ALLOC (&vrp_equiv_obstack);
+ bitmap_set_bit (*equiv, ver);
+ if (vr && vr->equiv)
+ bitmap_ior_into (*equiv, vr->equiv);
+}
+
+/* Return true if value range VR involves exactly one symbol SYM. */
+
+static bool
+symbolic_range_based_on_p (value_range *vr, const_tree sym)
+{
+ bool neg, min_has_symbol, max_has_symbol;
+ tree inv;
+
+ if (is_gimple_min_invariant (vr->min))
+ min_has_symbol = false;
+ else if (get_single_symbol (vr->min, &neg, &inv) == sym)
+ min_has_symbol = true;
+ else
+ return false;
+
+ if (is_gimple_min_invariant (vr->max))
+ max_has_symbol = false;
+ else if (get_single_symbol (vr->max, &neg, &inv) == sym)
+ max_has_symbol = true;
+ else
+ return false;
+
+ return (min_has_symbol || max_has_symbol);
+}
+
+/* Return true if the result of assignment STMT is know to be non-zero. */
+
+static bool
+gimple_assign_nonzero_p (gimple *stmt)
+{
+ enum tree_code code = gimple_assign_rhs_code (stmt);
+ bool strict_overflow_p;
+ switch (get_gimple_rhs_class (code))
+ {
+ case GIMPLE_UNARY_RHS:
+ return tree_unary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
+ gimple_expr_type (stmt),
+ gimple_assign_rhs1 (stmt),
+ &strict_overflow_p);
+ case GIMPLE_BINARY_RHS:
+ return tree_binary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
+ gimple_expr_type (stmt),
+ gimple_assign_rhs1 (stmt),
+ gimple_assign_rhs2 (stmt),
+ &strict_overflow_p);
+ case GIMPLE_TERNARY_RHS:
+ return false;
+ case GIMPLE_SINGLE_RHS:
+ return tree_single_nonzero_warnv_p (gimple_assign_rhs1 (stmt),
+ &strict_overflow_p);
+ case GIMPLE_INVALID_RHS:
+ gcc_unreachable ();
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* Return true if STMT is known to compute a non-zero value. */
+
+static bool
+gimple_stmt_nonzero_p (gimple *stmt)
+{
+ switch (gimple_code (stmt))
+ {
+ case GIMPLE_ASSIGN:
+ return gimple_assign_nonzero_p (stmt);
+ case GIMPLE_CALL:
+ {
+ tree fndecl = gimple_call_fndecl (stmt);
+ if (!fndecl) return false;
+ if (flag_delete_null_pointer_checks && !flag_check_new
+ && DECL_IS_OPERATOR_NEW (fndecl)
+ && !TREE_NOTHROW (fndecl))
+ return true;
+ /* References are always non-NULL. */
+ if (flag_delete_null_pointer_checks
+ && TREE_CODE (TREE_TYPE (fndecl)) == REFERENCE_TYPE)
+ return true;
+ if (flag_delete_null_pointer_checks &&
+ lookup_attribute ("returns_nonnull",
+ TYPE_ATTRIBUTES (gimple_call_fntype (stmt))))
+ return true;
+
+ gcall *call_stmt = as_a<gcall *> (stmt);
+ unsigned rf = gimple_call_return_flags (call_stmt);
+ if (rf & ERF_RETURNS_ARG)
+ {
+ unsigned argnum = rf & ERF_RETURN_ARG_MASK;
+ if (argnum < gimple_call_num_args (call_stmt))
+ {
+ tree arg = gimple_call_arg (call_stmt, argnum);
+ if (SSA_VAR_P (arg)
+ && infer_nonnull_range_by_attribute (stmt, arg))
+ return true;
+ }
+ }
+ return gimple_alloca_call_p (stmt);
+ }
+ default:
+ gcc_unreachable ();
+ }
+}
+/* Like tree_expr_nonzero_p, but this function uses value ranges
+ obtained so far. */
+
+bool
+vr_values::vrp_stmt_computes_nonzero (gimple *stmt)
+{
+ if (gimple_stmt_nonzero_p (stmt))
+ return true;
+
+ /* If we have an expression of the form &X->a, then the expression
+ is nonnull if X is nonnull. */
+ if (is_gimple_assign (stmt)
+ && gimple_assign_rhs_code (stmt) == ADDR_EXPR)
+ {
+ tree expr = gimple_assign_rhs1 (stmt);
+ tree base = get_base_address (TREE_OPERAND (expr, 0));
+
+ if (base != NULL_TREE
+ && TREE_CODE (base) == MEM_REF
+ && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
+ {
+ value_range *vr = get_value_range (TREE_OPERAND (base, 0));
+ if (range_is_nonnull (vr))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/* Returns true if EXPR is a valid value (as expected by compare_values) --
+ a gimple invariant, or SSA_NAME +- CST. */
+
+static bool
+valid_value_p (tree expr)
+{
+ if (TREE_CODE (expr) == SSA_NAME)
+ return true;
+
+ if (TREE_CODE (expr) == PLUS_EXPR
+ || TREE_CODE (expr) == MINUS_EXPR)
+ return (TREE_CODE (TREE_OPERAND (expr, 0)) == SSA_NAME
+ && TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST);
+
+ return is_gimple_min_invariant (expr);
+}
+
+/* If OP has a value range with a single constant value return that,
+ otherwise return NULL_TREE. This returns OP itself if OP is a
+ constant. */
+
+tree
+vr_values::op_with_constant_singleton_value_range (tree op)
+{
+ if (is_gimple_min_invariant (op))
+ return op;
+
+ if (TREE_CODE (op) != SSA_NAME)
+ return NULL_TREE;
+
+ return value_range_constant_singleton (get_value_range (op));
+}
+
+/* Return true if op is in a boolean [0, 1] value-range. */
+
+bool
+vr_values::op_with_boolean_value_range_p (tree op)
+{
+ value_range *vr;
+
+ if (TYPE_PRECISION (TREE_TYPE (op)) == 1)
+ return true;
+
+ if (integer_zerop (op)
+ || integer_onep (op))
+ return true;
+
+ if (TREE_CODE (op) != SSA_NAME)
+ return false;
+
+ vr = get_value_range (op);
+ return (vr->type == VR_RANGE
+ && integer_zerop (vr->min)
+ && integer_onep (vr->max));
+}
+
+/* Extract value range information for VAR when (OP COND_CODE LIMIT) is
+ true and store it in *VR_P. */
+
+void
+vr_values::extract_range_for_var_from_comparison_expr (tree var,
+ enum tree_code cond_code,
+ tree op, tree limit,
+ value_range *vr_p)
+{
+ tree min, max, type;
+ value_range *limit_vr;
+ type = TREE_TYPE (var);
+ gcc_assert (limit != var);
+
+ /* For pointer arithmetic, we only keep track of pointer equality
+ and inequality. */
+ if (POINTER_TYPE_P (type) && cond_code != NE_EXPR && cond_code != EQ_EXPR)
+ {
+ set_value_range_to_varying (vr_p);
+ return;
+ }
+
+ /* If LIMIT is another SSA name and LIMIT has a range of its own,
+ try to use LIMIT's range to avoid creating symbolic ranges
+ unnecessarily. */
+ limit_vr = (TREE_CODE (limit) == SSA_NAME) ? get_value_range (limit) : NULL;
+
+ /* LIMIT's range is only interesting if it has any useful information. */
+ if (! limit_vr
+ || limit_vr->type == VR_UNDEFINED
+ || limit_vr->type == VR_VARYING
+ || (symbolic_range_p (limit_vr)
+ && ! (limit_vr->type == VR_RANGE
+ && (limit_vr->min == limit_vr->max
+ || operand_equal_p (limit_vr->min, limit_vr->max, 0)))))
+ limit_vr = NULL;
+
+ /* Initially, the new range has the same set of equivalences of
+ VAR's range. This will be revised before returning the final
+ value. Since assertions may be chained via mutually exclusive
+ predicates, we will need to trim the set of equivalences before
+ we are done. */
+ gcc_assert (vr_p->equiv == NULL);
+ add_equivalence (&vr_p->equiv, var);
+
+ /* Extract a new range based on the asserted comparison for VAR and
+ LIMIT's value range. Notice that if LIMIT has an anti-range, we
+ will only use it for equality comparisons (EQ_EXPR). For any
+ other kind of assertion, we cannot derive a range from LIMIT's
+ anti-range that can be used to describe the new range. For
+ instance, ASSERT_EXPR <x_2, x_2 <= b_4>. If b_4 is ~[2, 10],
+ then b_4 takes on the ranges [-INF, 1] and [11, +INF]. There is
+ no single range for x_2 that could describe LE_EXPR, so we might
+ as well build the range [b_4, +INF] for it.
+ One special case we handle is extracting a range from a
+ range test encoded as (unsigned)var + CST <= limit. */
+ if (TREE_CODE (op) == NOP_EXPR
+ || TREE_CODE (op) == PLUS_EXPR)
+ {
+ if (TREE_CODE (op) == PLUS_EXPR)
+ {
+ min = fold_build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (op, 1)),
+ TREE_OPERAND (op, 1));
+ max = int_const_binop (PLUS_EXPR, limit, min);
+ op = TREE_OPERAND (op, 0);
+ }
+ else
+ {
+ min = build_int_cst (TREE_TYPE (var), 0);
+ max = limit;
+ }
+
+ /* Make sure to not set TREE_OVERFLOW on the final type
+ conversion. We are willingly interpreting large positive
+ unsigned values as negative signed values here. */
+ min = force_fit_type (TREE_TYPE (var), wi::to_widest (min), 0, false);
+ max = force_fit_type (TREE_TYPE (var), wi::to_widest (max), 0, false);
+
+ /* We can transform a max, min range to an anti-range or
+ vice-versa. Use set_and_canonicalize_value_range which does
+ this for us. */
+ if (cond_code == LE_EXPR)
+ set_and_canonicalize_value_range (vr_p, VR_RANGE,
+ min, max, vr_p->equiv);
+ else if (cond_code == GT_EXPR)
+ set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
+ min, max, vr_p->equiv);
+ else
+ gcc_unreachable ();
+ }
+ else if (cond_code == EQ_EXPR)
+ {
+ enum value_range_type range_type;
+
+ if (limit_vr)
+ {
+ range_type = limit_vr->type;
+ min = limit_vr->min;
+ max = limit_vr->max;
+ }
+ else
+ {
+ range_type = VR_RANGE;
+ min = limit;
+ max = limit;
+ }
+
+ set_value_range (vr_p, range_type, min, max, vr_p->equiv);
+
+ /* When asserting the equality VAR == LIMIT and LIMIT is another
+ SSA name, the new range will also inherit the equivalence set
+ from LIMIT. */
+ if (TREE_CODE (limit) == SSA_NAME)
+ add_equivalence (&vr_p->equiv, limit);
+ }
+ else if (cond_code == NE_EXPR)
+ {
+ /* As described above, when LIMIT's range is an anti-range and
+ this assertion is an inequality (NE_EXPR), then we cannot
+ derive anything from the anti-range. For instance, if
+ LIMIT's range was ~[0, 0], the assertion 'VAR != LIMIT' does
+ not imply that VAR's range is [0, 0]. So, in the case of
+ anti-ranges, we just assert the inequality using LIMIT and
+ not its anti-range.
+
+ If LIMIT_VR is a range, we can only use it to build a new
+ anti-range if LIMIT_VR is a single-valued range. For
+ instance, if LIMIT_VR is [0, 1], the predicate
+ VAR != [0, 1] does not mean that VAR's range is ~[0, 1].
+ Rather, it means that for value 0 VAR should be ~[0, 0]
+ and for value 1, VAR should be ~[1, 1]. We cannot
+ represent these ranges.
+
+ The only situation in which we can build a valid
+ anti-range is when LIMIT_VR is a single-valued range
+ (i.e., LIMIT_VR->MIN == LIMIT_VR->MAX). In that case,
+ build the anti-range ~[LIMIT_VR->MIN, LIMIT_VR->MAX]. */
+ if (limit_vr
+ && limit_vr->type == VR_RANGE
+ && compare_values (limit_vr->min, limit_vr->max) == 0)
+ {
+ min = limit_vr->min;
+ max = limit_vr->max;
+ }
+ else
+ {
+ /* In any other case, we cannot use LIMIT's range to build a
+ valid anti-range. */
+ min = max = limit;
+ }
+
+ /* If MIN and MAX cover the whole range for their type, then
+ just use the original LIMIT. */
+ if (INTEGRAL_TYPE_P (type)
+ && vrp_val_is_min (min)
+ && vrp_val_is_max (max))
+ min = max = limit;
+
+ set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
+ min, max, vr_p->equiv);
+ }
+ else if (cond_code == LE_EXPR || cond_code == LT_EXPR)
+ {
+ min = TYPE_MIN_VALUE (type);
+
+ if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
+ max = limit;
+ else
+ {
+ /* If LIMIT_VR is of the form [N1, N2], we need to build the
+ range [MIN, N2] for LE_EXPR and [MIN, N2 - 1] for
+ LT_EXPR. */
+ max = limit_vr->max;
+ }
+
+ /* If the maximum value forces us to be out of bounds, simply punt.
+ It would be pointless to try and do anything more since this
+ all should be optimized away above us. */
+ if (cond_code == LT_EXPR
+ && compare_values (max, min) == 0)
+ set_value_range_to_varying (vr_p);
+ else
+ {
+ /* For LT_EXPR, we create the range [MIN, MAX - 1]. */
+ if (cond_code == LT_EXPR)
+ {
+ if (TYPE_PRECISION (TREE_TYPE (max)) == 1
+ && !TYPE_UNSIGNED (TREE_TYPE (max)))
+ max = fold_build2 (PLUS_EXPR, TREE_TYPE (max), max,
+ build_int_cst (TREE_TYPE (max), -1));
+ else
+ max = fold_build2 (MINUS_EXPR, TREE_TYPE (max), max,
+ build_int_cst (TREE_TYPE (max), 1));
+ /* Signal to compare_values_warnv this expr doesn't overflow. */
+ if (EXPR_P (max))
+ TREE_NO_WARNING (max) = 1;
+ }
+
+ set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
+ }
+ }
+ else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
+ {
+ max = TYPE_MAX_VALUE (type);
+
+ if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
+ min = limit;
+ else
+ {
+ /* If LIMIT_VR is of the form [N1, N2], we need to build the
+ range [N1, MAX] for GE_EXPR and [N1 + 1, MAX] for
+ GT_EXPR. */
+ min = limit_vr->min;
+ }
+
+ /* If the minimum value forces us to be out of bounds, simply punt.
+ It would be pointless to try and do anything more since this
+ all should be optimized away above us. */
+ if (cond_code == GT_EXPR
+ && compare_values (min, max) == 0)
+ set_value_range_to_varying (vr_p);
+ else
+ {
+ /* For GT_EXPR, we create the range [MIN + 1, MAX]. */
+ if (cond_code == GT_EXPR)
+ {
+ if (TYPE_PRECISION (TREE_TYPE (min)) == 1
+ && !TYPE_UNSIGNED (TREE_TYPE (min)))
+ min = fold_build2 (MINUS_EXPR, TREE_TYPE (min), min,
+ build_int_cst (TREE_TYPE (min), -1));
+ else
+ min = fold_build2 (PLUS_EXPR, TREE_TYPE (min), min,
+ build_int_cst (TREE_TYPE (min), 1));
+ /* Signal to compare_values_warnv this expr doesn't overflow. */
+ if (EXPR_P (min))
+ TREE_NO_WARNING (min) = 1;
+ }
+
+ set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
+ }
+ }
+ else
+ gcc_unreachable ();
+
+ /* Finally intersect the new range with what we already know about var. */
+ vrp_intersect_ranges (vr_p, get_value_range (var));
+}
+
+/* Extract value range information from an ASSERT_EXPR EXPR and store
+ it in *VR_P. */
+
+void
+vr_values::extract_range_from_assert (value_range *vr_p, tree expr)
+{
+ tree var = ASSERT_EXPR_VAR (expr);
+ tree cond = ASSERT_EXPR_COND (expr);
+ tree limit, op;
+ enum tree_code cond_code;
+ gcc_assert (COMPARISON_CLASS_P (cond));
+
+ /* Find VAR in the ASSERT_EXPR conditional. */
+ if (var == TREE_OPERAND (cond, 0)
+ || TREE_CODE (TREE_OPERAND (cond, 0)) == PLUS_EXPR
+ || TREE_CODE (TREE_OPERAND (cond, 0)) == NOP_EXPR)
+ {
+ /* If the predicate is of the form VAR COMP LIMIT, then we just
+ take LIMIT from the RHS and use the same comparison code. */
+ cond_code = TREE_CODE (cond);
+ limit = TREE_OPERAND (cond, 1);
+ op = TREE_OPERAND (cond, 0);
+ }
+ else
+ {
+ /* If the predicate is of the form LIMIT COMP VAR, then we need
+ to flip around the comparison code to create the proper range
+ for VAR. */
+ cond_code = swap_tree_comparison (TREE_CODE (cond));
+ limit = TREE_OPERAND (cond, 0);
+ op = TREE_OPERAND (cond, 1);
+ }
+ extract_range_for_var_from_comparison_expr (var, cond_code, op,
+ limit, vr_p);
+}
+
+/* Extract range information from SSA name VAR and store it in VR. If
+ VAR has an interesting range, use it. Otherwise, create the
+ range [VAR, VAR] and return it. This is useful in situations where
+ we may have conditionals testing values of VARYING names. For
+ instance,
+
+ x_3 = y_5;
+ if (x_3 > y_5)
+ ...
+
+ Even if y_5 is deemed VARYING, we can determine that x_3 > y_5 is
+ always false. */
+
+void
+vr_values::extract_range_from_ssa_name (value_range *vr, tree var)
+{
+ value_range *var_vr = get_value_range (var);
+
+ if (var_vr->type != VR_VARYING)
+ copy_value_range (vr, var_vr);
+ else
+ set_value_range (vr, VR_RANGE, var, var, NULL);
+
+ add_equivalence (&vr->equiv, var);
+}
+
+/* Extract range information from a binary expression OP0 CODE OP1 based on
+ the ranges of each of its operands with resulting type EXPR_TYPE.
+ The resulting range is stored in *VR. */
+
+void
+vr_values::extract_range_from_binary_expr (value_range *vr,
+ enum tree_code code,
+ tree expr_type, tree op0, tree op1)
+{
+ value_range vr0 = VR_INITIALIZER;
+ value_range vr1 = VR_INITIALIZER;
+
+ /* Get value ranges for each operand. For constant operands, create
+ a new value range with the operand to simplify processing. */
+ if (TREE_CODE (op0) == SSA_NAME)
+ vr0 = *(get_value_range (op0));
+ else if (is_gimple_min_invariant (op0))
+ set_value_range_to_value (&vr0, op0, NULL);
+ else
+ set_value_range_to_varying (&vr0);
+
+ if (TREE_CODE (op1) == SSA_NAME)
+ vr1 = *(get_value_range (op1));
+ else if (is_gimple_min_invariant (op1))
+ set_value_range_to_value (&vr1, op1, NULL);
+ else
+ set_value_range_to_varying (&vr1);
+
+ extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &vr1);
+
+ /* Try harder for PLUS and MINUS if the range of one operand is symbolic
+ and based on the other operand, for example if it was deduced from a
+ symbolic comparison. When a bound of the range of the first operand
+ is invariant, we set the corresponding bound of the new range to INF
+ in order to avoid recursing on the range of the second operand. */
+ if (vr->type == VR_VARYING
+ && (code == PLUS_EXPR || code == MINUS_EXPR)
+ && TREE_CODE (op1) == SSA_NAME
+ && vr0.type == VR_RANGE
+ && symbolic_range_based_on_p (&vr0, op1))
+ {
+ const bool minus_p = (code == MINUS_EXPR);
+ value_range n_vr1 = VR_INITIALIZER;
+
+ /* Try with VR0 and [-INF, OP1]. */
+ if (is_gimple_min_invariant (minus_p ? vr0.max : vr0.min))
+ set_value_range (&n_vr1, VR_RANGE, vrp_val_min (expr_type), op1, NULL);
+
+ /* Try with VR0 and [OP1, +INF]. */
+ else if (is_gimple_min_invariant (minus_p ? vr0.min : vr0.max))
+ set_value_range (&n_vr1, VR_RANGE, op1, vrp_val_max (expr_type), NULL);
+
+ /* Try with VR0 and [OP1, OP1]. */
+ else
+ set_value_range (&n_vr1, VR_RANGE, op1, op1, NULL);
+
+ extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &n_vr1);
+ }
+
+ if (vr->type == VR_VARYING
+ && (code == PLUS_EXPR || code == MINUS_EXPR)
+ && TREE_CODE (op0) == SSA_NAME
+ && vr1.type == VR_RANGE
+ && symbolic_range_based_on_p (&vr1, op0))
+ {
+ const bool minus_p = (code == MINUS_EXPR);
+ value_range n_vr0 = VR_INITIALIZER;
+
+ /* Try with [-INF, OP0] and VR1. */
+ if (is_gimple_min_invariant (minus_p ? vr1.max : vr1.min))
+ set_value_range (&n_vr0, VR_RANGE, vrp_val_min (expr_type), op0, NULL);
+
+ /* Try with [OP0, +INF] and VR1. */
+ else if (is_gimple_min_invariant (minus_p ? vr1.min : vr1.max))
+ set_value_range (&n_vr0, VR_RANGE, op0, vrp_val_max (expr_type), NULL);
+
+ /* Try with [OP0, OP0] and VR1. */
+ else
+ set_value_range (&n_vr0, VR_RANGE, op0, op0, NULL);
+
+ extract_range_from_binary_expr_1 (vr, code, expr_type, &n_vr0, &vr1);
+ }
+
+ /* If we didn't derive a range for MINUS_EXPR, and
+ op1's range is ~[op0,op0] or vice-versa, then we
+ can derive a non-null range. This happens often for
+ pointer subtraction. */
+ if (vr->type == VR_VARYING
+ && code == MINUS_EXPR
+ && TREE_CODE (op0) == SSA_NAME
+ && ((vr0.type == VR_ANTI_RANGE
+ && vr0.min == op1
+ && vr0.min == vr0.max)
+ || (vr1.type == VR_ANTI_RANGE
+ && vr1.min == op0
+ && vr1.min == vr1.max)))
+ set_value_range_to_nonnull (vr, TREE_TYPE (op0));
+}
+
+/* Extract range information from a unary expression CODE OP0 based on
+ the range of its operand with resulting type TYPE.
+ The resulting range is stored in *VR. */
+
+void
+vr_values::extract_range_from_unary_expr (value_range *vr, enum tree_code code,
+ tree type, tree op0)
+{
+ value_range vr0 = VR_INITIALIZER;
+
+ /* Get value ranges for the operand. For constant operands, create
+ a new value range with the operand to simplify processing. */
+ if (TREE_CODE (op0) == SSA_NAME)
+ vr0 = *(get_value_range (op0));
+ else if (is_gimple_min_invariant (op0))
+ set_value_range_to_value (&vr0, op0, NULL);
+ else
+ set_value_range_to_varying (&vr0);
+
+ ::extract_range_from_unary_expr (vr, code, type, &vr0, TREE_TYPE (op0));
+}
+
+
+/* Extract range information from a conditional expression STMT based on
+ the ranges of each of its operands and the expression code. */
+
+void
+vr_values::extract_range_from_cond_expr (value_range *vr, gassign *stmt)
+{
+ tree op0, op1;
+ value_range vr0 = VR_INITIALIZER;
+ value_range vr1 = VR_INITIALIZER;
+
+ /* Get value ranges for each operand. For constant operands, create
+ a new value range with the operand to simplify processing. */
+ op0 = gimple_assign_rhs2 (stmt);
+ if (TREE_CODE (op0) == SSA_NAME)
+ vr0 = *(get_value_range (op0));
+ else if (is_gimple_min_invariant (op0))
+ set_value_range_to_value (&vr0, op0, NULL);
+ else
+ set_value_range_to_varying (&vr0);
+
+ op1 = gimple_assign_rhs3 (stmt);
+ if (TREE_CODE (op1) == SSA_NAME)
+ vr1 = *(get_value_range (op1));
+ else if (is_gimple_min_invariant (op1))
+ set_value_range_to_value (&vr1, op1, NULL);
+ else
+ set_value_range_to_varying (&vr1);
+
+ /* The resulting value range is the union of the operand ranges */
+ copy_value_range (vr, &vr0);
+ vrp_meet (vr, &vr1);
+}
+
+
+/* Extract range information from a comparison expression EXPR based
+ on the range of its operand and the expression code. */
+
+void
+vr_values::extract_range_from_comparison (value_range *vr, enum tree_code code,
+ tree type, tree op0, tree op1)
+{
+ bool sop;
+ tree val;
+
+ val = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, false, &sop,
+ NULL);
+ if (val)
+ {
+ /* Since this expression was found on the RHS of an assignment,
+ its type may be different from _Bool. Convert VAL to EXPR's
+ type. */
+ val = fold_convert (type, val);
+ if (is_gimple_min_invariant (val))
+ set_value_range_to_value (vr, val, vr->equiv);
+ else
+ set_value_range (vr, VR_RANGE, val, val, vr->equiv);
+ }
+ else
+ /* The result of a comparison is always true or false. */
+ set_value_range_to_truthvalue (vr, type);
+}
+
+/* Helper function for simplify_internal_call_using_ranges and
+ extract_range_basic. Return true if OP0 SUBCODE OP1 for
+ SUBCODE {PLUS,MINUS,MULT}_EXPR is known to never overflow or
+ always overflow. Set *OVF to true if it is known to always
+ overflow. */
+
+bool
+vr_values::check_for_binary_op_overflow (enum tree_code subcode, tree type,
+ tree op0, tree op1, bool *ovf)
+{
+ value_range vr0 = VR_INITIALIZER;
+ value_range vr1 = VR_INITIALIZER;
+ if (TREE_CODE (op0) == SSA_NAME)
+ vr0 = *get_value_range (op0);
+ else if (TREE_CODE (op0) == INTEGER_CST)
+ set_value_range_to_value (&vr0, op0, NULL);
+ else
+ set_value_range_to_varying (&vr0);
+
+ if (TREE_CODE (op1) == SSA_NAME)
+ vr1 = *get_value_range (op1);
+ else if (TREE_CODE (op1) == INTEGER_CST)
+ set_value_range_to_value (&vr1, op1, NULL);
+ else
+ set_value_range_to_varying (&vr1);
+
+ if (!range_int_cst_p (&vr0)
+ || TREE_OVERFLOW (vr0.min)
+ || TREE_OVERFLOW (vr0.max))
+ {
+ vr0.min = vrp_val_min (TREE_TYPE (op0));
+ vr0.max = vrp_val_max (TREE_TYPE (op0));
+ }
+ if (!range_int_cst_p (&vr1)
+ || TREE_OVERFLOW (vr1.min)
+ || TREE_OVERFLOW (vr1.max))
+ {
+ vr1.min = vrp_val_min (TREE_TYPE (op1));
+ vr1.max = vrp_val_max (TREE_TYPE (op1));
+ }
+ *ovf = arith_overflowed_p (subcode, type, vr0.min,
+ subcode == MINUS_EXPR ? vr1.max : vr1.min);
+ if (arith_overflowed_p (subcode, type, vr0.max,
+ subcode == MINUS_EXPR ? vr1.min : vr1.max) != *ovf)
+ return false;
+ if (subcode == MULT_EXPR)
+ {
+ if (arith_overflowed_p (subcode, type, vr0.min, vr1.max) != *ovf
+ || arith_overflowed_p (subcode, type, vr0.max, vr1.min) != *ovf)
+ return false;
+ }
+ if (*ovf)
+ {
+ /* So far we found that there is an overflow on the boundaries.
+ That doesn't prove that there is an overflow even for all values
+ in between the boundaries. For that compute widest_int range
+ of the result and see if it doesn't overlap the range of
+ type. */
+ widest_int wmin, wmax;
+ widest_int w[4];
+ int i;
+ w[0] = wi::to_widest (vr0.min);
+ w[1] = wi::to_widest (vr0.max);
+ w[2] = wi::to_widest (vr1.min);
+ w[3] = wi::to_widest (vr1.max);
+ for (i = 0; i < 4; i++)
+ {
+ widest_int wt;
+ switch (subcode)
+ {
+ case PLUS_EXPR:
+ wt = wi::add (w[i & 1], w[2 + (i & 2) / 2]);
+ break;
+ case MINUS_EXPR:
+ wt = wi::sub (w[i & 1], w[2 + (i & 2) / 2]);
+ break;
+ case MULT_EXPR:
+ wt = wi::mul (w[i & 1], w[2 + (i & 2) / 2]);
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ if (i == 0)
+ {
+ wmin = wt;
+ wmax = wt;
+ }
+ else
+ {
+ wmin = wi::smin (wmin, wt);
+ wmax = wi::smax (wmax, wt);
+ }
+ }
+ /* The result of op0 CODE op1 is known to be in range
+ [wmin, wmax]. */
+ widest_int wtmin = wi::to_widest (vrp_val_min (type));
+ widest_int wtmax = wi::to_widest (vrp_val_max (type));
+ /* If all values in [wmin, wmax] are smaller than
+ [wtmin, wtmax] or all are larger than [wtmin, wtmax],
+ the arithmetic operation will always overflow. */
+ if (wmax < wtmin || wmin > wtmax)
+ return true;
+ return false;
+ }
+ return true;
+}
+
+/* Try to derive a nonnegative or nonzero range out of STMT relying
+ primarily on generic routines in fold in conjunction with range data.
+ Store the result in *VR */
+
+void
+vr_values::extract_range_basic (value_range *vr, gimple *stmt)
+{
+ bool sop;
+ tree type = gimple_expr_type (stmt);
+
+ if (is_gimple_call (stmt))
+ {
+ tree arg;
+ int mini, maxi, zerov = 0, prec;
+ enum tree_code subcode = ERROR_MARK;
+ combined_fn cfn = gimple_call_combined_fn (stmt);
+ scalar_int_mode mode;
+
+ switch (cfn)
+ {
+ case CFN_BUILT_IN_CONSTANT_P:
+ /* If the call is __builtin_constant_p and the argument is a
+ function parameter resolve it to false. This avoids bogus
+ array bound warnings.
+ ??? We could do this as early as inlining is finished. */
+ arg = gimple_call_arg (stmt, 0);
+ if (TREE_CODE (arg) == SSA_NAME
+ && SSA_NAME_IS_DEFAULT_DEF (arg)
+ && TREE_CODE (SSA_NAME_VAR (arg)) == PARM_DECL
+ && cfun->after_inlining)
+ {
+ set_value_range_to_null (vr, type);
+ return;
+ }
+ break;
+ /* Both __builtin_ffs* and __builtin_popcount return
+ [0, prec]. */
+ CASE_CFN_FFS:
+ CASE_CFN_POPCOUNT:
+ arg = gimple_call_arg (stmt, 0);
+ prec = TYPE_PRECISION (TREE_TYPE (arg));
+ mini = 0;
+ maxi = prec;
+ if (TREE_CODE (arg) == SSA_NAME)
+ {
+ value_range *vr0 = get_value_range (arg);
+ /* If arg is non-zero, then ffs or popcount
+ are non-zero. */
+ if ((vr0->type == VR_RANGE
+ && range_includes_zero_p (vr0->min, vr0->max) == 0)
+ || (vr0->type == VR_ANTI_RANGE
+ && range_includes_zero_p (vr0->min, vr0->max) == 1))
+ mini = 1;
+ /* If some high bits are known to be zero,
+ we can decrease the maximum. */
+ if (vr0->type == VR_RANGE
+ && TREE_CODE (vr0->max) == INTEGER_CST
+ && !operand_less_p (vr0->min,
+ build_zero_cst (TREE_TYPE (vr0->min))))
+ maxi = tree_floor_log2 (vr0->max) + 1;
+ }
+ goto bitop_builtin;
+ /* __builtin_parity* returns [0, 1]. */
+ CASE_CFN_PARITY:
+ mini = 0;
+ maxi = 1;
+ goto bitop_builtin;
+ /* __builtin_c[lt]z* return [0, prec-1], except for
+ when the argument is 0, but that is undefined behavior.
+ On many targets where the CLZ RTL or optab value is defined
+ for 0 the value is prec, so include that in the range
+ by default. */
+ CASE_CFN_CLZ:
+ arg = gimple_call_arg (stmt, 0);
+ prec = TYPE_PRECISION (TREE_TYPE (arg));
+ mini = 0;
+ maxi = prec;
+ mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (arg));
+ if (optab_handler (clz_optab, mode) != CODE_FOR_nothing
+ && CLZ_DEFINED_VALUE_AT_ZERO (mode, zerov)
+ /* Handle only the single common value. */
+ && zerov != prec)
+ /* Magic value to give up, unless vr0 proves
+ arg is non-zero. */
+ mini = -2;
+ if (TREE_CODE (arg) == SSA_NAME)
+ {
+ value_range *vr0 = get_value_range (arg);
+ /* From clz of VR_RANGE minimum we can compute
+ result maximum. */
+ if (vr0->type == VR_RANGE
+ && TREE_CODE (vr0->min) == INTEGER_CST)
+ {
+ maxi = prec - 1 - tree_floor_log2 (vr0->min);
+ if (maxi != prec)
+ mini = 0;
+ }
+ else if (vr0->type == VR_ANTI_RANGE
+ && integer_zerop (vr0->min))
+ {
+ maxi = prec - 1;
+ mini = 0;
+ }
+ if (mini == -2)
+ break;
+ /* From clz of VR_RANGE maximum we can compute
+ result minimum. */
+ if (vr0->type == VR_RANGE
+ && TREE_CODE (vr0->max) == INTEGER_CST)
+ {
+ mini = prec - 1 - tree_floor_log2 (vr0->max);
+ if (mini == prec)
+ break;
+ }
+ }
+ if (mini == -2)
+ break;
+ goto bitop_builtin;
+ /* __builtin_ctz* return [0, prec-1], except for
+ when the argument is 0, but that is undefined behavior.
+ If there is a ctz optab for this mode and
+ CTZ_DEFINED_VALUE_AT_ZERO, include that in the range,
+ otherwise just assume 0 won't be seen. */
+ CASE_CFN_CTZ:
+ arg = gimple_call_arg (stmt, 0);
+ prec = TYPE_PRECISION (TREE_TYPE (arg));
+ mini = 0;
+ maxi = prec - 1;
+ mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (arg));
+ if (optab_handler (ctz_optab, mode) != CODE_FOR_nothing
+ && CTZ_DEFINED_VALUE_AT_ZERO (mode, zerov))
+ {
+ /* Handle only the two common values. */
+ if (zerov == -1)
+ mini = -1;
+ else if (zerov == prec)
+ maxi = prec;
+ else
+ /* Magic value to give up, unless vr0 proves
+ arg is non-zero. */
+ mini = -2;
+ }
+ if (TREE_CODE (arg) == SSA_NAME)
+ {
+ value_range *vr0 = get_value_range (arg);
+ /* If arg is non-zero, then use [0, prec - 1]. */
+ if ((vr0->type == VR_RANGE
+ && integer_nonzerop (vr0->min))
+ || (vr0->type == VR_ANTI_RANGE
+ && integer_zerop (vr0->min)))
+ {
+ mini = 0;
+ maxi = prec - 1;
+ }
+ /* If some high bits are known to be zero,
+ we can decrease the result maximum. */
+ if (vr0->type == VR_RANGE
+ && TREE_CODE (vr0->max) == INTEGER_CST)
+ {
+ maxi = tree_floor_log2 (vr0->max);
+ /* For vr0 [0, 0] give up. */
+ if (maxi == -1)
+ break;
+ }
+ }
+ if (mini == -2)
+ break;
+ goto bitop_builtin;
+ /* __builtin_clrsb* returns [0, prec-1]. */
+ CASE_CFN_CLRSB:
+ arg = gimple_call_arg (stmt, 0);
+ prec = TYPE_PRECISION (TREE_TYPE (arg));
+ mini = 0;
+ maxi = prec - 1;
+ goto bitop_builtin;
+ bitop_builtin:
+ set_value_range (vr, VR_RANGE, build_int_cst (type, mini),
+ build_int_cst (type, maxi), NULL);
+ return;
+ case CFN_UBSAN_CHECK_ADD:
+ subcode = PLUS_EXPR;
+ break;
+ case CFN_UBSAN_CHECK_SUB:
+ subcode = MINUS_EXPR;
+ break;
+ case CFN_UBSAN_CHECK_MUL:
+ subcode = MULT_EXPR;
+ break;
+ case CFN_GOACC_DIM_SIZE:
+ case CFN_GOACC_DIM_POS:
+ /* Optimizing these two internal functions helps the loop
+ optimizer eliminate outer comparisons. Size is [1,N]
+ and pos is [0,N-1]. */
+ {
+ bool is_pos = cfn == CFN_GOACC_DIM_POS;
+ int axis = oacc_get_ifn_dim_arg (stmt);
+ int size = oacc_get_fn_dim_size (current_function_decl, axis);
+
+ if (!size)
+ /* If it's dynamic, the backend might know a hardware
+ limitation. */
+ size = targetm.goacc.dim_limit (axis);
+
+ tree type = TREE_TYPE (gimple_call_lhs (stmt));
+ set_value_range (vr, VR_RANGE,
+ build_int_cst (type, is_pos ? 0 : 1),
+ size ? build_int_cst (type, size - is_pos)
+ : vrp_val_max (type), NULL);
+ }
+ return;
+ case CFN_BUILT_IN_STRLEN:
+ if (tree lhs = gimple_call_lhs (stmt))
+ if (ptrdiff_type_node
+ && (TYPE_PRECISION (ptrdiff_type_node)
+ == TYPE_PRECISION (TREE_TYPE (lhs))))
+ {
+ tree type = TREE_TYPE (lhs);
+ tree max = vrp_val_max (ptrdiff_type_node);
+ wide_int wmax = wi::to_wide (max, TYPE_PRECISION (TREE_TYPE (max)));
+ tree range_min = build_zero_cst (type);
+ tree range_max = wide_int_to_tree (type, wmax - 1);
+ set_value_range (vr, VR_RANGE, range_min, range_max, NULL);
+ return;
+ }
+ break;
+ default:
+ break;
+ }
+ if (subcode != ERROR_MARK)
+ {
+ bool saved_flag_wrapv = flag_wrapv;
+ /* Pretend the arithmetics is wrapping. If there is
+ any overflow, we'll complain, but will actually do
+ wrapping operation. */
+ flag_wrapv = 1;
+ extract_range_from_binary_expr (vr, subcode, type,
+ gimple_call_arg (stmt, 0),
+ gimple_call_arg (stmt, 1));
+ flag_wrapv = saved_flag_wrapv;
+
+ /* If for both arguments vrp_valueize returned non-NULL,
+ this should have been already folded and if not, it
+ wasn't folded because of overflow. Avoid removing the
+ UBSAN_CHECK_* calls in that case. */
+ if (vr->type == VR_RANGE
+ && (vr->min == vr->max
+ || operand_equal_p (vr->min, vr->max, 0)))
+ set_value_range_to_varying (vr);
+ return;
+ }
+ }
+ /* Handle extraction of the two results (result of arithmetics and
+ a flag whether arithmetics overflowed) from {ADD,SUB,MUL}_OVERFLOW
+ internal function. Similarly from ATOMIC_COMPARE_EXCHANGE. */
+ else if (is_gimple_assign (stmt)
+ && (gimple_assign_rhs_code (stmt) == REALPART_EXPR
+ || gimple_assign_rhs_code (stmt) == IMAGPART_EXPR)
+ && INTEGRAL_TYPE_P (type))
+ {
+ enum tree_code code = gimple_assign_rhs_code (stmt);
+ tree op = gimple_assign_rhs1 (stmt);
+ if (TREE_CODE (op) == code && TREE_CODE (TREE_OPERAND (op, 0)) == SSA_NAME)
+ {
+ gimple *g = SSA_NAME_DEF_STMT (TREE_OPERAND (op, 0));
+ if (is_gimple_call (g) && gimple_call_internal_p (g))
+ {
+ enum tree_code subcode = ERROR_MARK;
+ switch (gimple_call_internal_fn (g))
+ {
+ case IFN_ADD_OVERFLOW:
+ subcode = PLUS_EXPR;
+ break;
+ case IFN_SUB_OVERFLOW:
+ subcode = MINUS_EXPR;
+ break;
+ case IFN_MUL_OVERFLOW:
+ subcode = MULT_EXPR;
+ break;
+ case IFN_ATOMIC_COMPARE_EXCHANGE:
+ if (code == IMAGPART_EXPR)
+ {
+ /* This is the boolean return value whether compare and
+ exchange changed anything or not. */
+ set_value_range (vr, VR_RANGE, build_int_cst (type, 0),
+ build_int_cst (type, 1), NULL);
+ return;
+ }
+ break;
+ default:
+ break;
+ }
+ if (subcode != ERROR_MARK)
+ {
+ tree op0 = gimple_call_arg (g, 0);
+ tree op1 = gimple_call_arg (g, 1);
+ if (code == IMAGPART_EXPR)
+ {
+ bool ovf = false;
+ if (check_for_binary_op_overflow (subcode, type,
+ op0, op1, &ovf))
+ set_value_range_to_value (vr,
+ build_int_cst (type, ovf),
+ NULL);
+ else if (TYPE_PRECISION (type) == 1
+ && !TYPE_UNSIGNED (type))
+ set_value_range_to_varying (vr);
+ else
+ set_value_range (vr, VR_RANGE, build_int_cst (type, 0),
+ build_int_cst (type, 1), NULL);
+ }
+ else if (types_compatible_p (type, TREE_TYPE (op0))
+ && types_compatible_p (type, TREE_TYPE (op1)))
+ {
+ bool saved_flag_wrapv = flag_wrapv;
+ /* Pretend the arithmetics is wrapping. If there is
+ any overflow, IMAGPART_EXPR will be set. */
+ flag_wrapv = 1;
+ extract_range_from_binary_expr (vr, subcode, type,
+ op0, op1);
+ flag_wrapv = saved_flag_wrapv;
+ }
+ else
+ {
+ value_range vr0 = VR_INITIALIZER;
+ value_range vr1 = VR_INITIALIZER;
+ bool saved_flag_wrapv = flag_wrapv;
+ /* Pretend the arithmetics is wrapping. If there is
+ any overflow, IMAGPART_EXPR will be set. */
+ flag_wrapv = 1;
+ extract_range_from_unary_expr (&vr0, NOP_EXPR,
+ type, op0);
+ extract_range_from_unary_expr (&vr1, NOP_EXPR,
+ type, op1);
+ extract_range_from_binary_expr_1 (vr, subcode, type,
+ &vr0, &vr1);
+ flag_wrapv = saved_flag_wrapv;
+ }
+ return;
+ }
+ }
+ }
+ }
+ if (INTEGRAL_TYPE_P (type)
+ && gimple_stmt_nonnegative_warnv_p (stmt, &sop))
+ set_value_range_to_nonnegative (vr, type);
+ else if (vrp_stmt_computes_nonzero (stmt))
+ set_value_range_to_nonnull (vr, type);
+ else
+ set_value_range_to_varying (vr);
+}
+
+
+/* Try to compute a useful range out of assignment STMT and store it
+ in *VR. */
+
+void
+vr_values::extract_range_from_assignment (value_range *vr, gassign *stmt)
+{
+ enum tree_code code = gimple_assign_rhs_code (stmt);
+
+ if (code == ASSERT_EXPR)
+ extract_range_from_assert (vr, gimple_assign_rhs1 (stmt));
+ else if (code == SSA_NAME)
+ extract_range_from_ssa_name (vr, gimple_assign_rhs1 (stmt));
+ else if (TREE_CODE_CLASS (code) == tcc_binary)
+ extract_range_from_binary_expr (vr, gimple_assign_rhs_code (stmt),
+ gimple_expr_type (stmt),
+ gimple_assign_rhs1 (stmt),
+ gimple_assign_rhs2 (stmt));
+ else if (TREE_CODE_CLASS (code) == tcc_unary)
+ extract_range_from_unary_expr (vr, gimple_assign_rhs_code (stmt),
+ gimple_expr_type (stmt),
+ gimple_assign_rhs1 (stmt));
+ else if (code == COND_EXPR)
+ extract_range_from_cond_expr (vr, stmt);
+ else if (TREE_CODE_CLASS (code) == tcc_comparison)
+ extract_range_from_comparison (vr, gimple_assign_rhs_code (stmt),
+ gimple_expr_type (stmt),
+ gimple_assign_rhs1 (stmt),
+ gimple_assign_rhs2 (stmt));
+ else if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS
+ && is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
+ set_value_range_to_value (vr, gimple_assign_rhs1 (stmt), NULL);
+ else
+ set_value_range_to_varying (vr);
+
+ if (vr->type == VR_VARYING)
+ extract_range_basic (vr, stmt);
+}
+
+/* Given two numeric value ranges VR0, VR1 and a comparison code COMP:
+
+ - Return BOOLEAN_TRUE_NODE if VR0 COMP VR1 always returns true for
+ all the values in the ranges.
+
+ - Return BOOLEAN_FALSE_NODE if the comparison always returns false.
+
+ - Return NULL_TREE if it is not always possible to determine the
+ value of the comparison.
+
+ Also set *STRICT_OVERFLOW_P to indicate whether comparision evaluation
+ assumed signed overflow is undefined. */
+
+
+static tree
+compare_ranges (enum tree_code comp, value_range *vr0, value_range *vr1,
+ bool *strict_overflow_p)
+{
+ /* VARYING or UNDEFINED ranges cannot be compared. */
+ if (vr0->type == VR_VARYING
+ || vr0->type == VR_UNDEFINED
+ || vr1->type == VR_VARYING
+ || vr1->type == VR_UNDEFINED)
+ return NULL_TREE;
+
+ /* Anti-ranges need to be handled separately. */
+ if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
+ {
+ /* If both are anti-ranges, then we cannot compute any
+ comparison. */
+ if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
+ return NULL_TREE;
+
+ /* These comparisons are never statically computable. */
+ if (comp == GT_EXPR
+ || comp == GE_EXPR
+ || comp == LT_EXPR
+ || comp == LE_EXPR)
+ return NULL_TREE;
+
+ /* Equality can be computed only between a range and an
+ anti-range. ~[VAL1, VAL2] == [VAL1, VAL2] is always false. */
+ if (vr0->type == VR_RANGE)
+ {
+ /* To simplify processing, make VR0 the anti-range. */
+ value_range *tmp = vr0;
+ vr0 = vr1;
+ vr1 = tmp;
+ }
+
+ gcc_assert (comp == NE_EXPR || comp == EQ_EXPR);
+
+ if (compare_values_warnv (vr0->min, vr1->min, strict_overflow_p) == 0
+ && compare_values_warnv (vr0->max, vr1->max, strict_overflow_p) == 0)
+ return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
+
+ return NULL_TREE;
+ }
+
+ /* Simplify processing. If COMP is GT_EXPR or GE_EXPR, switch the
+ operands around and change the comparison code. */
+ if (comp == GT_EXPR || comp == GE_EXPR)
+ {
+ comp = (comp == GT_EXPR) ? LT_EXPR : LE_EXPR;
+ std::swap (vr0, vr1);
+ }
+
+ if (comp == EQ_EXPR)
+ {
+ /* Equality may only be computed if both ranges represent
+ exactly one value. */
+ if (compare_values_warnv (vr0->min, vr0->max, strict_overflow_p) == 0
+ && compare_values_warnv (vr1->min, vr1->max, strict_overflow_p) == 0)
+ {
+ int cmp_min = compare_values_warnv (vr0->min, vr1->min,
+ strict_overflow_p);
+ int cmp_max = compare_values_warnv (vr0->max, vr1->max,
+ strict_overflow_p);
+ if (cmp_min == 0 && cmp_max == 0)
+ return boolean_true_node;
+ else if (cmp_min != -2 && cmp_max != -2)
+ return boolean_false_node;
+ }
+ /* If [V0_MIN, V1_MAX] < [V1_MIN, V1_MAX] then V0 != V1. */
+ else if (compare_values_warnv (vr0->min, vr1->max,
+ strict_overflow_p) == 1
+ || compare_values_warnv (vr1->min, vr0->max,
+ strict_overflow_p) == 1)
+ return boolean_false_node;
+
+ return NULL_TREE;
+ }
+ else if (comp == NE_EXPR)
+ {
+ int cmp1, cmp2;
+
+ /* If VR0 is completely to the left or completely to the right
+ of VR1, they are always different. Notice that we need to
+ make sure that both comparisons yield similar results to
+ avoid comparing values that cannot be compared at
+ compile-time. */
+ cmp1 = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
+ cmp2 = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
+ if ((cmp1 == -1 && cmp2 == -1) || (cmp1 == 1 && cmp2 == 1))
+ return boolean_true_node;
+
+ /* If VR0 and VR1 represent a single value and are identical,
+ return false. */
+ else if (compare_values_warnv (vr0->min, vr0->max,
+ strict_overflow_p) == 0
+ && compare_values_warnv (vr1->min, vr1->max,
+ strict_overflow_p) == 0
+ && compare_values_warnv (vr0->min, vr1->min,
+ strict_overflow_p) == 0
+ && compare_values_warnv (vr0->max, vr1->max,
+ strict_overflow_p) == 0)
+ return boolean_false_node;
+
+ /* Otherwise, they may or may not be different. */
+ else
+ return NULL_TREE;
+ }
+ else if (comp == LT_EXPR || comp == LE_EXPR)
+ {
+ int tst;
+
+ /* If VR0 is to the left of VR1, return true. */
+ tst = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
+ if ((comp == LT_EXPR && tst == -1)
+ || (comp == LE_EXPR && (tst == -1 || tst == 0)))
+ return boolean_true_node;
+
+ /* If VR0 is to the right of VR1, return false. */
+ tst = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
+ if ((comp == LT_EXPR && (tst == 0 || tst == 1))
+ || (comp == LE_EXPR && tst == 1))
+ return boolean_false_node;
+
+ /* Otherwise, we don't know. */
+ return NULL_TREE;
+ }
+
+ gcc_unreachable ();
+}
+
+/* Given a value range VR, a value VAL and a comparison code COMP, return
+ BOOLEAN_TRUE_NODE if VR COMP VAL always returns true for all the
+ values in VR. Return BOOLEAN_FALSE_NODE if the comparison
+ always returns false. Return NULL_TREE if it is not always
+ possible to determine the value of the comparison. Also set
+ *STRICT_OVERFLOW_P to indicate whether comparision evaluation
+ assumed signed overflow is undefined. */
+
+static tree
+compare_range_with_value (enum tree_code comp, value_range *vr, tree val,
+ bool *strict_overflow_p)
+{
+ if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
+ return NULL_TREE;
+
+ /* Anti-ranges need to be handled separately. */
+ if (vr->type == VR_ANTI_RANGE)
+ {
+ /* For anti-ranges, the only predicates that we can compute at
+ compile time are equality and inequality. */
+ if (comp == GT_EXPR
+ || comp == GE_EXPR
+ || comp == LT_EXPR
+ || comp == LE_EXPR)
+ return NULL_TREE;
+
+ /* ~[VAL_1, VAL_2] OP VAL is known if VAL_1 <= VAL <= VAL_2. */
+ if (value_inside_range (val, vr->min, vr->max) == 1)
+ return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
+
+ return NULL_TREE;
+ }
+
+ if (comp == EQ_EXPR)
+ {
+ /* EQ_EXPR may only be computed if VR represents exactly
+ one value. */
+ if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0)
+ {
+ int cmp = compare_values_warnv (vr->min, val, strict_overflow_p);
+ if (cmp == 0)
+ return boolean_true_node;
+ else if (cmp == -1 || cmp == 1 || cmp == 2)
+ return boolean_false_node;
+ }
+ else if (compare_values_warnv (val, vr->min, strict_overflow_p) == -1
+ || compare_values_warnv (vr->max, val, strict_overflow_p) == -1)
+ return boolean_false_node;
+
+ return NULL_TREE;
+ }
+ else if (comp == NE_EXPR)
+ {
+ /* If VAL is not inside VR, then they are always different. */
+ if (compare_values_warnv (vr->max, val, strict_overflow_p) == -1
+ || compare_values_warnv (vr->min, val, strict_overflow_p) == 1)
+ return boolean_true_node;
+
+ /* If VR represents exactly one value equal to VAL, then return
+ false. */
+ if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0
+ && compare_values_warnv (vr->min, val, strict_overflow_p) == 0)
+ return boolean_false_node;
+
+ /* Otherwise, they may or may not be different. */
+ return NULL_TREE;
+ }
+ else if (comp == LT_EXPR || comp == LE_EXPR)
+ {
+ int tst;
+
+ /* If VR is to the left of VAL, return true. */
+ tst = compare_values_warnv (vr->max, val, strict_overflow_p);
+ if ((comp == LT_EXPR && tst == -1)
+ || (comp == LE_EXPR && (tst == -1 || tst == 0)))
+ return boolean_true_node;
+
+ /* If VR is to the right of VAL, return false. */
+ tst = compare_values_warnv (vr->min, val, strict_overflow_p);
+ if ((comp == LT_EXPR && (tst == 0 || tst == 1))
+ || (comp == LE_EXPR && tst == 1))
+ return boolean_false_node;
+
+ /* Otherwise, we don't know. */
+ return NULL_TREE;
+ }
+ else if (comp == GT_EXPR || comp == GE_EXPR)
+ {
+ int tst;
+
+ /* If VR is to the right of VAL, return true. */
+ tst = compare_values_warnv (vr->min, val, strict_overflow_p);
+ if ((comp == GT_EXPR && tst == 1)
+ || (comp == GE_EXPR && (tst == 0 || tst == 1)))
+ return boolean_true_node;
+
+ /* If VR is to the left of VAL, return false. */
+ tst = compare_values_warnv (vr->max, val, strict_overflow_p);
+ if ((comp == GT_EXPR && (tst == -1 || tst == 0))
+ || (comp == GE_EXPR && tst == -1))
+ return boolean_false_node;
+
+ /* Otherwise, we don't know. */
+ return NULL_TREE;
+ }
+
+ gcc_unreachable ();
+}
+/* Given a range VR, a LOOP and a variable VAR, determine whether it
+ would be profitable to adjust VR using scalar evolution information
+ for VAR. If so, update VR with the new limits. */
+
+void
+vr_values::adjust_range_with_scev (value_range *vr, struct loop *loop,
+ gimple *stmt, tree var)
+{
+ tree init, step, chrec, tmin, tmax, min, max, type, tem;
+ enum ev_direction dir;
+
+ /* TODO. Don't adjust anti-ranges. An anti-range may provide
+ better opportunities than a regular range, but I'm not sure. */
+ if (vr->type == VR_ANTI_RANGE)
+ return;
+
+ chrec = instantiate_parameters (loop, analyze_scalar_evolution (loop, var));
+
+ /* Like in PR19590, scev can return a constant function. */
+ if (is_gimple_min_invariant (chrec))
+ {
+ set_value_range_to_value (vr, chrec, vr->equiv);
+ return;
+ }
+
+ if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
+ return;
+
+ init = initial_condition_in_loop_num (chrec, loop->num);
+ tem = op_with_constant_singleton_value_range (init);
+ if (tem)
+ init = tem;
+ step = evolution_part_in_loop_num (chrec, loop->num);
+ tem = op_with_constant_singleton_value_range (step);
+ if (tem)
+ step = tem;
+
+ /* If STEP is symbolic, we can't know whether INIT will be the
+ minimum or maximum value in the range. Also, unless INIT is
+ a simple expression, compare_values and possibly other functions
+ in tree-vrp won't be able to handle it. */
+ if (step == NULL_TREE
+ || !is_gimple_min_invariant (step)
+ || !valid_value_p (init))
+ return;
+
+ dir = scev_direction (chrec);
+ if (/* Do not adjust ranges if we do not know whether the iv increases
+ or decreases, ... */
+ dir == EV_DIR_UNKNOWN
+ /* ... or if it may wrap. */
+ || scev_probably_wraps_p (NULL_TREE, init, step, stmt,
+ get_chrec_loop (chrec), true))
+ return;
+
+ type = TREE_TYPE (var);
+ if (POINTER_TYPE_P (type) || !TYPE_MIN_VALUE (type))
+ tmin = lower_bound_in_type (type, type);
+ else
+ tmin = TYPE_MIN_VALUE (type);
+ if (POINTER_TYPE_P (type) || !TYPE_MAX_VALUE (type))
+ tmax = upper_bound_in_type (type, type);
+ else
+ tmax = TYPE_MAX_VALUE (type);
+
+ /* Try to use estimated number of iterations for the loop to constrain the
+ final value in the evolution. */
+ if (TREE_CODE (step) == INTEGER_CST
+ && is_gimple_val (init)
+ && (TREE_CODE (init) != SSA_NAME
+ || get_value_range (init)->type == VR_RANGE))
+ {
+ widest_int nit;
+
+ /* We are only entering here for loop header PHI nodes, so using
+ the number of latch executions is the correct thing to use. */
+ if (max_loop_iterations (loop, &nit))
+ {
+ value_range maxvr = VR_INITIALIZER;
+ signop sgn = TYPE_SIGN (TREE_TYPE (step));
+ bool overflow;
+
+ widest_int wtmp = wi::mul (wi::to_widest (step), nit, sgn,
+ &overflow);
+ /* If the multiplication overflowed we can't do a meaningful
+ adjustment. Likewise if the result doesn't fit in the type
+ of the induction variable. For a signed type we have to
+ check whether the result has the expected signedness which
+ is that of the step as number of iterations is unsigned. */
+ if (!overflow
+ && wi::fits_to_tree_p (wtmp, TREE_TYPE (init))
+ && (sgn == UNSIGNED
+ || wi::gts_p (wtmp, 0) == wi::gts_p (wi::to_wide (step), 0)))
+ {
+ tem = wide_int_to_tree (TREE_TYPE (init), wtmp);
+ extract_range_from_binary_expr (&maxvr, PLUS_EXPR,
+ TREE_TYPE (init), init, tem);
+ /* Likewise if the addition did. */
+ if (maxvr.type == VR_RANGE)
+ {
+ value_range initvr = VR_INITIALIZER;
+
+ if (TREE_CODE (init) == SSA_NAME)
+ initvr = *(get_value_range (init));
+ else if (is_gimple_min_invariant (init))
+ set_value_range_to_value (&initvr, init, NULL);
+ else
+ return;
+
+ /* Check if init + nit * step overflows. Though we checked
+ scev {init, step}_loop doesn't wrap, it is not enough
+ because the loop may exit immediately. Overflow could
+ happen in the plus expression in this case. */
+ if ((dir == EV_DIR_DECREASES
+ && compare_values (maxvr.min, initvr.min) != -1)
+ || (dir == EV_DIR_GROWS
+ && compare_values (maxvr.max, initvr.max) != 1))
+ return;
+
+ tmin = maxvr.min;
+ tmax = maxvr.max;
+ }
+ }
+ }
+ }
+
+ if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
+ {
+ min = tmin;
+ max = tmax;
+
+ /* For VARYING or UNDEFINED ranges, just about anything we get
+ from scalar evolutions should be better. */
+
+ if (dir == EV_DIR_DECREASES)
+ max = init;
+ else
+ min = init;
+ }
+ else if (vr->type == VR_RANGE)
+ {
+ min = vr->min;
+ max = vr->max;
+
+ if (dir == EV_DIR_DECREASES)
+ {
+ /* INIT is the maximum value. If INIT is lower than VR->MAX
+ but no smaller than VR->MIN, set VR->MAX to INIT. */
+ if (compare_values (init, max) == -1)
+ max = init;
+
+ /* According to the loop information, the variable does not
+ overflow. */
+ if (compare_values (min, tmin) == -1)
+ min = tmin;
+
+ }
+ else
+ {
+ /* If INIT is bigger than VR->MIN, set VR->MIN to INIT. */
+ if (compare_values (init, min) == 1)
+ min = init;
+
+ if (compare_values (tmax, max) == -1)
+ max = tmax;
+ }
+ }
+ else
+ return;
+
+ /* If we just created an invalid range with the minimum
+ greater than the maximum, we fail conservatively.
+ This should happen only in unreachable
+ parts of code, or for invalid programs. */
+ if (compare_values (min, max) == 1)
+ return;
+
+ /* Even for valid range info, sometimes overflow flag will leak in.
+ As GIMPLE IL should have no constants with TREE_OVERFLOW set, we
+ drop them. */
+ if (TREE_OVERFLOW_P (min))
+ min = drop_tree_overflow (min);
+ if (TREE_OVERFLOW_P (max))
+ max = drop_tree_overflow (max);
+
+ set_value_range (vr, VR_RANGE, min, max, vr->equiv);
+}
+
+/* Dump value ranges of all SSA_NAMEs to FILE. */
+
+void
+vr_values::dump_all_value_ranges (FILE *file)
+{
+ size_t i;
+
+ for (i = 0; i < num_vr_values; i++)
+ {
+ if (vr_value[i])
+ {
+ print_generic_expr (file, ssa_name (i));
+ fprintf (file, ": ");
+ dump_value_range (file, vr_value[i]);
+ fprintf (file, "\n");
+ }
+ }
+
+ fprintf (file, "\n");
+}
+
+/* Initialize VRP lattice. */
+
+vr_values::vr_values () : vrp_value_range_pool ("Tree VRP value ranges")
+{
+ values_propagated = false;
+ num_vr_values = num_ssa_names;
+ vr_value = XCNEWVEC (value_range *, num_vr_values);
+ vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names);
+ bitmap_obstack_initialize (&vrp_equiv_obstack);
+}
+
+/* Free VRP lattice. */
+
+vr_values::~vr_values ()
+{
+ /* Free allocated memory. */
+ free (vr_value);
+ free (vr_phi_edge_counts);
+ bitmap_obstack_release (&vrp_equiv_obstack);
+ vrp_value_range_pool.release ();
+
+ /* So that we can distinguish between VRP data being available
+ and not available. */
+ vr_value = NULL;
+ vr_phi_edge_counts = NULL;
+}
+
+
+/* A hack. */
+static class vr_values *x_vr_values;
+
+/* Return the singleton value-range for NAME or NAME. */
+
+static inline tree
+vrp_valueize (tree name)
+{
+ if (TREE_CODE (name) == SSA_NAME)
+ {
+ value_range *vr = x_vr_values->get_value_range (name);
+ if (vr->type == VR_RANGE
+ && (TREE_CODE (vr->min) == SSA_NAME
+ || is_gimple_min_invariant (vr->min))
+ && vrp_operand_equal_p (vr->min, vr->max))
+ return vr->min;
+ }
+ return name;
+}
+
+/* Return the singleton value-range for NAME if that is a constant
+ but signal to not follow SSA edges. */
+
+static inline tree
+vrp_valueize_1 (tree name)
+{
+ if (TREE_CODE (name) == SSA_NAME)
+ {
+ /* If the definition may be simulated again we cannot follow
+ this SSA edge as the SSA propagator does not necessarily
+ re-visit the use. */
+ gimple *def_stmt = SSA_NAME_DEF_STMT (name);
+ if (!gimple_nop_p (def_stmt)
+ && prop_simulate_again_p (def_stmt))
+ return NULL_TREE;
+ value_range *vr = x_vr_values->get_value_range (name);
+ if (range_int_cst_singleton_p (vr))
+ return vr->min;
+ }
+ return name;
+}
+/* Visit assignment STMT. If it produces an interesting range, record
+ the range in VR and set LHS to OUTPUT_P. */
+
+void
+vr_values::vrp_visit_assignment_or_call (gimple *stmt, tree *output_p,
+ value_range *vr)
+{
+ tree lhs;
+ enum gimple_code code = gimple_code (stmt);
+ lhs = gimple_get_lhs (stmt);
+ *output_p = NULL_TREE;
+
+ /* We only keep track of ranges in integral and pointer types. */
+ if (TREE_CODE (lhs) == SSA_NAME
+ && ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
+ /* It is valid to have NULL MIN/MAX values on a type. See
+ build_range_type. */
+ && TYPE_MIN_VALUE (TREE_TYPE (lhs))
+ && TYPE_MAX_VALUE (TREE_TYPE (lhs)))
+ || POINTER_TYPE_P (TREE_TYPE (lhs))))
+ {
+ *output_p = lhs;
+
+ /* Try folding the statement to a constant first. */
+ x_vr_values = this;
+ tree tem = gimple_fold_stmt_to_constant_1 (stmt, vrp_valueize,
+ vrp_valueize_1);
+ x_vr_values = NULL;
+ if (tem)
+ {
+ if (TREE_CODE (tem) == SSA_NAME
+ && (SSA_NAME_IS_DEFAULT_DEF (tem)
+ || ! prop_simulate_again_p (SSA_NAME_DEF_STMT (tem))))
+ {
+ extract_range_from_ssa_name (vr, tem);
+ return;
+ }
+ else if (is_gimple_min_invariant (tem))
+ {
+ set_value_range_to_value (vr, tem, NULL);
+ return;
+ }
+ }
+ /* Then dispatch to value-range extracting functions. */
+ if (code == GIMPLE_CALL)
+ extract_range_basic (vr, stmt);
+ else
+ extract_range_from_assignment (vr, as_a <gassign *> (stmt));
+ }
+}
+
+/* Helper that gets the value range of the SSA_NAME with version I
+ or a symbolic range containing the SSA_NAME only if the value range
+ is varying or undefined. */
+
+value_range
+vr_values::get_vr_for_comparison (int i)
+{
+ value_range vr = *get_value_range (ssa_name (i));
+
+ /* If name N_i does not have a valid range, use N_i as its own
+ range. This allows us to compare against names that may
+ have N_i in their ranges. */
+ if (vr.type == VR_VARYING || vr.type == VR_UNDEFINED)
+ {
+ vr.type = VR_RANGE;
+ vr.min = ssa_name (i);
+ vr.max = ssa_name (i);
+ }
+
+ return vr;
+}
+
+/* Compare all the value ranges for names equivalent to VAR with VAL
+ using comparison code COMP. Return the same value returned by
+ compare_range_with_value, including the setting of
+ *STRICT_OVERFLOW_P. */
+
+tree
+vr_values::compare_name_with_value (enum tree_code comp, tree var, tree val,
+ bool *strict_overflow_p, bool use_equiv_p)
+{
+ bitmap_iterator bi;
+ unsigned i;
+ bitmap e;
+ tree retval, t;
+ int used_strict_overflow;
+ bool sop;
+ value_range equiv_vr;
+
+ /* Get the set of equivalences for VAR. */
+ e = get_value_range (var)->equiv;
+
+ /* Start at -1. Set it to 0 if we do a comparison without relying
+ on overflow, or 1 if all comparisons rely on overflow. */
+ used_strict_overflow = -1;
+
+ /* Compare vars' value range with val. */
+ equiv_vr = get_vr_for_comparison (SSA_NAME_VERSION (var));
+ sop = false;
+ retval = compare_range_with_value (comp, &equiv_vr, val, &sop);
+ if (retval)
+ used_strict_overflow = sop ? 1 : 0;
+
+ /* If the equiv set is empty we have done all work we need to do. */
+ if (e == NULL)
+ {
+ if (retval
+ && used_strict_overflow > 0)
+ *strict_overflow_p = true;
+ return retval;
+ }
+
+ EXECUTE_IF_SET_IN_BITMAP (e, 0, i, bi)
+ {
+ tree name = ssa_name (i);
+ if (! name)
+ continue;
+
+ if (! use_equiv_p
+ && ! SSA_NAME_IS_DEFAULT_DEF (name)
+ && prop_simulate_again_p (SSA_NAME_DEF_STMT (name)))
+ continue;
+
+ equiv_vr = get_vr_for_comparison (i);
+ sop = false;
+ t = compare_range_with_value (comp, &equiv_vr, val, &sop);
+ if (t)
+ {
+ /* If we get different answers from different members
+ of the equivalence set this check must be in a dead
+ code region. Folding it to a trap representation
+ would be correct here. For now just return don't-know. */
+ if (retval != NULL
+ && t != retval)
+ {
+ retval = NULL_TREE;
+ break;
+ }
+ retval = t;
+
+ if (!sop)
+ used_strict_overflow = 0;
+ else if (used_strict_overflow < 0)
+ used_strict_overflow = 1;
+ }
+ }
+
+ if (retval
+ && used_strict_overflow > 0)
+ *strict_overflow_p = true;
+
+ return retval;
+}
+
+
+/* Given a comparison code COMP and names N1 and N2, compare all the
+ ranges equivalent to N1 against all the ranges equivalent to N2
+ to determine the value of N1 COMP N2. Return the same value
+ returned by compare_ranges. Set *STRICT_OVERFLOW_P to indicate
+ whether we relied on undefined signed overflow in the comparison. */
+
+
+tree
+vr_values::compare_names (enum tree_code comp, tree n1, tree n2,
+ bool *strict_overflow_p)
+{
+ tree t, retval;
+ bitmap e1, e2;
+ bitmap_iterator bi1, bi2;
+ unsigned i1, i2;
+ int used_strict_overflow;
+ static bitmap_obstack *s_obstack = NULL;
+ static bitmap s_e1 = NULL, s_e2 = NULL;
+
+ /* Compare the ranges of every name equivalent to N1 against the
+ ranges of every name equivalent to N2. */
+ e1 = get_value_range (n1)->equiv;
+ e2 = get_value_range (n2)->equiv;
+
+ /* Use the fake bitmaps if e1 or e2 are not available. */
+ if (s_obstack == NULL)
+ {
+ s_obstack = XNEW (bitmap_obstack);
+ bitmap_obstack_initialize (s_obstack);
+ s_e1 = BITMAP_ALLOC (s_obstack);
+ s_e2 = BITMAP_ALLOC (s_obstack);
+ }
+ if (e1 == NULL)
+ e1 = s_e1;
+ if (e2 == NULL)
+ e2 = s_e2;
+
+ /* Add N1 and N2 to their own set of equivalences to avoid
+ duplicating the body of the loop just to check N1 and N2
+ ranges. */
+ bitmap_set_bit (e1, SSA_NAME_VERSION (n1));
+ bitmap_set_bit (e2, SSA_NAME_VERSION (n2));
+
+ /* If the equivalence sets have a common intersection, then the two
+ names can be compared without checking their ranges. */
+ if (bitmap_intersect_p (e1, e2))
+ {
+ bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
+ bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
+
+ return (comp == EQ_EXPR || comp == GE_EXPR || comp == LE_EXPR)
+ ? boolean_true_node
+ : boolean_false_node;
+ }
+
+ /* Start at -1. Set it to 0 if we do a comparison without relying
+ on overflow, or 1 if all comparisons rely on overflow. */
+ used_strict_overflow = -1;
+
+ /* Otherwise, compare all the equivalent ranges. First, add N1 and
+ N2 to their own set of equivalences to avoid duplicating the body
+ of the loop just to check N1 and N2 ranges. */
+ EXECUTE_IF_SET_IN_BITMAP (e1, 0, i1, bi1)
+ {
+ if (! ssa_name (i1))
+ continue;
+
+ value_range vr1 = get_vr_for_comparison (i1);
+
+ t = retval = NULL_TREE;
+ EXECUTE_IF_SET_IN_BITMAP (e2, 0, i2, bi2)
+ {
+ if (! ssa_name (i2))
+ continue;
+
+ bool sop = false;
+
+ value_range vr2 = get_vr_for_comparison (i2);
+
+ t = compare_ranges (comp, &vr1, &vr2, &sop);
+ if (t)
+ {
+ /* If we get different answers from different members
+ of the equivalence set this check must be in a dead
+ code region. Folding it to a trap representation
+ would be correct here. For now just return don't-know. */
+ if (retval != NULL
+ && t != retval)
+ {
+ bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
+ bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
+ return NULL_TREE;
+ }
+ retval = t;
+
+ if (!sop)
+ used_strict_overflow = 0;
+ else if (used_strict_overflow < 0)
+ used_strict_overflow = 1;
+ }
+ }
+
+ if (retval)
+ {
+ bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
+ bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
+ if (used_strict_overflow > 0)
+ *strict_overflow_p = true;
+ return retval;
+ }
+ }
+
+ /* None of the equivalent ranges are useful in computing this
+ comparison. */
+ bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
+ bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
+ return NULL_TREE;
+}
+
+/* Helper function for vrp_evaluate_conditional_warnv & other
+ optimizers. */
+
+tree
+vr_values::vrp_evaluate_conditional_warnv_with_ops_using_ranges
+ (enum tree_code code, tree op0, tree op1, bool * strict_overflow_p)
+{
+ value_range *vr0, *vr1;
+
+ vr0 = (TREE_CODE (op0) == SSA_NAME) ? get_value_range (op0) : NULL;
+ vr1 = (TREE_CODE (op1) == SSA_NAME) ? get_value_range (op1) : NULL;
+
+ tree res = NULL_TREE;
+ if (vr0 && vr1)
+ res = compare_ranges (code, vr0, vr1, strict_overflow_p);
+ if (!res && vr0)
+ res = compare_range_with_value (code, vr0, op1, strict_overflow_p);
+ if (!res && vr1)
+ res = (compare_range_with_value
+ (swap_tree_comparison (code), vr1, op0, strict_overflow_p));
+ return res;
+}
+
+/* Helper function for vrp_evaluate_conditional_warnv. */
+
+tree
+vr_values::vrp_evaluate_conditional_warnv_with_ops (enum tree_code code,
+ tree op0, tree op1,
+ bool use_equiv_p,
+ bool *strict_overflow_p,
+ bool *only_ranges)
+{
+ tree ret;
+ if (only_ranges)
+ *only_ranges = true;
+
+ /* We only deal with integral and pointer types. */
+ if (!INTEGRAL_TYPE_P (TREE_TYPE (op0))
+ && !POINTER_TYPE_P (TREE_TYPE (op0)))
+ return NULL_TREE;
+
+ /* If OP0 CODE OP1 is an overflow comparison, if it can be expressed
+ as a simple equality test, then prefer that over its current form
+ for evaluation.
+
+ An overflow test which collapses to an equality test can always be
+ expressed as a comparison of one argument against zero. Overflow
+ occurs when the chosen argument is zero and does not occur if the
+ chosen argument is not zero. */
+ tree x;
+ if (overflow_comparison_p (code, op0, op1, use_equiv_p, &x))
+ {
+ wide_int max = wi::max_value (TYPE_PRECISION (TREE_TYPE (op0)), UNSIGNED);
+ /* B = A - 1; if (A < B) -> B = A - 1; if (A == 0)
+ B = A - 1; if (A > B) -> B = A - 1; if (A != 0)
+ B = A + 1; if (B < A) -> B = A + 1; if (B == 0)
+ B = A + 1; if (B > A) -> B = A + 1; if (B != 0) */
+ if (integer_zerop (x))
+ {
+ op1 = x;
+ code = (code == LT_EXPR || code == LE_EXPR) ? EQ_EXPR : NE_EXPR;
+ }
+ /* B = A + 1; if (A > B) -> B = A + 1; if (B == 0)
+ B = A + 1; if (A < B) -> B = A + 1; if (B != 0)
+ B = A - 1; if (B > A) -> B = A - 1; if (A == 0)
+ B = A - 1; if (B < A) -> B = A - 1; if (A != 0) */
+ else if (wi::to_wide (x) == max - 1)
+ {
+ op0 = op1;
+ op1 = wide_int_to_tree (TREE_TYPE (op0), 0);
+ code = (code == GT_EXPR || code == GE_EXPR) ? EQ_EXPR : NE_EXPR;
+ }
+ }
+
+ if ((ret = vrp_evaluate_conditional_warnv_with_ops_using_ranges
+ (code, op0, op1, strict_overflow_p)))
+ return ret;
+ if (only_ranges)
+ *only_ranges = false;
+ /* Do not use compare_names during propagation, it's quadratic. */
+ if (TREE_CODE (op0) == SSA_NAME && TREE_CODE (op1) == SSA_NAME
+ && use_equiv_p)
+ return compare_names (code, op0, op1, strict_overflow_p);
+ else if (TREE_CODE (op0) == SSA_NAME)
+ return compare_name_with_value (code, op0, op1,
+ strict_overflow_p, use_equiv_p);
+ else if (TREE_CODE (op1) == SSA_NAME)
+ return compare_name_with_value (swap_tree_comparison (code), op1, op0,
+ strict_overflow_p, use_equiv_p);
+ return NULL_TREE;
+}
+
+/* Given (CODE OP0 OP1) within STMT, try to simplify it based on value range
+ information. Return NULL if the conditional can not be evaluated.
+ The ranges of all the names equivalent with the operands in COND
+ will be used when trying to compute the value. If the result is
+ based on undefined signed overflow, issue a warning if
+ appropriate. */
+
+tree
+vr_values::vrp_evaluate_conditional (tree_code code, tree op0,
+ tree op1, gimple *stmt)
+{
+ bool sop;
+ tree ret;
+ bool only_ranges;
+
+ /* Some passes and foldings leak constants with overflow flag set
+ into the IL. Avoid doing wrong things with these and bail out. */
+ if ((TREE_CODE (op0) == INTEGER_CST
+ && TREE_OVERFLOW (op0))
+ || (TREE_CODE (op1) == INTEGER_CST
+ && TREE_OVERFLOW (op1)))
+ return NULL_TREE;
+
+ sop = false;
+ ret = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, true, &sop,
+ &only_ranges);
+
+ if (ret && sop)
+ {
+ enum warn_strict_overflow_code wc;
+ const char* warnmsg;
+
+ if (is_gimple_min_invariant (ret))
+ {
+ wc = WARN_STRICT_OVERFLOW_CONDITIONAL;
+ warnmsg = G_("assuming signed overflow does not occur when "
+ "simplifying conditional to constant");
+ }
+ else
+ {
+ wc = WARN_STRICT_OVERFLOW_COMPARISON;
+ warnmsg = G_("assuming signed overflow does not occur when "
+ "simplifying conditional");
+ }
+
+ if (issue_strict_overflow_warning (wc))
+ {
+ location_t location;
+
+ if (!gimple_has_location (stmt))
+ location = input_location;
+ else
+ location = gimple_location (stmt);
+ warning_at (location, OPT_Wstrict_overflow, "%s", warnmsg);
+ }
+ }
+
+ if (warn_type_limits
+ && ret && only_ranges
+ && TREE_CODE_CLASS (code) == tcc_comparison
+ && TREE_CODE (op0) == SSA_NAME)
+ {
+ /* If the comparison is being folded and the operand on the LHS
+ is being compared against a constant value that is outside of
+ the natural range of OP0's type, then the predicate will
+ always fold regardless of the value of OP0. If -Wtype-limits
+ was specified, emit a warning. */
+ tree type = TREE_TYPE (op0);
+ value_range *vr0 = get_value_range (op0);
+
+ if (vr0->type == VR_RANGE
+ && INTEGRAL_TYPE_P (type)
+ && vrp_val_is_min (vr0->min)
+ && vrp_val_is_max (vr0->max)
+ && is_gimple_min_invariant (op1))
+ {
+ location_t location;
+
+ if (!gimple_has_location (stmt))
+ location = input_location;
+ else
+ location = gimple_location (stmt);
+
+ warning_at (location, OPT_Wtype_limits,
+ integer_zerop (ret)
+ ? G_("comparison always false "
+ "due to limited range of data type")
+ : G_("comparison always true "
+ "due to limited range of data type"));
+ }
+ }
+
+ return ret;
+}
+
+
+/* Visit conditional statement STMT. If we can determine which edge
+ will be taken out of STMT's basic block, record it in
+ *TAKEN_EDGE_P. Otherwise, set *TAKEN_EDGE_P to NULL. */
+
+void
+vr_values::vrp_visit_cond_stmt (gcond *stmt, edge *taken_edge_p)
+{
+ tree val;
+
+ *taken_edge_p = NULL;
+
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ tree use;
+ ssa_op_iter i;
+
+ fprintf (dump_file, "\nVisiting conditional with predicate: ");
+ print_gimple_stmt (dump_file, stmt, 0);
+ fprintf (dump_file, "\nWith known ranges\n");
+
+ FOR_EACH_SSA_TREE_OPERAND (use, stmt, i, SSA_OP_USE)
+ {
+ fprintf (dump_file, "\t");
+ print_generic_expr (dump_file, use);
+ fprintf (dump_file, ": ");
+ dump_value_range (dump_file, vr_value[SSA_NAME_VERSION (use)]);
+ }
+
+ fprintf (dump_file, "\n");
+ }
+
+ /* Compute the value of the predicate COND by checking the known
+ ranges of each of its operands.
+
+ Note that we cannot evaluate all the equivalent ranges here
+ because those ranges may not yet be final and with the current
+ propagation strategy, we cannot determine when the value ranges
+ of the names in the equivalence set have changed.
+
+ For instance, given the following code fragment
+
+ i_5 = PHI <8, i_13>
+ ...
+ i_14 = ASSERT_EXPR <i_5, i_5 != 0>
+ if (i_14 == 1)
+ ...
+
+ Assume that on the first visit to i_14, i_5 has the temporary
+ range [8, 8] because the second argument to the PHI function is
+ not yet executable. We derive the range ~[0, 0] for i_14 and the
+ equivalence set { i_5 }. So, when we visit 'if (i_14 == 1)' for
+ the first time, since i_14 is equivalent to the range [8, 8], we
+ determine that the predicate is always false.
+
+ On the next round of propagation, i_13 is determined to be
+ VARYING, which causes i_5 to drop down to VARYING. So, another
+ visit to i_14 is scheduled. In this second visit, we compute the
+ exact same range and equivalence set for i_14, namely ~[0, 0] and
+ { i_5 }. But we did not have the previous range for i_5
+ registered, so vrp_visit_assignment thinks that the range for
+ i_14 has not changed. Therefore, the predicate 'if (i_14 == 1)'
+ is not visited again, which stops propagation from visiting
+ statements in the THEN clause of that if().
+
+ To properly fix this we would need to keep the previous range
+ value for the names in the equivalence set. This way we would've
+ discovered that from one visit to the other i_5 changed from
+ range [8, 8] to VR_VARYING.
+
+ However, fixing this apparent limitation may not be worth the
+ additional checking. Testing on several code bases (GCC, DLV,
+ MICO, TRAMP3D and SPEC2000) showed that doing this results in
+ 4 more predicates folded in SPEC. */
+
+ bool sop;
+ val = vrp_evaluate_conditional_warnv_with_ops (gimple_cond_code (stmt),
+ gimple_cond_lhs (stmt),
+ gimple_cond_rhs (stmt),
+ false, &sop, NULL);
+ if (val)
+ *taken_edge_p = find_taken_edge (gimple_bb (stmt), val);
+
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "\nPredicate evaluates to: ");
+ if (val == NULL_TREE)
+ fprintf (dump_file, "DON'T KNOW\n");
+ else
+ print_generic_stmt (dump_file, val);
+ }
+}
+
+/* Searches the case label vector VEC for the ranges of CASE_LABELs that are
+ used in range VR. The indices are placed in MIN_IDX1, MAX_IDX, MIN_IDX2 and
+ MAX_IDX2. If the ranges of CASE_LABELs are empty then MAX_IDX1 < MIN_IDX1.
+ Returns true if the default label is not needed. */
+
+static bool
+find_case_label_ranges (gswitch *stmt, value_range *vr, size_t *min_idx1,
+ size_t *max_idx1, size_t *min_idx2,
+ size_t *max_idx2)
+{
+ size_t i, j, k, l;
+ unsigned int n = gimple_switch_num_labels (stmt);
+ bool take_default;
+ tree case_low, case_high;
+ tree min = vr->min, max = vr->max;
+
+ gcc_checking_assert (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE);
+
+ take_default = !find_case_label_range (stmt, min, max, &i, &j);
+
+ /* Set second range to emtpy. */
+ *min_idx2 = 1;
+ *max_idx2 = 0;
+
+ if (vr->type == VR_RANGE)
+ {
+ *min_idx1 = i;
+ *max_idx1 = j;
+ return !take_default;
+ }
+
+ /* Set first range to all case labels. */
+ *min_idx1 = 1;
+ *max_idx1 = n - 1;
+
+ if (i > j)
+ return false;
+
+ /* Make sure all the values of case labels [i , j] are contained in
+ range [MIN, MAX]. */
+ case_low = CASE_LOW (gimple_switch_label (stmt, i));
+ case_high = CASE_HIGH (gimple_switch_label (stmt, j));
+ if (tree_int_cst_compare (case_low, min) < 0)
+ i += 1;
+ if (case_high != NULL_TREE
+ && tree_int_cst_compare (max, case_high) < 0)
+ j -= 1;
+
+ if (i > j)
+ return false;
+
+ /* If the range spans case labels [i, j], the corresponding anti-range spans
+ the labels [1, i - 1] and [j + 1, n - 1]. */
+ k = j + 1;
+ l = n - 1;
+ if (k > l)
+ {
+ k = 1;
+ l = 0;
+ }
+
+ j = i - 1;
+ i = 1;
+ if (i > j)
+ {
+ i = k;
+ j = l;
+ k = 1;
+ l = 0;
+ }
+
+ *min_idx1 = i;
+ *max_idx1 = j;
+ *min_idx2 = k;
+ *max_idx2 = l;
+ return false;
+}
+
+/* Visit switch statement STMT. If we can determine which edge
+ will be taken out of STMT's basic block, record it in
+ *TAKEN_EDGE_P. Otherwise, *TAKEN_EDGE_P set to NULL. */
+
+void
+vr_values::vrp_visit_switch_stmt (gswitch *stmt, edge *taken_edge_p)
+{
+ tree op, val;
+ value_range *vr;
+ size_t i = 0, j = 0, k, l;
+ bool take_default;
+
+ *taken_edge_p = NULL;
+ op = gimple_switch_index (stmt);
+ if (TREE_CODE (op) != SSA_NAME)
+ return;
+
+ vr = get_value_range (op);
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "\nVisiting switch expression with operand ");
+ print_generic_expr (dump_file, op);
+ fprintf (dump_file, " with known range ");
+ dump_value_range (dump_file, vr);
+ fprintf (dump_file, "\n");
+ }
+
+ if ((vr->type != VR_RANGE
+ && vr->type != VR_ANTI_RANGE)
+ || symbolic_range_p (vr))
+ return;
+
+ /* Find the single edge that is taken from the switch expression. */
+ take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);
+
+ /* Check if the range spans no CASE_LABEL. If so, we only reach the default
+ label */
+ if (j < i)
+ {
+ gcc_assert (take_default);
+ val = gimple_switch_default_label (stmt);
+ }
+ else
+ {
+ /* Check if labels with index i to j and maybe the default label
+ are all reaching the same label. */
+
+ val = gimple_switch_label (stmt, i);
+ if (take_default
+ && CASE_LABEL (gimple_switch_default_label (stmt))
+ != CASE_LABEL (val))
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, " not a single destination for this "
+ "range\n");
+ return;
+ }
+ for (++i; i <= j; ++i)
+ {
+ if (CASE_LABEL (gimple_switch_label (stmt, i)) != CASE_LABEL (val))
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, " not a single destination for this "
+ "range\n");
+ return;
+ }
+ }
+ for (; k <= l; ++k)
+ {
+ if (CASE_LABEL (gimple_switch_label (stmt, k)) != CASE_LABEL (val))
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, " not a single destination for this "
+ "range\n");
+ return;
+ }
+ }
+ }
+
+ *taken_edge_p = find_edge (gimple_bb (stmt),
+ label_to_block (CASE_LABEL (val)));
+
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, " will take edge to ");
+ print_generic_stmt (dump_file, CASE_LABEL (val));
+ }
+}
+
+
+/* Evaluate statement STMT. If the statement produces a useful range,
+ set VR and corepsponding OUTPUT_P.
+
+ If STMT is a conditional branch and we can determine its truth
+ value, the taken edge is recorded in *TAKEN_EDGE_P. */
+
+void
+vr_values::extract_range_from_stmt (gimple *stmt, edge *taken_edge_p,
+ tree *output_p, value_range *vr)
+{
+
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "\nVisiting statement:\n");
+ print_gimple_stmt (dump_file, stmt, 0, dump_flags);
+ }
+
+ if (!stmt_interesting_for_vrp (stmt))
+ gcc_assert (stmt_ends_bb_p (stmt));
+ else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
+ vrp_visit_assignment_or_call (stmt, output_p, vr);
+ else if (gimple_code (stmt) == GIMPLE_COND)
+ vrp_visit_cond_stmt (as_a <gcond *> (stmt), taken_edge_p);
+ else if (gimple_code (stmt) == GIMPLE_SWITCH)
+ vrp_visit_switch_stmt (as_a <gswitch *> (stmt), taken_edge_p);
+}
+
+/* Visit all arguments for PHI node PHI that flow through executable
+ edges. If a valid value range can be derived from all the incoming
+ value ranges, set a new range in VR_RESULT. */
+
+void
+vr_values::extract_range_from_phi_node (gphi *phi, value_range *vr_result)
+{
+ size_t i;
+ tree lhs = PHI_RESULT (phi);
+ value_range *lhs_vr = get_value_range (lhs);
+ bool first = true;
+ int edges, old_edges;
+ struct loop *l;
+
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "\nVisiting PHI node: ");
+ print_gimple_stmt (dump_file, phi, 0, dump_flags);
+ }
+
+ bool may_simulate_backedge_again = false;
+ edges = 0;
+ for (i = 0; i < gimple_phi_num_args (phi); i++)
+ {
+ edge e = gimple_phi_arg_edge (phi, i);
+
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file,
+ " Argument #%d (%d -> %d %sexecutable)\n",
+ (int) i, e->src->index, e->dest->index,
+ (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
+ }
+
+ if (e->flags & EDGE_EXECUTABLE)
+ {
+ tree arg = PHI_ARG_DEF (phi, i);
+ value_range vr_arg;
+
+ ++edges;
+
+ if (TREE_CODE (arg) == SSA_NAME)
+ {
+ /* See if we are eventually going to change one of the args. */
+ gimple *def_stmt = SSA_NAME_DEF_STMT (arg);
+ if (! gimple_nop_p (def_stmt)
+ && prop_simulate_again_p (def_stmt)
+ && e->flags & EDGE_DFS_BACK)
+ may_simulate_backedge_again = true;
+
+ vr_arg = *(get_value_range (arg));
+ /* Do not allow equivalences or symbolic ranges to leak in from
+ backedges. That creates invalid equivalencies.
+ See PR53465 and PR54767. */
+ if (e->flags & EDGE_DFS_BACK)
+ {
+ if (vr_arg.type == VR_RANGE
+ || vr_arg.type == VR_ANTI_RANGE)
+ {
+ vr_arg.equiv = NULL;
+ if (symbolic_range_p (&vr_arg))
+ {
+ vr_arg.type = VR_VARYING;
+ vr_arg.min = NULL_TREE;
+ vr_arg.max = NULL_TREE;
+ }
+ }
+ }
+ else
+ {
+ /* If the non-backedge arguments range is VR_VARYING then
+ we can still try recording a simple equivalence. */
+ if (vr_arg.type == VR_VARYING)
+ {
+ vr_arg.type = VR_RANGE;
+ vr_arg.min = arg;
+ vr_arg.max = arg;
+ vr_arg.equiv = NULL;
+ }
+ }
+ }
+ else
+ {
+ if (TREE_OVERFLOW_P (arg))
+ arg = drop_tree_overflow (arg);
+
+ vr_arg.type = VR_RANGE;
+ vr_arg.min = arg;
+ vr_arg.max = arg;
+ vr_arg.equiv = NULL;
+ }
+
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "\t");
+ print_generic_expr (dump_file, arg, dump_flags);
+ fprintf (dump_file, ": ");
+ dump_value_range (dump_file, &vr_arg);
+ fprintf (dump_file, "\n");
+ }
+
+ if (first)
+ copy_value_range (vr_result, &vr_arg);
+ else
+ vrp_meet (vr_result, &vr_arg);
+ first = false;
+
+ if (vr_result->type == VR_VARYING)
+ break;
+ }
+ }
+
+ if (vr_result->type == VR_VARYING)
+ goto varying;
+ else if (vr_result->type == VR_UNDEFINED)
+ goto update_range;
+
+ old_edges = vr_phi_edge_counts[SSA_NAME_VERSION (lhs)];
+ vr_phi_edge_counts[SSA_NAME_VERSION (lhs)] = edges;
+
+ /* To prevent infinite iterations in the algorithm, derive ranges
+ when the new value is slightly bigger or smaller than the
+ previous one. We don't do this if we have seen a new executable
+ edge; this helps us avoid an infinity for conditionals
+ which are not in a loop. If the old value-range was VR_UNDEFINED
+ use the updated range and iterate one more time. If we will not
+ simulate this PHI again via the backedge allow us to iterate. */
+ if (edges > 0
+ && gimple_phi_num_args (phi) > 1
+ && edges == old_edges
+ && lhs_vr->type != VR_UNDEFINED
+ && may_simulate_backedge_again)
+ {
+ /* Compare old and new ranges, fall back to varying if the
+ values are not comparable. */
+ int cmp_min = compare_values (lhs_vr->min, vr_result->min);
+ if (cmp_min == -2)
+ goto varying;
+ int cmp_max = compare_values (lhs_vr->max, vr_result->max);
+ if (cmp_max == -2)
+ goto varying;
+
+ /* For non VR_RANGE or for pointers fall back to varying if
+ the range changed. */
+ if ((lhs_vr->type != VR_RANGE || vr_result->type != VR_RANGE
+ || POINTER_TYPE_P (TREE_TYPE (lhs)))
+ && (cmp_min != 0 || cmp_max != 0))
+ goto varying;
+
+ /* If the new minimum is larger than the previous one
+ retain the old value. If the new minimum value is smaller
+ than the previous one and not -INF go all the way to -INF + 1.
+ In the first case, to avoid infinite bouncing between different
+ minimums, and in the other case to avoid iterating millions of
+ times to reach -INF. Going to -INF + 1 also lets the following
+ iteration compute whether there will be any overflow, at the
+ expense of one additional iteration. */
+ if (cmp_min < 0)
+ vr_result->min = lhs_vr->min;
+ else if (cmp_min > 0
+ && !vrp_val_is_min (vr_result->min))
+ vr_result->min
+ = int_const_binop (PLUS_EXPR,
+ vrp_val_min (TREE_TYPE (vr_result->min)),
+ build_int_cst (TREE_TYPE (vr_result->min), 1));
+
+ /* Similarly for the maximum value. */
+ if (cmp_max > 0)
+ vr_result->max = lhs_vr->max;
+ else if (cmp_max < 0
+ && !vrp_val_is_max (vr_result->max))
+ vr_result->max
+ = int_const_binop (MINUS_EXPR,
+ vrp_val_max (TREE_TYPE (vr_result->min)),
+ build_int_cst (TREE_TYPE (vr_result->min), 1));
+
+ /* If we dropped either bound to +-INF then if this is a loop
+ PHI node SCEV may known more about its value-range. */
+ if (cmp_min > 0 || cmp_min < 0
+ || cmp_max < 0 || cmp_max > 0)
+ goto scev_check;
+
+ goto infinite_check;
+ }
+
+ goto update_range;
+
+varying:
+ set_value_range_to_varying (vr_result);
+
+scev_check:
+ /* If this is a loop PHI node SCEV may known more about its value-range.
+ scev_check can be reached from two paths, one is a fall through from above
+ "varying" label, the other is direct goto from code block which tries to
+ avoid infinite simulation. */
+ if ((l = loop_containing_stmt (phi))
+ && l->header == gimple_bb (phi))
+ adjust_range_with_scev (vr_result, l, phi, lhs);
+
+infinite_check:
+ /* If we will end up with a (-INF, +INF) range, set it to
+ VARYING. Same if the previous max value was invalid for
+ the type and we end up with vr_result.min > vr_result.max. */
+ if ((vr_result->type == VR_RANGE || vr_result->type == VR_ANTI_RANGE)
+ && !((vrp_val_is_max (vr_result->max) && vrp_val_is_min (vr_result->min))
+ || compare_values (vr_result->min, vr_result->max) > 0))
+ ;
+ else
+ set_value_range_to_varying (vr_result);
+
+ /* If the new range is different than the previous value, keep
+ iterating. */
+update_range:
+ return;
+}
+
+/* Simplify boolean operations if the source is known
+ to be already a boolean. */
+bool
+vr_values::simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi,
+ gimple *stmt)
+{
+ enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
+ tree lhs, op0, op1;
+ bool need_conversion;
+
+ /* We handle only !=/== case here. */
+ gcc_assert (rhs_code == EQ_EXPR || rhs_code == NE_EXPR);
+
+ op0 = gimple_assign_rhs1 (stmt);
+ if (!op_with_boolean_value_range_p (op0))
+ return false;
+
+ op1 = gimple_assign_rhs2 (stmt);
+ if (!op_with_boolean_value_range_p (op1))
+ return false;
+
+ /* Reduce number of cases to handle to NE_EXPR. As there is no
+ BIT_XNOR_EXPR we cannot replace A == B with a single statement. */
+ if (rhs_code == EQ_EXPR)
+ {
+ if (TREE_CODE (op1) == INTEGER_CST)
+ op1 = int_const_binop (BIT_XOR_EXPR, op1,
+ build_int_cst (TREE_TYPE (op1), 1));
+ else
+ return false;
+ }
+
+ lhs = gimple_assign_lhs (stmt);
+ need_conversion
+ = !useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (op0));
+
+ /* Make sure to not sign-extend a 1-bit 1 when converting the result. */
+ if (need_conversion
+ && !TYPE_UNSIGNED (TREE_TYPE (op0))
+ && TYPE_PRECISION (TREE_TYPE (op0)) == 1
+ && TYPE_PRECISION (TREE_TYPE (lhs)) > 1)
+ return false;
+
+ /* For A != 0 we can substitute A itself. */
+ if (integer_zerop (op1))
+ gimple_assign_set_rhs_with_ops (gsi,
+ need_conversion
+ ? NOP_EXPR : TREE_CODE (op0), op0);
+ /* For A != B we substitute A ^ B. Either with conversion. */
+ else if (need_conversion)
+ {
+ tree tem = make_ssa_name (TREE_TYPE (op0));
+ gassign *newop
+ = gimple_build_assign (tem, BIT_XOR_EXPR, op0, op1);
+ gsi_insert_before (gsi, newop, GSI_SAME_STMT);
+ if (INTEGRAL_TYPE_P (TREE_TYPE (tem))
+ && TYPE_PRECISION (TREE_TYPE (tem)) > 1)
+ set_range_info (tem, VR_RANGE,
+ wi::zero (TYPE_PRECISION (TREE_TYPE (tem))),
+ wi::one (TYPE_PRECISION (TREE_TYPE (tem))));
+ gimple_assign_set_rhs_with_ops (gsi, NOP_EXPR, tem);
+ }
+ /* Or without. */
+ else
+ gimple_assign_set_rhs_with_ops (gsi, BIT_XOR_EXPR, op0, op1);
+ update_stmt (gsi_stmt (*gsi));
+ fold_stmt (gsi, follow_single_use_edges);
+
+ return true;
+}
+
+/* Simplify a division or modulo operator to a right shift or bitwise and
+ if the first operand is unsigned or is greater than zero and the second
+ operand is an exact power of two. For TRUNC_MOD_EXPR op0 % op1 with
+ constant op1 (op1min = op1) or with op1 in [op1min, op1max] range,
+ optimize it into just op0 if op0's range is known to be a subset of
+ [-op1min + 1, op1min - 1] for signed and [0, op1min - 1] for unsigned
+ modulo. */
+
+bool
+vr_values::simplify_div_or_mod_using_ranges (gimple_stmt_iterator *gsi,
+ gimple *stmt)
+{
+ enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
+ tree val = NULL;
+ tree op0 = gimple_assign_rhs1 (stmt);
+ tree op1 = gimple_assign_rhs2 (stmt);
+ tree op0min = NULL_TREE, op0max = NULL_TREE;
+ tree op1min = op1;
+ value_range *vr = NULL;
+
+ if (TREE_CODE (op0) == INTEGER_CST)
+ {
+ op0min = op0;
+ op0max = op0;
+ }
+ else
+ {
+ vr = get_value_range (op0);
+ if (range_int_cst_p (vr))
+ {
+ op0min = vr->min;
+ op0max = vr->max;
+ }
+ }
+
+ if (rhs_code == TRUNC_MOD_EXPR
+ && TREE_CODE (op1) == SSA_NAME)
+ {
+ value_range *vr1 = get_value_range (op1);
+ if (range_int_cst_p (vr1))
+ op1min = vr1->min;
+ }
+ if (rhs_code == TRUNC_MOD_EXPR
+ && TREE_CODE (op1min) == INTEGER_CST
+ && tree_int_cst_sgn (op1min) == 1
+ && op0max
+ && tree_int_cst_lt (op0max, op1min))
+ {
+ if (TYPE_UNSIGNED (TREE_TYPE (op0))
+ || tree_int_cst_sgn (op0min) >= 0
+ || tree_int_cst_lt (fold_unary (NEGATE_EXPR, TREE_TYPE (op1min), op1min),
+ op0min))
+ {
+ /* If op0 already has the range op0 % op1 has,
+ then TRUNC_MOD_EXPR won't change anything. */
+ gimple_assign_set_rhs_from_tree (gsi, op0);
+ return true;
+ }
+ }
+
+ if (TREE_CODE (op0) != SSA_NAME)
+ return false;
+
+ if (!integer_pow2p (op1))
+ {
+ /* X % -Y can be only optimized into X % Y either if
+ X is not INT_MIN, or Y is not -1. Fold it now, as after
+ remove_range_assertions the range info might be not available
+ anymore. */
+ if (rhs_code == TRUNC_MOD_EXPR
+ && fold_stmt (gsi, follow_single_use_edges))
+ return true;
+ return false;
+ }
+
+ if (TYPE_UNSIGNED (TREE_TYPE (op0)))
+ val = integer_one_node;
+ else
+ {
+ bool sop = false;
+
+ val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);
+
+ if (val
+ && sop
+ && integer_onep (val)
+ && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
+ {
+ location_t location;
+
+ if (!gimple_has_location (stmt))
+ location = input_location;
+ else
+ location = gimple_location (stmt);
+ warning_at (location, OPT_Wstrict_overflow,
+ "assuming signed overflow does not occur when "
+ "simplifying %</%> or %<%%%> to %<>>%> or %<&%>");
+ }
+ }
+
+ if (val && integer_onep (val))
+ {
+ tree t;
+
+ if (rhs_code == TRUNC_DIV_EXPR)
+ {
+ t = build_int_cst (integer_type_node, tree_log2 (op1));
+ gimple_assign_set_rhs_code (stmt, RSHIFT_EXPR);
+ gimple_assign_set_rhs1 (stmt, op0);
+ gimple_assign_set_rhs2 (stmt, t);
+ }
+ else
+ {
+ t = build_int_cst (TREE_TYPE (op1), 1);
+ t = int_const_binop (MINUS_EXPR, op1, t);
+ t = fold_convert (TREE_TYPE (op0), t);
+
+ gimple_assign_set_rhs_code (stmt, BIT_AND_EXPR);
+ gimple_assign_set_rhs1 (stmt, op0);
+ gimple_assign_set_rhs2 (stmt, t);
+ }
+
+ update_stmt (stmt);
+ fold_stmt (gsi, follow_single_use_edges);
+ return true;
+ }
+
+ return false;
+}
+
+/* Simplify a min or max if the ranges of the two operands are
+ disjoint. Return true if we do simplify. */
+
+bool
+vr_values::simplify_min_or_max_using_ranges (gimple_stmt_iterator *gsi,
+ gimple *stmt)
+{
+ tree op0 = gimple_assign_rhs1 (stmt);
+ tree op1 = gimple_assign_rhs2 (stmt);
+ bool sop = false;
+ tree val;
+
+ val = (vrp_evaluate_conditional_warnv_with_ops_using_ranges
+ (LE_EXPR, op0, op1, &sop));
+ if (!val)
+ {
+ sop = false;
+ val = (vrp_evaluate_conditional_warnv_with_ops_using_ranges
+ (LT_EXPR, op0, op1, &sop));
+ }
+
+ if (val)
+ {
+ if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
+ {
+ location_t location;
+
+ if (!gimple_has_location (stmt))
+ location = input_location;
+ else
+ location = gimple_location (stmt);
+ warning_at (location, OPT_Wstrict_overflow,
+ "assuming signed overflow does not occur when "
+ "simplifying %<min/max (X,Y)%> to %<X%> or %<Y%>");
+ }
+
+ /* VAL == TRUE -> OP0 < or <= op1
+ VAL == FALSE -> OP0 > or >= op1. */
+ tree res = ((gimple_assign_rhs_code (stmt) == MAX_EXPR)
+ == integer_zerop (val)) ? op0 : op1;
+ gimple_assign_set_rhs_from_tree (gsi, res);
+ return true;
+ }
+
+ return false;
+}
+
+/* If the operand to an ABS_EXPR is >= 0, then eliminate the
+ ABS_EXPR. If the operand is <= 0, then simplify the
+ ABS_EXPR into a NEGATE_EXPR. */
+
+bool
+vr_values::simplify_abs_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
+{
+ tree op = gimple_assign_rhs1 (stmt);
+ value_range *vr = get_value_range (op);
+
+ if (vr)
+ {
+ tree val = NULL;
+ bool sop = false;
+
+ val = compare_range_with_value (LE_EXPR, vr, integer_zero_node, &sop);
+ if (!val)
+ {
+ /* The range is neither <= 0 nor > 0. Now see if it is
+ either < 0 or >= 0. */
+ sop = false;
+ val = compare_range_with_value (LT_EXPR, vr, integer_zero_node,
+ &sop);
+ }
+
+ if (val)
+ {
+ if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
+ {
+ location_t location;
+
+ if (!gimple_has_location (stmt))
+ location = input_location;
+ else
+ location = gimple_location (stmt);
+ warning_at (location, OPT_Wstrict_overflow,
+ "assuming signed overflow does not occur when "
+ "simplifying %<abs (X)%> to %<X%> or %<-X%>");
+ }
+
+ gimple_assign_set_rhs1 (stmt, op);
+ if (integer_zerop (val))
+ gimple_assign_set_rhs_code (stmt, SSA_NAME);
+ else
+ gimple_assign_set_rhs_code (stmt, NEGATE_EXPR);
+ update_stmt (stmt);
+ fold_stmt (gsi, follow_single_use_edges);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/* Optimize away redundant BIT_AND_EXPR and BIT_IOR_EXPR.
+ If all the bits that are being cleared by & are already
+ known to be zero from VR, or all the bits that are being
+ set by | are already known to be one from VR, the bit
+ operation is redundant. */
+
+bool
+vr_values::simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi,
+ gimple *stmt)
+{
+ tree op0 = gimple_assign_rhs1 (stmt);
+ tree op1 = gimple_assign_rhs2 (stmt);
+ tree op = NULL_TREE;
+ value_range vr0 = VR_INITIALIZER;
+ value_range vr1 = VR_INITIALIZER;
+ wide_int may_be_nonzero0, may_be_nonzero1;
+ wide_int must_be_nonzero0, must_be_nonzero1;
+ wide_int mask;
+
+ if (TREE_CODE (op0) == SSA_NAME)
+ vr0 = *(get_value_range (op0));
+ else if (is_gimple_min_invariant (op0))
+ set_value_range_to_value (&vr0, op0, NULL);
+ else
+ return false;
+
+ if (TREE_CODE (op1) == SSA_NAME)
+ vr1 = *(get_value_range (op1));
+ else if (is_gimple_min_invariant (op1))
+ set_value_range_to_value (&vr1, op1, NULL);
+ else
+ return false;
+
+ if (!zero_nonzero_bits_from_vr (TREE_TYPE (op0), &vr0, &may_be_nonzero0,
+ &must_be_nonzero0))
+ return false;
+ if (!zero_nonzero_bits_from_vr (TREE_TYPE (op1), &vr1, &may_be_nonzero1,
+ &must_be_nonzero1))
+ return false;
+
+ switch (gimple_assign_rhs_code (stmt))
+ {
+ case BIT_AND_EXPR:
+ mask = wi::bit_and_not (may_be_nonzero0, must_be_nonzero1);
+ if (mask == 0)
+ {
+ op = op0;
+ break;
+ }
+ mask = wi::bit_and_not (may_be_nonzero1, must_be_nonzero0);
+ if (mask == 0)
+ {
+ op = op1;
+ break;
+ }
+ break;
+ case BIT_IOR_EXPR:
+ mask = wi::bit_and_not (may_be_nonzero0, must_be_nonzero1);
+ if (mask == 0)
+ {
+ op = op1;
+ break;
+ }
+ mask = wi::bit_and_not (may_be_nonzero1, must_be_nonzero0);
+ if (mask == 0)
+ {
+ op = op0;
+ break;
+ }
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ if (op == NULL_TREE)
+ return false;
+
+ gimple_assign_set_rhs_with_ops (gsi, TREE_CODE (op), op);
+ update_stmt (gsi_stmt (*gsi));
+ return true;
+}
+
+/* We are comparing trees OP0 and OP1 using COND_CODE. OP0 has
+ a known value range VR.
+
+ If there is one and only one value which will satisfy the
+ conditional, then return that value. Else return NULL.
+
+ If signed overflow must be undefined for the value to satisfy
+ the conditional, then set *STRICT_OVERFLOW_P to true. */
+
+static tree
+test_for_singularity (enum tree_code cond_code, tree op0,
+ tree op1, value_range *vr)
+{
+ tree min = NULL;
+ tree max = NULL;
+
+ /* Extract minimum/maximum values which satisfy the conditional as it was
+ written. */
+ if (cond_code == LE_EXPR || cond_code == LT_EXPR)
+ {
+ min = TYPE_MIN_VALUE (TREE_TYPE (op0));
+
+ max = op1;
+ if (cond_code == LT_EXPR)
+ {
+ tree one = build_int_cst (TREE_TYPE (op0), 1);
+ max = fold_build2 (MINUS_EXPR, TREE_TYPE (op0), max, one);
+ /* Signal to compare_values_warnv this expr doesn't overflow. */
+ if (EXPR_P (max))
+ TREE_NO_WARNING (max) = 1;
+ }
+ }
+ else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
+ {
+ max = TYPE_MAX_VALUE (TREE_TYPE (op0));
+
+ min = op1;
+ if (cond_code == GT_EXPR)
+ {
+ tree one = build_int_cst (TREE_TYPE (op0), 1);
+ min = fold_build2 (PLUS_EXPR, TREE_TYPE (op0), min, one);
+ /* Signal to compare_values_warnv this expr doesn't overflow. */
+ if (EXPR_P (min))
+ TREE_NO_WARNING (min) = 1;
+ }
+ }
+
+ /* Now refine the minimum and maximum values using any
+ value range information we have for op0. */
+ if (min && max)
+ {
+ if (compare_values (vr->min, min) == 1)
+ min = vr->min;
+ if (compare_values (vr->max, max) == -1)
+ max = vr->max;
+
+ /* If the new min/max values have converged to a single value,
+ then there is only one value which can satisfy the condition,
+ return that value. */
+ if (operand_equal_p (min, max, 0) && is_gimple_min_invariant (min))
+ return min;
+ }
+ return NULL;
+}
+
+/* Return whether the value range *VR fits in an integer type specified
+ by PRECISION and UNSIGNED_P. */
+
+static bool
+range_fits_type_p (value_range *vr, unsigned dest_precision, signop dest_sgn)
+{
+ tree src_type;
+ unsigned src_precision;
+ widest_int tem;
+ signop src_sgn;
+
+ /* We can only handle integral and pointer types. */
+ src_type = TREE_TYPE (vr->min);
+ if (!INTEGRAL_TYPE_P (src_type)
+ && !POINTER_TYPE_P (src_type))
+ return false;
+
+ /* An extension is fine unless VR is SIGNED and dest_sgn is UNSIGNED,
+ and so is an identity transform. */
+ src_precision = TYPE_PRECISION (TREE_TYPE (vr->min));
+ src_sgn = TYPE_SIGN (src_type);
+ if ((src_precision < dest_precision
+ && !(dest_sgn == UNSIGNED && src_sgn == SIGNED))
+ || (src_precision == dest_precision && src_sgn == dest_sgn))
+ return true;
+
+ /* Now we can only handle ranges with constant bounds. */
+ if (vr->type != VR_RANGE
+ || TREE_CODE (vr->min) != INTEGER_CST
+ || TREE_CODE (vr->max) != INTEGER_CST)
+ return false;
+
+ /* For sign changes, the MSB of the wide_int has to be clear.
+ An unsigned value with its MSB set cannot be represented by
+ a signed wide_int, while a negative value cannot be represented
+ by an unsigned wide_int. */
+ if (src_sgn != dest_sgn
+ && (wi::lts_p (wi::to_wide (vr->min), 0)
+ || wi::lts_p (wi::to_wide (vr->max), 0)))
+ return false;
+
+ /* Then we can perform the conversion on both ends and compare
+ the result for equality. */
+ tem = wi::ext (wi::to_widest (vr->min), dest_precision, dest_sgn);
+ if (tem != wi::to_widest (vr->min))
+ return false;
+ tem = wi::ext (wi::to_widest (vr->max), dest_precision, dest_sgn);
+ if (tem != wi::to_widest (vr->max))
+ return false;
+
+ return true;
+}
+
+/* Simplify a conditional using a relational operator to an equality
+ test if the range information indicates only one value can satisfy
+ the original conditional. */
+
+bool
+vr_values::simplify_cond_using_ranges_1 (gcond *stmt)
+{
+ tree op0 = gimple_cond_lhs (stmt);
+ tree op1 = gimple_cond_rhs (stmt);
+ enum tree_code cond_code = gimple_cond_code (stmt);
+
+ if (cond_code != NE_EXPR
+ && cond_code != EQ_EXPR
+ && TREE_CODE (op0) == SSA_NAME
+ && INTEGRAL_TYPE_P (TREE_TYPE (op0))
+ && is_gimple_min_invariant (op1))
+ {
+ value_range *vr = get_value_range (op0);
+
+ /* If we have range information for OP0, then we might be
+ able to simplify this conditional. */
+ if (vr->type == VR_RANGE)
+ {
+ tree new_tree = test_for_singularity (cond_code, op0, op1, vr);
+ if (new_tree)
+ {
+ if (dump_file)
+ {
+ fprintf (dump_file, "Simplified relational ");
+ print_gimple_stmt (dump_file, stmt, 0);
+ fprintf (dump_file, " into ");
+ }
+
+ gimple_cond_set_code (stmt, EQ_EXPR);
+ gimple_cond_set_lhs (stmt, op0);
+ gimple_cond_set_rhs (stmt, new_tree);
+
+ update_stmt (stmt);
+
+ if (dump_file)
+ {
+ print_gimple_stmt (dump_file, stmt, 0);
+ fprintf (dump_file, "\n");
+ }
+
+ return true;
+ }
+
+ /* Try again after inverting the condition. We only deal
+ with integral types here, so no need to worry about
+ issues with inverting FP comparisons. */
+ new_tree = test_for_singularity
+ (invert_tree_comparison (cond_code, false),
+ op0, op1, vr);
+ if (new_tree)
+ {
+ if (dump_file)
+ {
+ fprintf (dump_file, "Simplified relational ");
+ print_gimple_stmt (dump_file, stmt, 0);
+ fprintf (dump_file, " into ");
+ }
+
+ gimple_cond_set_code (stmt, NE_EXPR);
+ gimple_cond_set_lhs (stmt, op0);
+ gimple_cond_set_rhs (stmt, new_tree);
+
+ update_stmt (stmt);
+
+ if (dump_file)
+ {
+ print_gimple_stmt (dump_file, stmt, 0);
+ fprintf (dump_file, "\n");
+ }
+
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+/* STMT is a conditional at the end of a basic block.
+
+ If the conditional is of the form SSA_NAME op constant and the SSA_NAME
+ was set via a type conversion, try to replace the SSA_NAME with the RHS
+ of the type conversion. Doing so makes the conversion dead which helps
+ subsequent passes. */
+
+void
+vr_values::simplify_cond_using_ranges_2 (gcond *stmt)
+{
+ tree op0 = gimple_cond_lhs (stmt);
+ tree op1 = gimple_cond_rhs (stmt);
+
+ /* If we have a comparison of an SSA_NAME (OP0) against a constant,
+ see if OP0 was set by a type conversion where the source of
+ the conversion is another SSA_NAME with a range that fits
+ into the range of OP0's type.
+
+ If so, the conversion is redundant as the earlier SSA_NAME can be
+ used for the comparison directly if we just massage the constant in the
+ comparison. */
+ if (TREE_CODE (op0) == SSA_NAME
+ && TREE_CODE (op1) == INTEGER_CST)
+ {
+ gimple *def_stmt = SSA_NAME_DEF_STMT (op0);
+ tree innerop;
+
+ if (!is_gimple_assign (def_stmt)
+ || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
+ return;
+
+ innerop = gimple_assign_rhs1 (def_stmt);
+
+ if (TREE_CODE (innerop) == SSA_NAME
+ && !POINTER_TYPE_P (TREE_TYPE (innerop))
+ && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (innerop)
+ && desired_pro_or_demotion_p (TREE_TYPE (innerop), TREE_TYPE (op0)))
+ {
+ value_range *vr = get_value_range (innerop);
+
+ if (range_int_cst_p (vr)
+ && range_fits_type_p (vr,
+ TYPE_PRECISION (TREE_TYPE (op0)),
+ TYPE_SIGN (TREE_TYPE (op0)))
+ && int_fits_type_p (op1, TREE_TYPE (innerop)))
+ {
+ tree newconst = fold_convert (TREE_TYPE (innerop), op1);
+ gimple_cond_set_lhs (stmt, innerop);
+ gimple_cond_set_rhs (stmt, newconst);
+ update_stmt (stmt);
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Folded into: ");
+ print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
+ fprintf (dump_file, "\n");
+ }
+ }
+ }
+ }
+}
+
+/* Simplify a switch statement using the value range of the switch
+ argument. */
+
+bool
+vr_values::simplify_switch_using_ranges (gswitch *stmt)
+{
+ tree op = gimple_switch_index (stmt);
+ value_range *vr = NULL;
+ bool take_default;
+ edge e;
+ edge_iterator ei;
+ size_t i = 0, j = 0, n, n2;
+ tree vec2;
+ switch_update su;
+ size_t k = 1, l = 0;
+
+ if (TREE_CODE (op) == SSA_NAME)
+ {
+ vr = get_value_range (op);
+
+ /* We can only handle integer ranges. */
+ if ((vr->type != VR_RANGE
+ && vr->type != VR_ANTI_RANGE)
+ || symbolic_range_p (vr))
+ return false;
+
+ /* Find case label for min/max of the value range. */
+ take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);
+ }
+ else if (TREE_CODE (op) == INTEGER_CST)
+ {
+ take_default = !find_case_label_index (stmt, 1, op, &i);
+ if (take_default)
+ {
+ i = 1;
+ j = 0;
+ }
+ else
+ {
+ j = i;
+ }
+ }
+ else
+ return false;
+
+ n = gimple_switch_num_labels (stmt);
+
+ /* We can truncate the case label ranges that partially overlap with OP's
+ value range. */
+ size_t min_idx = 1, max_idx = 0;
+ if (vr != NULL)
+ find_case_label_range (stmt, vr->min, vr->max, &min_idx, &max_idx);
+ if (min_idx <= max_idx)
+ {
+ tree min_label = gimple_switch_label (stmt, min_idx);
+ tree max_label = gimple_switch_label (stmt, max_idx);
+
+ /* Avoid changing the type of the case labels when truncating. */
+ tree case_label_type = TREE_TYPE (CASE_LOW (min_label));
+ tree vr_min = fold_convert (case_label_type, vr->min);
+ tree vr_max = fold_convert (case_label_type, vr->max);
+
+ if (vr->type == VR_RANGE)
+ {
+ /* If OP's value range is [2,8] and the low label range is
+ 0 ... 3, truncate the label's range to 2 .. 3. */
+ if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) < 0
+ && CASE_HIGH (min_label) != NULL_TREE
+ && tree_int_cst_compare (CASE_HIGH (min_label), vr_min) >= 0)
+ CASE_LOW (min_label) = vr_min;
+
+ /* If OP's value range is [2,8] and the high label range is
+ 7 ... 10, truncate the label's range to 7 .. 8. */
+ if (tree_int_cst_compare (CASE_LOW (max_label), vr_max) <= 0
+ && CASE_HIGH (max_label) != NULL_TREE
+ && tree_int_cst_compare (CASE_HIGH (max_label), vr_max) > 0)
+ CASE_HIGH (max_label) = vr_max;
+ }
+ else if (vr->type == VR_ANTI_RANGE)
+ {
+ tree one_cst = build_one_cst (case_label_type);
+
+ if (min_label == max_label)
+ {
+ /* If OP's value range is ~[7,8] and the label's range is
+ 7 ... 10, truncate the label's range to 9 ... 10. */
+ if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) == 0
+ && CASE_HIGH (min_label) != NULL_TREE
+ && tree_int_cst_compare (CASE_HIGH (min_label), vr_max) > 0)
+ CASE_LOW (min_label)
+ = int_const_binop (PLUS_EXPR, vr_max, one_cst);
+
+ /* If OP's value range is ~[7,8] and the label's range is
+ 5 ... 8, truncate the label's range to 5 ... 6. */
+ if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) < 0
+ && CASE_HIGH (min_label) != NULL_TREE
+ && tree_int_cst_compare (CASE_HIGH (min_label), vr_max) == 0)
+ CASE_HIGH (min_label)
+ = int_const_binop (MINUS_EXPR, vr_min, one_cst);
+ }
+ else
+ {
+ /* If OP's value range is ~[2,8] and the low label range is
+ 0 ... 3, truncate the label's range to 0 ... 1. */
+ if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) < 0
+ && CASE_HIGH (min_label) != NULL_TREE
+ && tree_int_cst_compare (CASE_HIGH (min_label), vr_min) >= 0)
+ CASE_HIGH (min_label)
+ = int_const_binop (MINUS_EXPR, vr_min, one_cst);
+
+ /* If OP's value range is ~[2,8] and the high label range is
+ 7 ... 10, truncate the label's range to 9 ... 10. */
+ if (tree_int_cst_compare (CASE_LOW (max_label), vr_max) <= 0
+ && CASE_HIGH (max_label) != NULL_TREE
+ && tree_int_cst_compare (CASE_HIGH (max_label), vr_max) > 0)
+ CASE_LOW (max_label)
+ = int_const_binop (PLUS_EXPR, vr_max, one_cst);
+ }
+ }
+
+ /* Canonicalize singleton case ranges. */
+ if (tree_int_cst_equal (CASE_LOW (min_label), CASE_HIGH (min_label)))
+ CASE_HIGH (min_label) = NULL_TREE;
+ if (tree_int_cst_equal (CASE_LOW (max_label), CASE_HIGH (max_label)))
+ CASE_HIGH (max_label) = NULL_TREE;
+ }
+
+ /* We can also eliminate case labels that lie completely outside OP's value
+ range. */
+
+ /* Bail out if this is just all edges taken. */
+ if (i == 1
+ && j == n - 1
+ && take_default)
+ return false;
+
+ /* Build a new vector of taken case labels. */
+ vec2 = make_tree_vec (j - i + 1 + l - k + 1 + (int)take_default);
+ n2 = 0;
+
+ /* Add the default edge, if necessary. */
+ if (take_default)
+ TREE_VEC_ELT (vec2, n2++) = gimple_switch_default_label (stmt);
+
+ for (; i <= j; ++i, ++n2)
+ TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, i);
+
+ for (; k <= l; ++k, ++n2)
+ TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, k);
+
+ /* Mark needed edges. */
+ for (i = 0; i < n2; ++i)
+ {
+ e = find_edge (gimple_bb (stmt),
+ label_to_block (CASE_LABEL (TREE_VEC_ELT (vec2, i))));
+ e->aux = (void *)-1;
+ }
+
+ /* Queue not needed edges for later removal. */
+ FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
+ {
+ if (e->aux == (void *)-1)
+ {
+ e->aux = NULL;
+ continue;
+ }
+
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "removing unreachable case label\n");
+ }
+ to_remove_edges.safe_push (e);
+ e->flags &= ~EDGE_EXECUTABLE;
+ }
+
+ /* And queue an update for the stmt. */
+ su.stmt = stmt;
+ su.vec = vec2;
+ to_update_switch_stmts.safe_push (su);
+ return false;
+}
+
+/* Simplify an integral conversion from an SSA name in STMT. */
+
+static bool
+simplify_conversion_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
+{
+ tree innerop, middleop, finaltype;
+ gimple *def_stmt;
+ signop inner_sgn, middle_sgn, final_sgn;
+ unsigned inner_prec, middle_prec, final_prec;
+ widest_int innermin, innermed, innermax, middlemin, middlemed, middlemax;
+
+ finaltype = TREE_TYPE (gimple_assign_lhs (stmt));
+ if (!INTEGRAL_TYPE_P (finaltype))
+ return false;
+ middleop = gimple_assign_rhs1 (stmt);
+ def_stmt = SSA_NAME_DEF_STMT (middleop);
+ if (!is_gimple_assign (def_stmt)
+ || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
+ return false;
+ innerop = gimple_assign_rhs1 (def_stmt);
+ if (TREE_CODE (innerop) != SSA_NAME
+ || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (innerop))
+ return false;
+
+ /* Get the value-range of the inner operand. Use get_range_info in
+ case innerop was created during substitute-and-fold. */
+ wide_int imin, imax;
+ if (!INTEGRAL_TYPE_P (TREE_TYPE (innerop))
+ || get_range_info (innerop, &imin, &imax) != VR_RANGE)
+ return false;
+ innermin = widest_int::from (imin, TYPE_SIGN (TREE_TYPE (innerop)));
+ innermax = widest_int::from (imax, TYPE_SIGN (TREE_TYPE (innerop)));
+
+ /* Simulate the conversion chain to check if the result is equal if
+ the middle conversion is removed. */
+ inner_prec = TYPE_PRECISION (TREE_TYPE (innerop));
+ middle_prec = TYPE_PRECISION (TREE_TYPE (middleop));
+ final_prec = TYPE_PRECISION (finaltype);
+
+ /* If the first conversion is not injective, the second must not
+ be widening. */
+ if (wi::gtu_p (innermax - innermin,
+ wi::mask <widest_int> (middle_prec, false))
+ && middle_prec < final_prec)
+ return false;
+ /* We also want a medium value so that we can track the effect that
+ narrowing conversions with sign change have. */
+ inner_sgn = TYPE_SIGN (TREE_TYPE (innerop));
+ if (inner_sgn == UNSIGNED)
+ innermed = wi::shifted_mask <widest_int> (1, inner_prec - 1, false);
+ else
+ innermed = 0;
+ if (wi::cmp (innermin, innermed, inner_sgn) >= 0
+ || wi::cmp (innermed, innermax, inner_sgn) >= 0)
+ innermed = innermin;
+
+ middle_sgn = TYPE_SIGN (TREE_TYPE (middleop));
+ middlemin = wi::ext (innermin, middle_prec, middle_sgn);
+ middlemed = wi::ext (innermed, middle_prec, middle_sgn);
+ middlemax = wi::ext (innermax, middle_prec, middle_sgn);
+
+ /* Require that the final conversion applied to both the original
+ and the intermediate range produces the same result. */
+ final_sgn = TYPE_SIGN (finaltype);
+ if (wi::ext (middlemin, final_prec, final_sgn)
+ != wi::ext (innermin, final_prec, final_sgn)
+ || wi::ext (middlemed, final_prec, final_sgn)
+ != wi::ext (innermed, final_prec, final_sgn)
+ || wi::ext (middlemax, final_prec, final_sgn)
+ != wi::ext (innermax, final_prec, final_sgn))
+ return false;
+
+ gimple_assign_set_rhs1 (stmt, innerop);
+ fold_stmt (gsi, follow_single_use_edges);
+ return true;
+}
+
+/* Simplify a conversion from integral SSA name to float in STMT. */
+
+bool
+vr_values::simplify_float_conversion_using_ranges (gimple_stmt_iterator *gsi,
+ gimple *stmt)
+{
+ tree rhs1 = gimple_assign_rhs1 (stmt);
+ value_range *vr = get_value_range (rhs1);
+ scalar_float_mode fltmode
+ = SCALAR_FLOAT_TYPE_MODE (TREE_TYPE (gimple_assign_lhs (stmt)));
+ scalar_int_mode mode;
+ tree tem;
+ gassign *conv;
+
+ /* We can only handle constant ranges. */
+ if (vr->type != VR_RANGE
+ || TREE_CODE (vr->min) != INTEGER_CST
+ || TREE_CODE (vr->max) != INTEGER_CST)
+ return false;
+
+ /* First check if we can use a signed type in place of an unsigned. */
+ scalar_int_mode rhs_mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (rhs1));
+ if (TYPE_UNSIGNED (TREE_TYPE (rhs1))
+ && can_float_p (fltmode, rhs_mode, 0) != CODE_FOR_nothing
+ && range_fits_type_p (vr, TYPE_PRECISION (TREE_TYPE (rhs1)), SIGNED))
+ mode = rhs_mode;
+ /* If we can do the conversion in the current input mode do nothing. */
+ else if (can_float_p (fltmode, rhs_mode,
+ TYPE_UNSIGNED (TREE_TYPE (rhs1))) != CODE_FOR_nothing)
+ return false;
+ /* Otherwise search for a mode we can use, starting from the narrowest
+ integer mode available. */
+ else
+ {
+ mode = NARROWEST_INT_MODE;
+ for (;;)
+ {
+ /* If we cannot do a signed conversion to float from mode
+ or if the value-range does not fit in the signed type
+ try with a wider mode. */
+ if (can_float_p (fltmode, mode, 0) != CODE_FOR_nothing
+ && range_fits_type_p (vr, GET_MODE_PRECISION (mode), SIGNED))
+ break;
+
+ /* But do not widen the input. Instead leave that to the
+ optabs expansion code. */
+ if (!GET_MODE_WIDER_MODE (mode).exists (&mode)
+ || GET_MODE_PRECISION (mode) > TYPE_PRECISION (TREE_TYPE (rhs1)))
+ return false;
+ }
+ }
+
+ /* It works, insert a truncation or sign-change before the
+ float conversion. */
+ tem = make_ssa_name (build_nonstandard_integer_type
+ (GET_MODE_PRECISION (mode), 0));
+ conv = gimple_build_assign (tem, NOP_EXPR, rhs1);
+ gsi_insert_before (gsi, conv, GSI_SAME_STMT);
+ gimple_assign_set_rhs1 (stmt, tem);
+ fold_stmt (gsi, follow_single_use_edges);
+
+ return true;
+}
+
+/* Simplify an internal fn call using ranges if possible. */
+
+bool
+vr_values::simplify_internal_call_using_ranges (gimple_stmt_iterator *gsi,
+ gimple *stmt)
+{
+ enum tree_code subcode;
+ bool is_ubsan = false;
+ bool ovf = false;
+ switch (gimple_call_internal_fn (stmt))
+ {
+ case IFN_UBSAN_CHECK_ADD:
+ subcode = PLUS_EXPR;
+ is_ubsan = true;
+ break;
+ case IFN_UBSAN_CHECK_SUB:
+ subcode = MINUS_EXPR;
+ is_ubsan = true;
+ break;
+ case IFN_UBSAN_CHECK_MUL:
+ subcode = MULT_EXPR;
+ is_ubsan = true;
+ break;
+ case IFN_ADD_OVERFLOW:
+ subcode = PLUS_EXPR;
+ break;
+ case IFN_SUB_OVERFLOW:
+ subcode = MINUS_EXPR;
+ break;
+ case IFN_MUL_OVERFLOW:
+ subcode = MULT_EXPR;
+ break;
+ default:
+ return false;
+ }
+
+ tree op0 = gimple_call_arg (stmt, 0);
+ tree op1 = gimple_call_arg (stmt, 1);
+ tree type;
+ if (is_ubsan)
+ {
+ type = TREE_TYPE (op0);
+ if (VECTOR_TYPE_P (type))
+ return false;
+ }
+ else if (gimple_call_lhs (stmt) == NULL_TREE)
+ return false;
+ else
+ type = TREE_TYPE (TREE_TYPE (gimple_call_lhs (stmt)));
+ if (!check_for_binary_op_overflow (subcode, type, op0, op1, &ovf)
+ || (is_ubsan && ovf))
+ return false;
+
+ gimple *g;
+ location_t loc = gimple_location (stmt);
+ if (is_ubsan)
+ g = gimple_build_assign (gimple_call_lhs (stmt), subcode, op0, op1);
+ else
+ {
+ int prec = TYPE_PRECISION (type);
+ tree utype = type;
+ if (ovf
+ || !useless_type_conversion_p (type, TREE_TYPE (op0))
+ || !useless_type_conversion_p (type, TREE_TYPE (op1)))
+ utype = build_nonstandard_integer_type (prec, 1);
+ if (TREE_CODE (op0) == INTEGER_CST)
+ op0 = fold_convert (utype, op0);
+ else if (!useless_type_conversion_p (utype, TREE_TYPE (op0)))
+ {
+ g = gimple_build_assign (make_ssa_name (utype), NOP_EXPR, op0);
+ gimple_set_location (g, loc);
+ gsi_insert_before (gsi, g, GSI_SAME_STMT);
+ op0 = gimple_assign_lhs (g);
+ }
+ if (TREE_CODE (op1) == INTEGER_CST)
+ op1 = fold_convert (utype, op1);
+ else if (!useless_type_conversion_p (utype, TREE_TYPE (op1)))
+ {
+ g = gimple_build_assign (make_ssa_name (utype), NOP_EXPR, op1);
+ gimple_set_location (g, loc);
+ gsi_insert_before (gsi, g, GSI_SAME_STMT);
+ op1 = gimple_assign_lhs (g);
+ }
+ g = gimple_build_assign (make_ssa_name (utype), subcode, op0, op1);
+ gimple_set_location (g, loc);
+ gsi_insert_before (gsi, g, GSI_SAME_STMT);
+ if (utype != type)
+ {
+ g = gimple_build_assign (make_ssa_name (type), NOP_EXPR,
+ gimple_assign_lhs (g));
+ gimple_set_location (g, loc);
+ gsi_insert_before (gsi, g, GSI_SAME_STMT);
+ }
+ g = gimple_build_assign (gimple_call_lhs (stmt), COMPLEX_EXPR,
+ gimple_assign_lhs (g),
+ build_int_cst (type, ovf));
+ }
+ gimple_set_location (g, loc);
+ gsi_replace (gsi, g, false);
+ return true;
+}
+
+/* Return true if VAR is a two-valued variable. Set a and b with the
+ two-values when it is true. Return false otherwise. */
+
+bool
+vr_values::two_valued_val_range_p (tree var, tree *a, tree *b)
+{
+ value_range *vr = get_value_range (var);
+ if ((vr->type != VR_RANGE
+ && vr->type != VR_ANTI_RANGE)
+ || TREE_CODE (vr->min) != INTEGER_CST
+ || TREE_CODE (vr->max) != INTEGER_CST)
+ return false;
+
+ if (vr->type == VR_RANGE
+ && wi::to_wide (vr->max) - wi::to_wide (vr->min) == 1)
+ {
+ *a = vr->min;
+ *b = vr->max;
+ return true;
+ }
+
+ /* ~[TYPE_MIN + 1, TYPE_MAX - 1] */
+ if (vr->type == VR_ANTI_RANGE
+ && (wi::to_wide (vr->min)
+ - wi::to_wide (vrp_val_min (TREE_TYPE (var)))) == 1
+ && (wi::to_wide (vrp_val_max (TREE_TYPE (var)))
+ - wi::to_wide (vr->max)) == 1)
+ {
+ *a = vrp_val_min (TREE_TYPE (var));
+ *b = vrp_val_max (TREE_TYPE (var));
+ return true;
+ }
+
+ return false;
+}
+
+/* Simplify STMT using ranges if possible. */
+
+bool
+vr_values::simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
+{
+ gimple *stmt = gsi_stmt (*gsi);
+ if (is_gimple_assign (stmt))
+ {
+ enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
+ tree rhs1 = gimple_assign_rhs1 (stmt);
+ tree rhs2 = gimple_assign_rhs2 (stmt);
+ tree lhs = gimple_assign_lhs (stmt);
+ tree val1 = NULL_TREE, val2 = NULL_TREE;
+ use_operand_p use_p;
+ gimple *use_stmt;
+
+ /* Convert:
+ LHS = CST BINOP VAR
+ Where VAR is two-valued and LHS is used in GIMPLE_COND only
+ To:
+ LHS = VAR == VAL1 ? (CST BINOP VAL1) : (CST BINOP VAL2)
+
+ Also handles:
+ LHS = VAR BINOP CST
+ Where VAR is two-valued and LHS is used in GIMPLE_COND only
+ To:
+ LHS = VAR == VAL1 ? (VAL1 BINOP CST) : (VAL2 BINOP CST) */
+
+ if (TREE_CODE_CLASS (rhs_code) == tcc_binary
+ && INTEGRAL_TYPE_P (TREE_TYPE (lhs))
+ && ((TREE_CODE (rhs1) == INTEGER_CST
+ && TREE_CODE (rhs2) == SSA_NAME)
+ || (TREE_CODE (rhs2) == INTEGER_CST
+ && TREE_CODE (rhs1) == SSA_NAME))
+ && single_imm_use (lhs, &use_p, &use_stmt)
+ && gimple_code (use_stmt) == GIMPLE_COND)
+
+ {
+ tree new_rhs1 = NULL_TREE;
+ tree new_rhs2 = NULL_TREE;
+ tree cmp_var = NULL_TREE;
+
+ if (TREE_CODE (rhs2) == SSA_NAME
+ && two_valued_val_range_p (rhs2, &val1, &val2))
+ {
+ /* Optimize RHS1 OP [VAL1, VAL2]. */
+ new_rhs1 = int_const_binop (rhs_code, rhs1, val1);
+ new_rhs2 = int_const_binop (rhs_code, rhs1, val2);
+ cmp_var = rhs2;
+ }
+ else if (TREE_CODE (rhs1) == SSA_NAME
+ && two_valued_val_range_p (rhs1, &val1, &val2))
+ {
+ /* Optimize [VAL1, VAL2] OP RHS2. */
+ new_rhs1 = int_const_binop (rhs_code, val1, rhs2);
+ new_rhs2 = int_const_binop (rhs_code, val2, rhs2);
+ cmp_var = rhs1;
+ }
+
+ /* If we could not find two-vals or the optimzation is invalid as
+ in divide by zero, new_rhs1 / new_rhs will be NULL_TREE. */
+ if (new_rhs1 && new_rhs2)
+ {
+ tree cond = build2 (EQ_EXPR, boolean_type_node, cmp_var, val1);
+ gimple_assign_set_rhs_with_ops (gsi,
+ COND_EXPR, cond,
+ new_rhs1,
+ new_rhs2);
+ update_stmt (gsi_stmt (*gsi));
+ fold_stmt (gsi, follow_single_use_edges);
+ return true;
+ }
+ }
+
+ switch (rhs_code)
+ {
+ case EQ_EXPR:
+ case NE_EXPR:
+ /* Transform EQ_EXPR, NE_EXPR into BIT_XOR_EXPR or identity
+ if the RHS is zero or one, and the LHS are known to be boolean
+ values. */
+ if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
+ return simplify_truth_ops_using_ranges (gsi, stmt);
+ break;
+
+ /* Transform TRUNC_DIV_EXPR and TRUNC_MOD_EXPR into RSHIFT_EXPR
+ and BIT_AND_EXPR respectively if the first operand is greater
+ than zero and the second operand is an exact power of two.
+ Also optimize TRUNC_MOD_EXPR away if the second operand is
+ constant and the first operand already has the right value
+ range. */
+ case TRUNC_DIV_EXPR:
+ case TRUNC_MOD_EXPR:
+ if ((TREE_CODE (rhs1) == SSA_NAME
+ || TREE_CODE (rhs1) == INTEGER_CST)
+ && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
+ return simplify_div_or_mod_using_ranges (gsi, stmt);
+ break;
+
+ /* Transform ABS (X) into X or -X as appropriate. */
+ case ABS_EXPR:
+ if (TREE_CODE (rhs1) == SSA_NAME
+ && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
+ return simplify_abs_using_ranges (gsi, stmt);
+ break;
+
+ case BIT_AND_EXPR:
+ case BIT_IOR_EXPR:
+ /* Optimize away BIT_AND_EXPR and BIT_IOR_EXPR
+ if all the bits being cleared are already cleared or
+ all the bits being set are already set. */
+ if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
+ return simplify_bit_ops_using_ranges (gsi, stmt);
+ break;
+
+ CASE_CONVERT:
+ if (TREE_CODE (rhs1) == SSA_NAME
+ && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
+ return simplify_conversion_using_ranges (gsi, stmt);
+ break;
+
+ case FLOAT_EXPR:
+ if (TREE_CODE (rhs1) == SSA_NAME
+ && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
+ return simplify_float_conversion_using_ranges (gsi, stmt);
+ break;
+
+ case MIN_EXPR:
+ case MAX_EXPR:
+ return simplify_min_or_max_using_ranges (gsi, stmt);
+
+ default:
+ break;
+ }
+ }
+ else if (gimple_code (stmt) == GIMPLE_COND)
+ return simplify_cond_using_ranges_1 (as_a <gcond *> (stmt));
+ else if (gimple_code (stmt) == GIMPLE_SWITCH)
+ return simplify_switch_using_ranges (as_a <gswitch *> (stmt));
+ else if (is_gimple_call (stmt)
+ && gimple_call_internal_p (stmt))
+ return simplify_internal_call_using_ranges (gsi, stmt);
+
+ return false;
+}
+
+void
+vr_values::set_vr_value (tree var, value_range *vr)
+{
+ if (SSA_NAME_VERSION (var) >= num_vr_values)
+ return;
+ vr_value[SSA_NAME_VERSION (var)] = vr;
+}
+
diff --git a/gcc/vr-values.h b/gcc/vr-values.h
new file mode 100644
index 00000000000..20bd6c57a6c
--- /dev/null
+++ b/gcc/vr-values.h
@@ -0,0 +1,121 @@
+/* Support routines for Value Range Propagation (VRP).
+ Copyright (C) 2016-2017 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_VR_VALUES_H
+#define GCC_VR_VALUES_H
+
+/* The VR_VALUES class holds the current view of range information
+ for all the SSA_NAMEs in the IL.
+
+ It can be used to hold context sensitive range information during
+ a dominator walk or it may be used to hold range information in the
+ standard VRP pass as ranges are propagated through the lattice to a
+ steady state.
+
+ This information is independent of the range information that gets
+ attached to SSA_NAMEs. A pass such as VRP may choose to transfer
+ the global information it produces into global range information that
+ gets attached to an SSA_NAME. It's unclear how useful that global
+ information will be in a world where we can compute context sensitive
+ range information fast or perform on-demand queries. */
+class vr_values
+{
+ public:
+ vr_values (void);
+ ~vr_values (void);
+
+ /* Value range array. After propagation, VR_VALUE[I] holds the range
+ of values that SSA name N_I may take. */
+ unsigned int num_vr_values;
+ value_range **vr_value;
+ bool values_propagated;
+
+ /* For a PHI node which sets SSA name N_I, VR_COUNTS[I] holds the
+ number of executable edges we saw the last time we visited the
+ node. */
+ int *vr_phi_edge_counts;
+
+ /* Allocation pools for tree-vrp allocations. */
+ object_allocator<value_range> vrp_value_range_pool;
+ bitmap_obstack vrp_equiv_obstack;
+
+ value_range *get_value_range (const_tree);
+ void set_vr_value (tree, value_range *);
+
+ void set_defs_to_varying (gimple *);
+ bool update_value_range (const_tree, value_range *);
+ void add_equivalence (bitmap *, const_tree);
+ bool vrp_stmt_computes_nonzero (gimple *);
+ tree op_with_constant_singleton_value_range (tree);
+ bool op_with_boolean_value_range_p (tree);
+ bool check_for_binary_op_overflow (enum tree_code, tree, tree, tree, bool *);
+ void adjust_range_with_scev (value_range *, struct loop *, gimple *, tree);
+ value_range get_vr_for_comparison (int);
+ tree compare_name_with_value (enum tree_code, tree, tree, bool *, bool);
+ tree compare_names (enum tree_code, tree, tree, bool *);
+ bool two_valued_val_range_p (tree, tree *, tree *);
+
+ tree vrp_evaluate_conditional_warnv_with_ops_using_ranges (enum tree_code,
+ tree, tree,
+ bool *);
+ tree vrp_evaluate_conditional_warnv_with_ops (enum tree_code,
+ tree, tree, bool,
+ bool *, bool *);
+ tree vrp_evaluate_conditional (tree_code, tree, tree, gimple *);
+
+
+ void dump_all_value_ranges (FILE *);
+
+ void extract_range_for_var_from_comparison_expr (tree, enum tree_code,
+ tree, tree, value_range *);
+ void extract_range_from_assert (value_range *, tree);
+ void extract_range_from_ssa_name (value_range *, tree);
+ void extract_range_from_binary_expr (value_range *, enum tree_code,
+ tree, tree, tree);
+ void extract_range_from_unary_expr (value_range *, enum tree_code,
+ tree, tree);
+ void extract_range_from_phi_node (gphi *, value_range *);
+ void extract_range_from_cond_expr (value_range *, gassign *);
+ void extract_range_basic (value_range *, gimple *);
+ void extract_range_from_assignment (value_range *, gassign *);
+ void extract_range_from_stmt (gimple *, edge *, tree *, value_range *);
+ void extract_range_from_comparison (value_range *, enum tree_code,
+ tree, tree, tree);
+
+ void vrp_visit_assignment_or_call (gimple*, tree *, value_range *);
+ void vrp_visit_switch_stmt (gswitch *, edge *);
+ void vrp_visit_cond_stmt (gcond *, edge *);
+
+ bool simplify_truth_ops_using_ranges (gimple_stmt_iterator *, gimple *);
+ bool simplify_div_or_mod_using_ranges (gimple_stmt_iterator *, gimple *);
+ bool simplify_abs_using_ranges (gimple_stmt_iterator *, gimple *);
+ bool simplify_bit_ops_using_ranges (gimple_stmt_iterator *, gimple *);
+ bool simplify_min_or_max_using_ranges (gimple_stmt_iterator *, gimple *);
+ bool simplify_cond_using_ranges_1 (gcond *);
+ void simplify_cond_using_ranges_2 (gcond *);
+ bool simplify_switch_using_ranges (gswitch *);
+ bool simplify_float_conversion_using_ranges (gimple_stmt_iterator *,
+ gimple *);
+ bool simplify_internal_call_using_ranges (gimple_stmt_iterator *, gimple *);
+ bool simplify_stmt_using_ranges (gimple_stmt_iterator *);
+};
+
+#define VR_INITIALIZER { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL }
+
+#endif /* GCC_VR_VALUES_H */
diff --git a/include/ChangeLog b/include/ChangeLog
index 8ccc57ae783..9cae3fb2bd8 100644
--- a/include/ChangeLog
+++ b/include/ChangeLog
@@ -1,3 +1,8 @@
+2017-11-10 Stephen Crane <sjc@immunant.com>
+
+ * plugin-api.h: Add plugin API for processing plugin-added
+ input files.
+
2017-10-23 David Malcolm <dmalcolm@redhat.com>
* unique-ptr.h: Make include of <memory> conditional on C++11 or
diff --git a/include/plugin-api.h b/include/plugin-api.h
index 3a3e8b456db..f081f85dfaf 100644
--- a/include/plugin-api.h
+++ b/include/plugin-api.h
@@ -365,6 +365,20 @@ enum ld_plugin_status
(*ld_plugin_get_input_section_size) (const struct ld_plugin_section section,
uint64_t *secsize);
+typedef
+enum ld_plugin_status
+(*ld_plugin_new_input_handler) (const struct ld_plugin_input_file *file);
+
+/* The linker's interface for registering the "new_input" handler. This handler
+ will be notified when a new input file has been added after the
+ all_symbols_read event, allowing the plugin to, for example, set a unique
+ segment for sections in plugin-generated input files. */
+
+typedef
+enum ld_plugin_status
+(*ld_plugin_register_new_input) (ld_plugin_new_input_handler handler);
+
+
enum ld_plugin_level
{
LDPL_INFO,
@@ -407,7 +421,8 @@ enum ld_plugin_tag
LDPT_UNIQUE_SEGMENT_FOR_SECTIONS = 27,
LDPT_GET_SYMBOLS_V3 = 28,
LDPT_GET_INPUT_SECTION_ALIGNMENT = 29,
- LDPT_GET_INPUT_SECTION_SIZE = 30
+ LDPT_GET_INPUT_SECTION_SIZE = 30,
+ LDPT_REGISTER_NEW_INPUT_HOOK = 31
};
/* The plugin transfer vector. */
@@ -441,6 +456,7 @@ struct ld_plugin_tv
ld_plugin_unique_segment_for_sections tv_unique_segment_for_sections;
ld_plugin_get_input_section_alignment tv_get_input_section_alignment;
ld_plugin_get_input_section_size tv_get_input_section_size;
+ ld_plugin_register_new_input tv_register_new_input;
} tv_u;
};
diff --git a/intl/ChangeLog b/intl/ChangeLog
index 60822b3b647..6150e7e250f 100644
--- a/intl/ChangeLog
+++ b/intl/ChangeLog
@@ -1,3 +1,8 @@
+2017-11-07 Alan Modra <amodra@gmail.com>
+
+ * configure.ac: Invoke AM_GNU_GETTEXT with need_ngettext.
+ * configure: Regenerate.
+
2016-11-15 Matthias Klose <doko@ubuntu.com>
* configure: Regenerate.
diff --git a/intl/configure b/intl/configure
index d7af03fc71d..2abbcb5b051 100755
--- a/intl/configure
+++ b/intl/configure
@@ -5813,7 +5813,7 @@ $as_echo "$nls_cv_force_use_gnu_gettext" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU gettext in libc" >&5
$as_echo_n "checking for GNU gettext in libc... " >&6; }
-if test "${gt_cv_func_gnugettext1_libc+set}" = set; then :
+if test "${gt_cv_func_gnugettext2_libc+set}" = set; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
@@ -5825,23 +5825,23 @@ int
main ()
{
bindtextdomain ("", "");
-return (int) gettext ("") + _nl_msg_cat_cntr + *_nl_domain_bindings
+return (int) gettext ("") + (int) ngettext ("", "", 0) + _nl_msg_cat_cntr + *_nl_domain_bindings
;
return 0;
}
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
- gt_cv_func_gnugettext1_libc=yes
+ gt_cv_func_gnugettext2_libc=yes
else
- gt_cv_func_gnugettext1_libc=no
+ gt_cv_func_gnugettext2_libc=no
fi
rm -f core conftest.err conftest.$ac_objext \
conftest$ac_exeext conftest.$ac_ext
fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_cv_func_gnugettext1_libc" >&5
-$as_echo "$gt_cv_func_gnugettext1_libc" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_cv_func_gnugettext2_libc" >&5
+$as_echo "$gt_cv_func_gnugettext2_libc" >&6; }
- if test "$gt_cv_func_gnugettext1_libc" != "yes"; then
+ if test "$gt_cv_func_gnugettext2_libc" != "yes"; then
@@ -6223,7 +6223,7 @@ fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU gettext in libintl" >&5
$as_echo_n "checking for GNU gettext in libintl... " >&6; }
-if test "${gt_cv_func_gnugettext1_libintl+set}" = set; then :
+if test "${gt_cv_func_gnugettext2_libintl+set}" = set; then :
$as_echo_n "(cached) " >&6
else
gt_save_CPPFLAGS="$CPPFLAGS"
@@ -6243,19 +6243,19 @@ int
main ()
{
bindtextdomain ("", "");
-return (int) gettext ("") + _nl_msg_cat_cntr + *_nl_expand_alias (0)
+return (int) gettext ("") + (int) ngettext ("", "", 0) + _nl_msg_cat_cntr + *_nl_expand_alias (0)
;
return 0;
}
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
- gt_cv_func_gnugettext1_libintl=yes
+ gt_cv_func_gnugettext2_libintl=yes
else
- gt_cv_func_gnugettext1_libintl=no
+ gt_cv_func_gnugettext2_libintl=no
fi
rm -f core conftest.err conftest.$ac_objext \
conftest$ac_exeext conftest.$ac_ext
- if test "$gt_cv_func_gnugettext1_libintl" != yes && test -n "$LIBICONV"; then
+ if test "$gt_cv_func_gnugettext2_libintl" != yes && test -n "$LIBICONV"; then
LIBS="$LIBS $LIBICONV"
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
@@ -6270,7 +6270,7 @@ int
main ()
{
bindtextdomain ("", "");
-return (int) gettext ("") + _nl_msg_cat_cntr + *_nl_expand_alias (0)
+return (int) gettext ("") + (int) ngettext ("", "", 0) + _nl_msg_cat_cntr + *_nl_expand_alias (0)
;
return 0;
}
@@ -6278,7 +6278,7 @@ _ACEOF
if ac_fn_c_try_link "$LINENO"; then :
LIBINTL="$LIBINTL $LIBICONV"
LTLIBINTL="$LTLIBINTL $LTLIBICONV"
- gt_cv_func_gnugettext1_libintl=yes
+ gt_cv_func_gnugettext2_libintl=yes
fi
rm -f core conftest.err conftest.$ac_objext \
@@ -6287,12 +6287,12 @@ rm -f core conftest.err conftest.$ac_objext \
CPPFLAGS="$gt_save_CPPFLAGS"
LIBS="$gt_save_LIBS"
fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_cv_func_gnugettext1_libintl" >&5
-$as_echo "$gt_cv_func_gnugettext1_libintl" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_cv_func_gnugettext2_libintl" >&5
+$as_echo "$gt_cv_func_gnugettext2_libintl" >&6; }
fi
- if test "$gt_cv_func_gnugettext1_libc" = "yes" \
- || { test "$gt_cv_func_gnugettext1_libintl" = "yes" \
+ if test "$gt_cv_func_gnugettext2_libc" = "yes" \
+ || { test "$gt_cv_func_gnugettext2_libintl" = "yes" \
&& test "$PACKAGE" != gettext-runtime \
&& test "$PACKAGE" != gettext-tools; }; then
gt_use_preinstalled_gnugettext=yes
@@ -6340,7 +6340,7 @@ $as_echo "$USE_NLS" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking where the gettext function comes from" >&5
$as_echo_n "checking where the gettext function comes from... " >&6; }
if test "$gt_use_preinstalled_gnugettext" = "yes"; then
- if test "$gt_cv_func_gnugettext1_libintl" = "yes"; then
+ if test "$gt_cv_func_gnugettext2_libintl" = "yes"; then
gt_source="external libintl"
else
gt_source="libc"
@@ -6355,7 +6355,7 @@ $as_echo "$gt_source" >&6; }
if test "$USE_NLS" = "yes"; then
if test "$gt_use_preinstalled_gnugettext" = "yes"; then
- if test "$gt_cv_func_gnugettext1_libintl" = "yes"; then
+ if test "$gt_cv_func_gnugettext2_libintl" = "yes"; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to link with libintl" >&5
$as_echo_n "checking how to link with libintl... " >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $LIBINTL" >&5
diff --git a/intl/configure.ac b/intl/configure.ac
index 698c330ddb3..36cf97fe4b1 100644
--- a/intl/configure.ac
+++ b/intl/configure.ac
@@ -4,7 +4,7 @@ AC_CONFIG_SRCDIR(gettext.c)
AC_CONFIG_HEADER(config.h)
AC_CONFIG_MACRO_DIR(../config)
AM_GNU_GETTEXT_VERSION(0.12.1)
-AM_GNU_GETTEXT
+AM_GNU_GETTEXT([], [need-ngettext])
# This replaces the extensive use of DEFS in the original Makefile.in.
AC_DEFINE(IN_LIBINTL, 1, [Define because this is libintl.])
diff --git a/libcpp/ChangeLog b/libcpp/ChangeLog
index 39e12bda927..70c834c61d0 100644
--- a/libcpp/ChangeLog
+++ b/libcpp/ChangeLog
@@ -1,3 +1,41 @@
+2017-11-13 Tom Tromey <tom@tromey.com>
+
+ * pch.c (cpp_read_state): Set n__VA_OPT__.
+ * macro.c (vaopt_state): New class.
+ (_cpp_arguments_ok): Check va_opt flag.
+ (replace_args, create_iso_definition): Use vaopt_state.
+ * lex.c (lex_identifier_intern): Possibly issue errors for
+ __VA_OPT__.
+ (lex_identifier): Likewise.
+ (maybe_va_opt_error): New function.
+ * internal.h (struct lexer_state) <va_args_ok>: Update comment.
+ (struct spec_nodes) <n__VA_OPT__>: New field.
+ * init.c (struct lang_flags) <va_opt>: New field.
+ (lang_defaults): Add entries for C++2A. Update all entries for
+ va_opt.
+ (cpp_set_lang): Initialize va_opt.
+ * include/cpplib.h (struct cpp_options) <va_opt>: New field.
+ * identifiers.c (_cpp_init_hashtable): Initialize n__VA_OPT__.
+
+2017-11-13 David Malcolm <dmalcolm@redhat.com>
+
+ * include/line-map.h (linenum_type): Move this typedef and the
+ comment describing column numbering to near the top of the file.
+
+2017-11-06 Mukesh Kapoor <mukesh.kapoor@oracle.com>
+
+ PR c++/80955
+ * lex.c (lex_string): When checking for a valid macro for the
+ warning related to -Wliteral-suffix (CPP_W_LITERAL_SUFFIX),
+ check that the macro name does not start with an underscore
+ before calling is_macro().
+
+2017-11-05 Tom de Vries <tom@codesourcery.com>
+
+ PR other/82784
+ * lex.c (BUF_APPEND): Remove semicolon after
+ "do {} while (0)".
+
2017-10-31 David Malcolm <dmalcolm@redhat.com>
* directives.c (_cpp_handle_directive): Update for renaming of
diff --git a/libcpp/identifiers.c b/libcpp/identifiers.c
index 220f9b97f0d..e456fd3a4fc 100644
--- a/libcpp/identifiers.c
+++ b/libcpp/identifiers.c
@@ -70,6 +70,8 @@ _cpp_init_hashtable (cpp_reader *pfile, cpp_hash_table *table)
s->n_false = cpp_lookup (pfile, DSC("false"));
s->n__VA_ARGS__ = cpp_lookup (pfile, DSC("__VA_ARGS__"));
s->n__VA_ARGS__->flags |= NODE_DIAGNOSTIC;
+ s->n__VA_OPT__ = cpp_lookup (pfile, DSC("__VA_OPT__"));
+ s->n__VA_OPT__->flags |= NODE_DIAGNOSTIC;
s->n__has_include__ = cpp_lookup (pfile, DSC("__has_include__"));
s->n__has_include_next__ = cpp_lookup (pfile, DSC("__has_include_next__"));
}
diff --git a/libcpp/include/cpplib.h b/libcpp/include/cpplib.h
index 5a14858c44f..101b33aef48 100644
--- a/libcpp/include/cpplib.h
+++ b/libcpp/include/cpplib.h
@@ -478,6 +478,9 @@ struct cpp_options
/* Nonzero for C++ 2014 Standard digit separators. */
unsigned char digit_separators;
+ /* Nonzero for C++2a __VA_OPT__ feature. */
+ unsigned char va_opt;
+
/* Holds the name of the target (execution) character set. */
const char *narrow_charset;
diff --git a/libcpp/include/line-map.h b/libcpp/include/line-map.h
index e6960410c00..8b7e5dcd0ae 100644
--- a/libcpp/include/line-map.h
+++ b/libcpp/include/line-map.h
@@ -26,6 +26,29 @@ along with this program; see the file COPYING3. If not see
#define GTY(x) /* nothing */
#endif
+/* Both gcc and emacs number source *lines* starting at 1, but
+ they have differing conventions for *columns*.
+
+ GCC uses a 1-based convention for source columns,
+ whereas Emacs's M-x column-number-mode uses a 0-based convention.
+
+ For example, an error in the initial, left-hand
+ column of source line 3 is reported by GCC as:
+
+ some-file.c:3:1: error: ...etc...
+
+ On navigating to the location of that error in Emacs
+ (e.g. via "next-error"),
+ the locus is reported in the Mode Line
+ (assuming M-x column-number-mode) as:
+
+ some-file.c 10% (3, 0)
+
+ i.e. "3:1:" in GCC corresponds to "(3, 0)" in Emacs. */
+
+/* The type of line numbers. */
+typedef unsigned int linenum_type;
+
/* Reason for creating a new line map with linemap_add. LC_ENTER is
when including a new file, e.g. a #include directive in C.
LC_LEAVE is when reaching a file's end. LC_RENAME is when a file
@@ -43,9 +66,6 @@ enum lc_reason
/* FIXME: add support for stringize and paste. */
};
-/* The type of line numbers. */
-typedef unsigned int linenum_type;
-
/* The typedef "source_location" is a key within the location database,
identifying a source location or macro expansion, along with range
information, and (optionally) a pointer for use by gcc.
@@ -1251,26 +1271,6 @@ typedef struct
bool sysp;
} expanded_location;
-/* Both gcc and emacs number source *lines* starting at 1, but
- they have differing conventions for *columns*.
-
- GCC uses a 1-based convention for source columns,
- whereas Emacs's M-x column-number-mode uses a 0-based convention.
-
- For example, an error in the initial, left-hand
- column of source line 3 is reported by GCC as:
-
- some-file.c:3:1: error: ...etc...
-
- On navigating to the location of that error in Emacs
- (e.g. via "next-error"),
- the locus is reported in the Mode Line
- (assuming M-x column-number-mode) as:
-
- some-file.c 10% (3, 0)
-
- i.e. "3:1:" in GCC corresponds to "(3, 0)" in Emacs. */
-
/* A location within a rich_location: a caret&range, with
the caret potentially flagged for display. */
diff --git a/libcpp/init.c b/libcpp/init.c
index ecc81e3138a..8423656ad10 100644
--- a/libcpp/init.c
+++ b/libcpp/init.c
@@ -91,30 +91,31 @@ struct lang_flags
char digit_separators;
char trigraphs;
char utf8_char_literals;
+ char va_opt;
};
static const struct lang_flags lang_defaults[] =
-{ /* c99 c++ xnum xid c11 std digr ulit rlit udlit bincst digsep trig u8chlit */
- /* GNUC89 */ { 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0 },
- /* GNUC99 */ { 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0 },
- /* GNUC11 */ { 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0 },
- /* GNUC17 */ { 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0 },
- /* STDC89 */ { 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0 },
- /* STDC94 */ { 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0 },
- /* STDC99 */ { 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0 },
- /* STDC11 */ { 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0 },
- /* STDC17 */ { 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0 },
- /* GNUCXX */ { 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0 },
- /* CXX98 */ { 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0 },
- /* GNUCXX11 */ { 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0 },
- /* CXX11 */ { 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0 },
- /* GNUCXX14 */ { 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0 },
- /* CXX14 */ { 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0 },
- /* GNUCXX17 */ { 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1 },
- /* CXX17 */ { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1 },
- /* GNUCXX2A */ { 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1 },
- /* CXX2A */ { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1 },
- /* ASM */ { 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+{ /* c99 c++ xnum xid c11 std digr ulit rlit udlit bincst digsep trig u8chlit vaopt */
+ /* GNUC89 */ { 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1 },
+ /* GNUC99 */ { 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1 },
+ /* GNUC11 */ { 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1 },
+ /* GNUC17 */ { 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1 },
+ /* STDC89 */ { 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0 },
+ /* STDC94 */ { 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0 },
+ /* STDC99 */ { 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0 },
+ /* STDC11 */ { 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0 },
+ /* STDC17 */ { 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0 },
+ /* GNUCXX */ { 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1 },
+ /* CXX98 */ { 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0 },
+ /* GNUCXX11 */ { 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1 },
+ /* CXX11 */ { 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0 },
+ /* GNUCXX14 */ { 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1 },
+ /* CXX14 */ { 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0 },
+ /* GNUCXX17 */ { 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1 },
+ /* CXX17 */ { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0 },
+ /* GNUCXX2A */ { 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1 },
+ /* CXX2A */ { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1 },
+ /* ASM */ { 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
};
/* Sets internal flags correctly for a given language. */
@@ -139,6 +140,7 @@ cpp_set_lang (cpp_reader *pfile, enum c_lang lang)
CPP_OPTION (pfile, digit_separators) = l->digit_separators;
CPP_OPTION (pfile, trigraphs) = l->trigraphs;
CPP_OPTION (pfile, utf8_char_literals) = l->utf8_char_literals;
+ CPP_OPTION (pfile, va_opt) = l->va_opt;
}
/* Initialize library global state. */
diff --git a/libcpp/internal.h b/libcpp/internal.h
index f24e85cfb11..0a33abafd43 100644
--- a/libcpp/internal.h
+++ b/libcpp/internal.h
@@ -246,7 +246,7 @@ struct lexer_state
all directives apart from #define. */
unsigned char save_comments;
- /* Nonzero if lexing __VA_ARGS__ is valid. */
+ /* Nonzero if lexing __VA_ARGS__ and __VA_OPT__ are valid. */
unsigned char va_args_ok;
/* Nonzero if lexing poisoned identifiers is valid. */
@@ -282,6 +282,7 @@ struct spec_nodes
cpp_hashnode *n_true; /* C++ keyword true */
cpp_hashnode *n_false; /* C++ keyword false */
cpp_hashnode *n__VA_ARGS__; /* C99 vararg macros */
+ cpp_hashnode *n__VA_OPT__; /* C++ vararg macros */
cpp_hashnode *n__has_include__; /* __has_include__ operator */
cpp_hashnode *n__has_include_next__; /* __has_include_next__ operator */
};
diff --git a/libcpp/lex.c b/libcpp/lex.c
index 40ff801e8e3..a8dc3bae5e4 100644
--- a/libcpp/lex.c
+++ b/libcpp/lex.c
@@ -1352,6 +1352,28 @@ forms_identifier_p (cpp_reader *pfile, int first,
return false;
}
+/* Helper function to issue error about improper __VA_OPT__ use. */
+static void
+maybe_va_opt_error (cpp_reader *pfile)
+{
+ if (CPP_PEDANTIC (pfile) && !CPP_OPTION (pfile, va_opt))
+ {
+ /* __VA_OPT__ should not be accepted at all, but allow it in
+ system headers. */
+ if (!cpp_in_system_header (pfile))
+ cpp_error (pfile, CPP_DL_PEDWARN,
+ "__VA_OPT__ is not available until C++2a");
+ }
+ else if (!pfile->state.va_args_ok)
+ {
+ /* __VA_OPT__ should only appear in the replacement list of a
+ variadic macro. */
+ cpp_error (pfile, CPP_DL_PEDWARN,
+ "__VA_OPT__ can only appear in the expansion"
+ " of a C++2a variadic macro");
+ }
+}
+
/* Helper function to get the cpp_hashnode of the identifier BASE. */
static cpp_hashnode *
lex_identifier_intern (cpp_reader *pfile, const uchar *base)
@@ -1396,6 +1418,9 @@ lex_identifier_intern (cpp_reader *pfile, const uchar *base)
" of a C99 variadic macro");
}
+ if (result == pfile->spec_nodes.n__VA_OPT__)
+ maybe_va_opt_error (pfile);
+
/* For -Wc++-compat, warn about use of C++ named operators. */
if (result->flags & NODE_WARN_OPERATOR)
cpp_warning (pfile, CPP_W_CXX_OPERATOR_NAMES,
@@ -1485,6 +1510,11 @@ lex_identifier (cpp_reader *pfile, const uchar *base, bool starts_ucn,
" of a C99 variadic macro");
}
+ /* __VA_OPT__ should only appear in the replacement list of a
+ variadic macro. */
+ if (result == pfile->spec_nodes.n__VA_OPT__)
+ maybe_va_opt_error (pfile);
+
/* For -Wc++-compat, warn about use of C++ named operators. */
if (result->flags & NODE_WARN_OPERATOR)
cpp_warning (pfile, CPP_W_CXX_OPERATOR_NAMES,
@@ -1647,7 +1677,7 @@ lex_raw_string (cpp_reader *pfile, cpp_token *token, const uchar *base,
(const uchar *)(STR), (LEN)); \
temp_buffer_len += (LEN); \
} \
- } while (0);
+ } while (0)
orig_base = base;
++cur;
@@ -1871,8 +1901,9 @@ lex_raw_string (cpp_reader *pfile, cpp_token *token, const uchar *base,
/* If a string format macro, say from inttypes.h, is placed touching
a string literal it could be parsed as a C++11 user-defined string
literal thus breaking the program.
- Try to identify macros with is_macro. A warning is issued. */
- if (is_macro (pfile, cur))
+ Try to identify macros with is_macro. A warning is issued.
+ The macro name should not start with '_' for this warning. */
+ if ((*cur != '_') && is_macro (pfile, cur))
{
/* Raise a warning, but do not consume subsequent tokens. */
if (CPP_OPTION (pfile, warn_literal_suffix) && !pfile->state.skipping)
@@ -2001,8 +2032,9 @@ lex_string (cpp_reader *pfile, cpp_token *token, const uchar *base)
/* If a string format macro, say from inttypes.h, is placed touching
a string literal it could be parsed as a C++11 user-defined string
literal thus breaking the program.
- Try to identify macros with is_macro. A warning is issued. */
- if (is_macro (pfile, cur))
+ Try to identify macros with is_macro. A warning is issued.
+ The macro name should not start with '_' for this warning. */
+ if ((*cur != '_') && is_macro (pfile, cur))
{
/* Raise a warning, but do not consume subsequent tokens. */
if (CPP_OPTION (pfile, warn_literal_suffix) && !pfile->state.skipping)
diff --git a/libcpp/macro.c b/libcpp/macro.c
index fab1cb051dc..bf473eae358 100644
--- a/libcpp/macro.c
+++ b/libcpp/macro.c
@@ -89,6 +89,155 @@ struct macro_arg_saved_data {
union _cpp_hashnode_value value;
};
+static const char *vaopt_paste_error =
+ N_("'##' cannot appear at either end of __VA_OPT__");
+
+/* A class for tracking __VA_OPT__ state while iterating over a
+ sequence of tokens. This is used during both macro definition and
+ expansion. */
+class vaopt_state {
+
+ public:
+
+ /* Initialize the state tracker. ANY_ARGS is true if variable
+ arguments were provided to the macro invocation. */
+ vaopt_state (cpp_reader *pfile, bool is_variadic, bool any_args)
+ : m_pfile (pfile),
+ m_allowed (any_args),
+ m_variadic (is_variadic),
+ m_state (0),
+ m_last_was_paste (false),
+ m_paste_location (0),
+ m_location (0)
+ {
+ }
+
+ enum update_type
+ {
+ ERROR,
+ DROP,
+ INCLUDE
+ };
+
+ /* Given a token, update the state of this tracker and return a
+ boolean indicating whether the token should be be included in the
+ expansion. */
+ update_type update (const cpp_token *token)
+ {
+ /* If the macro isn't variadic, just don't bother. */
+ if (!m_variadic)
+ return INCLUDE;
+
+ if (token->type == CPP_NAME
+ && token->val.node.node == m_pfile->spec_nodes.n__VA_OPT__)
+ {
+ if (m_state > 0)
+ {
+ cpp_error_at (m_pfile, CPP_DL_ERROR, token->src_loc,
+ "__VA_OPT__ may not appear in a __VA_OPT__");
+ return ERROR;
+ }
+ ++m_state;
+ m_location = token->src_loc;
+ return DROP;
+ }
+ else if (m_state == 1)
+ {
+ if (token->type != CPP_OPEN_PAREN)
+ {
+ cpp_error_at (m_pfile, CPP_DL_ERROR, m_location,
+ "__VA_OPT__ must be followed by an "
+ "open parenthesis");
+ return ERROR;
+ }
+ ++m_state;
+ return DROP;
+ }
+ else if (m_state >= 2)
+ {
+ if (m_state == 2 && token->type == CPP_PASTE)
+ {
+ cpp_error_at (m_pfile, CPP_DL_ERROR, token->src_loc,
+ vaopt_paste_error);
+ return ERROR;
+ }
+ /* Advance states before further considering this token, in
+ case we see a close paren immediately after the open
+ paren. */
+ if (m_state == 2)
+ ++m_state;
+
+ bool was_paste = m_last_was_paste;
+ m_last_was_paste = false;
+ if (token->type == CPP_PASTE)
+ {
+ m_last_was_paste = true;
+ m_paste_location = token->src_loc;
+ }
+ else if (token->type == CPP_OPEN_PAREN)
+ ++m_state;
+ else if (token->type == CPP_CLOSE_PAREN)
+ {
+ --m_state;
+ if (m_state == 2)
+ {
+ /* Saw the final paren. */
+ m_state = 0;
+
+ if (was_paste)
+ {
+ cpp_error_at (m_pfile, CPP_DL_ERROR, token->src_loc,
+ vaopt_paste_error);
+ return ERROR;
+ }
+
+ return DROP;
+ }
+ }
+ return m_allowed ? INCLUDE : DROP;
+ }
+
+ /* Nothing to do with __VA_OPT__. */
+ return INCLUDE;
+ }
+
+ /* Ensure that any __VA_OPT__ was completed. If ok, return true.
+ Otherwise, issue an error and return false. */
+ bool completed ()
+ {
+ if (m_variadic && m_state != 0)
+ cpp_error_at (m_pfile, CPP_DL_ERROR, m_location,
+ "unterminated __VA_OPT__");
+ return m_state == 0;
+ }
+
+ private:
+
+ /* The cpp_reader. */
+ cpp_reader *m_pfile;
+
+ /* True if there were varargs. */
+ bool m_allowed;
+ /* True if the macro is variadic. */
+ bool m_variadic;
+
+ /* The state variable:
+ 0 means not parsing
+ 1 means __VA_OPT__ seen, looking for "("
+ 2 means "(" seen (so the next token can't be "##")
+ >= 3 means looking for ")", the number encodes the paren depth. */
+ int m_state;
+
+ /* If true, the previous token was ##. This is used to detect when
+ a paste occurs at the end of the sequence. */
+ bool m_last_was_paste;
+ /* The location of the paste token. */
+ source_location m_paste_location;
+
+ /* Location of the __VA_OPT__ token. */
+ source_location m_location;
+};
+
/* Macro expansion. */
static int enter_macro_context (cpp_reader *, cpp_hashnode *,
@@ -776,7 +925,8 @@ _cpp_arguments_ok (cpp_reader *pfile, cpp_macro *macro, const cpp_hashnode *node
if (argc < macro->paramc)
{
- /* As an extension, variadic arguments are allowed to not appear in
+ /* In C++2a (here the va_opt flag is used), and also as a GNU
+ extension, variadic arguments are allowed to not appear in
the invocation at all.
e.g. #define debug(format, args...) something
debug("string");
@@ -786,7 +936,8 @@ _cpp_arguments_ok (cpp_reader *pfile, cpp_macro *macro, const cpp_hashnode *node
if (argc + 1 == macro->paramc && macro->variadic)
{
- if (CPP_PEDANTIC (pfile) && ! macro->syshdr)
+ if (CPP_PEDANTIC (pfile) && ! macro->syshdr
+ && ! CPP_OPTION (pfile, va_opt))
{
if (CPP_OPTION (pfile, cplusplus))
cpp_error (pfile, CPP_DL_PEDWARN,
@@ -1678,6 +1829,8 @@ replace_args (cpp_reader *pfile, cpp_hashnode *node, cpp_macro *macro,
num_macro_tokens);
}
i = 0;
+ vaopt_state vaopt_tracker (pfile, macro->variadic,
+ args[macro->paramc - 1].count > 0);
for (src = macro->exp.tokens; src < limit; src++)
{
unsigned int arg_tokens_count;
@@ -1685,6 +1838,10 @@ replace_args (cpp_reader *pfile, cpp_hashnode *node, cpp_macro *macro,
const cpp_token **paste_flag = NULL;
const cpp_token **tmp_token_ptr;
+ /* __VA_OPT__ handling. */
+ if (vaopt_tracker.update (src) != vaopt_state::INCLUDE)
+ continue;
+
if (src->type != CPP_MACRO_ARG)
{
/* Allocate a virtual location for token SRC, and add that
@@ -3076,6 +3233,9 @@ create_iso_definition (cpp_reader *pfile, cpp_macro *macro)
*token = *ctoken;
}
+ /* The argument doesn't matter here. */
+ vaopt_state vaopt_tracker (pfile, macro->variadic, true);
+
for (;;)
{
/* Check the stringifying # constraint 6.10.3.2.1 of
@@ -3144,10 +3304,16 @@ create_iso_definition (cpp_reader *pfile, cpp_macro *macro)
}
}
+ if (vaopt_tracker.update (token) == vaopt_state::ERROR)
+ return false;
+
following_paste_op = (token->type == CPP_PASTE);
token = lex_expansion_token (pfile, macro);
}
+ if (!vaopt_tracker.completed ())
+ return false;
+
macro->exp.tokens = (cpp_token *) BUFF_FRONT (pfile->a_buff);
macro->traditional = 0;
diff --git a/libcpp/pch.c b/libcpp/pch.c
index cad4b872cda..b685a38a854 100644
--- a/libcpp/pch.c
+++ b/libcpp/pch.c
@@ -835,6 +835,7 @@ cpp_read_state (cpp_reader *r, const char *name, FILE *f,
s->n_true = cpp_lookup (r, DSC("true"));
s->n_false = cpp_lookup (r, DSC("false"));
s->n__VA_ARGS__ = cpp_lookup (r, DSC("__VA_ARGS__"));
+ s->n__VA_OPT__ = cpp_lookup (r, DSC("__VA_OPT__"));
s->n__has_include__ = cpp_lookup (r, DSC("__has_include__"));
s->n__has_include_next__ = cpp_lookup (r, DSC("__has_include_next__"));
}
diff --git a/libgcc/ChangeLog b/libgcc/ChangeLog
index a99d6186ca5..a069cd79511 100644
--- a/libgcc/ChangeLog
+++ b/libgcc/ChangeLog
@@ -1,3 +1,34 @@
+2017-11-14 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
+
+ * config.host (*-*-solaris2*): Adapt comment for Solaris 12
+ renaming.
+ * config/sol2/crtpg.c (__start_crt_compiler): Likewise.
+ * configure.ac (libgcc_cv_solaris_crts): Likewise.
+ * configure: Regenerate.
+
+2017-11-07 Tom de Vries <tom@codesourcery.com>
+
+ * config/rs6000/aix-unwind.h (REGISTER_CFA_OFFSET_FOR): Remove semicolon
+ after "do {} while (0)".
+
+2017-11-07 Tom de Vries <tom@codesourcery.com>
+
+ PR other/82784
+ * config/aarch64/sfp-machine.h (FP_HANDLE_EXCEPTIONS): Remove
+ semicolon after "do {} while (0)".
+ * config/i386/sfp-machine.h (FP_HANDLE_EXCEPTIONS): Same.
+ * config/ia64/sfp-machine.h (FP_HANDLE_EXCEPTIONS): Same.
+ * config/mips/sfp-machine.h (FP_HANDLE_EXCEPTIONS): Same.
+ * config/rs6000/sfp-machine.h (FP_HANDLE_EXCEPTIONS): Same.
+
+2017-11-04 Andreas Tobler <andreast@gcc.gnu.org>
+
+ PR libgcc/82635
+ * config/i386/freebsd-unwind.h (MD_FALLBACK_FRAME_STATE_FOR): Use a
+ sysctl to determine whether we're in a trampoline.
+ Keep the pattern matching method for systems without
+ KERN_PROC_SIGTRAMP sysctl.
+
2017-11-03 Cupertino Miranda <cmiranda@synopsys.com>
Vineet Gupta <vgupta@synopsys.com>
diff --git a/libgcc/config.host b/libgcc/config.host
index ad5a9ff621f..255612febf4 100644
--- a/libgcc/config.host
+++ b/libgcc/config.host
@@ -277,7 +277,7 @@ case ${host} in
tmake_file="$tmake_file $cpu_type/t-sol2"
extra_parts="gmon.o crtbegin.o crtend.o"
if test "${libgcc_cv_solaris_crts}" = yes; then
- # Solaris 11.x and 12 provide crt1.o, crti.o, and crtn.o as part of the
+ # Solaris 11.4 provides crt1.o, crti.o, and crtn.o as part of the
# base system. crtp.o and crtpg.o implement the compiler-dependent parts.
extra_parts="$extra_parts crtp.o crtpg.o"
# If the Solaris CRTs are present, both ld and gld will have PIE support.
diff --git a/libgcc/config/aarch64/sfp-machine.h b/libgcc/config/aarch64/sfp-machine.h
index 6cdfe499794..2295713948e 100644
--- a/libgcc/config/aarch64/sfp-machine.h
+++ b/libgcc/config/aarch64/sfp-machine.h
@@ -90,7 +90,7 @@ void __sfp_handle_exceptions (int);
do { \
if (__builtin_expect (_fex, 0)) \
__sfp_handle_exceptions (_fex); \
- } while (0);
+ } while (0)
#define FP_TRAPPING_EXCEPTIONS ((_fpcr >> FP_EX_SHIFT) & FP_EX_ALL)
diff --git a/libgcc/config/i386/freebsd-unwind.h b/libgcc/config/i386/freebsd-unwind.h
index 0eec584a11e..a8e8edb993a 100644
--- a/libgcc/config/i386/freebsd-unwind.h
+++ b/libgcc/config/i386/freebsd-unwind.h
@@ -28,7 +28,10 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
#include <sys/types.h>
#include <signal.h>
+#include <unistd.h>
+#include <sys/sysctl.h>
#include <sys/ucontext.h>
+#include <sys/user.h>
#include <machine/sigframe.h>
#define REG_NAME(reg) sf_uc.uc_mcontext.mc_## reg
@@ -36,6 +39,38 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
#ifdef __x86_64__
#define MD_FALLBACK_FRAME_STATE_FOR x86_64_freebsd_fallback_frame_state
+#ifdef KERN_PROC_SIGTRAMP
+/* FreeBSD past 9.3 provides a kern.proc.sigtramp.<pid> sysctl that
+ returns the location of the signal trampoline. Use this to find
+ out whether we're in a trampoline.
+*/
+static int
+x86_64_outside_sigtramp_range (unsigned char *pc)
+{
+ static int sigtramp_range_determined = 0;
+ static unsigned char *sigtramp_start, *sigtramp_end;
+
+ if (sigtramp_range_determined == 0)
+ {
+ struct kinfo_sigtramp kst = {0};
+ size_t len = sizeof (kst);
+ int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_SIGTRAMP, getpid() };
+
+ sigtramp_range_determined = 1;
+ if (sysctl (mib, 4, &kst, &len, NULL, 0) == 0)
+ {
+ sigtramp_range_determined = 2;
+ sigtramp_start = kst.ksigtramp_start;
+ sigtramp_end = kst.ksigtramp_end;
+ }
+ }
+ if (sigtramp_range_determined < 2) /* sysctl failed if < 2 */
+ return 1;
+
+ return (pc < sigtramp_start || pc >= sigtramp_end);
+}
+#endif
+
static _Unwind_Reason_Code
x86_64_freebsd_fallback_frame_state
(struct _Unwind_Context *context, _Unwind_FrameState *fs)
@@ -43,6 +78,7 @@ x86_64_freebsd_fallback_frame_state
struct sigframe *sf;
long new_cfa;
+#ifndef KERN_PROC_SIGTRAMP
/* Prior to FreeBSD 9, the signal trampoline was located immediately
before the ps_strings. To support non-executable stacks on AMD64,
the sigtramp was moved to a shared page for FreeBSD 9. Unfortunately
@@ -62,12 +98,15 @@ x86_64_freebsd_fallback_frame_state
&& *(unsigned int *)(context->ra + 8) == 0x01a1c0c7
&& *(unsigned int *)(context->ra + 12) == 0x050f0000 ))
return _URC_END_OF_STACK;
+#else
+ if (x86_64_outside_sigtramp_range(context->ra))
+ return _URC_END_OF_STACK;
+#endif
sf = (struct sigframe *) context->cfa;
new_cfa = sf->REG_NAME(rsp);
fs->regs.cfa_how = CFA_REG_OFFSET;
- /* Register 7 is rsp */
- fs->regs.cfa_reg = 7;
+ fs->regs.cfa_reg = __LIBGCC_STACK_POINTER_REGNUM__;
fs->regs.cfa_offset = new_cfa - (long) context->cfa;
/* The SVR4 register numbering macros aren't usable in libgcc. */
diff --git a/libgcc/config/i386/sfp-machine.h b/libgcc/config/i386/sfp-machine.h
index 8a1923b6c1a..8319f0550bc 100644
--- a/libgcc/config/i386/sfp-machine.h
+++ b/libgcc/config/i386/sfp-machine.h
@@ -58,7 +58,7 @@ void __sfp_handle_exceptions (int);
do { \
if (__builtin_expect (_fex, 0)) \
__sfp_handle_exceptions (_fex); \
- } while (0);
+ } while (0)
#define FP_TRAPPING_EXCEPTIONS ((~_fcw >> FP_EX_SHIFT) & FP_EX_ALL)
diff --git a/libgcc/config/ia64/sfp-machine.h b/libgcc/config/ia64/sfp-machine.h
index 45e844d3daa..da86b83f0f2 100644
--- a/libgcc/config/ia64/sfp-machine.h
+++ b/libgcc/config/ia64/sfp-machine.h
@@ -68,7 +68,7 @@ void __sfp_handle_exceptions (int);
do { \
if (__builtin_expect (_fex, 0)) \
__sfp_handle_exceptions (_fex); \
- } while (0);
+ } while (0)
#define FP_TRAPPING_EXCEPTIONS (~_fcw & FP_EX_ALL)
diff --git a/libgcc/config/mips/sfp-machine.h b/libgcc/config/mips/sfp-machine.h
index 07d11580ee6..5d70875d221 100644
--- a/libgcc/config/mips/sfp-machine.h
+++ b/libgcc/config/mips/sfp-machine.h
@@ -165,7 +165,7 @@ typedef int __gcc_CMPtype __attribute__ ((mode (__libgcc_cmp_return__)));
_fcsr &= ~(1 << 17); \
_fcsr |= _fex | (_fex << FP_EX_CAUSE_SHIFT); \
__builtin_mips_set_fcsr (_fcsr); \
- } while (0);
+ } while (0)
#else
#define FP_EX_INVALID (1 << 4)
diff --git a/libgcc/config/rs6000/aix-unwind.h b/libgcc/config/rs6000/aix-unwind.h
index 156165894ea..9b4ca82f938 100644
--- a/libgcc/config/rs6000/aix-unwind.h
+++ b/libgcc/config/rs6000/aix-unwind.h
@@ -209,7 +209,7 @@ ucontext_for (struct _Unwind_Context *context)
do { \
(FS)->regs.reg[REGNO].how = REG_SAVED_OFFSET; \
(FS)->regs.reg[REGNO].loc.offset = (long) (ADDR) - (CFA); \
-} while (0);
+} while (0)
static _Unwind_Reason_Code
ppc_aix_fallback_frame_state (struct _Unwind_Context *context,
diff --git a/libgcc/config/rs6000/sfp-machine.h b/libgcc/config/rs6000/sfp-machine.h
index ab028fe4211..9975fbf60eb 100644
--- a/libgcc/config/rs6000/sfp-machine.h
+++ b/libgcc/config/rs6000/sfp-machine.h
@@ -129,7 +129,7 @@ void __sfp_handle_exceptions (int);
do { \
if (__builtin_expect (_fex, 0)) \
__sfp_handle_exceptions (_fex); \
- } while (0);
+ } while (0)
/* The FP_EX_* bits track whether the exception has occurred. This macro
must set the FP_EX_* bits of those exceptions which are configured to
diff --git a/libgcc/config/sol2/crtpg.c b/libgcc/config/sol2/crtpg.c
index dcebfcdcdf0..d5966b0226b 100644
--- a/libgcc/config/sol2/crtpg.c
+++ b/libgcc/config/sol2/crtpg.c
@@ -31,11 +31,10 @@ extern char _start[], _etext[];
int __start_crt_compiler (int, char **);
-/* Since Solaris 11.x and Solaris 12, the system-provided CRTs provide a
- hook to invoke initialization code early during process startup.
- __start_crt_compiler is documented in crt1.o(5). We use it to perform
- initialization for profiling as a substitute for the earlier separate
- gcrt1.o. */
+/* Since Solaris 11.4, the system-provided CRTs provide a hook to invoke
+ initialization code early during process startup. __start_crt_compiler
+ is documented in crt1.o(5). We use it to perform initialization for
+ profiling as a substitute for the earlier separate gcrt1.o. */
int
__start_crt_compiler (int argc __attribute__ ((unused)),
diff --git a/libgcc/configure b/libgcc/configure
index 63c50c0c7e2..20169b18fe9 100644
--- a/libgcc/configure
+++ b/libgcc/configure
@@ -4875,7 +4875,7 @@ esac
case ${host} in
*-*-solaris2*)
- # Check for system-provided CRTs on Solaris 11.x and Solaris 12.
+ # Check for system-provided CRTs on Solaris 11.4.
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking system-provided CRTs on Solaris" >&5
$as_echo_n "checking system-provided CRTs on Solaris... " >&6; }
if test "${libgcc_cv_solaris_crts+set}" = set; then :
diff --git a/libgcc/configure.ac b/libgcc/configure.ac
index dd60b01d2f8..53e77757aa8 100644
--- a/libgcc/configure.ac
+++ b/libgcc/configure.ac
@@ -296,7 +296,7 @@ esac
case ${host} in
*-*-solaris2*)
- # Check for system-provided CRTs on Solaris 11.x and Solaris 12.
+ # Check for system-provided CRTs on Solaris 11.4.
AC_CACHE_CHECK([system-provided CRTs on Solaris],
[libgcc_cv_solaris_crts],
[libgcc_cv_solaris_crts=no
diff --git a/libgo/Makefile.am b/libgo/Makefile.am
index bd0bbddd4ed..f0ecf92ca7d 100644
--- a/libgo/Makefile.am
+++ b/libgo/Makefile.am
@@ -1191,7 +1191,7 @@ syscall/wait.lo: go/syscall/wait.c runtime.inc
@$(MKDIR_P) syscall
$(LTCOMPILE) -c -o $@ $(srcdir)/go/syscall/wait.c
-# Solaris 12 changed the type of fields in struct stat.
+# Solaris 11.4 changed the type of fields in struct stat.
# Use a build tag, based on a configure check, to cope.
if LIBGO_IS_SOLARIS
if HAVE_STAT_TIMESPEC
@@ -1474,7 +1474,7 @@ check-am:
@for f in $(TEST_PACKAGES); do \
rm -f $$f-testsum $$f-testlog; \
done
- -@$(MAKE) -k $(TEST_PACKAGES)
+ -@$(MAKE) $(AM_MAKEFLAGS) -k $(TEST_PACKAGES)
@for f in $(TEST_PACKAGES); do \
if test -f $$f-testsum; then \
cat $$f-testsum >> libgo.sum; \
@@ -1488,7 +1488,7 @@ check-multi:
$(MULTIDO) $(AM_MAKEFLAGS) DO=check-am multi-do # $(MAKE)
bench:
- -@$(MAKE) -k $(TEST_PACKAGES) GOBENCH=.
+ -@$(MAKE) $(AM_MAKEFLAGS) -k $(TEST_PACKAGES) GOBENCH=.
MOSTLYCLEANFILES = \
s-runtime_sysinfo s-sigtab s-runtime-inc s-zstdpkglist \
diff --git a/libgo/Makefile.in b/libgo/Makefile.in
index 064df58f00e..794c125516a 100644
--- a/libgo/Makefile.in
+++ b/libgo/Makefile.in
@@ -1218,7 +1218,7 @@ extra_check_libs_cmd_go_internal_load = $(abs_builddir)/libgotool.a
extra_check_libs_cmd_go_internal_work = $(abs_builddir)/libgotool.a
@HAVE_STAT_TIMESPEC_FALSE@@LIBGO_IS_SOLARIS_TRUE@matchargs_os =
-# Solaris 12 changed the type of fields in struct stat.
+# Solaris 11.4 changed the type of fields in struct stat.
# Use a build tag, based on a configure check, to cope.
@HAVE_STAT_TIMESPEC_TRUE@@LIBGO_IS_SOLARIS_TRUE@matchargs_os = --tag=solaristag
@LIBGO_IS_SOLARIS_FALSE@matchargs_os =
@@ -3446,7 +3446,7 @@ check-am:
@for f in $(TEST_PACKAGES); do \
rm -f $$f-testsum $$f-testlog; \
done
- -@$(MAKE) -k $(TEST_PACKAGES)
+ -@$(MAKE) $(AM_MAKEFLAGS) -k $(TEST_PACKAGES)
@for f in $(TEST_PACKAGES); do \
if test -f $$f-testsum; then \
cat $$f-testsum >> libgo.sum; \
@@ -3460,7 +3460,7 @@ check-multi:
$(MULTIDO) $(AM_MAKEFLAGS) DO=check-am multi-do # $(MAKE)
bench:
- -@$(MAKE) -k $(TEST_PACKAGES) GOBENCH=.
+ -@$(MAKE) $(AM_MAKEFLAGS) -k $(TEST_PACKAGES) GOBENCH=.
mostlyclean-local:
find . -name '*.lo' -print | xargs $(LIBTOOL) --mode=clean rm -f
diff --git a/libgo/configure b/libgo/configure
index 1e5e41bed21..4e838166484 100755
--- a/libgo/configure
+++ b/libgo/configure
@@ -11114,7 +11114,7 @@ else
lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
lt_status=$lt_dlunknown
cat > conftest.$ac_ext <<_LT_EOF
-#line 11117 "configure"
+#line 11118 "configure"
#include "confdefs.h"
#if HAVE_DLFCN_H
@@ -11220,7 +11220,7 @@ else
lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
lt_status=$lt_dlunknown
cat > conftest.$ac_ext <<_LT_EOF
-#line 11223 "configure"
+#line 11224 "configure"
#include "confdefs.h"
#if HAVE_DLFCN_H
diff --git a/libgo/configure.ac b/libgo/configure.ac
index 4bfe1698d32..7b0c629653f 100644
--- a/libgo/configure.ac
+++ b/libgo/configure.ac
@@ -752,7 +752,7 @@ STRUCT_EPOLL_EVENT_FD_OFFSET=${libgo_cv_c_epoll_event_fd_offset}
AC_SUBST(STRUCT_EPOLL_EVENT_FD_OFFSET)
dnl Check if <sys/stat.h> uses timespec_t for st_?tim members. Introduced
-dnl in Solaris 12 for XPG7 compatibility.
+dnl in Solaris 11.4 for XPG7 compatibility.
AC_EGREP_HEADER([timespec_t.*st_atim], [sys/stat.h],
[have_stat_timespec=yes], [have_stat_timespec=no])
AM_CONDITIONAL(HAVE_STAT_TIMESPEC, test $have_stat_timespec = yes)
diff --git a/libgo/go/runtime/internal/atomic/atomic.c b/libgo/go/runtime/internal/atomic/atomic.c
index b584656f817..24820f22a42 100644
--- a/libgo/go/runtime/internal/atomic/atomic.c
+++ b/libgo/go/runtime/internal/atomic/atomic.c
@@ -34,7 +34,7 @@ uint64_t
Load64 (uint64_t *ptr)
{
if (((uintptr_t) ptr & 7) != 0)
- ptr = NULL;
+ panicmem ();
return __atomic_load_n (ptr, __ATOMIC_ACQUIRE);
}
@@ -66,7 +66,7 @@ int64_t
Loadint64 (int64_t *ptr)
{
if (((uintptr_t) ptr & 7) != 0)
- ptr = NULL;
+ panicmem ();
return __atomic_load_n (ptr, __ATOMIC_ACQUIRE);
}
@@ -88,7 +88,7 @@ uint64_t
Xadd64 (uint64_t *ptr, int64_t delta)
{
if (((uintptr_t) ptr & 7) != 0)
- ptr = NULL;
+ panicmem ();
return __atomic_add_fetch (ptr, (uint64_t) delta, __ATOMIC_SEQ_CST);
}
@@ -110,7 +110,7 @@ int64_t
Xaddint64 (int64_t *ptr, int64_t delta)
{
if (((uintptr_t) ptr & 7) != 0)
- ptr = NULL;
+ panicmem ();
return __atomic_add_fetch (ptr, delta, __ATOMIC_SEQ_CST);
}
@@ -132,7 +132,7 @@ uint64_t
Xchg64 (uint64_t *ptr, uint64_t new)
{
if (((uintptr_t) ptr & 7) != 0)
- ptr = NULL;
+ panicmem ();
return __atomic_exchange_n (ptr, new, __ATOMIC_SEQ_CST);
}
@@ -184,7 +184,7 @@ _Bool
Cas64 (uint64_t *ptr, uint64_t old, uint64_t new)
{
if (((uintptr_t) ptr & 7) != 0)
- ptr = NULL;
+ panicmem ();
return __atomic_compare_exchange_n (ptr, &old, new, false, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);
}
@@ -226,7 +226,7 @@ void
Store64 (uint64_t *ptr, uint64_t val)
{
if (((uintptr_t) ptr & 7) != 0)
- ptr = NULL;
+ panicmem ();
__atomic_store_n (ptr, val, __ATOMIC_SEQ_CST);
}
diff --git a/libgo/go/runtime/panic.go b/libgo/go/runtime/panic.go
index 2f656038a9e..c39a58d0c4b 100644
--- a/libgo/go/runtime/panic.go
+++ b/libgo/go/runtime/panic.go
@@ -22,6 +22,7 @@ import (
//go:linkname makefuncreturning runtime.makefuncreturning
//go:linkname gorecover runtime.gorecover
//go:linkname deferredrecover runtime.deferredrecover
+//go:linkname panicmem runtime.panicmem
// Temporary for C code to call:
//go:linkname throw runtime.throw
diff --git a/libgo/go/sync/atomic/atomic.c b/libgo/go/sync/atomic/atomic.c
index 7e04027c3f1..32cbf03c5cf 100644
--- a/libgo/go/sync/atomic/atomic.c
+++ b/libgo/go/sync/atomic/atomic.c
@@ -26,7 +26,7 @@ int64_t
SwapInt64 (int64_t *addr, int64_t new)
{
if (((uintptr_t) addr & 7) != 0)
- addr = NULL;
+ panicmem ();
return __atomic_exchange_n (addr, new, __ATOMIC_SEQ_CST);
}
@@ -48,7 +48,7 @@ uint64_t
SwapUint64 (uint64_t *addr, uint64_t new)
{
if (((uintptr_t) addr & 7) != 0)
- addr = NULL;
+ panicmem ();
return __atomic_exchange_n (addr, new, __ATOMIC_SEQ_CST);
}
@@ -215,7 +215,7 @@ LoadInt64 (int64_t *addr)
int64_t v;
if (((uintptr_t) addr & 7) != 0)
- addr = NULL;
+ panicmem ();
v = *addr;
while (! __sync_bool_compare_and_swap (addr, v, v))
v = *addr;
@@ -247,7 +247,7 @@ LoadUint64 (uint64_t *addr)
uint64_t v;
if (((uintptr_t) addr & 7) != 0)
- addr = NULL;
+ panicmem ();
v = *addr;
while (! __sync_bool_compare_and_swap (addr, v, v))
v = *addr;
@@ -308,7 +308,7 @@ StoreInt64 (int64_t *addr, int64_t val)
int64_t v;
if (((uintptr_t) addr & 7) != 0)
- addr = NULL;
+ panicmem ();
v = *addr;
while (! __sync_bool_compare_and_swap (addr, v, val))
v = *addr;
@@ -338,7 +338,7 @@ StoreUint64 (uint64_t *addr, uint64_t val)
uint64_t v;
if (((uintptr_t) addr & 7) != 0)
- addr = NULL;
+ panicmem ();
v = *addr;
while (! __sync_bool_compare_and_swap (addr, v, val))
v = *addr;
diff --git a/libgo/mkrsysinfo.sh b/libgo/mkrsysinfo.sh
index aee0163c365..40bc34bbe4e 100755
--- a/libgo/mkrsysinfo.sh
+++ b/libgo/mkrsysinfo.sh
@@ -168,22 +168,22 @@ grep '^type _zone_net_addr_t ' gen-sysinfo.go | \
sed -e 's/_in6_addr/[16]byte/' \
>> ${OUT}
-# The Solaris 12 _flow_arp_desc_t struct.
+# The Solaris 11.4 _flow_arp_desc_t struct.
grep '^type _flow_arp_desc_t ' gen-sysinfo.go | \
sed -e 's/_in6_addr_t/[16]byte/g' \
>> ${OUT}
-# The Solaris 12 _flow_l3_desc_t struct.
+# The Solaris 11.4 _flow_l3_desc_t struct.
grep '^type _flow_l3_desc_t ' gen-sysinfo.go | \
sed -e 's/_in6_addr_t/[16]byte/g' \
>> ${OUT}
-# The Solaris 12 _mac_ipaddr_t struct.
+# The Solaris 11.3 _mac_ipaddr_t struct.
grep '^type _mac_ipaddr_t ' gen-sysinfo.go | \
sed -e 's/_in6_addr_t/[16]byte/g' \
>> ${OUT}
-# The Solaris 12 _mactun_info_t struct.
+# The Solaris 11.3 _mactun_info_t struct.
grep '^type _mactun_info_t ' gen-sysinfo.go | \
sed -e 's/_in6_addr_t/[16]byte/g' \
>> ${OUT}
diff --git a/libgo/mksysinfo.sh b/libgo/mksysinfo.sh
index cbe5b979418..c3495de72f6 100755
--- a/libgo/mksysinfo.sh
+++ b/libgo/mksysinfo.sh
@@ -1295,22 +1295,22 @@ grep '^type _zone_net_addr_t ' gen-sysinfo.go | \
sed -e 's/_in6_addr/[16]byte/' \
>> ${OUT}
-# The Solaris 12 _flow_arp_desc_t struct.
+# The Solaris 11.4 _flow_arp_desc_t struct.
grep '^type _flow_arp_desc_t ' gen-sysinfo.go | \
sed -e 's/_in6_addr_t/[16]byte/g' \
>> ${OUT}
-# The Solaris 12 _flow_l3_desc_t struct.
+# The Solaris 11.4 _flow_l3_desc_t struct.
grep '^type _flow_l3_desc_t ' gen-sysinfo.go | \
sed -e 's/_in6_addr_t/[16]byte/g' \
>> ${OUT}
-# The Solaris 12 _mac_ipaddr_t struct.
+# The Solaris 11.3 _mac_ipaddr_t struct.
grep '^type _mac_ipaddr_t ' gen-sysinfo.go | \
sed -e 's/_in6_addr_t/[16]byte/g' \
>> ${OUT}
-# The Solaris 12 _mactun_info_t struct.
+# The Solaris 11.3 _mactun_info_t struct.
grep '^type _mactun_info_t ' gen-sysinfo.go | \
sed -e 's/_in6_addr_t/[16]byte/g' \
>> ${OUT}
diff --git a/libgo/runtime/runtime.h b/libgo/runtime/runtime.h
index dd5a958888f..39b5ef883bd 100644
--- a/libgo/runtime/runtime.h
+++ b/libgo/runtime/runtime.h
@@ -211,6 +211,8 @@ extern uint32 runtime_panicking(void)
extern bool runtime_isstarted;
extern bool runtime_isarchive;
+extern void panicmem(void) __asm__ (GOSYM_PREFIX "runtime.panicmem");
+
/*
* common functions and data
*/
diff --git a/libgomp/ChangeLog b/libgomp/ChangeLog
index f68604ca424..7331d412a82 100644
--- a/libgomp/ChangeLog
+++ b/libgomp/ChangeLog
@@ -1,3 +1,33 @@
+2017-11-15 Tom de Vries <tom@codesourcery.com>
+
+ * testsuite/libgomp.oacc-c-c++-common/f-asyncwait-1.c: New test, copied
+ from asyncwait-1.f90. Rewrite into C. Rewrite from float to int.
+ * testsuite/libgomp.oacc-c-c++-common/f-asyncwait-2.c: New test, copied
+ from asyncwait-2.f90. Rewrite into C. Rewrite from float to int.
+ * testsuite/libgomp.oacc-c-c++-common/f-asyncwait-3.c: New test, copied
+ from asyncwait-3.f90. Rewrite into C. Rewrite from float to int.
+
+2017-11-14 Tom de Vries <tom@codesourcery.com>
+
+ * testsuite/libgomp.oacc-c-c++-common/asyncwait-1.c: Allow to run for
+ non-nvidia devices.
+
+2017-11-07 Jakub Jelinek <jakub@redhat.com>
+
+ PR c++/82835
+ * testsuite/libgomp.c++/pr82835.C: New test.
+
+2017-11-06 Martin Liska <mliska@suse.cz>
+
+ * testsuite/libgomp.c++/loop-2.C: Return a value
+ for functions with non-void return type, or change type to void,
+ or add -Wno-return-type for test.
+ * testsuite/libgomp.c++/loop-4.C: Likewise.
+ * testsuite/libgomp.c++/parallel-1.C: Likewise.
+ * testsuite/libgomp.c++/shared-1.C: Likewise.
+ * testsuite/libgomp.c++/single-1.C: Likewise.
+ * testsuite/libgomp.c++/single-2.C: Likewise.
+
2017-10-31 Tom de Vries <tom@codesourcery.com>
* plugin/plugin-hsa.c (HSA_LOG): Remove semicolon after
diff --git a/libgomp/testsuite/libgomp.c++/loop-2.C b/libgomp/testsuite/libgomp.c++/loop-2.C
index ea3dc588afd..77144b8e7c6 100644
--- a/libgomp/testsuite/libgomp.c++/loop-2.C
+++ b/libgomp/testsuite/libgomp.c++/loop-2.C
@@ -15,6 +15,7 @@ void parloop (int *a)
a[i] = i + 3;
}
+int
main()
{
int i, a[N];
diff --git a/libgomp/testsuite/libgomp.c++/loop-4.C b/libgomp/testsuite/libgomp.c++/loop-4.C
index 731f2345021..a940854c637 100644
--- a/libgomp/testsuite/libgomp.c++/loop-4.C
+++ b/libgomp/testsuite/libgomp.c++/loop-4.C
@@ -1,5 +1,6 @@
extern "C" void abort (void);
+int
main()
{
int i, a;
diff --git a/libgomp/testsuite/libgomp.c++/parallel-1.C b/libgomp/testsuite/libgomp.c++/parallel-1.C
index 3c931471328..ce338d0ddf9 100644
--- a/libgomp/testsuite/libgomp.c++/parallel-1.C
+++ b/libgomp/testsuite/libgomp.c++/parallel-1.C
@@ -8,6 +8,7 @@ foo (void)
return 10;
}
+int
main ()
{
int A = 0;
diff --git a/libgomp/testsuite/libgomp.c++/pr82835.C b/libgomp/testsuite/libgomp.c++/pr82835.C
new file mode 100644
index 00000000000..df64ecfb1cf
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c++/pr82835.C
@@ -0,0 +1,34 @@
+// PR c++/82835
+// { dg-do run }
+
+int a, b;
+
+template <class>
+struct C {
+ C (int x = a) : c (5) { if (x != 137) __builtin_abort (); }
+ int c;
+};
+
+struct D {
+ void foo ();
+ int d;
+};
+
+void
+D::foo ()
+{
+ C<int> c;
+#pragma omp for private (c)
+ for (b = 0; b < d; b++)
+ c.c++;
+}
+
+int
+main ()
+{
+ a = 137;
+ D d;
+ d.d = 16;
+ d.foo ();
+ return 0;
+}
diff --git a/libgomp/testsuite/libgomp.c++/shared-1.C b/libgomp/testsuite/libgomp.c++/shared-1.C
index 334a553ce23..2f61daa1213 100644
--- a/libgomp/testsuite/libgomp.c++/shared-1.C
+++ b/libgomp/testsuite/libgomp.c++/shared-1.C
@@ -53,6 +53,7 @@ parallel (int a, int b)
abort ();
}
+int
main()
{
parallel (1, 2);
diff --git a/libgomp/testsuite/libgomp.c++/single-1.C b/libgomp/testsuite/libgomp.c++/single-1.C
index e318a48ca5c..221236f24f2 100644
--- a/libgomp/testsuite/libgomp.c++/single-1.C
+++ b/libgomp/testsuite/libgomp.c++/single-1.C
@@ -1,5 +1,6 @@
extern "C" void abort (void);
+int
main()
{
int i = 0;
diff --git a/libgomp/testsuite/libgomp.c++/single-2.C b/libgomp/testsuite/libgomp.c++/single-2.C
index c2dd228568d..d24b1d85e66 100644
--- a/libgomp/testsuite/libgomp.c++/single-2.C
+++ b/libgomp/testsuite/libgomp.c++/single-2.C
@@ -7,6 +7,7 @@ struct X
int c;
};
+int
main()
{
int i = 0;
diff --git a/libgomp/testsuite/libgomp.oacc-c-c++-common/asyncwait-1.c b/libgomp/testsuite/libgomp.oacc-c-c++-common/asyncwait-1.c
index d478ce2eef5..e780845a793 100644
--- a/libgomp/testsuite/libgomp.oacc-c-c++-common/asyncwait-1.c
+++ b/libgomp/testsuite/libgomp.oacc-c-c++-common/asyncwait-1.c
@@ -1,9 +1,11 @@
-/* { dg-do run { target openacc_nvidia_accel_selected } } */
-/* { dg-additional-options "-lcuda" } */
+/* { dg-do run } */
+/* { dg-additional-options "-lcuda" { target openacc_nvidia_accel_selected } } */
#include <openacc.h>
#include <stdlib.h>
+#if defined ACC_DEVICE_TYPE_nvidia
#include "cuda.h"
+#endif
#include <stdio.h>
#include <sys/time.h>
@@ -11,14 +13,18 @@
int
main (int argc, char **argv)
{
+#if defined ACC_DEVICE_TYPE_nvidia
CUresult r;
CUstream stream1;
+#endif
int N = 128; //1024 * 1024;
float *a, *b, *c, *d, *e;
int i;
int nbytes;
+#if defined ACC_DEVICE_TYPE_nvidia
acc_init (acc_device_nvidia);
+#endif
nbytes = N * sizeof (float);
@@ -210,6 +216,7 @@ main (int argc, char **argv)
}
+#if defined ACC_DEVICE_TYPE_nvidia
r = cuStreamCreate (&stream1, CU_STREAM_NON_BLOCKING);
if (r != CUDA_SUCCESS)
{
@@ -218,6 +225,7 @@ main (int argc, char **argv)
}
acc_set_cuda_stream (1, stream1);
+#endif
for (i = 0; i < N; i++)
{
@@ -642,6 +650,7 @@ main (int argc, char **argv)
}
+#if defined ACC_DEVICE_TYPE_nvidia
r = cuStreamCreate (&stream1, CU_STREAM_NON_BLOCKING);
if (r != CUDA_SUCCESS)
{
@@ -650,6 +659,7 @@ main (int argc, char **argv)
}
acc_set_cuda_stream (1, stream1);
+#endif
for (i = 0; i < N; i++)
{
@@ -892,7 +902,9 @@ main (int argc, char **argv)
abort ();
}
+#if defined ACC_DEVICE_TYPE_nvidia
acc_shutdown (acc_device_nvidia);
+#endif
return 0;
}
diff --git a/libgomp/testsuite/libgomp.oacc-c-c++-common/f-asyncwait-1.c b/libgomp/testsuite/libgomp.oacc-c-c++-common/f-asyncwait-1.c
new file mode 100644
index 00000000000..cf851707dc7
--- /dev/null
+++ b/libgomp/testsuite/libgomp.oacc-c-c++-common/f-asyncwait-1.c
@@ -0,0 +1,297 @@
+/* { dg-do run } */
+
+/* Based on asyncwait-1.f90. */
+
+#include <stdlib.h>
+
+#define N 64
+
+int
+main (void)
+{
+ int *a, *b, *c, *d, *e;
+
+ a = (int*)malloc (N * sizeof (*a));
+ b = (int*)malloc (N * sizeof (*b));
+ c = (int*)malloc (N * sizeof (*c));
+ d = (int*)malloc (N * sizeof (*d));
+ e = (int*)malloc (N * sizeof (*e));
+
+ for (int i = 0; i < N; ++i)
+ {
+ a[i] = 3;
+ b[i] = 0;
+ }
+
+#pragma acc data copy (a[0:N]) copy (b[0:N])
+ {
+
+#pragma acc parallel async
+#pragma acc loop
+ for (int i = 0; i < N; ++i)
+ b[i] = a[i];
+
+#pragma acc wait
+ }
+
+ for (int i = 0; i < N; ++i)
+ {
+ if (a[i] != 3)
+ abort ();
+ if (b[i] != 3)
+ abort ();
+ }
+
+ for (int i = 0; i < N; ++i)
+ {
+ a[i] = 2;
+ b[i] = 0;
+ }
+
+#pragma acc data copy (a[0:N]) copy (b[0:N])
+ {
+#pragma acc parallel async (1)
+#pragma acc loop
+ for (int i = 0; i < N; ++i)
+ b[i] = a[i];
+
+#pragma acc wait (1)
+ }
+
+ for (int i = 0; i < N; ++i)
+ {
+ if (a[i] != 2) abort ();
+ if (b[i] != 2) abort ();
+ }
+
+ for (int i = 0; i < N; ++i)
+ {
+ a[i] = 3;
+ b[i] = 0;
+ c[i] = 0;
+ d[i] = 0;
+ }
+
+#pragma acc data copy (a[0:N]) copy (b[0:N]) copy (c[0:N]) copy (d[0:N])
+ {
+
+#pragma acc parallel async (1)
+ for (int i = 0; i < N; ++i)
+ b[i] = (a[i] * a[i] * a[i]) / a[i];
+
+#pragma acc parallel async (1)
+ for (int i = 0; i < N; ++i)
+ c[i] = (a[i] * 4) / a[i];
+
+
+#pragma acc parallel async (1)
+#pragma acc loop
+ for (int i = 0; i < N; ++i)
+ d[i] = ((a[i] * a[i] + a[i]) / a[i]) - a[i];
+
+#pragma acc wait (1)
+ }
+
+ for (int i = 0; i < N; ++i)
+ {
+ if (a[i] != 3)
+ abort ();
+ if (b[i] != 9)
+ abort ();
+ if (c[i] != 4)
+ abort ();
+ if (d[i] != 1)
+ abort ();
+ }
+
+ for (int i = 0; i < N; ++i)
+ {
+ a[i] = 2;
+ b[i] = 0;
+ c[i] = 0;
+ d[i] = 0;
+ e[i] = 0;
+ }
+
+#pragma acc data copy (a[0:N], b[0:N], c[0:N], d[0:N], e[0:N])
+ {
+
+#pragma acc parallel async (1)
+ for (int i = 0; i < N; ++i)
+ b[i] = (a[i] * a[i] * a[i]) / a[i];
+
+#pragma acc parallel async (1)
+#pragma acc loop
+ for (int i = 0; i < N; ++i)
+ c[i] = (a[i] * 4) / a[i];
+
+#pragma acc parallel async (1)
+#pragma acc loop
+ for (int i = 0; i < N; ++i)
+ d[i] = ((a[i] * a[i] + a[i]) / a[i]) - a[i];
+
+
+#pragma acc parallel wait (1) async (1)
+#pragma acc loop
+ for (int i = 0; i < N; ++i)
+ e[i] = a[i] + b[i] + c[i] + d[i];
+
+#pragma acc wait (1)
+ }
+
+ for (int i = 0; i < N; ++i)
+ {
+ if (a[i] != 2)
+ abort ();
+ if (b[i] != 4)
+ abort ();
+ if (c[i] != 4)
+ abort ();
+ if (d[i] != 1)
+ abort ();
+ if (e[i] != 11)
+ abort ();
+ }
+
+ for (int i = 0; i < N; ++i)
+ {
+ a[i] = 3;
+ b[i] = 0;
+ }
+
+#pragma acc data copy (a[0:N]) copy (b[0:N])
+ {
+
+#pragma acc kernels async
+#pragma acc loop
+ for (int i = 0; i < N; ++i)
+ b[i] = a[i];
+
+#pragma acc wait
+ }
+
+ for (int i = 0; i < N; ++i)
+ {
+ if (a[i] != 3)
+ abort ();
+ if (b[i] != 3)
+ abort ();
+ }
+
+ for (int i = 0; i < N; ++i)
+ {
+ a[i] = 2;
+ b[i] = 0;
+ }
+
+#pragma acc data copy (a[0:N]) copy (b[0:N])
+ {
+#pragma acc kernels async (1)
+#pragma acc loop
+ for (int i = 0; i < N; ++i)
+ b[i] = a[i];
+
+#pragma acc wait (1)
+ }
+
+ for (int i = 0; i < N; ++i)
+ {
+ if (a[i] != 2)
+ abort ();
+ if (b[i] != 2)
+ abort ();
+ }
+
+ for (int i = 0; i < N; ++i)
+ {
+ a[i] = 3;
+ b[i] = 0;
+ c[i] = 0;
+ d[i] = 0;
+ }
+
+#pragma acc data copy (a[0:N]) copy (b[0:N]) copy (c[0:N]) copy (d[0:N])
+ {
+#pragma acc kernels async (1)
+ for (int i = 0; i < N; ++i)
+ b[i] = (a[i] * a[i] * a[i]) / a[i];
+
+#pragma acc kernels async (1)
+ for (int i = 0; i < N; ++i)
+ c[i] = (a[i] * 4) / a[i];
+
+#pragma acc kernels async (1)
+#pragma acc loop
+ for (int i = 0; i < N; ++i)
+ d[i] = ((a[i] * a[i] + a[i]) / a[i]) - a[i];
+
+#pragma acc wait (1)
+ }
+
+ for (int i = 0; i < N; ++i)
+ {
+ if (a[i] != 3)
+ abort ();
+ if (b[i] != 9)
+ abort ();
+ if (c[i] != 4)
+ abort ();
+ if (d[i] != 1)
+ abort ();
+ }
+
+ for (int i = 0; i < N; ++i)
+ {
+ a[i] = 2;
+ b[i] = 0;
+ c[i] = 0;
+ d[i] = 0;
+ e[i] = 0;
+ }
+
+#pragma acc data copy (a[0:N], b[0:N], c[0:N], d[0:N], e[0:N])
+ {
+#pragma acc kernels async (1)
+ for (int i = 0; i < N; ++i)
+ b[i] = (a[i] * a[i] * a[i]) / a[i];
+
+#pragma acc kernels async (1)
+#pragma acc loop
+ for (int i = 0; i < N; ++i)
+ c[i] = (a[i] * 4) / a[i];
+
+#pragma acc kernels async (1)
+#pragma acc loop
+ for (int i = 0; i < N; ++i)
+ d[i] = ((a[i] * a[i] + a[i]) / a[i]) - a[i];
+
+#pragma acc kernels wait (1) async (1)
+#pragma acc loop
+ for (int i = 0; i < N; ++i)
+ e[i] = a[i] + b[i] + c[i] + d[i];
+
+#pragma acc wait (1)
+ }
+
+ for (int i = 0; i < N; ++i)
+ {
+ if (a[i] != 2)
+ abort ();
+ if (b[i] != 4)
+ abort ();
+ if (c[i] != 4)
+ abort ();
+ if (d[i] != 1)
+ abort ();
+ if (e[i] != 11)
+ abort ();
+ }
+
+ free (a);
+ free (b);
+ free (c);
+ free (d);
+ free (e);
+
+ return 0;
+}
diff --git a/libgomp/testsuite/libgomp.oacc-c-c++-common/f-asyncwait-2.c b/libgomp/testsuite/libgomp.oacc-c-c++-common/f-asyncwait-2.c
new file mode 100644
index 00000000000..5298e4c54f7
--- /dev/null
+++ b/libgomp/testsuite/libgomp.oacc-c-c++-common/f-asyncwait-2.c
@@ -0,0 +1,61 @@
+/* { dg-do run } */
+
+/* Based on asyncwait-2.f90. */
+
+#include <stdlib.h>
+
+#define N 64
+
+int *a, *b, *c;
+
+int
+main (void)
+{
+ a = (int *)malloc (N * sizeof (*a));
+ b = (int *)malloc (N * sizeof (*b));
+ c = (int *)malloc (N * sizeof (*c));
+
+#pragma acc parallel copy (a[0:N]) async (0)
+#pragma acc loop
+ for (int i = 0; i < N; ++i)
+ a[i] = 1;
+
+#pragma acc parallel copy (b[0:N]) async (1)
+#pragma acc loop
+ for (int i = 0; i < N; ++i)
+ b[i] = 1;
+
+#pragma acc parallel copy (a[0:N], b[0:N], c[0:N]) wait (0, 1)
+#pragma acc loop
+ for (int i = 0; i < N; ++i)
+ c[i] = a[i] + b[i];
+
+ for (int i = 0; i < N; ++i)
+ if (c[i] != 2)
+ abort ();
+
+#if 1
+#pragma acc kernels copy (a[0:N]) async (0)
+#pragma acc loop
+ for (int i = 0; i < N; ++i)
+ a[i] = 1;
+
+#pragma acc kernels copy (b[0:N]) async (1)
+#pragma acc loop
+ for (int i = 0; i < N; ++i)
+ b[i] = 1;
+
+#pragma acc kernels copy (a[0:N], b[0:N], c[0:N]) wait (0, 1)
+#pragma acc loop
+ for (int i = 0; i < N; ++i)
+ c[i] = a[i] + b[i];
+
+ for (int i = 0; i < N; ++i)
+ if (c[i] != 2)
+ abort ();
+#endif
+
+ free (a);
+ free (b);
+ free (c);
+}
diff --git a/libgomp/testsuite/libgomp.oacc-c-c++-common/f-asyncwait-3.c b/libgomp/testsuite/libgomp.oacc-c-c++-common/f-asyncwait-3.c
new file mode 100644
index 00000000000..319eea61dc7
--- /dev/null
+++ b/libgomp/testsuite/libgomp.oacc-c-c++-common/f-asyncwait-3.c
@@ -0,0 +1,63 @@
+/* { dg-do run } */
+
+/* Based on asyncwait-3.f90. */
+
+#include <stdlib.h>
+
+#define N 64
+
+int
+main (void)
+{
+ int *a, *b, *c;
+
+ a = (int *)malloc (N * sizeof (*a));
+ b = (int *)malloc (N * sizeof (*b));
+ c = (int *)malloc (N * sizeof (*c));
+
+#pragma acc parallel copy (a[0:N]) async (0)
+#pragma acc loop
+ for (int i = 0; i < N; ++i)
+ a[i] = 1;
+
+#pragma acc parallel copy (b[0:N]) async (1)
+#pragma acc loop
+ for (int i = 0; i < N; ++i)
+ b[i] = 1;
+
+#pragma acc wait (0, 1)
+
+#pragma acc parallel copy (a[0:N], b[0:N], c[0:N])
+#pragma acc loop
+ for (int i = 0; i < N; ++i)
+ c[i] = a[i] + b[i];
+
+ for (int i = 0; i < N; ++i)
+ if (c[i] != 2)
+ abort ();
+
+#pragma acc kernels copy (a[0:N]) async (0)
+#pragma acc loop
+ for (int i = 0; i < N; ++i)
+ a[i] = 1;
+
+#pragma acc kernels copy (b[0:N]) async (1)
+#pragma acc loop
+ for (int i = 0; i < N; ++i)
+ b[i] = 1;
+
+#pragma acc wait (0, 1)
+
+#pragma acc kernels copy (a[0:N], b[0:N], c[0:N])
+#pragma acc loop
+ for (int i = 0; i < N; ++i)
+ c[i] = a[i] + b[i];
+
+ for (int i = 0; i < N; ++i)
+ if (c[i] != 2)
+ abort ();
+
+ free (a);
+ free (b);
+ free (c);
+}
diff --git a/libquadmath/ChangeLog b/libquadmath/ChangeLog
index c897d33a2bc..1d152bbd8b6 100644
--- a/libquadmath/ChangeLog
+++ b/libquadmath/ChangeLog
@@ -1,3 +1,9 @@
+2017-11-05 Tom de Vries <tom@codesourcery.com>
+
+ PR other/82784
+ * printf/gmp-impl.h (MPN_MUL_N_RECURSE): Remove semicolon after
+ "do {} while (0)".
+
2017-09-01 Michael Meissner <meissner@linux.vnet.ibm.com>
PR libquadmath/81848
diff --git a/libquadmath/printf/gmp-impl.h b/libquadmath/printf/gmp-impl.h
index 969574c853a..94d88efc57f 100644
--- a/libquadmath/printf/gmp-impl.h
+++ b/libquadmath/printf/gmp-impl.h
@@ -91,7 +91,7 @@ typedef unsigned int UHWtype;
impn_mul_n_basecase (prodp, up, vp, size); \
else \
impn_mul_n (prodp, up, vp, size, tspace); \
- } while (0);
+ } while (0)
#define __MPN(x) __quadmath_mpn_##x
diff --git a/libsanitizer/ChangeLog b/libsanitizer/ChangeLog
index 63e71317cbf..f94a53bcd37 100644
--- a/libsanitizer/ChangeLog
+++ b/libsanitizer/ChangeLog
@@ -1,3 +1,17 @@
+2017-11-08 Jakub Jelinek <jakub@redhat.com>
+
+ PR bootstrap/82670
+ * ubsan/Makefile.am (ubsan_files): Remove ubsan_init_standalone.cc
+ and ubsan_signals_standalone.cc.
+ * ubsan/Makefile.in: Regenerated.
+
+2017-11-05 Tom de Vries <tom@codesourcery.com>
+
+ PR other/82784
+ * asan/asan_poisoning.cc (CHECK_SMALL_REGION): Remove semicolon after
+ "do {} while (0)".
+ * lsan/lsan_common.cc (LOG_POINTERS, LOG_THREADS): Same.
+
2017-10-20 Jakub Jelinek <jakub@redhat.com>
PR sanitizer/82595
diff --git a/libsanitizer/asan/asan_poisoning.cc b/libsanitizer/asan/asan_poisoning.cc
index 15cd8eaac3e..1343dfbd39e 100644
--- a/libsanitizer/asan/asan_poisoning.cc
+++ b/libsanitizer/asan/asan_poisoning.cc
@@ -215,7 +215,7 @@ uptr __asan_region_is_poisoned(uptr beg, uptr size) {
uptr __bad = __asan_region_is_poisoned(__p, __size); \
__asan_report_error(pc, bp, sp, __bad, isWrite, __size, 0);\
} \
- } while (false); \
+ } while (false)
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
diff --git a/libsanitizer/lsan/lsan_common.cc b/libsanitizer/lsan/lsan_common.cc
index a3274d5c1c3..4afce9df071 100644
--- a/libsanitizer/lsan/lsan_common.cc
+++ b/libsanitizer/lsan/lsan_common.cc
@@ -55,12 +55,12 @@ void RegisterLsanFlags(FlagParser *parser, Flags *f) {
#define LOG_POINTERS(...) \
do { \
if (flags()->log_pointers) Report(__VA_ARGS__); \
- } while (0);
+ } while (0)
#define LOG_THREADS(...) \
do { \
if (flags()->log_threads) Report(__VA_ARGS__); \
- } while (0);
+ } while (0)
ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
static SuppressionContext *suppression_ctx = nullptr;
diff --git a/libsanitizer/ubsan/Makefile.am b/libsanitizer/ubsan/Makefile.am
index cce728ceeca..fea000c0c8e 100644
--- a/libsanitizer/ubsan/Makefile.am
+++ b/libsanitizer/ubsan/Makefile.am
@@ -22,10 +22,7 @@ ubsan_plugin_files = \
ubsan_type_hash_win.cc \
ubsan_value.cc
-ubsan_files = \
- $(ubsan_plugin_files) \
- ubsan_init_standalone.cc \
- ubsan_signals_standalone.cc
+ubsan_files = $(ubsan_plugin_files)
libubsan_la_SOURCES = $(ubsan_files)
libubsan_la_LIBADD = $(top_builddir)/sanitizer_common/libsanitizer_common.la
diff --git a/libsanitizer/ubsan/Makefile.in b/libsanitizer/ubsan/Makefile.in
index 9552ec1425d..e757d29cd4a 100644
--- a/libsanitizer/ubsan/Makefile.in
+++ b/libsanitizer/ubsan/Makefile.in
@@ -111,8 +111,7 @@ am__objects_1 = ubsan_diag.lo ubsan_flags.lo ubsan_handlers.lo \
ubsan_handlers_cxx.lo ubsan_init.lo ubsan_type_hash.lo \
ubsan_type_hash_itanium.lo ubsan_type_hash_win.lo \
ubsan_value.lo
-am__objects_2 = $(am__objects_1) ubsan_init_standalone.lo \
- ubsan_signals_standalone.lo
+am__objects_2 = $(am__objects_1)
am_libubsan_la_OBJECTS = $(am__objects_2)
libubsan_la_OBJECTS = $(am_libubsan_la_OBJECTS)
libubsan_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \
@@ -306,11 +305,7 @@ ubsan_plugin_files = \
ubsan_type_hash_win.cc \
ubsan_value.cc
-ubsan_files = \
- $(ubsan_plugin_files) \
- ubsan_init_standalone.cc \
- ubsan_signals_standalone.cc
-
+ubsan_files = $(ubsan_plugin_files)
libubsan_la_SOURCES = $(ubsan_files)
libubsan_la_LIBADD = \
$(top_builddir)/sanitizer_common/libsanitizer_common.la \
@@ -436,8 +431,6 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ubsan_handlers.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ubsan_handlers_cxx.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ubsan_init.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ubsan_init_standalone.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ubsan_signals_standalone.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ubsan_type_hash.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ubsan_type_hash_itanium.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ubsan_type_hash_win.Plo@am__quote@
diff --git a/libstdc++-v3/ChangeLog b/libstdc++-v3/ChangeLog
index b12fa76551d..99bfbbad707 100644
--- a/libstdc++-v3/ChangeLog
+++ b/libstdc++-v3/ChangeLog
@@ -1,3 +1,82 @@
+2017-11-15 Jason Merrill <jason@redhat.com>
+
+ * include/std/numeric (gcd): Fix typo.
+
+2017-11-15 Ville Voutilainen <ville.voutilainen@gmail.com>
+
+ Fix gcd breakage.
+ * include/std/numeric (gcd): Use remove_cv_t, not remove_cv.
+
+2017-11-15 Jonathan Wakely <jwakely@redhat.com>
+
+ * testsuite/27_io/filesystem/iterators/directory_iterator.cc: Leave
+ error_code unset.
+ * testsuite/27_io/filesystem/iterators/recursive_directory_iterator.cc:
+ Check for past-the-end before dereferencing.
+ * testsuite/experimental/filesystem/iterators/
+ recursive_directory_iterator.cc: Likewise.
+
+ * include/bits/range_access.h (size, empty, data): Add conditional
+ noexcept to generic overloads.
+
+2017-11-14 Ville Voutilainen <ville.voutilainen@gmail.com>
+
+ Implement LWG 2733 and LWG 2759
+ * include/experimental/numeric (gcd): Reject cv-qualified bool.
+ (lcm): Likewise.
+ * include/std/numeric (gcd): Likewise.
+ (lcm): Likewise.
+ * testsuite/26_numerics/gcd/gcd_neg.cc: Add tests and adjust.
+ * testsuite/26_numerics/lcm/lcm_neg.cc: Likewise.
+
+2017-11-14 Jonathan Wakely <jwakely@redhat.com>
+
+ * include/bits/locale_conv.h (wbuffer_convert::_M_conv_get): Fix typo.
+ * testsuite/22_locale/conversions/buffer/3.cc: New test.
+
+2017-11-10 Jonathan Wakely <jwakely@redhat.com>
+
+ * testsuite/util/testsuite_tr1.h (ThrowMoveConsClass): Use noexcept.
+
+ PR libstdc++/82917
+ * include/std/fstream (basic_ifstream::open, basic_ofstream::open)
+ (basic_fstream::open): Fix missing return.
+
+2017-11-07 Jonathan Wakely <jwakely@redhat.com>
+
+ * src/filesystem/ops-common.h (make_file_type) [S_ISSOCK]: Only use
+ S_ISSOCK when defined.
+
+2017-11-06 François Dumont <fdumont@gcc.gnu.org>
+
+ * testsuite/libstdc++-prettyprinters/tr1.cc: Compile with -O0.
+
+2017-11-06 Martin Liska <mliska@suse.cz>
+
+ * testsuite/27_io/basic_fstream/cons/char/path.cc (main):
+ Return a value for functions with non-void return type,
+ or change type to void, or add -Wno-return-type for test.
+ * testsuite/27_io/basic_ifstream/cons/char/path.cc (main):
+ Likewise.
+ * testsuite/27_io/basic_ofstream/open/char/path.cc (main):
+ Likewise.
+
+2017-11-06 Paolo Carlini <paolo.carlini@oracle.com>
+
+ * testsuite/20_util/optional/cons/deduction.cc: Avoid -Wreturn-type
+ warnings.
+ * testsuite/20_util/pair/cons/deduction.cc: Likewise.
+ * testsuite/20_util/pair/traits.cc: Likewise.
+ * testsuite/20_util/tuple/cons/deduction.cc: Likewise.
+ * testsuite/20_util/variant/compile.cc: Likewise.
+ * testsuite/23_containers/map/modifiers/try_emplace/1.cc: Likewise.
+ * testsuite/23_containers/unordered_map/modifiers/try_emplace.cc:
+ Likewise.
+
+2017-11-05 Gerald Pfeifer <gerald@pfeifer.com>
+
+ * doc/xml/manual/abi.xml: Move docs.oracle.com references to https.
+
2017-11-03 Jonathan Wakely <jwakely@redhat.com>
* include/std/type_traits (endian): Define new enumeration type.
diff --git a/libstdc++-v3/doc/xml/manual/abi.xml b/libstdc++-v3/doc/xml/manual/abi.xml
index 58950c990ea..ba286a28a79 100644
--- a/libstdc++-v3/doc/xml/manual/abi.xml
+++ b/libstdc++-v3/doc/xml/manual/abi.xml
@@ -1140,7 +1140,7 @@ gcc test.c -g -O2 -L. -lone -ltwo /usr/lib/libstdc++.so.5 /usr/lib/libstdc++.so.
<biblioentry>
<title>
<link xmlns:xlink="http://www.w3.org/1999/xlink"
- xlink:href="http://docs.oracle.com/cd/E23824_01/html/819-0690/index.html">
+ xlink:href="https://docs.oracle.com/cd/E23824_01/html/819-0690/index.html">
Linker and Libraries Guide (document 819-0690)
</link>
</title>
@@ -1150,7 +1150,7 @@ gcc test.c -g -O2 -L. -lone -ltwo /usr/lib/libstdc++.so.5 /usr/lib/libstdc++.so.
<biblioentry>
<title>
<link xmlns:xlink="http://www.w3.org/1999/xlink"
- xlink:href="http://docs.oracle.com/cd/E19422-01/819-3689/">
+ xlink:href="https://docs.oracle.com/cd/E19422-01/819-3689/">
Sun Studio 11: C++ Migration Guide (document 819-3689)
</link>
</title>
diff --git a/libstdc++-v3/include/bits/locale_conv.h b/libstdc++-v3/include/bits/locale_conv.h
index 47c8dee53cb..b8f77dcaca9 100644
--- a/libstdc++-v3/include/bits/locale_conv.h
+++ b/libstdc++-v3/include/bits/locale_conv.h
@@ -431,7 +431,7 @@ _GLIBCXX_END_NAMESPACE_CXX11
streamsize __nbytes = sizeof(_M_get_buf) - _M_unconv;
__nbytes = std::min(__nbytes, _M_buf->in_avail());
if (__nbytes < 1)
- __nbytes == 1;
+ __nbytes = 1;
__nbytes = _M_buf->sgetn(_M_get_buf + _M_unconv, __nbytes);
if (__nbytes < 1)
return false;
diff --git a/libstdc++-v3/include/bits/range_access.h b/libstdc++-v3/include/bits/range_access.h
index 3987c2addf1..2a037ad8082 100644
--- a/libstdc++-v3/include/bits/range_access.h
+++ b/libstdc++-v3/include/bits/range_access.h
@@ -230,7 +230,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
#endif // C++14
-#if __cplusplus > 201402L
+#if __cplusplus >= 201703L
#define __cpp_lib_nonmember_container_access 201411
/**
@@ -239,7 +239,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
*/
template <typename _Container>
constexpr auto
- size(const _Container& __cont) -> decltype(__cont.size())
+ size(const _Container& __cont) noexcept(noexcept(__cont.size()))
+ -> decltype(__cont.size())
{ return __cont.size(); }
/**
@@ -257,7 +258,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
*/
template <typename _Container>
constexpr auto
- empty(const _Container& __cont) -> decltype(__cont.empty())
+ empty(const _Container& __cont) noexcept(noexcept(__cont.empty()))
+ -> decltype(__cont.empty())
{ return __cont.empty(); }
/**
@@ -284,7 +286,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
*/
template <typename _Container>
constexpr auto
- data(_Container& __cont) -> decltype(__cont.data())
+ data(_Container& __cont) noexcept(noexcept(__cont.data()))
+ -> decltype(__cont.data())
{ return __cont.data(); }
/**
@@ -293,7 +296,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
*/
template <typename _Container>
constexpr auto
- data(const _Container& __cont) -> decltype(__cont.data())
+ data(const _Container& __cont) noexcept(noexcept(__cont.data()))
+ -> decltype(__cont.data())
{ return __cont.data(); }
/**
diff --git a/libstdc++-v3/include/experimental/numeric b/libstdc++-v3/include/experimental/numeric
index c8597fce06d..f037d8e3b3d 100644
--- a/libstdc++-v3/include/experimental/numeric
+++ b/libstdc++-v3/include/experimental/numeric
@@ -55,10 +55,12 @@ inline namespace fundamentals_v2
constexpr common_type_t<_Mn, _Nn>
gcd(_Mn __m, _Nn __n)
{
- static_assert(is_integral<_Mn>::value, "gcd arguments are integers");
- static_assert(is_integral<_Nn>::value, "gcd arguments are integers");
- static_assert(!is_same<_Mn, bool>::value, "gcd arguments are not bools");
- static_assert(!is_same<_Nn, bool>::value, "gcd arguments are not bools");
+ static_assert(is_integral_v<_Mn>, "gcd arguments are integers");
+ static_assert(is_integral_v<_Nn>, "gcd arguments are integers");
+ static_assert(!is_same_v<remove_cv_t<_Mn>, bool>,
+ "gcd arguments are not bools");
+ static_assert(!is_same_v<remove_cv_t<_Nn>, bool>,
+ "gcd arguments are not bools");
return std::__detail::__gcd(__m, __n);
}
@@ -67,10 +69,12 @@ inline namespace fundamentals_v2
constexpr common_type_t<_Mn, _Nn>
lcm(_Mn __m, _Nn __n)
{
- static_assert(is_integral<_Mn>::value, "lcm arguments are integers");
- static_assert(is_integral<_Nn>::value, "lcm arguments are integers");
- static_assert(!is_same<_Mn, bool>::value, "lcm arguments are not bools");
- static_assert(!is_same<_Nn, bool>::value, "lcm arguments are not bools");
+ static_assert(is_integral_v<_Mn>, "lcm arguments are integers");
+ static_assert(is_integral_v<_Nn>, "lcm arguments are integers");
+ static_assert(!is_same_v<remove_cv_t<_Mn>, bool>,
+ "lcm arguments are not bools");
+ static_assert(!is_same_v<remove_cv_t<_Nn>, bool>,
+ "lcm arguments are not bools");
return std::__detail::__lcm(__m, __n);
}
} // namespace fundamentals_v2
diff --git a/libstdc++-v3/include/std/fstream b/libstdc++-v3/include/std/fstream
index a3324c004d7..26176afccd0 100644
--- a/libstdc++-v3/include/std/fstream
+++ b/libstdc++-v3/include/std/fstream
@@ -663,7 +663,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
template<typename _Path>
auto
open(const _Path& __s, ios_base::openmode __mode = ios_base::in)
- -> decltype(_M_filebuf.open(__s, __mode))
+ -> decltype((void)_M_filebuf.open(__s, __mode))
{ open(__s.c_str(), __mode); }
#endif // C++17
#endif // C++11
@@ -891,7 +891,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
template<typename _Path>
auto
open(const _Path& __s, ios_base::openmode __mode = ios_base::out)
- -> decltype(_M_filebuf.open(__s, __mode))
+ -> decltype((void)_M_filebuf.open(__s, __mode))
{ open(__s.c_str(), __mode); }
#endif // C++17
#endif // C++11
@@ -1118,7 +1118,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
auto
open(const _Path& __s,
ios_base::openmode __mode = ios_base::in | ios_base::out)
- -> decltype(_M_filebuf.open(__s, __mode))
+ -> decltype((void)_M_filebuf.open(__s, __mode))
{ open(__s.c_str(), __mode); }
#endif // C++17
#endif // C++11
diff --git a/libstdc++-v3/include/std/numeric b/libstdc++-v3/include/std/numeric
index 2b804199c7e..8864c953233 100644
--- a/libstdc++-v3/include/std/numeric
+++ b/libstdc++-v3/include/std/numeric
@@ -131,10 +131,12 @@ namespace __detail
constexpr common_type_t<_Mn, _Nn>
gcd(_Mn __m, _Nn __n)
{
- static_assert(is_integral<_Mn>::value, "gcd arguments are integers");
- static_assert(is_integral<_Nn>::value, "gcd arguments are integers");
- static_assert(!is_same<_Mn, bool>::value, "gcd arguments are not bools");
- static_assert(!is_same<_Nn, bool>::value, "gcd arguments are not bools");
+ static_assert(is_integral_v<_Mn>, "gcd arguments are integers");
+ static_assert(is_integral_v<_Nn>, "gcd arguments are integers");
+ static_assert(!is_same_v<remove_cv_t<_Mn>, bool>,
+ "gcd arguments are not bools");
+ static_assert(!is_same_v<remove_cv_t<_Nn>, bool>,
+ "gcd arguments are not bools");
return __detail::__gcd(__m, __n);
}
@@ -143,10 +145,12 @@ namespace __detail
constexpr common_type_t<_Mn, _Nn>
lcm(_Mn __m, _Nn __n)
{
- static_assert(is_integral<_Mn>::value, "lcm arguments are integers");
- static_assert(is_integral<_Nn>::value, "lcm arguments are integers");
- static_assert(!is_same<_Mn, bool>::value, "lcm arguments are not bools");
- static_assert(!is_same<_Nn, bool>::value, "lcm arguments are not bools");
+ static_assert(is_integral_v<_Mn>, "lcm arguments are integers");
+ static_assert(is_integral_v<_Nn>, "lcm arguments are integers");
+ static_assert(!is_same_v<remove_cv_t<_Mn>, bool>,
+ "lcm arguments are not bools");
+ static_assert(!is_same_v<remove_cv_t<_Nn>, bool>,
+ "lcm arguments are not bools");
return __detail::__lcm(__m, __n);
}
diff --git a/libstdc++-v3/src/filesystem/ops-common.h b/libstdc++-v3/src/filesystem/ops-common.h
index 12c12b08f8c..f96a999264f 100644
--- a/libstdc++-v3/src/filesystem/ops-common.h
+++ b/libstdc++-v3/src/filesystem/ops-common.h
@@ -113,9 +113,11 @@ _GLIBCXX_BEGIN_NAMESPACE_FILESYSTEM
return file_type::fifo;
else if (S_ISLNK(st.st_mode))
return file_type::symlink;
+#ifdef S_ISSOCK // not present until POSIX:2001
else if (S_ISSOCK(st.st_mode))
return file_type::socket;
#endif
+#endif
return file_type::unknown;
}
diff --git a/libstdc++-v3/testsuite/20_util/optional/cons/deduction.cc b/libstdc++-v3/testsuite/20_util/optional/cons/deduction.cc
index e15db0bc1ec..77d4ff82fc8 100644
--- a/libstdc++-v3/testsuite/20_util/optional/cons/deduction.cc
+++ b/libstdc++-v3/testsuite/20_util/optional/cons/deduction.cc
@@ -24,8 +24,8 @@
struct MoveOnly
{
MoveOnly() = default;
- MoveOnly(MoveOnly&&) {}
- MoveOnly& operator=(MoveOnly&&) {}
+ MoveOnly(MoveOnly&&);
+ MoveOnly& operator=(MoveOnly&&);
};
int main()
diff --git a/libstdc++-v3/testsuite/20_util/pair/cons/deduction.cc b/libstdc++-v3/testsuite/20_util/pair/cons/deduction.cc
index 3831cf2316f..5071b13655d 100644
--- a/libstdc++-v3/testsuite/20_util/pair/cons/deduction.cc
+++ b/libstdc++-v3/testsuite/20_util/pair/cons/deduction.cc
@@ -30,8 +30,8 @@ template<typename T, typename U>
struct MoveOnly
{
MoveOnly() = default;
- MoveOnly(MoveOnly&&) {}
- MoveOnly& operator=(MoveOnly&&) {}
+ MoveOnly(MoveOnly&&);
+ MoveOnly& operator=(MoveOnly&&);
};
void
diff --git a/libstdc++-v3/testsuite/20_util/pair/traits.cc b/libstdc++-v3/testsuite/20_util/pair/traits.cc
index 25ba7ffa125..69481544c75 100644
--- a/libstdc++-v3/testsuite/20_util/pair/traits.cc
+++ b/libstdc++-v3/testsuite/20_util/pair/traits.cc
@@ -31,8 +31,8 @@ struct Poison
struct ThrowingCopy
{
- ThrowingCopy(const ThrowingCopy&) {}
- ThrowingCopy& operator=(const ThrowingCopy&) {}
+ ThrowingCopy(const ThrowingCopy&);
+ ThrowingCopy& operator=(const ThrowingCopy&);
};
int main()
diff --git a/libstdc++-v3/testsuite/20_util/tuple/cons/deduction.cc b/libstdc++-v3/testsuite/20_util/tuple/cons/deduction.cc
index 0804d5584e3..2ca97098ee3 100644
--- a/libstdc++-v3/testsuite/20_util/tuple/cons/deduction.cc
+++ b/libstdc++-v3/testsuite/20_util/tuple/cons/deduction.cc
@@ -30,8 +30,8 @@ template<typename T, typename U>
struct MoveOnly
{
MoveOnly() = default;
- MoveOnly(MoveOnly&&) {}
- MoveOnly& operator=(MoveOnly&&) {}
+ MoveOnly(MoveOnly&&);
+ MoveOnly& operator=(MoveOnly&&);
};
void
diff --git a/libstdc++-v3/testsuite/20_util/variant/compile.cc b/libstdc++-v3/testsuite/20_util/variant/compile.cc
index e5f7538ba42..2aeec7d2696 100644
--- a/libstdc++-v3/testsuite/20_util/variant/compile.cc
+++ b/libstdc++-v3/testsuite/20_util/variant/compile.cc
@@ -481,37 +481,37 @@ void test_triviality()
static_assert(MA_VAL == is_trivially_move_assignable_v<variant<A>>, ""); \
}
TEST_TEMPLATE(=default, =default, =default, =default, =default, true, true, true, true)
- TEST_TEMPLATE(=default, =default, =default, =default, {}, true, true, true, false)
- TEST_TEMPLATE(=default, =default, =default, {}, =default, true, true, false, true)
- TEST_TEMPLATE(=default, =default, =default, {}, {}, true, true, false, false)
- TEST_TEMPLATE(=default, =default, {}, =default, =default, true, false, true, true)
- TEST_TEMPLATE(=default, =default, {}, =default, {}, true, false, true, false)
- TEST_TEMPLATE(=default, =default, {}, {}, =default, true, false, false, true)
- TEST_TEMPLATE(=default, =default, {}, {}, {}, true, false, false, false)
- TEST_TEMPLATE(=default, {}, =default, =default, =default, false, true, true, true)
- TEST_TEMPLATE(=default, {}, =default, =default, {}, false, true, true, false)
- TEST_TEMPLATE(=default, {}, =default, {}, =default, false, true, false, true)
- TEST_TEMPLATE(=default, {}, =default, {}, {}, false, true, false, false)
- TEST_TEMPLATE(=default, {}, {}, =default, =default, false, false, true, true)
- TEST_TEMPLATE(=default, {}, {}, =default, {}, false, false, true, false)
- TEST_TEMPLATE(=default, {}, {}, {}, =default, false, false, false, true)
- TEST_TEMPLATE(=default, {}, {}, {}, {}, false, false, false, false)
- TEST_TEMPLATE( {}, =default, =default, =default, =default, false, false, false, false)
- TEST_TEMPLATE( {}, =default, =default, =default, {}, false, false, false, false)
- TEST_TEMPLATE( {}, =default, =default, {}, =default, false, false, false, false)
- TEST_TEMPLATE( {}, =default, =default, {}, {}, false, false, false, false)
- TEST_TEMPLATE( {}, =default, {}, =default, =default, false, false, false, false)
- TEST_TEMPLATE( {}, =default, {}, =default, {}, false, false, false, false)
- TEST_TEMPLATE( {}, =default, {}, {}, =default, false, false, false, false)
- TEST_TEMPLATE( {}, =default, {}, {}, {}, false, false, false, false)
- TEST_TEMPLATE( {}, {}, =default, =default, =default, false, false, false, false)
- TEST_TEMPLATE( {}, {}, =default, =default, {}, false, false, false, false)
- TEST_TEMPLATE( {}, {}, =default, {}, =default, false, false, false, false)
- TEST_TEMPLATE( {}, {}, =default, {}, {}, false, false, false, false)
- TEST_TEMPLATE( {}, {}, {}, =default, =default, false, false, false, false)
- TEST_TEMPLATE( {}, {}, {}, =default, {}, false, false, false, false)
- TEST_TEMPLATE( {}, {}, {}, {}, =default, false, false, false, false)
- TEST_TEMPLATE( {}, {}, {}, {}, {}, false, false, false, false)
+ TEST_TEMPLATE(=default, =default, =default, =default, , true, true, true, false)
+ TEST_TEMPLATE(=default, =default, =default, , =default, true, true, false, true)
+ TEST_TEMPLATE(=default, =default, =default, , , true, true, false, false)
+ TEST_TEMPLATE(=default, =default, , =default, =default, true, false, true, true)
+ TEST_TEMPLATE(=default, =default, , =default, , true, false, true, false)
+ TEST_TEMPLATE(=default, =default, , , =default, true, false, false, true)
+ TEST_TEMPLATE(=default, =default, , , , true, false, false, false)
+ TEST_TEMPLATE(=default, , =default, =default, =default, false, true, true, true)
+ TEST_TEMPLATE(=default, , =default, =default, , false, true, true, false)
+ TEST_TEMPLATE(=default, , =default, , =default, false, true, false, true)
+ TEST_TEMPLATE(=default, , =default, , , false, true, false, false)
+ TEST_TEMPLATE(=default, , , =default, =default, false, false, true, true)
+ TEST_TEMPLATE(=default, , , =default, , false, false, true, false)
+ TEST_TEMPLATE(=default, , , , =default, false, false, false, true)
+ TEST_TEMPLATE(=default, , , , , false, false, false, false)
+ TEST_TEMPLATE( , =default, =default, =default, =default, false, false, false, false)
+ TEST_TEMPLATE( , =default, =default, =default, , false, false, false, false)
+ TEST_TEMPLATE( , =default, =default, , =default, false, false, false, false)
+ TEST_TEMPLATE( , =default, =default, , , false, false, false, false)
+ TEST_TEMPLATE( , =default, , =default, =default, false, false, false, false)
+ TEST_TEMPLATE( , =default, , =default, , false, false, false, false)
+ TEST_TEMPLATE( , =default, , , =default, false, false, false, false)
+ TEST_TEMPLATE( , =default, , , , false, false, false, false)
+ TEST_TEMPLATE( , , =default, =default, =default, false, false, false, false)
+ TEST_TEMPLATE( , , =default, =default, , false, false, false, false)
+ TEST_TEMPLATE( , , =default, , =default, false, false, false, false)
+ TEST_TEMPLATE( , , =default, , , false, false, false, false)
+ TEST_TEMPLATE( , , , =default, =default, false, false, false, false)
+ TEST_TEMPLATE( , , , =default, , false, false, false, false)
+ TEST_TEMPLATE( , , , , =default, false, false, false, false)
+ TEST_TEMPLATE( , , , , , false, false, false, false)
#undef TEST_TEMPLATE
#define TEST_TEMPLATE(CC, MC, CA, MA) \
@@ -529,21 +529,21 @@ void test_triviality()
static_assert(!is_trivially_move_assignable_v<variant<AllDeleted, A>>, ""); \
}
TEST_TEMPLATE(=default, =default, =default, =default)
- TEST_TEMPLATE(=default, =default, =default, {})
- TEST_TEMPLATE(=default, =default, {}, =default)
- TEST_TEMPLATE(=default, =default, {}, {})
- TEST_TEMPLATE(=default, {}, =default, =default)
- TEST_TEMPLATE(=default, {}, =default, {})
- TEST_TEMPLATE(=default, {}, {}, =default)
- TEST_TEMPLATE(=default, {}, {}, {})
- TEST_TEMPLATE( {}, =default, =default, =default)
- TEST_TEMPLATE( {}, =default, =default, {})
- TEST_TEMPLATE( {}, =default, {}, =default)
- TEST_TEMPLATE( {}, =default, {}, {})
- TEST_TEMPLATE( {}, {}, =default, =default)
- TEST_TEMPLATE( {}, {}, =default, {})
- TEST_TEMPLATE( {}, {}, {}, =default)
- TEST_TEMPLATE( {}, {}, {}, {})
+ TEST_TEMPLATE(=default, =default, =default, )
+ TEST_TEMPLATE(=default, =default, , =default)
+ TEST_TEMPLATE(=default, =default, , )
+ TEST_TEMPLATE(=default, , =default, =default)
+ TEST_TEMPLATE(=default, , =default, )
+ TEST_TEMPLATE(=default, , , =default)
+ TEST_TEMPLATE(=default, , , )
+ TEST_TEMPLATE( , =default, =default, =default)
+ TEST_TEMPLATE( , =default, =default, )
+ TEST_TEMPLATE( , =default, , =default)
+ TEST_TEMPLATE( , =default, , )
+ TEST_TEMPLATE( , , =default, =default)
+ TEST_TEMPLATE( , , =default, )
+ TEST_TEMPLATE( , , , =default)
+ TEST_TEMPLATE( , , , )
#undef TEST_TEMPLATE
static_assert(is_trivially_copy_constructible_v<variant<DefaultNoexcept, int, char, float, double>>, "");
diff --git a/libstdc++-v3/testsuite/22_locale/conversions/buffer/3.cc b/libstdc++-v3/testsuite/22_locale/conversions/buffer/3.cc
new file mode 100644
index 00000000000..99a679dc124
--- /dev/null
+++ b/libstdc++-v3/testsuite/22_locale/conversions/buffer/3.cc
@@ -0,0 +1,58 @@
+// Copyright (C) 2017 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+// { dg-do run { target c++11 } }
+
+#include <locale>
+#include <streambuf>
+#include <testsuite_hooks.h>
+
+struct streambuf : std::streambuf
+{
+ int_type underflow() override
+ {
+ if (c != '\0')
+ {
+ this->setg(&c, &c, &c + 1);
+ return *this->gptr();
+ }
+ c = '\0';
+ return traits_type::eof();
+ }
+
+private:
+ char c = 'a';
+};
+
+struct codecvt : std::codecvt<wchar_t, char, std::mbstate_t> { };
+
+void
+test01()
+{
+ // https://gcc.gnu.org/ml/libstdc++/2017-11/msg00022.html
+ streambuf sb;
+ std::wbuffer_convert<codecvt> conv(&sb);
+ VERIFY( sb.in_avail() == 0 );
+ wchar_t c = conv.sgetc();
+ VERIFY( c == L'a' );
+}
+
+int
+main()
+{
+ test01();
+}
diff --git a/libstdc++-v3/testsuite/23_containers/map/modifiers/try_emplace/1.cc b/libstdc++-v3/testsuite/23_containers/map/modifiers/try_emplace/1.cc
index e4c883c2a51..22aa9d4234e 100644
--- a/libstdc++-v3/testsuite/23_containers/map/modifiers/try_emplace/1.cc
+++ b/libstdc++-v3/testsuite/23_containers/map/modifiers/try_emplace/1.cc
@@ -39,6 +39,7 @@ struct Val
{
val = other.val;
other.moved_from_assign = true;
+ return *this;
}
};
diff --git a/libstdc++-v3/testsuite/23_containers/unordered_map/modifiers/try_emplace.cc b/libstdc++-v3/testsuite/23_containers/unordered_map/modifiers/try_emplace.cc
index f123850ecf1..b07d44ba9bb 100644
--- a/libstdc++-v3/testsuite/23_containers/unordered_map/modifiers/try_emplace.cc
+++ b/libstdc++-v3/testsuite/23_containers/unordered_map/modifiers/try_emplace.cc
@@ -39,6 +39,7 @@ struct Val
{
val = other.val;
other.moved_from_assign = true;
+ return *this;
}
};
diff --git a/libstdc++-v3/testsuite/26_numerics/gcd/gcd_neg.cc b/libstdc++-v3/testsuite/26_numerics/gcd/gcd_neg.cc
index 30524a1b06c..63a8afa526f 100644
--- a/libstdc++-v3/testsuite/26_numerics/gcd/gcd_neg.cc
+++ b/libstdc++-v3/testsuite/26_numerics/gcd/gcd_neg.cc
@@ -26,14 +26,29 @@ test01()
std::gcd(true, 1); // { dg-error "from here" }
std::gcd(1, true); // { dg-error "from here" }
std::gcd(true, true); // { dg-error "from here" }
+ std::gcd<const bool, int>(true, 1); // { dg-error "from here" }
+ std::gcd<int, const bool>(1, true); // { dg-error "from here" }
+ std::gcd<const bool, const bool>(true, true); // { dg-error "from here" }
+ std::gcd<const bool&, int>(true, 1); // { dg-error "from here" }
+ std::gcd<int, const bool&>(1, true); // { dg-error "from here" }
+ std::gcd<const bool&, const bool&>(true, true); // { dg-error "from here" }
+ std::gcd<const volatile bool, int>(true, 1); // { dg-error "from here" }
+ std::gcd<int, const volatile bool>(1, true); // { dg-error "from here" }
+ std::gcd<const volatile bool,
+ const volatile bool>(true, true); // { dg-error "from here" }
+ std::gcd<volatile bool, int>(true, 1); // { dg-error "from here" }
+ std::gcd<int, volatile bool>(1, true); // { dg-error "from here" }
+ std::gcd<volatile bool,
+ volatile bool>(true, true); // { dg-error "from here" }
std::gcd(0.1, 1); // { dg-error "from here" }
std::gcd(1, 0.1); // { dg-error "from here" }
std::gcd(0.1, 0.1); // { dg-error "from here" }
+ std::gcd<const int&, const int&>(0.1, 0.1); // { dg-error "from here" }
}
// { dg-error "integers" "" { target *-*-* } 134 }
// { dg-error "integers" "" { target *-*-* } 135 }
// { dg-error "not bools" "" { target *-*-* } 136 }
-// { dg-error "not bools" "" { target *-*-* } 137 }
+// { dg-error "not bools" "" { target *-*-* } 138 }
// { dg-prune-output "deleted function" }
// { dg-prune-output "invalid operands" }
diff --git a/libstdc++-v3/testsuite/26_numerics/lcm/lcm_neg.cc b/libstdc++-v3/testsuite/26_numerics/lcm/lcm_neg.cc
index e16e6ae1ee9..d25a92df74d 100644
--- a/libstdc++-v3/testsuite/26_numerics/lcm/lcm_neg.cc
+++ b/libstdc++-v3/testsuite/26_numerics/lcm/lcm_neg.cc
@@ -26,14 +26,29 @@ test01()
std::lcm(true, 1); // { dg-error "from here" }
std::lcm(1, true); // { dg-error "from here" }
std::lcm(true, true); // { dg-error "from here" }
+ std::lcm<const bool, int>(true, 1); // { dg-error "from here" }
+ std::lcm<int, const bool>(1, true); // { dg-error "from here" }
+ std::lcm<const bool, const bool>(true, true); // { dg-error "from here" }
+ std::lcm<const bool&, int>(true, 1); // { dg-error "from here" }
+ std::lcm<int, const bool&>(1, true); // { dg-error "from here" }
+ std::lcm<const bool&, const bool&>(true, true); // { dg-error "from here" }
+ std::lcm<const volatile bool, int>(true, 1); // { dg-error "from here" }
+ std::lcm<int, const volatile bool>(1, true); // { dg-error "from here" }
+ std::lcm<const volatile bool,
+ const volatile bool>(true, true); // { dg-error "from here" }
+ std::lcm<volatile bool, int>(true, 1); // { dg-error "from here" }
+ std::lcm<int, volatile bool>(1, true); // { dg-error "from here" }
+ std::lcm<volatile bool,
+ volatile bool>(true, true); // { dg-error "from here" }
std::lcm(0.1, 1); // { dg-error "from here" }
std::lcm(1, 0.1); // { dg-error "from here" }
std::lcm(0.1, 0.1); // { dg-error "from here" }
+ std::lcm<const int&, const int&>(0.1, 0.1); // { dg-error "from here" }
}
-// { dg-error "integers" "" { target *-*-* } 146 }
-// { dg-error "integers" "" { target *-*-* } 147 }
-// { dg-error "not bools" "" { target *-*-* } 148 }
-// { dg-error "not bools" "" { target *-*-* } 149 }
+// { dg-error "integers" "" { target *-*-* } 148 }
+// { dg-error "integers" "" { target *-*-* } 149 }
+// { dg-error "not bools" "" { target *-*-* } 150 }
+// { dg-error "not bools" "" { target *-*-* } 152 }
// { dg-prune-output "deleted function" }
// { dg-prune-output "invalid operands" }
diff --git a/libstdc++-v3/testsuite/27_io/basic_fstream/cons/char/path.cc b/libstdc++-v3/testsuite/27_io/basic_fstream/cons/char/path.cc
index 4442c28c56b..51337ebd909 100644
--- a/libstdc++-v3/testsuite/27_io/basic_fstream/cons/char/path.cc
+++ b/libstdc++-v3/testsuite/27_io/basic_fstream/cons/char/path.cc
@@ -45,4 +45,5 @@ main()
{
test01();
test02();
+ return 0;
}
diff --git a/libstdc++-v3/testsuite/27_io/basic_ifstream/cons/char/path.cc b/libstdc++-v3/testsuite/27_io/basic_ifstream/cons/char/path.cc
index 24286f5eeaf..a0de4ba55b2 100644
--- a/libstdc++-v3/testsuite/27_io/basic_ifstream/cons/char/path.cc
+++ b/libstdc++-v3/testsuite/27_io/basic_ifstream/cons/char/path.cc
@@ -45,4 +45,5 @@ main()
{
test01();
test02();
+ return 0;
}
diff --git a/libstdc++-v3/testsuite/27_io/basic_ofstream/open/char/path.cc b/libstdc++-v3/testsuite/27_io/basic_ofstream/open/char/path.cc
index a3fc0c7ff68..38078c97ef7 100644
--- a/libstdc++-v3/testsuite/27_io/basic_ofstream/open/char/path.cc
+++ b/libstdc++-v3/testsuite/27_io/basic_ofstream/open/char/path.cc
@@ -47,4 +47,5 @@ main()
{
test01();
test02();
+ return 0;
}
diff --git a/libstdc++-v3/testsuite/27_io/filesystem/iterators/directory_iterator.cc b/libstdc++-v3/testsuite/27_io/filesystem/iterators/directory_iterator.cc
index c3e6f01670a..9cdbd7aafa0 100644
--- a/libstdc++-v3/testsuite/27_io/filesystem/iterators/directory_iterator.cc
+++ b/libstdc++-v3/testsuite/27_io/filesystem/iterators/directory_iterator.cc
@@ -61,7 +61,6 @@ test01()
ec = bad_ec;
permissions(p, fs::perms::none, ec);
VERIFY( !ec );
- ec = bad_ec;
iter = fs::directory_iterator(p, ec);
VERIFY( ec );
VERIFY( iter == end(iter) );
diff --git a/libstdc++-v3/testsuite/27_io/filesystem/iterators/recursive_directory_iterator.cc b/libstdc++-v3/testsuite/27_io/filesystem/iterators/recursive_directory_iterator.cc
index 1ef450fc907..d41a1506d3b 100644
--- a/libstdc++-v3/testsuite/27_io/filesystem/iterators/recursive_directory_iterator.cc
+++ b/libstdc++-v3/testsuite/27_io/filesystem/iterators/recursive_directory_iterator.cc
@@ -87,6 +87,7 @@ test01()
VERIFY( iter != end(iter) );
VERIFY( iter->path() == p/"d1" );
++iter; // should recurse into d1
+ VERIFY( iter != end(iter) );
VERIFY( iter->path() == p/"d1/d2" );
iter.increment(ec); // should fail to recurse into p/d1/d2
VERIFY( ec );
@@ -99,6 +100,7 @@ test01()
VERIFY( iter != end(iter) );
VERIFY( iter->path() == p/"d1" );
++iter; // should recurse into d1
+ VERIFY( iter != end(iter) );
VERIFY( iter->path() == p/"d1/d2" );
ec = bad_ec;
iter.increment(ec); // should fail to recurse into p/d1/d2, so skip it
diff --git a/libstdc++-v3/testsuite/experimental/filesystem/iterators/recursive_directory_iterator.cc b/libstdc++-v3/testsuite/experimental/filesystem/iterators/recursive_directory_iterator.cc
index 50cc7d45de8..584cfeed839 100644
--- a/libstdc++-v3/testsuite/experimental/filesystem/iterators/recursive_directory_iterator.cc
+++ b/libstdc++-v3/testsuite/experimental/filesystem/iterators/recursive_directory_iterator.cc
@@ -56,6 +56,7 @@ test01()
VERIFY( iter != end(iter) );
VERIFY( iter->path() == p/"d1" );
++iter;
+ VERIFY( iter != end(iter) );
VERIFY( iter->path() == p/"d1/d2" );
++iter;
VERIFY( iter == end(iter) );
@@ -88,6 +89,7 @@ test01()
VERIFY( iter != end(iter) );
VERIFY( iter->path() == p/"d1" );
++iter; // should recurse into d1
+ VERIFY( iter != end(iter) );
VERIFY( iter->path() == p/"d1/d2" );
iter.increment(ec); // should fail to recurse into p/d1/d2
VERIFY( ec );
diff --git a/libstdc++-v3/testsuite/libstdc++-prettyprinters/tr1.cc b/libstdc++-v3/testsuite/libstdc++-prettyprinters/tr1.cc
index f4ef32e8230..1c8f8af43d3 100644
--- a/libstdc++-v3/testsuite/libstdc++-prettyprinters/tr1.cc
+++ b/libstdc++-v3/testsuite/libstdc++-prettyprinters/tr1.cc
@@ -1,5 +1,5 @@
// { dg-do run }
-// { dg-options "-g" }
+// { dg-options "-g -O0" }
// Copyright (C) 2013-2017 Free Software Foundation, Inc.
//
diff --git a/libstdc++-v3/testsuite/util/testsuite_tr1.h b/libstdc++-v3/testsuite/util/testsuite_tr1.h
index 377bb8636ed..b1f6c6d2350 100644
--- a/libstdc++-v3/testsuite/util/testsuite_tr1.h
+++ b/libstdc++-v3/testsuite/util/testsuite_tr1.h
@@ -162,7 +162,7 @@ namespace __gnu_test
#if __cplusplus >= 201103L
struct ThrowMoveConsClass
{
- ThrowMoveConsClass(ThrowMoveConsClass&&) THROW(int);
+ ThrowMoveConsClass(ThrowMoveConsClass&&) noexcept(false);
};
struct NoexceptExplicitClass